# File: models-master/official/projects/fffner/utils/convert_checkpoint_huggingface.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts pre-trained pytorch checkpoint into a tf encoder checkpoint."""
import os
from absl import app
import numpy as np
import tensorflow as tf
import transformers
from official.modeling import tf_utils
from official.projects.fffner.fffner import FFFNerEncoderConfig
from official.projects.fffner.fffner_encoder import FFFNerEncoder
def _get_huggingface_bert_model_and_config(huggingface_model_name_or_path):
model = transformers.AutoModel.from_pretrained(huggingface_model_name_or_path)
return {n: p.data.numpy() for n, p in model.named_parameters()}, model.config
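# A quick way to inspect the parameter names this converter keys off of (a
# sketch, not executed as part of the conversion):
#
#   params, config = _get_huggingface_bert_model_and_config("bert-base-uncased")
#   for name in list(params)[:3]:
#     print(name, params[name].shape)
#   # e.g. "embeddings.word_embeddings.weight" with shape (30522, 768)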
def _create_fffner_model(huggingface_bert_config):
"""Creates a Longformer model."""
encoder_cfg = FFFNerEncoderConfig()
encoder = FFFNerEncoder(
vocab_size=huggingface_bert_config.vocab_size,
hidden_size=huggingface_bert_config.hidden_size,
num_layers=huggingface_bert_config.num_hidden_layers,
num_attention_heads=huggingface_bert_config.num_attention_heads,
inner_dim=huggingface_bert_config.intermediate_size,
inner_activation=tf_utils.get_activation(
huggingface_bert_config.hidden_act),
output_dropout=huggingface_bert_config.hidden_dropout_prob,
attention_dropout=huggingface_bert_config.attention_probs_dropout_prob,
max_sequence_length=huggingface_bert_config.max_position_embeddings,
type_vocab_size=huggingface_bert_config.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=huggingface_bert_config.hidden_size,
norm_first=encoder_cfg.norm_first)
return encoder
# pylint: disable=protected-access
def convert(encoder, bert_model):
"""Convert a Huggingface transformers bert encoder to the one in the codebase.
"""
num_layers = encoder._config["num_layers"]
num_attention_heads = encoder._config["num_attention_heads"]
hidden_size = encoder._config["hidden_size"]
head_size = hidden_size // num_attention_heads
assert head_size * num_attention_heads == hidden_size
encoder._embedding_layer.set_weights(
[bert_model["embeddings.word_embeddings.weight"]])
encoder._embedding_norm_layer.set_weights([
bert_model["embeddings.LayerNorm.weight"],
bert_model["embeddings.LayerNorm.bias"]
])
encoder._type_embedding_layer.set_weights(
[bert_model["embeddings.token_type_embeddings.weight"]])
encoder._position_embedding_layer.set_weights(
[bert_model["embeddings.position_embeddings.weight"]])
for layer_num in range(num_layers):
encoder._transformer_layers[
layer_num]._attention_layer._key_dense.set_weights([
bert_model[f"encoder.layer.{layer_num}.attention.self.key.weight"].T
.reshape((hidden_size, num_attention_heads, head_size)),
bert_model[f"encoder.layer.{layer_num}.attention.self.key.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._query_dense.set_weights([
bert_model[f"encoder.layer.{layer_num}.attention.self.query.weight"]
.T.reshape((hidden_size, num_attention_heads, head_size)),
bert_model[f"encoder.layer.{layer_num}.attention.self.query.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._value_dense.set_weights([
bert_model[f"encoder.layer.{layer_num}.attention.self.value.weight"]
.T.reshape((hidden_size, num_attention_heads, head_size)),
bert_model[f"encoder.layer.{layer_num}.attention.self.value.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._output_dense.set_weights([
bert_model[
f"encoder.layer.{layer_num}.attention.output.dense.weight"].T
.reshape((num_attention_heads, head_size, hidden_size)),
bert_model[f"encoder.layer.{layer_num}.attention.output.dense.bias"]
])
encoder._transformer_layers[layer_num]._attention_layer_norm.set_weights([
bert_model[
f"encoder.layer.{layer_num}.attention.output.LayerNorm.weight"],
bert_model[f"encoder.layer.{layer_num}.attention.output.LayerNorm.bias"]
])
encoder._transformer_layers[layer_num]._intermediate_dense.set_weights([
bert_model[f"encoder.layer.{layer_num}.intermediate.dense.weight"].T,
bert_model[f"encoder.layer.{layer_num}.intermediate.dense.bias"]
])
encoder._transformer_layers[layer_num]._output_dense.set_weights([
bert_model[f"encoder.layer.{layer_num}.output.dense.weight"].T,
bert_model[f"encoder.layer.{layer_num}.output.dense.bias"]
])
encoder._transformer_layers[layer_num]._output_layer_norm.set_weights([
bert_model[f"encoder.layer.{layer_num}.output.LayerNorm.weight"],
bert_model[f"encoder.layer.{layer_num}.output.LayerNorm.bias"]
])
def convert_checkpoint(huggingface_model_name_or_path, output_path):
"""Converts and save the checkpoint."""
output_dir, _ = os.path.split(output_path)
tf.io.gfile.makedirs(output_dir)
huggingface_bert_model, huggingface_bert_config = _get_huggingface_bert_model_and_config(
huggingface_model_name_or_path)
encoder = _create_fffner_model(huggingface_bert_config)
sequence_length = 128
batch_size = 2
word_id_data = np.random.randint(
10, size=(batch_size, sequence_length), dtype=np.int32)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
type_id_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
is_entity_token_pos = np.zeros((batch_size, 1), dtype=np.int32)
entity_type_token_pos = np.ones((batch_size, 1), dtype=np.int32)
inputs = {
"input_word_ids": word_id_data,
"input_mask": mask_data,
"input_type_ids": type_id_data,
"is_entity_token_pos": is_entity_token_pos,
"entity_type_token_pos": entity_type_token_pos,
}
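# Run one forward pass on dummy data so that all encoder variables are
# created before convert() calls set_weights() on them.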
encoder(inputs)
convert(encoder, huggingface_bert_model)
tf.train.Checkpoint(encoder=encoder).write(output_path)
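# A minimal sketch of consuming the converted checkpoint (assumes an encoder
# built with the same configuration; read() is the counterpart of the write()
# call above):
#
#   encoder = _create_fffner_model(huggingface_bert_config)
#   tf.train.Checkpoint(encoder=encoder).read(output_path).expect_partial()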
def main(_):
convert_checkpoint("bert-base-uncased", "bert-uncased")
if __name__ == "__main__":
app.run(main)

# File: models-master/official/projects/fffner/utils/convert_checkpoint_tensorflow.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts pre-trained encoder into a fffner encoder checkpoint."""
import os
from absl import app
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from official.projects.fffner.fffner import FFFNerEncoderConfig
from official.projects.fffner.fffner_encoder import FFFNerEncoder
def _get_tensorflow_bert_model_and_config(tfhub_handle_encoder):
"""Gets the BERT model name-parameters pairs and configurations."""
bert_model = hub.KerasLayer(tfhub_handle_encoder)
bert_model_weights_name = [w.name for w in bert_model.weights]
bert_model_weights = bert_model.get_weights()
named_parameters = {
n: p for n, p in zip(bert_model_weights_name, bert_model_weights)
}
config = {}
config["num_attention_heads"], _, config["hidden_size"] = named_parameters[
"transformer/layer_0/self_attention/attention_output/kernel:0"].shape
_, config["intermediate_size"] = named_parameters[
"transformer/layer_0/intermediate/kernel:0"].shape
num_hidden_layers = 0
while f"transformer/layer_{num_hidden_layers}/self_attention/query/kernel:0" in named_parameters:
num_hidden_layers += 1
config["num_hidden_layers"] = num_hidden_layers
config["vocab_size"], _ = named_parameters[
"word_embeddings/embeddings:0"].shape
config["max_position_embeddings"], _ = named_parameters[
"position_embedding/embeddings:0"].shape
config["type_vocab_size"], _ = named_parameters[
"type_embeddings/embeddings:0"].shape
return named_parameters, config
def _create_fffner_model(bert_config):
"""Creates a Longformer model."""
encoder_cfg = FFFNerEncoderConfig()
encoder = FFFNerEncoder(
vocab_size=bert_config["vocab_size"],
hidden_size=bert_config["hidden_size"],
num_layers=bert_config["num_hidden_layers"],
num_attention_heads=bert_config["num_attention_heads"],
inner_dim=bert_config["intermediate_size"],
max_sequence_length=bert_config["max_position_embeddings"],
type_vocab_size=bert_config["type_vocab_size"],
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=bert_config["hidden_size"],
norm_first=encoder_cfg.norm_first)
return encoder
# pylint: disable=protected-access
def convert(encoder, bert_model):
"""Convert a Tensorflow transformers bert encoder to the one in the codebase.
"""
num_layers = encoder._config["num_layers"]
num_attention_heads = encoder._config["num_attention_heads"]
hidden_size = encoder._config["hidden_size"]
head_size = hidden_size // num_attention_heads
assert head_size * num_attention_heads == hidden_size
encoder._embedding_layer.set_weights(
[bert_model["word_embeddings/embeddings:0"]])
encoder._embedding_norm_layer.set_weights([
bert_model["embeddings/layer_norm/gamma:0"],
bert_model["embeddings/layer_norm/beta:0"]
])
encoder._type_embedding_layer.set_weights(
[bert_model["type_embeddings/embeddings:0"]])
encoder._position_embedding_layer.set_weights(
[bert_model["position_embedding/embeddings:0"]])
for layer_num in range(num_layers):
encoder._transformer_layers[
layer_num]._attention_layer._key_dense.set_weights([
bert_model[
f"transformer/layer_{layer_num}/self_attention/key/kernel:0"],
bert_model[
f"transformer/layer_{layer_num}/self_attention/key/bias:0"]
])
encoder._transformer_layers[
layer_num]._attention_layer._query_dense.set_weights([
bert_model[
f"transformer/layer_{layer_num}/self_attention/query/kernel:0"],
bert_model[
f"transformer/layer_{layer_num}/self_attention/query/bias:0"]
])
encoder._transformer_layers[
layer_num]._attention_layer._value_dense.set_weights([
bert_model[
f"transformer/layer_{layer_num}/self_attention/value/kernel:0"],
bert_model[
f"transformer/layer_{layer_num}/self_attention/value/bias:0"]
])
encoder._transformer_layers[layer_num]._attention_layer._output_dense.set_weights([
bert_model[
f"transformer/layer_{layer_num}/self_attention/attention_output/kernel:0"],
bert_model[
f"transformer/layer_{layer_num}/self_attention/attention_output/bias:0"]
])
encoder._transformer_layers[layer_num]._attention_layer_norm.set_weights([
bert_model[
f"transformer/layer_{layer_num}/self_attention_layer_norm/gamma:0"],
bert_model[
f"transformer/layer_{layer_num}/self_attention_layer_norm/beta:0"]
])
encoder._transformer_layers[layer_num]._intermediate_dense.set_weights([
bert_model[f"transformer/layer_{layer_num}/intermediate/kernel:0"],
bert_model[f"transformer/layer_{layer_num}/intermediate/bias:0"]
])
encoder._transformer_layers[layer_num]._output_dense.set_weights([
bert_model[f"transformer/layer_{layer_num}/output/kernel:0"],
bert_model[f"transformer/layer_{layer_num}/output/bias:0"]
])
encoder._transformer_layers[layer_num]._output_layer_norm.set_weights([
bert_model[f"transformer/layer_{layer_num}/output_layer_norm/gamma:0"],
bert_model[f"transformer/layer_{layer_num}/output_layer_norm/beta:0"]
])
def convert_checkpoint(output_path, tfhub_handle_encoder):
"""Converts and save the checkpoint."""
output_dir, _ = os.path.split(output_path)
tf.io.gfile.makedirs(output_dir)
bert_model, bert_config = _get_tensorflow_bert_model_and_config(
tfhub_handle_encoder)
encoder = _create_fffner_model(bert_config)
sequence_length = 128
batch_size = 2
word_id_data = np.random.randint(
10, size=(batch_size, sequence_length), dtype=np.int32)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
type_id_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
is_entity_token_pos = np.zeros((batch_size, 1), dtype=np.int32)
entity_type_token_pos = np.ones((batch_size, 1), dtype=np.int32)
inputs = {
"input_word_ids": word_id_data,
"input_mask": mask_data,
"input_type_ids": type_id_data,
"is_entity_token_pos": is_entity_token_pos,
"entity_type_token_pos": entity_type_token_pos,
}
encoder(inputs)
convert(encoder, bert_model)
tf.train.Checkpoint(encoder=encoder).write(output_path)
def main(_):
convert_checkpoint(
output_path="tf-bert-uncased",
tfhub_handle_encoder="https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3"
)
if __name__ == "__main__":
app.run(main)

# File: models-master/official/projects/edgetpu/nlp/mobilebert_edgetpu_trainer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distillation trainer for EdgeTPU-BERT."""
import enum
import os
from typing import Optional
from absl import logging
import orbit
import tensorflow as tf
from official.modeling import optimization
from official.nlp import modeling
from official.nlp.data import data_loader_factory
from official.projects.edgetpu.nlp.configs import params
class DistillationMode(enum.Enum):
"""enum.Enum class for different distillation mode.
A state machine is used to control the training progress. When the training
job starts from the beginning or resumes from a preemption, the state is INIT.
Then depends on the 'self.current_step', the state switches to either
'LAYER_WISE' or 'END2END'.
Options:
UNKNOWN: Unknown status, always raise errors.
INIT: The trainer is initialized or restarted from the preemption.
LAYER_WISE: Layer-wise distillation for each transformer layers.
END2END: End-to-end distillation after layer-wise distillaiton is done.
"""
UNKNOWN = 0
INIT = 1
LAYER_WISE = 2
END2END = 3
def _get_distribution_losses(teacher, student):
"""Returns the beta and gamma distall losses for feature distribution."""
teacher_mean = tf.math.reduce_mean(teacher, axis=-1, keepdims=True)
student_mean = tf.math.reduce_mean(student, axis=-1, keepdims=True)
teacher_var = tf.math.reduce_variance(teacher, axis=-1, keepdims=True)
student_var = tf.math.reduce_variance(student, axis=-1, keepdims=True)
beta_loss = tf.math.squared_difference(student_mean, teacher_mean)
beta_loss = tf.math.reduce_mean(beta_loss, axis=None, keepdims=False)
gamma_loss = tf.math.abs(student_var - teacher_var)
gamma_loss = tf.math.reduce_mean(gamma_loss, axis=None, keepdims=False)
return beta_loss, gamma_loss
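# Illustrative usage (a sketch, not part of the training path): both losses
# reduce to scalars for any [batch, seq_len, hidden] feature pair, e.g.
#
#   t = tf.random.normal([2, 8, 16])
#   s = tf.random.normal([2, 8, 16])
#   beta_loss, gamma_loss = _get_distribution_losses(t, s)  # scalar tensors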
def _get_attention_loss(teacher_score, student_score):
"""Function to calculate attention loss for transformer layers."""
# Note that the definition of KLDivergence here is a little different from
# the original one (tf.keras.losses.KLDivergence). We adopt this approach
# to stay consistent with the TF1 implementation.
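# Concretely, dropping the teacher entropy term turns KL(teacher || student)
# into the cross-entropy computed below; the two differ by a constant with
# respect to the student, so the student gradients are identical.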
teacher_weight = tf.keras.activations.softmax(teacher_score, axis=-1)
student_log_weight = tf.nn.log_softmax(student_score, axis=-1)
kl_divergence = -(teacher_weight * student_log_weight)
kl_divergence = tf.math.reduce_sum(kl_divergence, axis=-1, keepdims=True)
kl_divergence = tf.math.reduce_mean(kl_divergence, axis=None,
keepdims=False)
return kl_divergence
def _build_sub_encoder(encoder, stage_number):
"""Builds a partial model containing the first few transformer layers."""
input_ids = encoder.inputs[0]
input_mask = encoder.inputs[1]
type_ids = encoder.inputs[2]
attention_mask = modeling.layers.SelfAttentionMask()(
inputs=input_ids, to_mask=input_mask)
embedding_output = encoder.embedding_layer(input_ids, type_ids)
layer_output = embedding_output
attention_score = None
for layer_idx in range(stage_number + 1):
layer_output, attention_score = encoder.transformer_layers[layer_idx](
layer_output, attention_mask, return_attention_scores=True)
return tf.keras.Model(
inputs=[input_ids, input_mask, type_ids],
outputs=[layer_output, attention_score])
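# Example of the resulting layer pairing (a sketch): with a 24-layer teacher
# and a 12-layer student, `ratio` is 2, so at stage k the student sub-encoder
# covers layers [0, k] while the teacher sub-encoder covers layers [0, 2 * k].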
class MobileBERTEdgeTPUDistillationTrainer(orbit.StandardTrainer,
orbit.StandardEvaluator):
"""Orbit based distillation training pipeline for MobileBERT-EdgeTPU models."""
def __init__(self,
teacher_model: modeling.models.BertPretrainerV2,
student_model: modeling.models.BertPretrainerV2,
strategy: tf.distribute.Strategy,
experiment_params: params.EdgeTPUBERTCustomParams,
export_ckpt_path: Optional[str] = None,
reuse_teacher_embedding: Optional[bool] = True):
self.teacher_model = teacher_model
self.student_model = student_model
self.strategy = strategy
self.layer_wise_distill_config = experiment_params.layer_wise_distillation
self.e2e_distill_config = experiment_params.end_to_end_distillation
self.optimizer_config = experiment_params.optimizer
self.train_dataset_config = experiment_params.train_datasest
self.eval_dataset_config = experiment_params.eval_dataset
self.word_vocab_size = experiment_params.student_model.encoder.mobilebert.word_vocab_size
self.distill_gt_ratio = experiment_params.end_to_end_distillation.distill_ground_truth_ratio
self.teacher_transformer_layers = experiment_params.teacher_model.encoder.mobilebert.num_blocks
self.student_transformer_layers = experiment_params.student_model.encoder.mobilebert.num_blocks
self.exported_ckpt_path = export_ckpt_path
self.current_step = orbit.utils.create_global_step()
self.current_step.assign(0)
# The stage is incremented each time distillation finishes for one
# transformer layer; `self.stage` is updated in the train_loop_begin()
# function. After the last stage is done, `self.mode` is changed to
# END2END.
self.stage = 0
self.mode = DistillationMode.INIT
# The number of transformer layers in the teacher must be equal to, or a
# multiple of, the number of transformer layers in the student.
if self.teacher_transformer_layers % self.student_transformer_layers != 0:
raise ValueError(
'Number of teacher transformer layers must be a multiple of the '
'number of student transformer layers.')
self.ratio = (self.teacher_transformer_layers //
self.student_transformer_layers)
# Create optimizers for different training stage.
self.layer_wise_optimizer = self.build_optimizer(
self.layer_wise_distill_config)
self.e2e_optimizer = self.build_optimizer(self.e2e_distill_config)
self.current_optimizer = self.layer_wise_optimizer
# A non-trainable layer for feature normalization for transfer loss.
self._layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
beta_initializer='zeros',
gamma_initializer='ones',
trainable=False)
self.build_dataset()
self.build_metrics()
# Create an empty exported checkpoint manager; it will be initialized once
# the training mode enters END2END.
self.exported_ckpt_manager = None
# Reuse the teacher's embedding table in student model.
if reuse_teacher_embedding:
logging.info('Copy word embedding from teacher model to student.')
teacher_encoder = self.teacher_model.encoder_network
student_encoder = self.student_model.encoder_network
embedding_weights = teacher_encoder.embedding_layer.get_weights()
student_encoder.embedding_layer.set_weights(embedding_weights)
orbit.StandardTrainer.__init__(self, self.train_dataset)
orbit.StandardEvaluator.__init__(self, self.eval_dataset)
def build_dataset(self):
"""Creates the training and evaluation dataset."""
# Returns None when the input_path is 'dummy'.
if self.train_dataset_config.input_path == 'dummy':
self.train_dataset = None
self.eval_dataset = None
return
# Non-distributed datasets.
train_dataset = data_loader_factory.get_data_loader(
self.train_dataset_config).load()
eval_dataset = data_loader_factory.get_data_loader(
self.eval_dataset_config).load()
# Distributed datasets.
self.train_dataset = orbit.utils.make_distributed_dataset(
self.strategy, train_dataset)
self.eval_dataset = orbit.utils.make_distributed_dataset(
self.strategy, eval_dataset)
def build_model(self):
"""Creates the fused model from teacher/student model."""
self.teacher_model.trainable = False
if self.mode == DistillationMode.LAYER_WISE:
# Build a model that outputs teacher's and student's transformer outputs.
inputs = self.student_model.encoder_network.inputs
student_sub_encoder = _build_sub_encoder(
encoder=self.student_model.encoder_network,
stage_number=self.stage)
student_output_feature, student_attention_score = student_sub_encoder(
inputs)
teacher_sub_encoder = _build_sub_encoder(
encoder=self.teacher_model.encoder_network,
stage_number=int(self.stage * self.ratio))
teacher_output_feature, teacher_attention_score = teacher_sub_encoder(
inputs)
return tf.keras.Model(
inputs=inputs,
outputs=dict(
student_output_feature=student_output_feature,
student_attention_score=student_attention_score,
teacher_output_feature=teacher_output_feature,
teacher_attention_score=teacher_attention_score))
elif self.mode == DistillationMode.END2END:
# Build a model that outputs teacher's and student's MLM/NSP outputs.
inputs = self.student_model.inputs
student_pretrainer_outputs = self.student_model(inputs)
teacher_pretrainer_outputs = self.teacher_model(inputs)
model = tf.keras.Model(
inputs=inputs,
outputs=dict(
student_pretrainer_outputs=student_pretrainer_outputs,
teacher_pretrainer_outputs=teacher_pretrainer_outputs,
))
# Checkpoint the student encoder which is the goal of distillation.
model.checkpoint_items = self.student_model.checkpoint_items
return model
else:
raise ValueError(f'Unknown distillation mode: {self.mode}.')
def build_optimizer(self, config):
"""Creates optimier for the fused model."""
optimizer_config = self.optimizer_config.replace(
learning_rate={
'polynomial': {
'decay_steps': config.decay_steps,
'initial_learning_rate': config.initial_learning_rate,
'end_learning_rate': config.end_learning_rate,
}
},
warmup={
'type': 'linear',
'linear': {
'warmup_steps': config.warmup_steps,
}
})
logging.info('The optimizer config is: %s', optimizer_config.as_dict())
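# The override above yields a linear warmup over `config.warmup_steps`
# followed by a polynomial decay from `config.initial_learning_rate` to
# `config.end_learning_rate` over `config.decay_steps`.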
optimizer_factory = optimization.OptimizerFactory(optimizer_config)
return optimizer_factory.build_optimizer(
optimizer_factory.build_learning_rate())
def build_metrics(self):
"""Creates metrics functions for the training."""
self.train_metrics = {
'feature_transfer_mse': tf.keras.metrics.Mean(),
'beta_transfer_loss': tf.keras.metrics.Mean(),
'gamma_transfer_loss': tf.keras.metrics.Mean(),
'attention_transfer_loss': tf.keras.metrics.Mean(),
'masked_lm_accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'lm_example_loss': tf.keras.metrics.Mean(),
'total_loss': tf.keras.metrics.Mean(),
'next_sentence_accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'next_sentence_loss': tf.keras.metrics.Mean(),
}
self.eval_metrics = {
'masked_lm_accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'next_sentence_accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
}
def build_exported_ckpt_manager(self):
"""Creates checkpoint manager for exported models."""
if self.exported_ckpt_path is None:
logging.warning('exported_ckpt_path is not specified. The saved model '
'cannot be used for downstream tasks.')
return
checkpoint = tf.train.Checkpoint(global_step=self.current_step,
model=self.model,
optimizer=self.current_optimizer,
**self.model.checkpoint_items)
self.exported_ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=os.path.join(self.exported_ckpt_path, 'exported_ckpt'),
max_to_keep=2,
step_counter=self.current_step,
checkpoint_interval=20000,
init_fn=None)
def calculate_loss_metrics(self, labels, outputs):
"""Calculates loss and metrics.
Args:
labels: Ground truth from dataset.
outputs: fused outputs from teacher model and student model.
Returns:
total loss value.
"""
if self.mode == DistillationMode.LAYER_WISE:
teacher_feature = outputs['teacher_output_feature']
student_feature = outputs['student_output_feature']
feature_transfer_loss = tf.keras.losses.mean_squared_error(
self._layer_norm(teacher_feature), self._layer_norm(student_feature))
# feature_transfer_loss = tf.reduce_mean(feature_transfer_loss)
feature_transfer_loss *= self.layer_wise_distill_config.hidden_distill_factor
beta_loss, gamma_loss = _get_distribution_losses(teacher_feature,
student_feature)
beta_loss *= self.layer_wise_distill_config.beta_distill_factor
gamma_loss *= self.layer_wise_distill_config.gamma_distill_factor
total_loss = feature_transfer_loss + beta_loss + gamma_loss
teacher_attention = outputs['teacher_attention_score']
student_attention = outputs['student_attention_score']
attention_loss = _get_attention_loss(teacher_attention, student_attention)
attention_loss *= self.layer_wise_distill_config.attention_distill_factor
total_loss += attention_loss
total_loss /= tf.cast((self.stage + 1), tf.float32)
elif self.mode == DistillationMode.END2END:
lm_label = labels['masked_lm_ids']
# Shape: [batch, max_predictions_per_seq, word_vocab_size]
lm_label = tf.one_hot(indices=lm_label,
depth=self.word_vocab_size,
on_value=1.0,
off_value=0.0,
axis=-1,
dtype=tf.float32)
lm_label_weights = labels['masked_lm_weights']
teacher_mlm_logits = outputs['teacher_pretrainer_outputs']['mlm_logits']
teacher_labels = tf.nn.softmax(teacher_mlm_logits, axis=-1)
gt_label = self.distill_gt_ratio * lm_label
teacher_label = (1 - self.distill_gt_ratio) * teacher_labels
lm_label = gt_label + teacher_label
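# The training target is a convex combination of the one-hot ground truth
# and the teacher's soft labels, weighted by distill_ground_truth_ratio.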
student_pretrainer_output = outputs['student_pretrainer_outputs']
# Shape: [batch, max_predictions_per_seq, word_vocab_size]
student_lm_log_probs = tf.nn.log_softmax(
student_pretrainer_output['mlm_logits'], axis=-1)
# Shape: [batch * max_predictions_per_seq]
per_example_loss = tf.reshape(
-tf.reduce_sum(student_lm_log_probs * lm_label, axis=[-1]), [-1])
lm_label_weights = tf.reshape(labels['masked_lm_weights'], [-1])
lm_numerator_loss = tf.reduce_sum(per_example_loss * lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
total_loss = mlm_loss
sentence_labels = labels['next_sentence_labels']
sentence_outputs = tf.cast(
student_pretrainer_output['next_sentence'], dtype=tf.float32)
sentence_loss = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(
sentence_labels, sentence_outputs, from_logits=True))
total_loss += sentence_loss
else:
raise ValueError('Training mode has to be LAYER-WISE or END2END.')
if self.mode == DistillationMode.LAYER_WISE:
self.train_metrics['feature_transfer_mse'].update_state(
feature_transfer_loss)
self.train_metrics['beta_transfer_loss'].update_state(beta_loss)
self.train_metrics['gamma_transfer_loss'].update_state(gamma_loss)
self.train_metrics['attention_transfer_loss'].update_state(attention_loss)
elif self.mode == DistillationMode.END2END:
self.train_metrics['lm_example_loss'].update_state(mlm_loss)
self.train_metrics['next_sentence_loss'].update_state(sentence_loss)
self.train_metrics['total_loss'].update_state(total_loss)
return total_loss
def calculate_accuracy_metrics(self, labels, outputs, metrics):
"""Calculates metrics that are not related to the losses."""
if self.mode == DistillationMode.END2END:
student_pretrainer_output = outputs['student_pretrainer_outputs']
metrics['masked_lm_accuracy'].update_state(
labels['masked_lm_ids'],
student_pretrainer_output['mlm_logits'],
labels['masked_lm_weights'])
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'],
student_pretrainer_output['next_sentence'])
def _rebuild_training_graph(self):
"""Rebuilds the training graph when one stage/step is done."""
self.stage = (self.current_step.numpy() //
self.layer_wise_distill_config.num_steps)
logging.info('Start distillation training for the %d stage', self.stage)
self.model = self.build_model()
self.layer_wise_optimizer = self.build_optimizer(
self.layer_wise_distill_config)
# Rebuild the dataset which can significantly improve the training
# accuracy.
logging.info('Rebuild the training dataset.')
self.build_dataset()
# Setting `self._train_loop_fn` and `self._eval_loop_fn` to None will
# rebuild the train and eval functions with the updated loss function.
logging.info('Rebuild the training and evaluation graph.')
self._train_loop_fn = None
self._eval_loop_fn = None
def train_loop_begin(self):
"""A train loop is similar with the concept of an epoch."""
self.train_metrics['feature_transfer_mse'].reset_states()
self.train_metrics['beta_transfer_loss'].reset_states()
self.train_metrics['gamma_transfer_loss'].reset_states()
self.train_metrics['attention_transfer_loss'].reset_states()
self.train_metrics['total_loss'].reset_states()
self.train_metrics['lm_example_loss'].reset_states()
self.train_metrics['next_sentence_loss'].reset_states()
self.train_metrics['masked_lm_accuracy'].reset_states()
self.train_metrics['next_sentence_accuracy'].reset_states()
if self.mode == DistillationMode.INIT:
if (self.current_step.numpy() < self.layer_wise_distill_config.num_steps *
self.student_transformer_layers):
logging.info('Start or resume layer-wise training.')
self.mode = DistillationMode.LAYER_WISE
self.stage = (self.current_step.numpy() //
self.layer_wise_distill_config.num_steps)
self.model = self.build_model()
self.build_dataset()
self.current_optimizer = self.layer_wise_optimizer
else:
self.mode = DistillationMode.END2END
logging.info('Start or resume e2e training.')
self.model = self.build_model()
self.current_optimizer = self.e2e_optimizer
elif self.mode == DistillationMode.LAYER_WISE:
if (self.current_step.numpy() < self.layer_wise_distill_config.num_steps *
self.student_transformer_layers):
if (self.current_step.numpy() %
self.layer_wise_distill_config.num_steps) == 0:
self._rebuild_training_graph()
self.current_optimizer = self.layer_wise_optimizer
else:
self.mode = DistillationMode.END2END
self.model = self.build_model()
logging.info('Start e2e distillation training.')
self.current_optimizer = self.e2e_optimizer
logging.info('Rebuild the training dataset.')
self.build_dataset()
logging.info('Rebuild the training and evaluation graph.')
self._train_loop_fn = None
self._eval_loop_fn = None
def train_step(self, iterator):
"""A single step of train."""
def step_fn(inputs):
with tf.GradientTape() as tape:
outputs = self.model(inputs, training=True)
loss = self.calculate_loss_metrics(inputs, outputs)
self.calculate_accuracy_metrics(inputs, outputs, self.train_metrics)
grads = tape.gradient(loss, self.model.trainable_variables)
self.current_optimizer.apply_gradients(
zip(grads, self.model.trainable_variables))
self.current_step.assign_add(1)
self.strategy.run(step_fn, args=(next(iterator),))
def train_loop_end(self):
"""A train loop is similar with the concept of an epoch."""
if self.mode == DistillationMode.END2END:
# Save the exported checkpoint (used for downstream tasks) after every
# 'checkpoint_interval' steps. Checkpoints are only exported after entering
# the e2e distillation training stage.
if self.exported_ckpt_manager is None:
self.build_exported_ckpt_manager()
self.exported_ckpt_manager.save(
checkpoint_number=self.current_step.numpy(),
check_interval=True)
return {
'feature_transfer_mse':
self.train_metrics['feature_transfer_mse'].result(),
'beta_transfer_loss':
self.train_metrics['beta_transfer_loss'].result(),
'gamma_transfer_loss':
self.train_metrics['gamma_transfer_loss'].result(),
'attention_transfer_loss':
self.train_metrics['attention_transfer_loss'].result(),
'total_loss':
self.train_metrics['total_loss'].result(),
'lm_example_loss':
self.train_metrics['lm_example_loss'].result(),
'next_sentence_loss':
self.train_metrics['next_sentence_loss'].result(),
'masked_lm_accuracy':
self.train_metrics['masked_lm_accuracy'].result(),
'next_sentence_accuracy':
self.train_metrics['next_sentence_accuracy'].result(),
'learning_rate':
self.current_optimizer.learning_rate(
self.current_optimizer.iterations),
'current_step':
self.current_step,
'optimizer_step':
self.current_optimizer.iterations,
}
# TODO(longy): We only run evaluation on downstream tasks.
def eval_begin(self):
self.eval_metrics['masked_lm_accuracy'].reset_states()
self.eval_metrics['next_sentence_accuracy'].reset_states()
def eval_step(self, iterator):
def step_fn(inputs):
outputs = self.model(inputs, training=False)
self.calculate_accuracy_metrics(inputs, outputs, self.eval_metrics)
self.strategy.run(step_fn, args=(next(iterator),))
def eval_end(self):
return {'masked_lm_accuracy':
self.eval_metrics['masked_lm_accuracy'].result(),
'next_sentence_accuracy':
self.eval_metrics['next_sentence_accuracy'].result()}

# File: models-master/official/projects/edgetpu/nlp/serving/export_tflite_squad_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for export_tflite_squad."""
import tensorflow as tf
from official.nlp.modeling import models
from official.projects.edgetpu.nlp.configs import params
from official.projects.edgetpu.nlp.modeling import model_builder
from official.projects.edgetpu.nlp.serving import export_tflite_squad
class ExportTfliteSquadTest(tf.test.TestCase):
def setUp(self):
super(ExportTfliteSquadTest, self).setUp()
experiment_params = params.EdgeTPUBERTCustomParams()
pretrainer_model = model_builder.build_bert_pretrainer(
experiment_params.student_model, name='pretrainer')
encoder_network = pretrainer_model.encoder_network
self.span_labeler = models.BertSpanLabeler(
network=encoder_network,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01))
def test_model_input_output(self):
test_model = export_tflite_squad.build_model_for_serving(self.span_labeler)
# Test model input order, names, and shape.
self.assertEqual(test_model.input[0].name, 'input_word_ids')
self.assertEqual(test_model.input[1].name, 'input_type_ids')
self.assertEqual(test_model.input[2].name, 'input_mask')
self.assertEqual(test_model.input[0].shape, (1, 384))
self.assertEqual(test_model.input[1].shape, (1, 384))
self.assertEqual(test_model.input[2].shape, (1, 384))
# Test model output order, name, and shape.
self.assertEqual(test_model.output[0].name, 'start_positions/Identity:0')
self.assertEqual(test_model.output[1].name, 'end_positions/Identity:0')
self.assertEqual(test_model.output[0].shape, (1, 384))
self.assertEqual(test_model.output[1].shape, (1, 384))
if __name__ == '__main__':
tf.test.main()

# File: models-master/official/projects/edgetpu/nlp/serving/export_tflite_squad.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Export tflite for MobileBERT-EdgeTPU with SQUAD head.
Example usage:
python3 export_tflite_squad.py \
--config_file=official/projects/edgetpu/nlp/experiments/mobilebert_edgetpu_xs.yaml \
--export_path=/tmp/ \
--quantization_method=full-integer
"""
# pylint: enable=line-too-long
import os
import tempfile
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
import orbit
import tensorflow as tf
from official.common import flags as tfm_flags
from official.nlp.data import data_loader_factory
from official.nlp.data import question_answering_dataloader
from official.nlp.modeling import models
from official.projects.edgetpu.nlp.configs import params
from official.projects.edgetpu.nlp.modeling import model_builder
from official.projects.edgetpu.nlp.utils import utils
FLAGS = flags.FLAGS
SQUAD_TRAIN_SPLIT = 'gs://**/tp/bert/squad_v1.1/train.tf_record'
flags.DEFINE_string('export_path', '/tmp/',
'File path to store tflite model.')
flags.DEFINE_enum('quantization_method', 'float',
['full-integer', 'hybrid', 'float'], 'Quantization method.')
flags.DEFINE_integer('batch_size', 1,
'Fixed batch size for exported TFLite model.')
flags.DEFINE_integer('sequence_length', 384,
'Fixed sequence length.')
flags.DEFINE_string('model_checkpoint', None,
'Checkpoint path for the model. The model will be initialized '
'with random weights if the path is None.')
def build_model_for_serving(model: tf.keras.Model,
sequence_length: int = 384,
batch_size: int = 1) -> tf.keras.Model:
"""Builds MLPerf evaluation compatible models.
To run the model on device, the model input/output datatype and node names
need to match the MLPerf setup.
Args:
model: Input keras model.
sequence_length: BERT model sequence length.
batch_size: Inference batch size.
Returns:
Keras model with new input/output nodes.
"""
word_ids = tf.keras.Input(shape=(sequence_length,),
batch_size=batch_size,
dtype=tf.int32,
name='input_word_ids')
mask = tf.keras.Input(shape=(sequence_length,),
batch_size=batch_size,
dtype=tf.int32, name='input_mask')
type_ids = tf.keras.Input(shape=(sequence_length,),
batch_size=batch_size,
dtype=tf.int32, name='input_type_ids')
model_output = model([word_ids, type_ids, mask])
# Use identity layers wrapped in lambdas to explicitly name the output
# tensors.
start_logits = tf.keras.layers.Lambda(
tf.identity, name='start_positions')(
model_output[0])
end_logits = tf.keras.layers.Lambda(
tf.identity, name='end_positions')(
model_output[1])
model = tf.keras.Model(
inputs=[word_ids, type_ids, mask],
outputs=[start_logits, end_logits])
return model
def build_inputs(data_params, input_context=None):
"""Returns tf.data.Dataset for sentence_prediction task."""
return data_loader_factory.get_data_loader(data_params).load(input_context)
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Set up experiment params and load the configs from file/files.
experiment_params = params.EdgeTPUBERTCustomParams()
experiment_params = utils.config_override(experiment_params, FLAGS)
# Change the input mask type to tf.float32 to avoid an additional casting op.
experiment_params.student_model.encoder.mobilebert.input_mask_dtype = 'float32'
# Experiments indicate that using -120 as the mask value for softmax is good
# enough for both int8 and bfloat. So we set quantization_friendly to True
# for both the quantized and the float models.
pretrainer_model = model_builder.build_bert_pretrainer(
experiment_params.student_model,
name='pretrainer',
quantization_friendly=True)
encoder_network = pretrainer_model.encoder_network
model = models.BertSpanLabeler(
network=encoder_network,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01))
# Load model weights.
if FLAGS.model_checkpoint is not None:
checkpoint_dict = {'model': model}
checkpoint = tf.train.Checkpoint(**checkpoint_dict)
checkpoint.restore(FLAGS.model_checkpoint).assert_existing_objects_matched()
model_for_serving = build_model_for_serving(model, FLAGS.sequence_length,
FLAGS.batch_size)
model_for_serving.summary()
# TODO(b/194449109): Need to save the model to file and then convert tflite
# with 'tf.lite.TFLiteConverter.from_saved_model()' to get the expected
# accuracy
# Use mkdtemp() so that the directory is not deleted as soon as the wrapping
# TemporaryDirectory object is garbage collected.
tmp_dir = tempfile.mkdtemp()
model_for_serving.save(tmp_dir)
def _representative_dataset():
dataset_params = question_answering_dataloader.QADataConfig()
dataset_params.input_path = SQUAD_TRAIN_SPLIT
dataset_params.drop_remainder = False
dataset_params.global_batch_size = 1
dataset_params.is_training = True
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
build_inputs, dataset_params)
for example in dataset.take(100):
inputs = example[0]
input_word_ids = inputs['input_word_ids']
input_mask = inputs['input_mask']
input_type_ids = inputs['input_type_ids']
yield [input_word_ids, input_mask, input_type_ids]
converter = tf.lite.TFLiteConverter.from_saved_model(tmp_dir)
if FLAGS.quantization_method in ['full-integer', 'hybrid']:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
if FLAGS.quantization_method in ['full-integer']:
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.float32
converter.representative_dataset = _representative_dataset
tflite_quant_model = converter.convert()
export_model_path = os.path.join(FLAGS.export_path, 'model.tflite')
with tf.io.gfile.GFile(export_model_path, 'wb') as f:
f.write(tflite_quant_model)
logging.info('Successfully saved the tflite model to %s', FLAGS.export_path)
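# A minimal sketch of running the exported file with the TFLite interpreter
# (input ordering and dtypes here are assumptions; with
# --quantization_method=full-integer the quantized inputs may be int8):
#
#   interpreter = tf.lite.Interpreter(model_path=export_model_path)
#   interpreter.allocate_tensors()
#   for detail, value in zip(interpreter.get_input_details(),
#                            [word_ids, mask, type_ids]):
#     interpreter.set_tensor(detail['index'], value)
#   interpreter.invoke()
#   start_logits = interpreter.get_tensor(
#       interpreter.get_output_details()[0]['index'])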
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)

# File: models-master/official/projects/edgetpu/nlp/utils/utils.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import os
import pprint
from absl import logging
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.edgetpu.nlp.configs import params
def serialize_config(experiment_params: params.EdgeTPUBERTCustomParams,
model_dir: str):
"""Serializes and saves the experiment config."""
params_save_path = os.path.join(model_dir, 'params.yaml')
logging.info('Saving experiment configuration to %s', params_save_path)
tf.io.gfile.makedirs(model_dir)
hyperparams.save_params_dict_to_yaml(experiment_params, params_save_path)
# Note: Do not call this utility function unless you load the `flags`
# module in your script.
def config_override(experiment_params, flags_obj):
"""Overrides ExperimentConfig according to flags."""
if not hasattr(flags_obj, 'tpu'):
raise ModuleNotFoundError(
'`tpu` is not found in FLAGS. Need to load flags.py first.')
# Change runtime.tpu to the real tpu.
experiment_params.override({
'runtime': {
'tpu_address': flags_obj.tpu,
}
})
# Get the first level of override from `--config_file`.
# `--config_file` is typically used as a template that specifies the common
# override for a particular experiment.
for config_file in flags_obj.config_file or []:
experiment_params = hyperparams.override_params_dict(
experiment_params, config_file, is_strict=True)
# Get the second level of override from `--params_override`.
# `--params_override` is typically used as a further override over the
# template. For example, one may define a particular template for training
# ResNet50 on ImageNet in a config file and pass it via `--config_file`,
# then define different learning rates and pass it via `--params_override`.
if flags_obj.params_override:
experiment_params = hyperparams.override_params_dict(
experiment_params, flags_obj.params_override, is_strict=True)
experiment_params.validate()
experiment_params.lock()
pp = pprint.PrettyPrinter()
logging.info('Final experiment parameters: %s',
pp.pformat(experiment_params.as_dict()))
model_dir = get_model_dir(experiment_params, flags_obj)
if flags_obj.mode is not None:
if 'train' in flags_obj.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
serialize_config(experiment_params, model_dir)
return experiment_params
def get_model_dir(experiment_params, flags_obj):
"""Gets model dir from Flags."""
del experiment_params
return flags_obj.model_dir
def load_checkpoint(model: tf.keras.Model, ckpt_path: str):
"""Initializes model with the checkpoint."""
ckpt_dir_or_file = ckpt_path
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Makes sure the pretrainer variables are created.
_ = model(model.inputs)
checkpoint = tf.train.Checkpoint(
**model.checkpoint_items)
checkpoint.read(ckpt_dir_or_file).expect_partial()
logging.info('Successfully loaded parameters for the %s model', model.name)
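# Typical usage (a sketch; build_bert_pretrainer lives in
# official.projects.edgetpu.nlp.modeling.model_builder):
#
#   pretrainer = model_builder.build_bert_pretrainer(
#       experiment_params.student_model)
#   load_checkpoint(pretrainer, '/path/to/checkpoint_dir')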

# File: models-master/official/projects/edgetpu/nlp/modeling/model_builder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mobilebert_edgetpu.model_builder.py."""
import tensorflow as tf
from official.nlp import modeling
from official.nlp.configs import encoders
from official.projects.edgetpu.nlp.configs import params
from official.projects.edgetpu.nlp.modeling import model_builder
class ModelBuilderTest(tf.test.TestCase):
def setUp(self):
super(ModelBuilderTest, self).setUp()
self.pretrainer_config = params.PretrainerModelParams(
encoder=encoders.EncoderConfig(type='mobilebert'))
def test_default_initialization(self):
"""Initializes pretrainer model from stratch."""
pretrainer = model_builder.build_bert_pretrainer(
pretrainer_cfg=self.pretrainer_config,
name='test_model')
# Makes sure the pretrainer variables are created.
_ = pretrainer(pretrainer.inputs)
self.assertEqual(pretrainer.name, 'test_model')
encoder = pretrainer.encoder_network
default_number_layer = encoders.MobileBertEncoderConfig().num_blocks
encoder_transformer_layer_counter = 0
for layer in encoder.layers:
if isinstance(layer, modeling.layers.MobileBertTransformer):
encoder_transformer_layer_counter += 1
self.assertEqual(default_number_layer, encoder_transformer_layer_counter)
def test_initialization_with_encoder(self):
"""Initializes pretrainer model with an existing encoder network."""
encoder = encoders.build_encoder(
config=encoders.EncoderConfig(type='mobilebert'))
pretrainer = model_builder.build_bert_pretrainer(
pretrainer_cfg=self.pretrainer_config,
encoder=encoder)
encoder_network = pretrainer.encoder_network
self.assertEqual(encoder_network, encoder)
def test_initialization_with_mlm(self):
"""Initializes pretrainer model with an existing MLM head."""
embedding = modeling.layers.MobileBertEmbedding(
word_vocab_size=30522,
word_embed_size=128,
type_vocab_size=2,
output_embed_size=encoders.MobileBertEncoderConfig().hidden_size)
dummy_input = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32)
_ = embedding(dummy_input)
embedding_table = embedding.word_embedding.embeddings
mlm_layer = modeling.layers.MobileBertMaskedLM(
embedding_table=embedding_table)
pretrainer = model_builder.build_bert_pretrainer(
pretrainer_cfg=self.pretrainer_config,
masked_lm=mlm_layer)
mlm_network = pretrainer.masked_lm
self.assertEqual(mlm_network, mlm_layer)
if __name__ == '__main__':
tf.test.main()

# File: models-master/official/projects/edgetpu/nlp/modeling/pretrainer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BERT pretrainer model."""
import itertools
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling import networks
from official.projects.edgetpu.nlp.modeling import pretrainer
class MobileBERTEdgeTPUPretrainerTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(itertools.product([True, False],
[True, False],
[True, False]))
def test_mobilebert_edgetpu_pretrainer(
self,
dict_outputs,
return_all_encoder_outputs,
use_customized_masked_lm):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
hidden_size = 48
num_layers = 2
test_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=num_layers,
hidden_size=hidden_size,
max_sequence_length=sequence_length,
return_all_encoder_outputs=return_all_encoder_outputs,
dict_outputs=dict_outputs)
# Create a BERT trainer with the created network.
if use_customized_masked_lm:
customized_masked_lm = layers.MaskedLM(
embedding_table=test_network.get_embedding_table())
else:
customized_masked_lm = None
bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer(
encoder_network=test_network, customized_masked_lm=customized_masked_lm)
num_token_predictions = 20
# Create a set of 2-dimensional inputs (the first dimension is implicit).
inputs = dict(
input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32))
inputs['masked_lm_positions'] = tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = bert_trainer_model(inputs)
has_encoder_outputs = dict_outputs or return_all_encoder_outputs
expected_keys = ['sequence_output', 'pooled_output']
if has_encoder_outputs:
expected_keys.append('encoder_outputs')
expected_keys.append('mlm_logits')
self.assertSameElements(outputs.keys(), expected_keys)
# Validate that the outputs are of the expected shape.
expected_lm_shape = [None, num_token_predictions, vocab_size]
self.assertAllEqual(expected_lm_shape,
outputs['mlm_logits'].shape.as_list())
expected_sequence_output_shape = [None, sequence_length, hidden_size]
self.assertAllEqual(expected_sequence_output_shape,
outputs['sequence_output'].shape.as_list())
expected_pooled_output_shape = [None, hidden_size]
self.assertAllEqual(expected_pooled_output_shape,
outputs['pooled_output'].shape.as_list())
def test_multiple_cls_outputs(self):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
hidden_size = 48
num_layers = 2
test_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=num_layers,
hidden_size=hidden_size,
max_sequence_length=sequence_length,
dict_outputs=True)
bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer(
encoder_network=test_network,
classification_heads=[layers.MultiClsHeads(
inner_dim=5, cls_list=[('foo', 2), ('bar', 3)])])
num_token_predictions = 20
# Create a set of 2-dimensional inputs (the first dimension is implicit).
inputs = dict(
input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
masked_lm_positions=tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32))
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = bert_trainer_model(inputs)
self.assertEqual(outputs['foo'].shape.as_list(), [None, 2])
self.assertEqual(outputs['bar'].shape.as_list(), [None, 3])
def test_v2_serialize_deserialize(self):
"""Validate that the BERT trainer can be serialized and deserialized."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_network = networks.BertEncoder(vocab_size=100, num_layers=2)
# Create a BERT trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer(
encoder_network=test_network)
# Create another BERT trainer via serialization and deserialization.
config = bert_trainer_model.get_config()
new_bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer.from_config(
config)
# Validate that the config can be serialized to JSON.
_ = new_bert_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(bert_trainer_model.get_config(),
new_bert_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()

# File: models-master/official/projects/edgetpu/nlp/modeling/encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileBERT text encoder network."""
import tensorflow as tf
from official.nlp import modeling
from official.nlp.modeling import layers
from official.projects.edgetpu.nlp.modeling import edgetpu_layers
@tf.keras.utils.register_keras_serializable(package='Text')
class MobileBERTEncoder(tf.keras.Model):
"""A Keras functional API implementation for MobileBERT encoder."""
def __init__(self,
word_vocab_size=30522,
word_embed_size=128,
type_vocab_size=2,
max_sequence_length=512,
num_blocks=24,
hidden_size=512,
num_attention_heads=4,
intermediate_size=512,
intermediate_act_fn='relu',
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
intra_bottleneck_size=128,
initializer_range=0.02,
use_bottleneck_attention=False,
key_query_shared_bottleneck=True,
num_feedforward_networks=4,
normalization_type='no_norm',
classifier_activation=False,
input_mask_dtype='int32',
quantization_friendly=True,
**kwargs):
"""Class initialization.
Args:
word_vocab_size: Number of words in the vocabulary.
word_embed_size: Word embedding size.
type_vocab_size: Number of word types.
max_sequence_length: Maximum length of input sequence.
num_blocks: Number of transformer block in the encoder model.
hidden_size: Hidden size for the transformer block.
num_attention_heads: Number of attention heads in the transformer block.
intermediate_size: The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: Dropout probability for the hidden layers.
attention_probs_dropout_prob: Dropout probability of the attention
probabilities.
intra_bottleneck_size: Size of bottleneck.
initializer_range: The stddev of the `truncated_normal_initializer` for
initializing all weight matrices.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation. If true, the following `key_query_shared_bottleneck`
will be ignored.
key_query_shared_bottleneck: Whether to share linear transformation for
keys and queries.
num_feedforward_networks: Number of stacked feed-forward networks.
      normalization_type: The type of normalization; only `no_norm` and
`layer_norm` are supported. `no_norm` represents the element-wise linear
transformation for the student model, as suggested by the original
MobileBERT paper. `layer_norm` is used for the teacher model.
      classifier_activation: Whether to use the tanh activation for the final
        representation of the `[CLS]` token in fine-tuning.
input_mask_dtype: The dtype of `input_mask` tensor, which is one of the
input tensors of this encoder. Defaults to `int32`. If you want
        to use `tf.lite` quantization, which does not support the `Cast` op,
please set this argument to `tf.float32` and feed `input_mask`
tensor with values in `float32` to avoid `tf.cast` in the computation.
      quantization_friendly: If enabled, the model uses the EdgeTPU mobile
        transformer. The difference is a customized softmax op that uses -120
        as the mask value, which is more stable for post-training
        quantization.
      **kwargs: Other keyword arguments.
"""
self._self_setattr_tracking = False
initializer = tf.keras.initializers.TruncatedNormal(
stddev=initializer_range)
# layer instantiation
self.embedding_layer = layers.MobileBertEmbedding(
word_vocab_size=word_vocab_size,
word_embed_size=word_embed_size,
type_vocab_size=type_vocab_size,
output_embed_size=hidden_size,
max_sequence_length=max_sequence_length,
normalization_type=normalization_type,
initializer=initializer,
dropout_rate=hidden_dropout_prob)
self._transformer_layers = []
transformer_layer_args = dict(
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
intermediate_act_fn=intermediate_act_fn,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
intra_bottleneck_size=intra_bottleneck_size,
use_bottleneck_attention=use_bottleneck_attention,
key_query_shared_bottleneck=key_query_shared_bottleneck,
num_feedforward_networks=num_feedforward_networks,
normalization_type=normalization_type,
initializer=initializer,
)
for layer_idx in range(num_blocks):
if quantization_friendly:
transformer = edgetpu_layers.EdgetpuMobileBertTransformer(
name=f'transformer_layer_{layer_idx}',
**transformer_layer_args)
else:
transformer = layers.MobileBertTransformer(
name=f'transformer_layer_{layer_idx}',
**transformer_layer_args)
self._transformer_layers.append(transformer)
# input tensor
input_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
input_mask = tf.keras.layers.Input(
shape=(None,), dtype=input_mask_dtype, name='input_mask')
self.inputs = [input_ids, input_mask, type_ids]
    # The dtype of `attention_mask` will be the same as the dtype of
    # `input_mask`.
attention_mask = modeling.layers.SelfAttentionMask()(input_mask, input_mask)
# build the computation graph
all_layer_outputs = []
all_attention_scores = []
embedding_output = self.embedding_layer(input_ids, type_ids)
all_layer_outputs.append(embedding_output)
prev_output = embedding_output
for layer_idx in range(num_blocks):
layer_output, attention_score = self._transformer_layers[layer_idx](
prev_output,
attention_mask,
return_attention_scores=True)
all_layer_outputs.append(layer_output)
all_attention_scores.append(attention_score)
prev_output = layer_output
first_token = tf.squeeze(prev_output[:, 0:1, :], axis=1)
if classifier_activation:
self._pooler_layer = tf.keras.layers.EinsumDense(
'ab,bc->ac',
output_shape=hidden_size,
activation=tf.tanh,
bias_axes='c',
kernel_initializer=initializer,
name='pooler')
first_token = self._pooler_layer(first_token)
else:
self._pooler_layer = None
outputs = dict(
sequence_output=prev_output,
pooled_output=first_token,
encoder_outputs=all_layer_outputs,
attention_scores=all_attention_scores)
super(MobileBERTEncoder, self).__init__(
inputs=self.inputs, outputs=outputs, **kwargs)
def get_embedding_table(self):
return self.embedding_layer.word_embedding.embeddings
def get_embedding_layer(self):
return self.embedding_layer.word_embedding
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
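# Example usage (an illustrative sketch added for exposition, not part of the
# original API): builds an encoder with default sizes and runs a forward pass.
# The tiny batch/sequence shapes below are assumptions for demonstration only.
def _example_encoder_usage():
  encoder = MobileBERTEncoder(num_blocks=2)
  seq_len = 16
  word_ids = tf.ones((1, seq_len), dtype=tf.int32)
  mask = tf.ones((1, seq_len), dtype=tf.int32)
  type_ids = tf.zeros((1, seq_len), dtype=tf.int32)
  outputs = encoder([word_ids, mask, type_ids])
  # `sequence_output` has shape (1, seq_len, hidden_size).
  return outputs['sequence_output']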
| 8,251 | 40.467337 | 80 | py |
models | models-master/official/projects/edgetpu/nlp/modeling/edgetpu_layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for custom layers used by MobileBERT-EdgeTPU."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.edgetpu.nlp.modeling import edgetpu_layers
keras = tf.keras
class MultiHeadAttentionTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("key_value_same_proj", None, None, [40, 80]),
("key_value_different_proj", 32, 60, [40, 60]),
)
def test_non_masked_attention(self, value_dim, output_shape, output_dims):
"""Test that the attention layer can be created without a mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12,
key_dim=64,
value_dim=value_dim,
output_shape=output_shape)
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
value = keras.Input(shape=(20, 80))
output = test_layer(query=query, value=value)
self.assertEqual(output.shape.as_list(), [None] + output_dims)
def test_non_masked_self_attention(self):
"""Test with one input (self-attenntion) and no mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
output = test_layer(query, query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
def test_attention_scores(self):
"""Test attention outputs with coefficients."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
output, coef = test_layer(query, query, return_attention_scores=True)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40])
def test_attention_scores_with_values(self):
"""Test attention outputs with coefficients."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
value = keras.Input(shape=(60, 80))
output, coef = test_layer(query, value, return_attention_scores=True)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
self.assertEqual(coef.shape.as_list(), [None, 12, 40, 60])
@parameterized.named_parameters(("with_bias", True), ("no_bias", False))
def test_masked_attention(self, use_bias):
"""Test with a mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=2, key_dim=2, use_bias=use_bias)
# Create a 3-dimensional input (the first dimension is implicit).
batch_size = 3
query = keras.Input(shape=(4, 8))
value = keras.Input(shape=(2, 8))
mask_tensor = keras.Input(shape=(4, 2))
output = test_layer(query=query, value=value, attention_mask=mask_tensor)
# Create a model containing the test layer.
model = keras.Model([query, value, mask_tensor], output)
# Generate data for the input (non-mask) tensors.
from_data = 10 * np.random.random_sample((batch_size, 4, 8))
to_data = 10 * np.random.random_sample((batch_size, 2, 8))
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=(batch_size, 4, 2))
masked_output_data = model.predict([from_data, to_data, mask_data])
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2))
unmasked_output_data = model.predict([from_data, to_data, null_mask_data])
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
# Tests the layer with three inputs: Q, K, V.
key = keras.Input(shape=(2, 8))
output = test_layer(query, value=value, key=key, attention_mask=mask_tensor)
model = keras.Model([query, value, key, mask_tensor], output)
masked_output_data = model.predict([from_data, to_data, to_data, mask_data])
unmasked_output_data = model.predict(
[from_data, to_data, to_data, null_mask_data])
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
if use_bias:
self.assertLen(test_layer._query_dense.trainable_variables, 2)
self.assertLen(test_layer._output_dense.trainable_variables, 2)
else:
self.assertLen(test_layer._query_dense.trainable_variables, 1)
self.assertLen(test_layer._output_dense.trainable_variables, 1)
def test_initializer(self):
"""Test with a specified initializer."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12,
key_dim=64,
kernel_initializer=keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
output = test_layer(query, query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
def test_masked_attention_with_scores(self):
"""Test with a mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=2, key_dim=2)
# Create a 3-dimensional input (the first dimension is implicit).
batch_size = 3
query = keras.Input(shape=(4, 8))
value = keras.Input(shape=(2, 8))
mask_tensor = keras.Input(shape=(4, 2))
output = test_layer(query=query, value=value, attention_mask=mask_tensor)
# Create a model containing the test layer.
model = keras.Model([query, value, mask_tensor], output)
# Generate data for the input (non-mask) tensors.
from_data = 10 * np.random.random_sample((batch_size, 4, 8))
to_data = 10 * np.random.random_sample((batch_size, 2, 8))
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=(batch_size, 4, 2))
masked_output_data = model.predict([from_data, to_data, mask_data])
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2))
unmasked_output_data = model.predict([from_data, to_data, null_mask_data])
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
# Create a model containing attention scores.
output, scores = test_layer(
query=query, value=value, attention_mask=mask_tensor,
return_attention_scores=True)
model = keras.Model([query, value, mask_tensor], [output, scores])
masked_output_data_score, masked_score = model.predict(
[from_data, to_data, mask_data])
unmasked_output_data_score, unmasked_score = model.predict(
[from_data, to_data, null_mask_data])
self.assertNotAllClose(masked_output_data_score, unmasked_output_data_score)
self.assertAllClose(masked_output_data, masked_output_data_score)
self.assertAllClose(unmasked_output_data, unmasked_output_data_score)
self.assertNotAllClose(masked_score, unmasked_score)
@parameterized.named_parameters(
("4d_inputs_1freebatch_mask2", [3, 4], [3, 2], [4, 2],
(2,)), ("4d_inputs_1freebatch_mask3", [3, 4], [3, 2], [3, 4, 2], (2,)),
("4d_inputs_1freebatch_mask4", [3, 4], [3, 2], [3, 2, 4, 2],
(2,)), ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)),
("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3)),
("5D_inputs_2D_attention_fullmask", [5, 3, 4], [5, 3, 2], [5, 3, 4, 3, 2],
(2, 3)))
def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes):
"""Test with a mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=2, key_dim=2, attention_axes=attention_axes)
batch_size, hidden_size = 3, 8
# Generate data for the input (non-mask) tensors.
query_shape = [batch_size] + q_dims + [hidden_size]
value_shape = [batch_size] + v_dims + [hidden_size]
mask_shape = [batch_size] + mask_dims
query = 10 * np.random.random_sample(query_shape)
value = 10 * np.random.random_sample(value_shape)
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=mask_shape).astype("bool")
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones(mask_shape)
# Because one data is masked and one is not, the outputs should not be the
# same.
query_tensor = keras.Input(query_shape[1:], name="query")
value_tensor = keras.Input(value_shape[1:], name="value")
mask_tensor = keras.Input(mask_shape[1:], name="mask")
output = test_layer(query=query_tensor, value=value_tensor,
attention_mask=mask_tensor)
model = keras.Model([query_tensor, value_tensor, mask_tensor], output)
self.assertNotAllClose(
model.predict([query, value, mask_data]),
model.predict([query, value, null_mask_data]))
def test_dropout(self):
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=2, key_dim=2, dropout=0.5)
# Generate data for the input (non-mask) tensors.
from_data = keras.backend.ones(shape=(32, 4, 8))
to_data = keras.backend.ones(shape=(32, 2, 8))
train_out = test_layer(from_data, to_data, None, None, None, True)
test_out = test_layer(from_data, to_data, None, None, None, False)
    # With dropout enabled, the output in training mode should differ from
    # the output in inference mode.
self.assertNotAllClose(
keras.backend.eval(train_out),
keras.backend.eval(test_out))
if __name__ == "__main__":
tf.test.main()
| 10,672 | 43.844538 | 80 | py |
models | models-master/official/projects/edgetpu/nlp/modeling/model_builder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build MobileBERT-EdgeTPU model."""
from typing import Optional
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp import modeling
from official.projects.edgetpu.nlp.configs import params
from official.projects.edgetpu.nlp.modeling import encoder as edgetpu_encoder
from official.projects.edgetpu.nlp.modeling import pretrainer as edgetpu_pretrainer
def build_bert_pretrainer(pretrainer_cfg: params.PretrainerModelParams,
encoder: Optional[tf.keras.Model] = None,
masked_lm: Optional[tf.keras.Model] = None,
quantization_friendly: Optional[bool] = False,
name: Optional[str] = None) -> tf.keras.Model:
"""Builds pretrainer.
Args:
pretrainer_cfg: configs for the pretrainer model.
encoder: (Optional) The encoder network for the pretrainer model.
masked_lm: (Optional) The masked_lm network for the pretrainer model.
    quantization_friendly: (Optional) If enabled, the model will use the
      EdgeTPU MobileBERT transformer. The difference is a customized softmax
      op that uses -120 as the mask value, which is more stable for
      post-training quantization.
name: (Optional) Name of the pretrainer model.
Returns:
The pretrainer model.
"""
encoder_cfg = pretrainer_cfg.encoder.mobilebert
encoder = encoder or edgetpu_encoder.MobileBERTEncoder(
word_vocab_size=encoder_cfg.word_vocab_size,
word_embed_size=encoder_cfg.word_embed_size,
type_vocab_size=encoder_cfg.type_vocab_size,
max_sequence_length=encoder_cfg.max_sequence_length,
num_blocks=encoder_cfg.num_blocks,
hidden_size=encoder_cfg.hidden_size,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_act_fn=encoder_cfg.hidden_activation,
hidden_dropout_prob=encoder_cfg.hidden_dropout_prob,
attention_probs_dropout_prob=encoder_cfg.attention_probs_dropout_prob,
intra_bottleneck_size=encoder_cfg.intra_bottleneck_size,
initializer_range=encoder_cfg.initializer_range,
use_bottleneck_attention=encoder_cfg.use_bottleneck_attention,
key_query_shared_bottleneck=encoder_cfg.key_query_shared_bottleneck,
num_feedforward_networks=encoder_cfg.num_feedforward_networks,
normalization_type=encoder_cfg.normalization_type,
classifier_activation=encoder_cfg.classifier_activation,
input_mask_dtype=encoder_cfg.input_mask_dtype,
quantization_friendly=quantization_friendly)
if pretrainer_cfg.cls_heads:
cls_heads = [
modeling.layers.ClassificationHead(**cfg.as_dict())
for cfg in pretrainer_cfg.cls_heads
]
else:
cls_heads = []
# Get the embedding table from the encoder model.
def _get_embedding_table(encoder):
for layer in encoder.layers:
if layer.name.startswith('mobile_bert_embedding'):
return layer.word_embedding.embeddings
    raise ValueError('Cannot find the embedding layer in the encoder.')
masked_lm = masked_lm or modeling.layers.MobileBertMaskedLM(
embedding_table=_get_embedding_table(encoder),
activation=tf_utils.get_activation(pretrainer_cfg.mlm_activation),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=pretrainer_cfg.mlm_initializer_range),
output_weights_use_proj=pretrainer_cfg.mlm_output_weights_use_proj,
name='cls/predictions')
pretrainer = edgetpu_pretrainer.MobileBERTEdgeTPUPretrainer(
encoder_network=encoder,
classification_heads=cls_heads,
customized_masked_lm=masked_lm,
name=name)
return pretrainer
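# Illustrative sketch (added for exposition): builds a pretrainer from config.
# It assumes the default `PretrainerModelParams` values form a valid
# configuration; adjust fields as needed for a real experiment.
def _example_build_pretrainer():
  pretrainer_cfg = params.PretrainerModelParams()
  model = build_bert_pretrainer(pretrainer_cfg, quantization_friendly=True)
  # The pretrainer exposes the encoder and masked LM head for checkpointing.
  return model.checkpoint_items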
| 4,298 | 42.867347 | 83 | py |
models | models-master/official/projects/edgetpu/nlp/modeling/pretrainer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Pre-training model."""
# pylint: disable=g-classes-have-attributes
import copy
from typing import List, Optional
import tensorflow as tf
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class MobileBERTEdgeTPUPretrainer(tf.keras.Model):
"""BERT pretraining model V2.
  Adds the masked language model head and optional classification heads on top
  of the transformer encoder.
Args:
encoder_network: A transformer network. This network should output a
sequence output and a classification output.
mlm_activation: The activation (if any) to use in the masked LM network. If
None, no activation will be used.
mlm_initializer: The initializer (if any) to use in the masked LM. Default
to a Glorot uniform initializer.
classification_heads: A list of optional head layers to transform on encoder
sequence outputs.
customized_masked_lm: A customized masked_lm layer. If None, will create
a standard layer from `layers.MaskedLM`; if not None, will use the
specified masked_lm layer. Above arguments `mlm_activation` and
`mlm_initializer` will be ignored.
name: The name of the model.
Inputs: Inputs defined by the encoder network, plus `masked_lm_positions` as a
dictionary.
Outputs: A dictionary of `lm_output`, classification head outputs keyed by
head names, and also outputs from `encoder_network`, keyed by
`sequence_output` and `encoder_outputs` (if any).
"""
def __init__(
self,
encoder_network: tf.keras.Model,
mlm_activation=None,
mlm_initializer='glorot_uniform',
classification_heads: Optional[List[tf.keras.layers.Layer]] = None,
customized_masked_lm: Optional[tf.keras.layers.Layer] = None,
name: str = 'bert',
**kwargs):
inputs = copy.copy(encoder_network.inputs)
outputs = {}
encoder_network_outputs = encoder_network(inputs)
if isinstance(encoder_network_outputs, list):
outputs['pooled_output'] = encoder_network_outputs[1]
if isinstance(encoder_network_outputs[0], list):
outputs['encoder_outputs'] = encoder_network_outputs[0]
outputs['sequence_output'] = encoder_network_outputs[0][-1]
else:
outputs['sequence_output'] = encoder_network_outputs[0]
elif isinstance(encoder_network_outputs, dict):
outputs = encoder_network_outputs
else:
raise ValueError('encoder_network\'s output should be either a list '
'or a dict, but got %s' % encoder_network_outputs)
masked_lm_positions = tf.keras.layers.Input(
shape=(None,), name='masked_lm_positions', dtype=tf.int32)
inputs.append(masked_lm_positions)
masked_lm_layer = customized_masked_lm or layers.MaskedLM(
embedding_table=encoder_network.get_embedding_table(),
activation=mlm_activation,
initializer=mlm_initializer,
name='cls/predictions')
sequence_output = outputs['sequence_output']
outputs['mlm_logits'] = masked_lm_layer(
sequence_output, masked_positions=masked_lm_positions)
classification_head_layers = classification_heads or []
for cls_head in classification_head_layers:
cls_outputs = cls_head(sequence_output)
if isinstance(cls_outputs, dict):
outputs.update(cls_outputs)
else:
outputs[cls_head.name] = cls_outputs
super(MobileBERTEdgeTPUPretrainer, self).__init__(
inputs=inputs,
outputs=outputs,
name=name,
**kwargs)
self._config = {
'encoder_network': encoder_network,
'mlm_activation': mlm_activation,
'mlm_initializer': mlm_initializer,
'classification_heads': classification_heads,
'customized_masked_lm': customized_masked_lm,
'name': name,
}
self.encoder_network = encoder_network
self.masked_lm = masked_lm_layer
self.classification_heads = classification_head_layers
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.encoder_network, masked_lm=self.masked_lm)
for head in self.classification_heads:
for key, item in head.checkpoint_items.items():
items['.'.join([head.name, key])] = item
return items
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
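# Example (added for exposition): given an encoder whose inputs are
# [input_word_ids, input_mask, input_type_ids], the pretrainer adds a
# `masked_lm_positions` input and returns a dict containing `mlm_logits`,
# `sequence_output`, `pooled_output` (when the encoder provides one), and one
# entry per classification head.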
| 5,076 | 37.172932 | 80 | py |
models | models-master/official/projects/edgetpu/nlp/modeling/edgetpu_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized MobileBERT-EdgeTPU layers.
There are two reasons to customize the layers instead of using the
well-defined layers from baseline MobileBERT:
1. A baseline layer introduces compiler sharding failures. For example, the
gather in OnDeviceEmbedding.
2. A baseline layer contains ops that need to have bounded input/output
ranges. For example, the softmax op.
"""
import string
import numpy as np
import tensorflow as tf
from official.nlp.modeling import layers
_CHR_IDX = string.ascii_lowercase
# This function is directly copied from the tf.keras.layers.MultiHeadAttention
# implementation.
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`,
that attention will be applied to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ''
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = ''.join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = '%s,%s->%s' % (source_notation, target_notation,
product_notation)
attn_scores_rank = len(product_notation)
combine_equation = '%s,%s->%s' % (product_notation, source_notation,
target_notation)
return dot_product_equation, combine_equation, attn_scores_rank
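# Worked example (added for exposition): for the common case where query, key
# and value are projected to (batch, seq, num_heads, channels), i.e. rank=4
# and attn_axes=(1,), the generated equations are
#   dot_product_equation = 'aecd,abcd->acbe'  # einsum(key, query) -> scores
#   combine_equation = 'acbe,aecd->abcd'      # einsum(scores, value) -> output
# with a=batch, b=query seq, c=num_heads, d=channels, e=key/value seq, so the
# attention scores have shape (batch, num_heads, query_seq, key_seq).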
@tf.keras.utils.register_keras_serializable(package='Text')
class EdgeTPUSoftmax(tf.keras.layers.Softmax):
"""EdgeTPU/Quantization friendly implementation for the SoftMax.
  When exporting a quantized model, use a mask value of -120.
  When exporting a float model and running inference with bf16 on device, use
  -10000.
"""
def __init__(self,
mask_value: int = -120,
**kwargs):
self._mask_value = mask_value
super(EdgeTPUSoftmax, self).__init__(**kwargs)
def get_config(self):
config = {
'mask_value': self._mask_value
}
base_config = super(EdgeTPUSoftmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, mask=None):
if mask is not None:
adder = (1.0 - tf.cast(mask, inputs.dtype)) * self._mask_value
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return tf.exp(inputs - tf.reduce_logsumexp(
inputs, axis=self.axis, keepdims=True))
else:
return tf.keras.backend.softmax(inputs, axis=self.axis[0])
return tf.keras.backend.softmax(inputs, axis=self.axis)
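# Illustrative sketch (added for exposition): the masked position receives an
# additive -120 before the softmax, so it gets ~zero probability while the
# logits stay in a range that quantizes well. Values are for demonstration.
def _example_edgetpu_softmax():
  softmax = EdgeTPUSoftmax()
  logits = tf.constant([[1.0, 2.0, 3.0]])
  mask = tf.constant([[1.0, 1.0, 0.0]])  # Mask out the last position.
  return softmax(logits, mask)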
@tf.keras.utils.register_keras_serializable(package='Text')
class EdgeTPUMultiHeadAttention(tf.keras.layers.MultiHeadAttention):
"""Quantization friendly implementation for the MultiHeadAttention."""
def _build_attention(self, rank):
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
customize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
self._dot_product_equation, self._combine_equation, attn_scores_rank = (
_build_attention_equation(
rank, attn_axes=self._attention_axes))
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = EdgeTPUSoftmax(axis=norm_axes)
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
class EdgetpuMobileBertTransformer(layers.MobileBertTransformer):
"""Quantization friendly MobileBertTransformer.
  Inherits from MobileBertTransformer but uses our customized MHA.
"""
def __init__(self, **kwargs):
super(EdgetpuMobileBertTransformer, self).__init__(**kwargs)
attention_head_size = int(
self.intra_bottleneck_size / self.num_attention_heads)
attention_layer = EdgeTPUMultiHeadAttention(
num_heads=self.num_attention_heads,
key_dim=attention_head_size,
value_dim=attention_head_size,
dropout=self.attention_probs_dropout_prob,
output_shape=self.intra_bottleneck_size,
kernel_initializer=self.initializer,
name='attention')
layer_norm = self.block_layers['attention'][1]
self.block_layers['attention'] = [attention_layer, layer_norm]
| 6,274 | 36.801205 | 79 | py |
models | models-master/official/projects/edgetpu/vision/serving/export_tflite_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for export_tflite."""
import itertools
import os
from absl.testing import parameterized
import tensorflow as tf
from official.core import exp_factory
from official.core import task_factory
from official.projects.edgetpu.vision.serving import export_util
def _build_experiment_model(experiment_type):
"""Builds model from experiment type configuration w/o loading checkpoint.
  To reduce test latency and avoid unexpected errors (e.g. checkpoint files
  that do not exist in the dedicated path), we skip checkpoint loading in the
  tests.
Args:
experiment_type: model type for the experiment.
Returns:
TF/Keras model for the task.
"""
params = exp_factory.get_exp_config(experiment_type)
if 'deeplabv3plus_mobilenet_edgetpuv2' in experiment_type:
params.task.model.backbone.mobilenet_edgetpu.pretrained_checkpoint_path = None
if 'autoseg_edgetpu' in experiment_type:
params.task.model.model_params.model_weights_path = None
params.validate()
params.lock()
task = task_factory.get_task(params.task)
return task.build_model()
def _build_model(config):
model = _build_experiment_model(config.model_name)
model_input = tf.keras.Input(
shape=(config.image_size, config.image_size, 3), batch_size=1)
model_output = export_util.finalize_serving(model(model_input), config)
model_for_inference = tf.keras.Model(model_input, model_output)
return model_for_inference
def _dump_tflite(model, config):
converter = tf.lite.TFLiteConverter.from_keras_model(model)
export_util.configure_tflite_converter(config, converter)
tflite_buffer = converter.convert()
tf.io.gfile.makedirs(os.path.dirname(config.output_dir))
tflite_path = os.path.join(config.output_dir, f'{config.model_name}.tflite')
tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_buffer)
return tflite_path
SEG_MODELS = [
'autoseg_edgetpu_xs',
]
FINALIZE_METHODS = [
'resize512,argmax,squeeze', 'resize256,argmax,resize512,squeeze',
'resize128,argmax,resize512,squeeze'
]
class ExportTfliteTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('mobilenet_edgetpu_v2_xs', 224),
('autoseg_edgetpu_xs', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32', 512),
)
def test_model_build_and_export_tflite(self, model_name, image_size):
tmp_dir = self.create_tempdir().full_path
config = export_util.ExportConfig(
model_name=model_name, image_size=image_size, output_dir=tmp_dir)
config.quantization_config.quantize = False
model = _build_model(config)
tflite_path = _dump_tflite(model, config)
self.assertTrue(tf.io.gfile.exists(tflite_path))
@parameterized.parameters(
('mobilenet_edgetpu_v2_xs', 224),
('autoseg_edgetpu_xs', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32', 512),
)
def test_model_build_and_export_saved_model(self, model_name, image_size):
tmp_dir = self.create_tempdir().full_path
config = export_util.ExportConfig(
model_name=model_name, image_size=image_size, output_dir=tmp_dir)
model = _build_model(config)
saved_model_path = os.path.join(config.output_dir, config.model_name)
model.save(saved_model_path)
self.assertTrue(tf.saved_model.contains_saved_model(saved_model_path))
@parameterized.parameters(itertools.product(SEG_MODELS, FINALIZE_METHODS))
def test_segmentation_finalize_methods(self, model_name, finalize_method):
tmp_dir = self.create_tempdir().full_path
config = export_util.ExportConfig(
model_name=model_name,
image_size=512,
output_dir=tmp_dir,
finalize_method=finalize_method.split(','))
config.quantization_config.quantize = False
model = _build_model(config)
model_input = tf.random.normal([1, config.image_size, config.image_size, 3])
self.assertEqual(
model(model_input).get_shape().as_list(),
[1, config.image_size, config.image_size])
if __name__ == '__main__':
tf.test.main()
| 4,725 | 35.921875 | 82 | py |
models | models-master/official/projects/edgetpu/vision/serving/export_util.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements serving with custom post processing."""
import dataclasses
from typing import List, Optional
import tensorflow as tf
import tensorflow_datasets as tfds
from official.core import exp_factory
from official.core import task_factory
from official.modeling.hyperparams import base_config
# pylint: disable=unused-import
from official.projects.edgetpu.vision.configs import mobilenet_edgetpu_config
from official.projects.edgetpu.vision.configs import semantic_segmentation_config
from official.projects.edgetpu.vision.configs import semantic_segmentation_searched_config
from official.projects.edgetpu.vision.modeling import custom_layers
from official.projects.edgetpu.vision.modeling.backbones import mobilenet_edgetpu
from official.projects.edgetpu.vision.tasks import image_classification
from official.projects.edgetpu.vision.tasks import semantic_segmentation as edgetpu_semantic_segmentation
from official.vision.tasks import semantic_segmentation
# pylint: enable=unused-import
MEAN_RGB = [127.5, 127.5, 127.5]
STDDEV_RGB = [127.5, 127.5, 127.5]
@dataclasses.dataclass
class QuantizationConfig(base_config.Config):
"""Configuration for post training quantization.
Attributes:
quantize: Whether to quantize model before exporting tflite.
quantize_less_restrictive: Allows non int8 based intermediate types,
automatic model output type.
use_experimental_quantizer: Enables experimental quantizer of
TFLiteConverter 2.0.
num_calibration_steps: Number of post-training quantization calibration
steps to run.
dataset_name: Name of the dataset to use for quantization calibration.
dataset_dir: Dataset location.
dataset_split: The dataset split (train, validation etc.) to use for
calibration.
"""
quantize: bool = False
quantize_less_restrictive: bool = False
use_experimental_quantizer: bool = True
dataset_name: Optional[str] = None
dataset_dir: Optional[str] = None
dataset_split: Optional[str] = None
num_calibration_steps: int = 100
@dataclasses.dataclass
class ExportConfig(base_config.Config):
"""Configuration for exporting models as tflite and saved_models.
Attributes:
model_name: One of the registered model names.
    output_layer: Layer name to take the output from. Can be used to take the
      output from an intermediate layer. None means use the original model
      output.
    ckpt_path: Path of the training checkpoint. If not provided, a tflite
      with random parameters is exported.
ckpt_format: Format of the checkpoint. tf_checkpoint is for ckpt files from
tf.train.Checkpoint.save() method. keras_checkpoint is for ckpt files from
keras.Model.save_weights() method
output_dir: Directory to output exported files.
image_size: Size of the input image. Ideally should be the same as the
image_size used in training config
    finalize_method: Additional layers to be added to customize the serving
      output. Supported are (none|(argmax|resize<?>)[,...]).
- none: do not add extra serving layers.
- argmax: adds argmax.
- squeeze: removes dimensions (except batch dim) of size 1 from the shape
of a tensor.
- resize<?> (for example resize512): adds resize bilinear|nn to <?> size.
For example: --finalize_method=resize128,argmax,resize512,squeeze will do
resize bilinear to 128x128, then argmax then resize nn to 512x512
"""
quantization_config: QuantizationConfig = dataclasses.field(
default_factory=QuantizationConfig
)
model_name: Optional[str] = None
output_layer: Optional[str] = None
ckpt_path: Optional[str] = None
ckpt_format: Optional[str] = 'tf_checkpoint'
output_dir: str = '/tmp/'
image_size: int = 224
finalize_method: Optional[List[str]] = None
def finalize_serving(model_output, export_config):
"""Adds extra layers based on the provided configuration."""
if isinstance(model_output, dict):
return {
key: finalize_serving(model_output[key], export_config)
for key in model_output
}
finalize_method = export_config.finalize_method
output_layer = model_output
if not finalize_method or finalize_method[0] == 'none':
return output_layer
discrete = False
for i in range(len(finalize_method)):
if finalize_method[i] == 'argmax':
discrete = True
is_argmax_last = (i + 1) == len(finalize_method)
if is_argmax_last:
output_layer = tf.argmax(
output_layer, axis=3, output_type=tf.dtypes.int32)
else:
        # TODO(tohaspiridonov): add first_match=False when cl/383951533 is
        # submitted.
output_layer = custom_layers.argmax(
output_layer, keepdims=True, epsilon=1e-3)
elif finalize_method[i] == 'squeeze':
output_layer = tf.squeeze(output_layer, axis=3)
else:
resize_params = finalize_method[i].split('resize')
if len(resize_params) != 2 or resize_params[0]:
raise ValueError('Cannot finalize with ' + finalize_method[i] + '.')
resize_to_size = int(resize_params[1])
if discrete:
output_layer = tf.image.resize(
output_layer, [resize_to_size, resize_to_size],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
else:
output_layer = tf.image.resize(
output_layer, [resize_to_size, resize_to_size],
method=tf.image.ResizeMethod.BILINEAR)
return output_layer
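# Illustrative sketch (added for exposition): a typical segmentation serving
# pipeline. The 33-class logits shape is an assumption for demonstration only.
def _example_finalize_serving():
  config = ExportConfig(
      image_size=512,
      finalize_method=['resize128', 'argmax', 'resize512', 'squeeze'])
  logits = tf.random.normal([1, 128, 128, 33])
  # Bilinear resize to 128x128, argmax over classes, nearest-neighbor resize
  # to 512x512, then squeeze to a (1, 512, 512) map of class indices.
  return finalize_serving(logits, config)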
def preprocess_for_quantization(image_data, image_size, crop_padding=32):
"""Crops to center of image with padding then scales, normalizes image_size.
Args:
image_data: A 3D Tensor representing the RGB image data. Image can be of
arbitrary height and width.
image_size: image height/width dimension.
crop_padding: the padding size to use when centering the crop.
Returns:
A decoded and cropped image Tensor. Image is normalized to [-1,1].
"""
shape = tf.shape(image_data)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
(image_size * 1.0 / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
image = tf.image.crop_to_bounding_box(
image_data,
offset_height=offset_height,
offset_width=offset_width,
target_height=padded_center_crop_size,
target_width=padded_center_crop_size)
image = tf.image.resize([image], [image_size, image_size],
method=tf.image.ResizeMethod.BILINEAR)[0]
image = tf.cast(image, tf.float32)
image -= tf.constant(MEAN_RGB)
image /= tf.constant(STDDEV_RGB)
return image
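# Worked example (added for exposition): for a 375x500 image with
# image_size=224 and crop_padding=32, padded_center_crop_size =
# int(224 / 256 * 375) = 328, so a 328x328 center crop is taken before the
# bilinear resize to 224x224.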
def representative_dataset_gen(export_config):
"""Gets a python generator of numpy arrays for the given dataset."""
quantization_config = export_config.quantization_config
dataset = tfds.builder(
quantization_config.dataset_name, try_gcs=True)
dataset.download_and_prepare()
data = dataset.as_dataset()[quantization_config.dataset_split]
iterator = data.as_numpy_iterator()
for _ in range(quantization_config.num_calibration_steps):
features = next(iterator)
image = features['image']
image = preprocess_for_quantization(image, export_config.image_size)
image = tf.reshape(
image, [1, export_config.image_size, export_config.image_size, 3])
yield [image]
def configure_tflite_converter(export_config, converter):
"""Common code for picking up quantization parameters."""
quantization_config = export_config.quantization_config
if quantization_config.quantize:
if (quantization_config.dataset_dir is
None) and (quantization_config.dataset_name is None):
raise ValueError(
'Must provide a representative dataset when quantizing the model.')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
if quantization_config.quantize_less_restrictive:
converter.target_spec.supported_ops += [
tf.lite.OpsSet.TFLITE_BUILTINS
]
converter.inference_output_type = tf.float32
def _representative_dataset_gen():
return representative_dataset_gen(export_config)
converter.representative_dataset = _representative_dataset_gen
def build_experiment_model(experiment_type):
"""Builds model from experiment type configuration."""
params = exp_factory.get_exp_config(experiment_type)
params.validate()
params.lock()
task = task_factory.get_task(params.task)
return task.build_model()
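# Illustrative sketch (added for exposition): end-to-end float TFLite export.
# `mobilenet_edgetpu_v2_xs` is a registered experiment name used in the tests;
# the remaining values are assumptions for demonstration only.
def _example_export_flow():
  config = ExportConfig(model_name='mobilenet_edgetpu_v2_xs', image_size=224)
  config.quantization_config.quantize = False  # Float export: no calibration.
  model = build_experiment_model(config.model_name)
  converter = tf.lite.TFLiteConverter.from_keras_model(model)
  configure_tflite_converter(config, converter)
  return converter.convert()  # Serialized flatbuffer bytes.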
| 9,508 | 38.786611 | 105 | py |
models | models-master/official/projects/edgetpu/vision/serving/export_tflite.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Export model (float or quantized tflite, and saved model) from a trained checkpoint.
Example:
To export dummy quantized model:
export_tflite --model_name=mobilenet_edgetpu_v2_s --output_dir=/tmp --quantize
Using a training checkpoint:
export_tflite --model_name=mobilenet_edgetpu_v2_s \
--ckpt_path=/path/to/training/checkpoint \
--dataset_dir=/path/to/your/dataset --output_dir=/tmp --quantize
Exporting w/o final squeeze layer:
export_tflite --model_name=mobilenet_edgetpu_v2_xs \
--output_layer=probs \
--dataset_dir=/path/to/your/dataset --output_dir=/tmp --quantize
"""
# pylint: enable=line-too-long
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.serving import export_util
flags.DEFINE_string('model_name', None,
'Used to build model using experiment config factory.')
flags.DEFINE_string(
'ckpt_path', None, 'Path to the checkpoint. '
    'If not provided, a tflite with random parameters is exported.')
flags.DEFINE_enum(
'ckpt_format', 'tf_checkpoint',
['tf_checkpoint', 'keras_checkpoint'],
    'tf_checkpoint is for ckpt files from the tf.train.Checkpoint.save() '
    'method; keras_checkpoint is for ckpt files from the '
    'keras.Model.save_weights() method.')
flags.DEFINE_bool(
'export_keras_model', False,
    'Export SavedModel format: if False, export a TF SavedModel with the '
    'tf.saved_model API; if True, export a Keras SavedModel with the '
    'tf.keras.Model API.')
flags.DEFINE_string('output_dir', None, 'Directory to output exported files.')
flags.DEFINE_integer(
'image_size', 224,
'Size of the input image. Ideally should be the same as the image_size used '
'in training config.')
flags.DEFINE_bool(
'fix_batch_size', True, 'Whether to export model with fixed batch size.')
flags.DEFINE_string(
'output_layer', None,
'Layer name to take the output from. Can be used to take the output from '
'an intermediate layer. None means use the original model output.')
flags.DEFINE_string(
'finalize_method', 'none',
'Additional layers to be added to customize serving output.\n'
'Supported are (none|(argmax|resize<?>)[,...]).\n'
'- none: do not add extra serving layers.\n'
'- argmax: adds argmax.\n'
'- squeeze: removes dimensions of size 1 from the shape of a tensor.\n'
'- resize<?> (for example resize512): adds resize bilinear|nn to <?> size.'
'For example: --finalize_method=resize128,argmax,resize512,squeeze\n'
'Will do resize bilinear to 128x128, then argmax then resize nn to 512x512')
# Quantization related parameters
flags.DEFINE_bool(
'quantize', False,
'Quantize model before exporting tflite. Note that only the exported '
    'TFLite is quantized, not the SavedModel.')
flags.DEFINE_bool('use_experimental_quantizer', True, 'Enables experimental '
'quantizer of TFLiteConverter 2.0.')
flags.DEFINE_bool(
'quantize_less_restrictive', False,
'Allows non int8 based intermediate types, automatic model output type.')
flags.DEFINE_integer(
'num_calibration_steps', 100,
'Number of post-training quantization calibration steps to run.')
flags.DEFINE_string('dataset_name', 'imagenet2012',
'Name of the dataset to use for quantization calibration.')
flags.DEFINE_string('dataset_dir', None, 'Dataset location.')
flags.DEFINE_string(
'dataset_split', 'train',
'The dataset split (train, validation etc.) to use for calibration.')
FLAGS = flags.FLAGS
def get_export_config_from_flags():
"""Creates ExportConfig from cmd line flags."""
quantization_config = export_util.QuantizationConfig(
quantize=FLAGS.quantize,
quantize_less_restrictive=FLAGS.quantize_less_restrictive,
use_experimental_quantizer=FLAGS.use_experimental_quantizer,
num_calibration_steps=FLAGS.num_calibration_steps,
dataset_name=FLAGS.dataset_name,
dataset_dir=FLAGS.dataset_dir,
dataset_split=FLAGS.dataset_split)
export_config = export_util.ExportConfig(
model_name=FLAGS.model_name,
output_layer=FLAGS.output_layer,
ckpt_path=FLAGS.ckpt_path,
ckpt_format=FLAGS.ckpt_format,
output_dir=FLAGS.output_dir,
image_size=FLAGS.image_size,
finalize_method=FLAGS.finalize_method.lower().split(','),
quantization_config=quantization_config)
return export_config
def run_export():
"""Exports TFLite with PTQ."""
export_config = get_export_config_from_flags()
model = export_util.build_experiment_model(
experiment_type=export_config.model_name)
if export_config.ckpt_path:
logging.info('Loading checkpoint from %s', FLAGS.ckpt_path)
common_modules.load_weights(
model,
export_config.ckpt_path,
checkpoint_format=export_config.ckpt_format)
else:
logging.info('No checkpoint provided. Using randomly initialized weights.')
if export_config.output_layer is not None:
all_layer_names = {l.name for l in model.layers}
if export_config.output_layer not in all_layer_names:
model.summary()
logging.info(
'Cannot find the layer %s in the model. See the above summary to '
          'choose an output layer.', export_config.output_layer)
return
output_layer = model.get_layer(export_config.output_layer)
model = tf.keras.Model(model.input, output_layer.output)
batch_size = 1 if FLAGS.fix_batch_size else None
model_input = tf.keras.Input(
shape=(export_config.image_size, export_config.image_size, 3),
batch_size=batch_size)
model_output = export_util.finalize_serving(model(model_input), export_config)
model_for_inference = tf.keras.Model(model_input, model_output)
# Convert to tflite. Quantize if quantization parameters are specified.
converter = tf.lite.TFLiteConverter.from_keras_model(model_for_inference)
export_util.configure_tflite_converter(export_config, converter)
tflite_buffer = converter.convert()
# Make sure the base directory exists and write tflite.
tf.io.gfile.makedirs(os.path.dirname(export_config.output_dir))
tflite_path = os.path.join(export_config.output_dir,
f'{export_config.model_name}.tflite')
tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_buffer)
print('TfLite model exported to {}'.format(tflite_path))
# Export saved model.
saved_model_path = os.path.join(export_config.output_dir,
export_config.model_name)
if FLAGS.export_keras_model:
model_for_inference.save(saved_model_path)
else:
tf.saved_model.save(model_for_inference, saved_model_path)
print('SavedModel exported to {}'.format(saved_model_path))
def main(_):
run_export()
if __name__ == '__main__':
flags.mark_flag_as_required('model_name')
app.run(main)
| 7,532 | 39.069149 | 88 | py |
models | models-master/official/projects/edgetpu/vision/modeling/custom_layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for custom_layers."""
import itertools
from absl.testing import parameterized
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import custom_layers
GROUPS = [2, 4]
INPUT_CHANNEL = [8, 16]
OUTPUT_CHANNEL = [8, 16]
USE_BATCH_NORM = [True, False]
ACTIVATION = ['relu', 'linear']
BATCH_NORM_LAYER = tf.keras.layers.BatchNormalization
# 2 functionally identical group conv implementations.
GROUP_CONV_IMPL = {
'layer': custom_layers.GroupConv2D,
'model': custom_layers.GroupConv2DKerasModel
}
def _get_random_inputs(input_shape):
return tf.random.uniform(shape=input_shape)
class GroupConv2DTest(tf.test.TestCase, parameterized.TestCase):
# Test for combinations of groups, input_channel, output_channel, and
# whether to use batch_norm
@parameterized.parameters(
itertools.product(GROUPS, INPUT_CHANNEL, OUTPUT_CHANNEL, USE_BATCH_NORM))
def test_construction(self, groups, input_channel, output_channel,
use_batch_norm):
batch_norm_layer = BATCH_NORM_LAYER if use_batch_norm else None
l = custom_layers.GroupConv2D(
output_channel,
3,
groups=groups,
use_bias=True,
batch_norm_layer=batch_norm_layer)
    inputs = _get_random_inputs(input_shape=(1, 4, 4, input_channel))
_ = l(inputs)
# kernel and bias for each group. When using batch norm, 2 additional
# trainable weights per group for batchnorm layers: gamma and beta.
expected_num_trainable_weights = groups * (2 + 2 * use_batch_norm)
self.assertLen(l.trainable_weights, expected_num_trainable_weights)
@parameterized.parameters(
itertools.product(GROUPS, INPUT_CHANNEL, OUTPUT_CHANNEL))
def test_kernel_shapes(self, groups, input_channel, output_channel):
l = custom_layers.GroupConv2D(
output_channel, 3, groups=groups, use_bias=False)
_ = l(_get_random_inputs(input_shape=(1, 32, 32, input_channel)))
expected_kernel_shapes = [(3, 3, int(input_channel / groups),
int(output_channel / groups))
for _ in range(groups)]
kernel_shapes = [
l.trainable_weights[i].get_shape()
for i in range(len(l.trainable_weights))
]
self.assertListEqual(kernel_shapes, expected_kernel_shapes)
@parameterized.parameters(
itertools.product(GROUPS, INPUT_CHANNEL, OUTPUT_CHANNEL))
def test_output_shapes(self, groups, input_channel, output_channel):
l = custom_layers.GroupConv2D(
output_channel, 3, groups=groups, use_bias=False, padding='same')
outputs = l(_get_random_inputs(input_shape=[2, 32, 32, input_channel]))
self.assertListEqual(outputs.get_shape().as_list(),
[2, 32, 32, output_channel])
@parameterized.parameters(
itertools.product(GROUPS, USE_BATCH_NORM, ACTIVATION))
def test_serialization_deserialization(self, groups, use_batch_norm,
activation):
batch_norm_layer = BATCH_NORM_LAYER if use_batch_norm else None
l = custom_layers.GroupConv2D(
filters=8,
kernel_size=1,
groups=groups,
use_bias=False,
padding='same',
batch_norm_layer=batch_norm_layer,
activation=activation)
config = l.get_config()
# New layer from config
new_l = custom_layers.GroupConv2D.from_config(config)
# Copy the weights too.
l.build(input_shape=(1, 1, 4))
new_l.build(input_shape=(1, 1, 4))
new_l.set_weights(l.get_weights())
inputs = _get_random_inputs((1, 1, 1, 4))
self.assertNotEqual(l, new_l)
self.assertAllEqual(l(inputs), new_l(inputs))
@parameterized.parameters(
itertools.product(GROUPS, INPUT_CHANNEL, OUTPUT_CHANNEL, USE_BATCH_NORM,
ACTIVATION))
def test_equivalence(self, groups, input_channel, output_channel,
use_batch_norm, activation):
batch_norm_layer = BATCH_NORM_LAYER if use_batch_norm else None
kwargs = dict(
filters=output_channel,
groups=groups,
kernel_size=1,
use_bias=False,
batch_norm_layer=batch_norm_layer,
activation=activation)
gc_layer = tf.keras.Sequential([custom_layers.GroupConv2D(**kwargs)])
gc_model = custom_layers.GroupConv2DKerasModel(**kwargs)
gc_layer.build(input_shape=(None, 3, 3, input_channel))
gc_model.build(input_shape=(None, 3, 3, input_channel))
inputs = _get_random_inputs((2, 3, 3, input_channel))
gc_layer.set_weights(gc_model.get_weights())
self.assertAllEqual(gc_layer(inputs), gc_model(inputs))
@parameterized.parameters(('layer', 1, 4), ('layer', 4, 4), ('model', 1, 4),
('model', 4, 4))
def test_invalid_groups_raises_value_error(self, gc_type, groups,
output_channel):
with self.assertRaisesRegex(ValueError, r'^(Number of groups)'):
_ = GROUP_CONV_IMPL[gc_type](
filters=output_channel, groups=groups, kernel_size=3)
@parameterized.parameters(('layer', 3, 4), ('layer', 4, 6), ('model', 3, 4),
('model', 4, 6))
def test_non_group_divisible_raises_value_error(self, gc_type, groups,
input_channel):
with self.assertRaisesRegex(ValueError, r'^(Number of input channels)'):
l = GROUP_CONV_IMPL[gc_type](
filters=groups * 4, groups=groups, kernel_size=3)
l.build(input_shape=(4, 4, input_channel))
@parameterized.parameters(('layer'), ('model'))
def test_non_supported_data_format_raises_value_error(self, gc_type):
with self.assertRaisesRegex(ValueError, r'^(.*(channels_last).*)'):
_ = GROUP_CONV_IMPL[gc_type](
filters=4, groups=2, kernel_size=1, data_format='channels_first')
@parameterized.parameters(('layer'), ('model'))
def test_invalid_batch_norm_raises_value_error(self, gc_type):
def my_batch_norm(x):
return x**2
with self.assertRaisesRegex(ValueError, r'^(.*(not a class).*)'):
_ = GROUP_CONV_IMPL[gc_type](
filters=4, groups=2, kernel_size=1, batch_norm_layer=my_batch_norm)
@parameterized.parameters(('layer'), ('model'))
def test_invalid_padding_raises_value_error(self, gc_type):
with self.assertRaisesRegex(ValueError, r'^(.*(same, or valid).*)'):
_ = GROUP_CONV_IMPL[gc_type](
filters=4, groups=2, kernel_size=1, padding='causal')
class ArgmaxTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(([16, 32, 64], tf.dtypes.float32, tf.dtypes.int32),
([255, 19], tf.dtypes.int32, tf.dtypes.int64))
def test_reference_match(self, shape, input_type, output_type):
random_inputs = tf.random.uniform(shape=shape, maxval=10, dtype=input_type)
for axis in range(-len(shape) + 1, len(shape)):
control_output = tf.math.argmax(
random_inputs, axis=axis, output_type=output_type)
test_output = custom_layers.argmax(
random_inputs, axis=axis, output_type=output_type)
self.assertAllEqual(control_output, test_output)
| 7,740 | 40.395722 | 79 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for MobilenetEdgeTPUV2 model's building blocks."""
import dataclasses
import math
from typing import Any, Dict, List, Optional, Tuple, Union
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.modeling.hyperparams import oneof
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import custom_layers
InitializerType = Optional[Union[str, tf.keras.initializers.Initializer]]
@dataclasses.dataclass
class BlockType(oneof.OneOfConfig):
"""Block OP types representing IBN version."""
type: str = 'ibn_dw'
skip: str = 'skip'
ibn_dw: str = 'ibn_dw'
ibn_fused: str = 'ibn_fused'
ibn_grouped: str = 'ibn_grouped'
ibn_fused_grouped: str = 'ibn_fused_grouped'
@dataclasses.dataclass
class BlockSearchConfig(base_config.Config):
"""Config for searchable BlockConfig parameters."""
op_type: BlockType = dataclasses.field(default_factory=BlockType)
kernel_size: Optional[int] = None
expand_ratio: Optional[int] = None
stride: Optional[int] = None
group_size: Optional[int] = None
@dataclasses.dataclass
class BlockConfig(base_config.Config):
"""Full config for a single MB Conv Block."""
input_filters: int = 0
output_filters: int = 0
kernel_size: int = 3
num_repeat: int = 1
expand_ratio: int = 1
strides: Tuple[int, int] = (1, 1)
se_ratio: Optional[float] = None
id_skip: bool = True
fused_expand: bool = False
fused_project: bool = False
conv_type: str = 'depthwise'
group_size: Optional[int] = None
@classmethod
def from_search_config(cls,
input_filters: int,
output_filters: int,
block_search_config: BlockSearchConfig,
num_repeat: int = 1,
se_ratio: Optional[float] = None,
id_skip: bool = True) -> 'BlockConfig':
"""Creates BlockConfig from the given parameters."""
block_op_type = block_search_config.op_type
if block_op_type.type == BlockType.skip:
raise ValueError('Received skip type within block creation.')
elif block_op_type.type == BlockType.ibn_dw:
fused_expand = False
fused_project = False
conv_type = 'depthwise'
elif block_op_type.type == BlockType.ibn_fused:
fused_expand = True
fused_project = False
conv_type = 'full'
elif block_op_type.type == BlockType.ibn_fused_grouped:
fused_expand = True
fused_project = False
conv_type = 'group'
elif block_op_type.type == BlockType.ibn_grouped:
fused_expand = False
fused_project = False
conv_type = 'group'
else:
raise NotImplementedError(f'Unsupported IBN type {block_op_type.type}.')
return cls.from_args(
input_filters=input_filters,
output_filters=output_filters,
kernel_size=block_search_config.kernel_size,
num_repeat=num_repeat,
expand_ratio=block_search_config.expand_ratio,
strides=(block_search_config.stride, block_search_config.stride),
se_ratio=se_ratio,
id_skip=id_skip,
fused_expand=fused_expand,
fused_project=fused_project,
conv_type=conv_type,
group_size=block_search_config.group_size)
@dataclasses.dataclass
class BlockGroupConfig(base_config.Config):
"""Config for group of blocks that share the same filter size."""
blocks: List[BlockSearchConfig] = dataclasses.field(default_factory=list)
filters: int = 64
def _default_mobilenet_edgetpu_v2_topology():
return [
# Block Group 0
BlockGroupConfig(
blocks=[
# BlockSearchConfig: op_type, kernel_size, expand_ratio, stride
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 1, 1),
],
filters=24),
# Block Group 1
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 8, 2),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused_grouped'), 3, 4, 1),
],
filters=48),
# Block Group 2
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 8, 2),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused_grouped'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused_grouped'), 3, 4, 1),
],
filters=64),
# Block Group 3
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 8, 2),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
],
filters=128),
# Block Group 4
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 8, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
],
filters=160),
# Block Group 5
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 8, 2),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
],
filters=192),
# Block Group 6
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 8, 1),
],
filters=256),
]
@dataclasses.dataclass
class TopologyConfig(base_config.Config):
"""Config for model topology as a collection of BlockGroupConfigs."""
block_groups: List[BlockGroupConfig] = dataclasses.field(
default_factory=_default_mobilenet_edgetpu_v2_topology)
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""Default Config for MobilenetEdgeTPUV2."""
width_coefficient: float = 1.0
depth_coefficient: float = 1.0
resolution: Union[int, Tuple[int, int]] = 224
dropout_rate: float = 0.1
stem_base_filters: int = 64
stem_kernel_size: int = 5
top_base_filters: int = 1280
conv_kernel_initializer: InitializerType = None
dense_kernel_initializer: InitializerType = None
blocks: Tuple[BlockConfig, ...] = (
      # (input_filters, output_filters, kernel_size, num_repeat,
      #  expand_ratio, strides, se_ratio, id_skip, fused_expand, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(
stem_base_filters, 24, 3, 1, 1, (1, 1), conv_type='full'),
BlockConfig.from_args(
24, 48, 3, 1, 8, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(
48, 48, 3, 1, 4, (1, 1), fused_expand=True, conv_type='group'),
BlockConfig.from_args(
48, 64, 3, 1, 8, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(
64, 64, 3, 1, 4, (1, 1), fused_expand=True, conv_type='group'),
BlockConfig.from_args(
64, 64, 3, 1, 4, (1, 1), fused_expand=True, conv_type='full'),
BlockConfig.from_args(
64, 64, 3, 1, 4, (1, 1), fused_expand=True, conv_type='group'),
BlockConfig.from_args(
64, 128, 3, 1, 8, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(128, 128, 3, 3, 4, (1, 1)),
BlockConfig.from_args(128, 160, 3, 1, 8, (1, 1)),
BlockConfig.from_args(160, 160, 3, 3, 4, (1, 1)),
BlockConfig.from_args(160, 192, 5, 1, 8, (2, 2)),
BlockConfig.from_args(192, 192, 5, 3, 4, (1, 1)),
BlockConfig.from_args(192, 256, 5, 1, 8, (1, 1)),
# pylint: enable=bad-whitespace
)
activation: str = 'relu'
batch_norm: str = 'default'
bn_momentum: float = 0.99
bn_epsilon: float = 1e-3
# While the original implementation used a weight decay of 1e-5,
# tf.nn.l2_loss divides it by 2, so we halve this to compensate in Keras
weight_decay: float = 5e-6
drop_connect_rate: float = 0.1
depth_divisor: int = 8
min_depth: Optional[int] = None
# No Squeeze/Excite for MobilenetEdgeTPUV2
use_se: bool = False
input_channels: int = 3
num_classes: int = 1001
model_name: str = 'mobilenet_edgetpu_v2'
rescale_input: bool = False
data_format: str = 'channels_last'
dtype: str = 'float32'
# The number of filters in each group. HW arch dependent.
group_base_size: int = 64
backbone_only: bool = False
features_as_dict: bool = False
def mobilenet_edgetpu_v2_base(
width_coefficient: float = 1.0,
depth_coefficient: float = 1.0,
stem_base_filters: int = 64,
stem_kernel_size: int = 5,
top_base_filters: int = 1280,
group_base_size: int = 64,
dropout_rate: float = 0.2,
drop_connect_rate: float = 0.1,
filter_size_overrides: Optional[Dict[int, int]] = None,
block_op_overrides: Optional[Dict[int, Dict[int, Dict[str, Any]]]] = None,
block_group_overrides: Optional[Dict[int, Dict[str, Any]]] = None,
topology: Optional[TopologyConfig] = None):
"""Creates MobilenetEdgeTPUV2 ModelConfig based on tuning parameters."""
config = ModelConfig()
param_overrides = {
'width_coefficient': width_coefficient,
'depth_coefficient': depth_coefficient,
'stem_base_filters': stem_base_filters,
'stem_kernel_size': stem_kernel_size,
'top_base_filters': top_base_filters,
'group_base_size': group_base_size,
'dropout_rate': dropout_rate,
'drop_connect_rate': drop_connect_rate
}
config = config.replace(**param_overrides)
topology_config = TopologyConfig() if topology is None else topology
if filter_size_overrides:
for group_id in filter_size_overrides:
topology_config.block_groups[group_id].filters = filter_size_overrides[
group_id]
if block_op_overrides:
for group_id in block_op_overrides:
for block_id in block_op_overrides[group_id]:
replaced_block = topology_config.block_groups[group_id].blocks[
block_id].replace(**block_op_overrides[group_id][block_id])
topology_config.block_groups[group_id].blocks[block_id] = replaced_block
if block_group_overrides:
for group_id in block_group_overrides:
replaced_group = topology_config.block_groups[group_id].replace(
**block_group_overrides[group_id])
topology_config.block_groups[group_id] = replaced_group
blocks = ()
input_filters = stem_base_filters
for group in topology_config.block_groups:
for block_search in group.blocks:
      if block_search.op_type.type != BlockType.skip:
block = BlockConfig.from_search_config(
input_filters=input_filters,
output_filters=group.filters,
block_search_config=block_search)
blocks += (block,)
# Set input filters for the next block
input_filters = group.filters
config = config.replace(blocks=blocks)
return config
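# Example (illustrative, not from the original file): the named variants below
# call this builder with `filter_size_overrides`; a minimal sketch looks like:
#   config = mobilenet_edgetpu_v2_base(filter_size_overrides={0: 16, 1: 32})
# which shrinks the first two block groups while keeping the default topology.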
def autoseg_edgetpu_backbone_base(
width_coefficient: float = 1.0,
depth_coefficient: float = 1.0,
stem_base_filters: int = 64,
stem_kernel_size: int = 5,
top_base_filters: int = 1280,
group_base_size: int = 64,
dropout_rate: float = 0.2,
drop_connect_rate: float = 0.1,
blocks_overrides: Optional[Tuple[BlockConfig, ...]] = None):
"""Creates a edgetpu ModelConfig based on search on segmentation."""
config = ModelConfig()
config.depth_divisor = 4
param_overrides = {
'width_coefficient': width_coefficient,
'depth_coefficient': depth_coefficient,
'stem_base_filters': stem_base_filters,
'stem_kernel_size': stem_kernel_size,
'top_base_filters': top_base_filters,
'group_base_size': group_base_size,
'dropout_rate': dropout_rate,
'drop_connect_rate': drop_connect_rate,
}
if blocks_overrides:
param_overrides['blocks'] = blocks_overrides
config = config.replace(**param_overrides)
return config
def autoseg_edgetpu_backbone_s() -> ModelConfig:
"""AutoML searched model with 2.5ms target simulated latency."""
stem_base_filters = 32
stem_kernel_size = 3
top_base_filters = 1280
blocks = (
      # (input_filters, output_filters, kernel_size, num_repeat,
      #  expand_ratio, strides, se_ratio, id_skip, fused_expand, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(
stem_base_filters,
12,
3,
1,
1, (1, 1),
fused_expand=True,
conv_type='full'),
BlockConfig.from_args(
12, 36, 3, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(36, 18, 5, 1, 3, (1, 1)),
BlockConfig.from_args(
18, 60, 5, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(60, 60, 3, 1, 3, (1, 1)),
BlockConfig.from_args(60, 120, 5, 1, 6, (2, 2)),
BlockConfig.from_args(120, 120, 3, 1, 3, (1, 1)),
BlockConfig.from_args(120, 120, 5, 1, 6, (1, 1)),
BlockConfig.from_args(120, 112, 3, 1, 6, (1, 1)),
BlockConfig.from_args(112, 112, 5, 2, 6, (1, 1)),
BlockConfig.from_args(112, 112, 5, 1, 1, (2, 2), id_skip=False),
BlockConfig.from_args(
112, 192, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(192, 192, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
192, 96, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 96, 5, 1, 3, (1, 1)),
BlockConfig.from_args(96, 96, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
96, 192, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(192, 192, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
192, 160, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
# pylint: enable=bad-whitespace
)
return autoseg_edgetpu_backbone_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
blocks_overrides=blocks,
dropout_rate=0.2,
drop_connect_rate=0.2)
def autoseg_edgetpu_backbone_xs() -> ModelConfig:
"""AutoML searched model with 2ms target simulated latency."""
stem_base_filters = 32
stem_kernel_size = 3
top_base_filters = 1280
blocks = (
      # (input_filters, output_filters, kernel_size, num_repeat,
      #  expand_ratio, strides, se_ratio, id_skip, fused_expand, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(
stem_base_filters,
12,
3,
1,
1, (1, 1),
fused_expand=True,
conv_type='full'),
BlockConfig.from_args(
12, 24, 3, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(24, 24, 3, 1, 3, (1, 1)),
BlockConfig.from_args(
24, 60, 3, 1, 3, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(60, 40, 3, 1, 6, (1, 1)),
BlockConfig.from_args(40, 40, 5, 1, 3, (2, 2)),
BlockConfig.from_args(40, 40, 3, 1, 6, (1, 1)),
BlockConfig.from_args(
40, 120, 3, 1, 6, (1, 1), fused_expand=True, conv_type='full'),
BlockConfig.from_args(120, 168, 3, 1, 6, (1, 1)),
BlockConfig.from_args(168, 84, 5, 1, 6, (1, 1)),
BlockConfig.from_args(84, 84, 5, 1, 3, (1, 1)),
BlockConfig.from_args(84, 84, 5, 1, 1, (2, 2), id_skip=False),
BlockConfig.from_args(
84, 288, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(288, 288, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
288, 96, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 96, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
96, 96, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 96, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
96, 96, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 480, 5, 1, 3, (1, 1)),
# pylint: enable=bad-whitespace
)
return autoseg_edgetpu_backbone_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
blocks_overrides=blocks,
dropout_rate=0.2,
drop_connect_rate=0.2)
def autoseg_edgetpu_backbone_m() -> ModelConfig:
"""AutoML searched model with 3ms target simulated latency."""
stem_base_filters = 32
stem_kernel_size = 3
top_base_filters = 1280
blocks = (
      # (input_filters, output_filters, kernel_size, num_repeat,
      #  expand_ratio, strides, se_ratio, id_skip, fused_expand, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(stem_base_filters, 16, 5, 1, 1, (1, 1)),
BlockConfig.from_args(
16, 36, 3, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(36, 36, 3, 1, 3, (1, 1)),
BlockConfig.from_args(
36, 60, 3, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(60, 60, 3, 1, 6, (1, 1)),
BlockConfig.from_args(
60, 120, 5, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(120, 120, 5, 1, 6, (1, 1)),
BlockConfig.from_args(
120, 80, 3, 1, 6, (1, 1), fused_expand=True, conv_type='full'),
BlockConfig.from_args(80, 168, 3, 1, 6, (1, 1)),
BlockConfig.from_args(168, 168, 5, 1, 6, (1, 1)),
BlockConfig.from_args(168, 168, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
168, 168, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(168, 168, 3, 1, 1, (2, 2), id_skip=False),
BlockConfig.from_args(
168, 192, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(192, 192, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
192, 288, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(288, 288, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
288, 96, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 96, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
96, 192, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(192, 192, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
192, 320, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
# pylint: enable=bad-whitespace
)
return autoseg_edgetpu_backbone_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
blocks_overrides=blocks,
dropout_rate=0.3,
drop_connect_rate=0.3)
def mobilenet_edgetpu_v2_tiny() -> ModelConfig:
"""MobilenetEdgeTPUV2 tiny model config."""
stem_base_filters = 32
stem_kernel_size = 5
top_base_filters = 1280
filter_sizes = [16, 32, 48, 80, 112, 160, 192]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
block_op_overrides = {
2: {
0: {'op_type': BlockType.from_args('ibn_fused_grouped')},
2: {'op_type': BlockType.from_args('ibn_fused_grouped')},
},
3: {
0: {'op_type': BlockType.from_args('ibn_fused_grouped')},
}
}
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
filter_size_overrides=filter_size_overrides,
block_op_overrides=block_op_overrides,
dropout_rate=0.05,
drop_connect_rate=0.05)
def mobilenet_edgetpu_v2_xs() -> ModelConfig:
"""MobilenetEdgeTPUV2 extra small model config."""
stem_base_filters = 32
stem_kernel_size = 5
top_base_filters = 1280
filter_sizes = [16, 32, 48, 96, 144, 160, 192]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
filter_size_overrides=filter_size_overrides,
dropout_rate=0.05,
drop_connect_rate=0.05)
def mobilenet_edgetpu_v2_s():
"""MobilenetEdgeTPUV2 small model config."""
stem_base_filters = 64
stem_kernel_size = 5
top_base_filters = 1280
filter_sizes = [24, 48, 64, 128, 160, 192, 256]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
filter_size_overrides=filter_size_overrides)
def mobilenet_edgetpu_v2_m():
"""MobilenetEdgeTPUV2 medium model config."""
stem_base_filters = 64
stem_kernel_size = 5
top_base_filters = 1344
filter_sizes = [32, 64, 80, 160, 192, 240, 320]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
filter_size_overrides=filter_size_overrides)
def mobilenet_edgetpu_v2_l():
"""MobilenetEdgeTPUV2 large model config."""
stem_base_filters = 64
stem_kernel_size = 7
top_base_filters = 1408
filter_sizes = [32, 64, 96, 192, 240, 256, 384]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
group_base_size = 128
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
group_base_size=group_base_size,
filter_size_overrides=filter_size_overrides)
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# Note: this is a truncated normal distribution
'distribution': 'normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1 / 3.0,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
def round_filters(filters: int,
config: ModelConfig) -> int:
"""Round number of filters based on width coefficient."""
width_coefficient = config.width_coefficient
min_depth = config.min_depth
divisor = config.depth_divisor
orig_filters = filters
if not width_coefficient:
return filters
filters *= width_coefficient
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
logging.info('round_filter input=%s output=%s', orig_filters, new_filters)
return int(new_filters)
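# Worked example (illustrative): with width_coefficient=0.75 and the default
# depth_divisor=8, filters=24 scales to 18.0 and rounds down to 16; since
# 16 < 0.9 * 18 = 16.2, one divisor is added back, so the result is 24.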
def round_repeats(repeats: int, depth_coefficient: float) -> int:
"""Round number of repeats based on depth coefficient."""
return int(math.ceil(depth_coefficient * repeats))
def groupconv2d_block(conv_filters: Optional[int],
config: ModelConfig,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
group_size: Optional[int] = None,
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
name: Optional[str] = None) -> tf.keras.layers.Layer:
"""2D group convolution with batchnorm and activation."""
batch_norm = common_modules.get_batch_norm(config.batch_norm)
bn_momentum = config.bn_momentum
bn_epsilon = config.bn_epsilon
data_format = tf.keras.backend.image_data_format()
weight_decay = config.weight_decay
if group_size is None:
group_size = config.group_base_size
name = name or ''
# Compute the # of groups
if conv_filters % group_size != 0:
raise ValueError(f'Number of filters: {conv_filters} is not divisible by '
f'size of the groups: {group_size}')
groups = int(conv_filters / group_size)
# Collect args based on what kind of groupconv2d block is desired
init_kwargs = {
'kernel_size': kernel_size,
'strides': strides,
'use_bias': use_bias,
'padding': 'same',
'name': name + '_groupconv2d',
'kernel_regularizer': tf.keras.regularizers.l2(weight_decay),
'bias_regularizer': tf.keras.regularizers.l2(weight_decay),
'filters': conv_filters,
'groups': groups,
'batch_norm_layer': batch_norm if use_batch_norm else None,
'bn_epsilon': bn_epsilon,
'bn_momentum': bn_momentum,
'activation': activation,
'data_format': data_format,
}
return custom_layers.GroupConv2D(**init_kwargs)
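# Example (a minimal sketch; the layer name is hypothetical): a 3x3 group
# convolution with 128 output filters in groups of 64 channels, i.e. groups=2:
#   layer = groupconv2d_block(128, ModelConfig(), kernel_size=(3, 3),
#                             group_size=64, activation='relu', name='gconv')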
def conv2d_block_as_layers(
conv_filters: Optional[int],
config: ModelConfig,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
depthwise: bool = False,
kernel_initializer: InitializerType = None,
name: Optional[str] = None) -> List[tf.keras.layers.Layer]:
"""A conv2d followed by batch norm and an activation."""
batch_norm = common_modules.get_batch_norm(config.batch_norm)
bn_momentum = config.bn_momentum
bn_epsilon = config.bn_epsilon
data_format = tf.keras.backend.image_data_format()
weight_decay = config.weight_decay
name = name or ''
# Collect args based on what kind of conv2d block is desired
init_kwargs = {
'kernel_size': kernel_size,
'strides': strides,
'use_bias': use_bias,
'padding': 'same',
'name': name + '_conv2d',
'kernel_regularizer': tf.keras.regularizers.l2(weight_decay),
'bias_regularizer': tf.keras.regularizers.l2(weight_decay),
}
sequential_layers: List[tf.keras.layers.Layer] = []
if depthwise:
conv2d = tf.keras.layers.DepthwiseConv2D
init_kwargs.update({'depthwise_initializer': kernel_initializer})
else:
conv2d = tf.keras.layers.Conv2D
init_kwargs.update({
'filters': conv_filters,
'kernel_initializer': kernel_initializer
})
sequential_layers.append(conv2d(**init_kwargs))
if use_batch_norm:
bn_axis = 1 if data_format == 'channels_first' else -1
sequential_layers.append(
batch_norm(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + '_bn'))
if activation is not None:
sequential_layers.append(
tf.keras.layers.Activation(activation, name=name + '_activation'))
return sequential_layers
def conv2d_block(inputs: tf.Tensor,
conv_filters: Optional[int],
config: ModelConfig,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
depthwise: bool = False,
kernel_initializer: Optional[InitializerType] = None,
name: Optional[str] = None) -> tf.Tensor:
"""Compatibility with third_party/car/deep_nets."""
x = inputs
for layer in conv2d_block_as_layers(
conv_filters=conv_filters,
config=config,
kernel_size=kernel_size,
strides=strides,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
activation=activation,
depthwise=depthwise,
kernel_initializer=kernel_initializer,
name=name):
x = layer(x)
return x
# Do not inherit from tf.keras.layers.Layer; doing so breaks weight loading.
class _MbConvBlock:
"""Mobile Inverted Residual Bottleneck composite layer."""
def __call__(self, inputs: tf.Tensor, training=False):
x = inputs
for layer in self.expand_block:
x = layer(x)
if self.squeeze_excitation:
se = x
for layer in self.squeeze_excitation:
se = layer(se)
x = tf.keras.layers.multiply([x, se], name=self.name + 'se_excite')
for layer in self.project_block:
x = layer(x)
if self.has_skip_add:
x = tf.keras.layers.add([x, inputs], name=self.name + 'add')
return x
def __init__(self,
block: BlockConfig,
config: ModelConfig,
prefix: Optional[str] = None):
"""Mobile Inverted Residual Bottleneck.
Args:
block: BlockConfig, arguments to create a Block
config: ModelConfig, a set of model parameters
prefix: prefix for naming all layers
"""
use_se = config.use_se
activation = tf_utils.get_activation(config.activation)
drop_connect_rate = config.drop_connect_rate
data_format = tf.keras.backend.image_data_format()
use_depthwise = block.conv_type == 'depthwise'
use_groupconv = block.conv_type == 'group'
prefix = prefix or ''
self.name = prefix
conv_kernel_initializer = (
config.conv_kernel_initializer if config.conv_kernel_initializer
is not None else CONV_KERNEL_INITIALIZER)
filters = block.input_filters * block.expand_ratio
self.expand_block: List[tf.keras.layers.Layer] = []
self.squeeze_excitation: List[tf.keras.layers.Layer] = []
self.project_block: List[tf.keras.layers.Layer] = []
if block.fused_project:
raise NotImplementedError('Fused projection is not supported.')
if block.fused_expand and block.expand_ratio != 1:
# If we use fused mbconv, fuse expansion with the main kernel.
# If conv_type is depthwise we still fuse it to a full conv.
if use_groupconv:
self.expand_block.append(groupconv2d_block(
filters,
config,
kernel_size=block.kernel_size,
strides=block.strides,
group_size=block.group_size,
activation=activation,
name=prefix + 'fused'))
else:
self.expand_block.extend(
conv2d_block_as_layers(
conv_filters=filters,
config=config,
kernel_size=block.kernel_size,
strides=block.strides,
activation=activation,
kernel_initializer=conv_kernel_initializer,
name=prefix + 'fused'))
else:
if block.expand_ratio != 1:
# Expansion phase with a pointwise conv
self.expand_block.extend(
conv2d_block_as_layers(
conv_filters=filters,
config=config,
kernel_size=(1, 1),
activation=activation,
kernel_initializer=conv_kernel_initializer,
name=prefix + 'expand'))
# Main kernel, after the expansion (if applicable, i.e. not fused).
if use_depthwise:
self.expand_block.extend(conv2d_block_as_layers(
conv_filters=filters,
config=config,
kernel_size=block.kernel_size,
strides=block.strides,
activation=activation,
kernel_initializer=conv_kernel_initializer,
depthwise=True,
name=prefix + 'depthwise'))
elif use_groupconv:
self.expand_block.append(groupconv2d_block(
conv_filters=filters,
config=config,
kernel_size=block.kernel_size,
strides=block.strides,
group_size=block.group_size,
activation=activation,
name=prefix + 'group'))
# Squeeze and Excitation phase
if use_se:
assert block.se_ratio is not None
assert 0 < block.se_ratio <= 1
num_reduced_filters = max(1, int(
block.input_filters * block.se_ratio
))
if data_format == 'channels_first':
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
self.squeeze_excitation.append(
tf.keras.layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze'))
self.squeeze_excitation.append(
tf.keras.layers.Reshape(se_shape, name=prefix + 'se_reshape'))
self.squeeze_excitation.extend(
conv2d_block_as_layers(
conv_filters=num_reduced_filters,
config=config,
use_bias=True,
use_batch_norm=False,
activation=activation,
kernel_initializer=conv_kernel_initializer,
name=prefix + 'se_reduce'))
self.squeeze_excitation.extend(
conv2d_block_as_layers(
conv_filters=filters,
config=config,
use_bias=True,
use_batch_norm=False,
activation='sigmoid',
kernel_initializer=conv_kernel_initializer,
name=prefix + 'se_expand'))
# Output phase
self.project_block.extend(
conv2d_block_as_layers(
conv_filters=block.output_filters,
config=config,
activation=None,
kernel_initializer=conv_kernel_initializer,
name=prefix + 'project'))
# Add identity so that quantization-aware training can insert quantization
# ops correctly.
self.project_block.append(
tf.keras.layers.Activation('linear', name=prefix + 'id'))
self.has_skip_add = False
if (block.id_skip
and all(s == 1 for s in block.strides)
and block.input_filters == block.output_filters):
self.has_skip_add = True
if drop_connect_rate and drop_connect_rate > 0:
# Apply dropconnect
# The only difference between dropout and dropconnect in TF is scaling
# by drop_connect_rate during training. See:
# https://github.com/keras-team/keras/pull/9898#issuecomment-380577612
self.project_block.append(
tf.keras.layers.Dropout(
drop_connect_rate,
noise_shape=(None, 1, 1, 1),
name=prefix + 'drop'))
def mb_conv_block(inputs: tf.Tensor,
block: BlockConfig,
config: ModelConfig,
prefix: Optional[str] = None) -> tf.Tensor:
"""Mobile Inverted Residual Bottleneck.
Args:
inputs: the Keras input to the block
block: BlockConfig, arguments to create a Block
config: ModelConfig, a set of model parameters
prefix: prefix for naming all layers
Returns:
the output of the block
"""
return _MbConvBlock(block, config, prefix)(inputs)
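# Example (a minimal sketch; `x` is a hypothetical feature map of shape
# (batch, h, w, 64)): apply one strided IBN block that expands 8x and
# projects to 128 filters:
#   block = BlockConfig.from_args(64, 128, 3, 1, 8, (2, 2))
#   y = mb_conv_block(x, block, ModelConfig(), prefix='stack_3/block_0/')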
def mobilenet_edgetpu_v2(image_input: tf.keras.layers.Input,
config: ModelConfig): # pytype: disable=invalid-annotation # typed-keras
"""Creates a MobilenetEdgeTPUV2 graph given the model parameters.
This function is wrapped by the `MobilenetEdgeTPUV2` class to make a
tf.keras.Model.
Args:
image_input: the input batch of images
config: the model config
Returns:
    The output of the classification model or, if `config.backbone_only` is
    set, a list of backbone feature levels.
"""
depth_coefficient = config.depth_coefficient
blocks = config.blocks
stem_base_filters = config.stem_base_filters
stem_kernel_size = config.stem_kernel_size
top_base_filters = config.top_base_filters
activation = tf_utils.get_activation(config.activation)
dropout_rate = config.dropout_rate
drop_connect_rate = config.drop_connect_rate
conv_kernel_initializer = (
config.conv_kernel_initializer if config.conv_kernel_initializer
is not None else CONV_KERNEL_INITIALIZER)
dense_kernel_initializer = (
config.dense_kernel_initializer if config.dense_kernel_initializer
is not None else DENSE_KERNEL_INITIALIZER)
num_classes = config.num_classes
input_channels = config.input_channels
rescale_input = config.rescale_input
data_format = tf.keras.backend.image_data_format()
dtype = config.dtype
weight_decay = config.weight_decay
x = image_input
if data_format == 'channels_first':
# Happens on GPU/TPU if available.
x = tf.keras.layers.Permute((3, 1, 2))(x)
if rescale_input:
x = common_modules.normalize_images(
x, num_channels=input_channels, dtype=dtype, data_format=data_format)
# Build stem
x = conv2d_block(
inputs=x,
conv_filters=round_filters(stem_base_filters, config),
config=config,
kernel_size=[stem_kernel_size, stem_kernel_size],
strides=[2, 2],
activation=activation,
kernel_initializer=conv_kernel_initializer,
name='stem')
# Build blocks
num_blocks_total = sum(block.num_repeat for block in blocks)
block_num = 0
backbone_levels = []
for stack_idx, block in enumerate(blocks):
is_reduction = False
assert block.num_repeat > 0
# Update block input and output filters based on depth multiplier
block = block.replace(
input_filters=round_filters(block.input_filters, config),
output_filters=round_filters(block.output_filters, config),
num_repeat=round_repeats(block.num_repeat, depth_coefficient))
if stack_idx == 0:
backbone_levels.append(x)
elif (stack_idx == len(blocks) - 1) or (blocks[stack_idx + 1].strides
== (2, 2)):
is_reduction = True
# The first block needs to take care of stride and filter size increase
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config = config.replace(drop_connect_rate=drop_rate)
block_prefix = 'stack_{}/block_0/'.format(stack_idx)
x = _MbConvBlock(block, config, block_prefix)(x)
block_num += 1
if block.num_repeat > 1:
block = block.replace(
input_filters=block.output_filters,
strides=[1, 1]
)
for block_idx in range(block.num_repeat - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config = config.replace(drop_connect_rate=drop_rate)
block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1)
x = _MbConvBlock(block, config, prefix=block_prefix)(x)
block_num += 1
if is_reduction:
backbone_levels.append(x)
if config.backbone_only:
return backbone_levels
# Build top
x = conv2d_block(
inputs=x,
conv_filters=round_filters(top_base_filters, config),
config=config,
activation=activation,
kernel_initializer=conv_kernel_initializer,
name='top')
# Build classifier
pool_size = (x.shape.as_list()[1], x.shape.as_list()[2])
x = tf.keras.layers.AveragePooling2D(pool_size, name='top_pool')(x)
if dropout_rate and dropout_rate > 0:
x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x)
x = tf.keras.layers.Conv2D(
num_classes,
1,
kernel_initializer=dense_kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
bias_regularizer=tf.keras.regularizers.l2(weight_decay),
      name='logits')(x)
x = tf.keras.layers.Activation('softmax', name='probs')(x)
x = tf.squeeze(x, axis=[1, 2])
return x
| 40,716 | 35.225089 | 99 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mobilenet_edgetpu_v2_model_blocks."""
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import custom_layers
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model_blocks
class MobilenetEdgetpuV2ModelBlocksTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.model_config = mobilenet_edgetpu_v2_model_blocks.ModelConfig()
  def test_model_creation(self):
model_input = tf.keras.layers.Input(shape=(224, 224, 1))
model_output = mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2(
image_input=model_input,
config=self.model_config)
test_model = tf.keras.Model(inputs=model_input, outputs=model_output)
self.assertIsInstance(test_model, tf.keras.Model)
self.assertEqual(test_model.input.shape, (None, 224, 224, 1))
self.assertEqual(test_model.output.shape, (None, 1001))
def test_model_with_customized_kernel_initializer(self):
self.model_config.conv_kernel_initializer = 'he_uniform'
self.model_config.dense_kernel_initializer = 'glorot_normal'
model_input = tf.keras.layers.Input(shape=(224, 224, 1))
model_output = mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2(
image_input=model_input,
config=self.model_config)
test_model = tf.keras.Model(inputs=model_input, outputs=model_output)
conv_layer_stack = []
for layer in test_model.layers:
if (isinstance(layer, tf.keras.layers.Conv2D) or
isinstance(layer, tf.keras.layers.DepthwiseConv2D) or
isinstance(layer, custom_layers.GroupConv2D)):
conv_layer_stack.append(layer)
self.assertGreater(len(conv_layer_stack), 2)
# The last Conv layer is used as a Dense layer.
for layer in conv_layer_stack[:-1]:
if isinstance(layer, custom_layers.GroupConv2D):
self.assertIsInstance(layer.kernel_initializer,
tf.keras.initializers.GlorotUniform)
elif isinstance(layer, tf.keras.layers.Conv2D):
self.assertIsInstance(layer.kernel_initializer,
tf.keras.initializers.HeUniform)
elif isinstance(layer, tf.keras.layers.DepthwiseConv2D):
self.assertIsInstance(layer.depthwise_initializer,
tf.keras.initializers.HeUniform)
self.assertIsInstance(conv_layer_stack[-1].kernel_initializer,
tf.keras.initializers.GlorotNormal)
if __name__ == '__main__':
tf.test.main()
| 3,102 | 41.506849 | 87 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mobilenet_edgetpu model."""
import os
from absl.testing import parameterized
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model
class MobilenetEdgeTPUV2BuildTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
    super().setUp()
# Ensure no model duplicates
tf.keras.backend.clear_session()
def test_create_mobilenet_edgetpu(self):
model = mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2()
self.assertEqual(common_modules.count_params(model), 6069657)
def test_export_tflite(self):
model = mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2()
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tmp_dir = self.create_tempdir()
output_tflite = os.path.join(tmp_dir, 'model_quant.tflite')
tflite_buffer = converter.convert()
tf.io.gfile.GFile(output_tflite, 'wb').write(tflite_buffer)
self.assertTrue(tf.io.gfile.exists(output_tflite))
def test_model_save_load(self):
"""Serializes and de-serializeds the model."""
model_builder = mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2
model = model_builder.from_name(model_name='mobilenet_edgetpu_v2')
    # The model always has a conv2d layer following the input layer, and we
    # compare the weight parameters of this layer for the original model and
    # the saved-then-loaded model.
first_conv_layer = model.get_layer('stem_conv2d')
kernel_tensor = first_conv_layer.trainable_weights[0].numpy()
model.save('/tmp/test_model')
loaded_model = tf.keras.models.load_model('/tmp/test_model')
loaded_first_conv_layer = loaded_model.get_layer('stem_conv2d')
loaded_kernel_tensor = loaded_first_conv_layer.trainable_weights[0].numpy()
self.assertAllClose(kernel_tensor, loaded_kernel_tensor)
def test_model_initialization_failure(self):
"""Tests model can only be initialized with predefined model name."""
model_builder = mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2
with self.assertRaises(ValueError):
_ = model_builder.from_name(model_name='undefined_model_name')
if __name__ == '__main__':
tf.test.main()
| 2,909 | 39.416667 | 80 | py |
models | models-master/official/projects/edgetpu/vision/modeling/optimized_multiheadattention_layer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MultiHeadAttention layer optimized for EdgeTPU.
Compared to tf.keras.layers.MultiHeadAttention, this layer performs query-key
multiplication instead of key-query multiplication to remove an unnecessary
transpose.
"""
import math
import string
from typing import Optional, Tuple
import numpy as np
import tensorflow as tf
_CHR_IDX = string.ascii_lowercase
def _build_attention_equation(
rank: int, attn_axes: Tuple[int, ...]) -> Tuple[str, str, int]:
"""Builds einsum equations for the attention computation.
  Query, key, value inputs after projection are expected to have the shape:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch
dims>, <query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`, that attention will be
applied to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ""
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = "".join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = "%s,%s->%s" % (
target_notation,
source_notation,
product_notation,
)
attn_scores_rank = len(product_notation)
combine_equation = "%s,%s->%s" % (
product_notation,
source_notation,
target_notation,
)
return dot_product_equation, combine_equation, attn_scores_rank
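# Example (illustrative): for self-attention over projected tensors of shape
# (batch, seq, num_heads, head_dim), i.e. rank=4 and attn_axes=(1,), this
# function returns:
#   dot_product_equation = 'abcd,aecd->acbe'  # (B,T,N,H),(B,S,N,H)->(B,N,T,S)
#   combine_equation = 'acbe,aecd->abcd'      # (B,N,T,S),(B,S,N,H)->(B,T,N,H)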
class OptimizedMultiHeadAttention(tf.keras.layers.MultiHeadAttention):
"""MultiHeadAttention with query-key multiplication.
Currently, this layer only works for self-attention but not for
cross-attention. TODO(b/243166060).
"""
def _build_attention(self, rank: int) -> None:
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
customize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
(
self._dot_product_equation,
self._combine_equation,
attn_scores_rank,
) = _build_attention_equation(
rank, attn_axes=self._attention_axes)
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = tf.keras.layers.Softmax(axis=norm_axes)
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
def _compute_attention(
self,
query: tf.Tensor,
key: tf.Tensor,
value: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
training: Optional[bool] = None) -> Tuple[tf.Tensor, tf.Tensor]:
"""Applies Dot-product attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for
customized attention implementation.
Args:
query: Projected query `Tensor` of shape `(B, T, N, key_dim)`.
key: Projected key `Tensor` of shape `(B, S, N, key_dim)`.
value: Projected value `Tensor` of shape `(B, S, N, value_dim)`.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions. It is generally not needed if the
`query` and `value` (and/or `key`) are masked.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
attention_scores: Multi-headed attention weights.
"""
# Note: Applying scalar multiply at the smaller end of einsum improves
# XLA performance, but may introduce slight numeric differences in
# the Transformer attention head.
query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum(self._dot_product_equation, query, key)
attention_scores = self._masked_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_scores_dropout = self._dropout_layer(
attention_scores, training=training)
# `context_layer` = [B, T, N, H]
attention_output = tf.einsum(self._combine_equation,
attention_scores_dropout, value)
return attention_output, attention_scores
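# Example usage (a minimal sketch; shapes are illustrative):
#   mha = OptimizedMultiHeadAttention(num_heads=8, key_dim=64)
#   x = tf.random.normal([2, 16, 512])
#   y = mha(query=x, value=x)  # self-attention; y.shape == (2, 16, 512)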
| 6,277 | 37.048485 | 78 | py |
models | models-master/official/projects/edgetpu/vision/modeling/common_modules.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common modeling utilities."""
from typing import Optional, Tuple
# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
from tensorflow.python.tpu import tpu_function # pylint: disable=g-direct-tensorflow-import
MEAN_RGB = (0.5 * 255, 0.5 * 255, 0.5 * 255)
STDDEV_RGB = (0.5 * 255, 0.5 * 255, 0.5 * 255)
@tf.keras.utils.register_keras_serializable(package='Vision')
class TpuBatchNormalization(tf.keras.layers.BatchNormalization):
"""Cross replica batch normalization."""
def __init__(self, fused: Optional[bool] = False, **kwargs):
if fused in (True, None):
raise ValueError('TpuBatchNormalization does not support fused=True.')
super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs)
def _cross_replica_average(self, t: tf.Tensor, num_shards_per_group: int):
"""Calculates the average value of input tensor across TPU replicas."""
num_shards = tpu_function.get_tpu_context().number_of_shards
group_assignment = None
if num_shards_per_group > 1:
if num_shards % num_shards_per_group != 0:
raise ValueError(
'num_shards: %d mod shards_per_group: %d, should be 0' %
(num_shards, num_shards_per_group))
num_groups = num_shards // num_shards_per_group
group_assignment = [[
x for x in range(num_shards) if x // num_shards_per_group == y
] for y in range(num_groups)]
return tf1.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
num_shards_per_group, t.dtype)
def _moments(self,
inputs: tf.Tensor,
               reduction_axes: Tuple[int, ...],
               keep_dims: bool,
mask: Optional[tf.Tensor] = None):
"""Compute the mean and variance: it overrides the original _moments."""
shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
inputs, reduction_axes, keep_dims=keep_dims, mask=mask)
num_shards = tpu_function.get_tpu_context().number_of_shards or 1
if num_shards <= 8: # Skip cross_replica for 2x2 or smaller slices.
num_shards_per_group = 1
else:
num_shards_per_group = max(8, num_shards // 8)
if num_shards_per_group > 1:
# Compute variance using: Var[X]= E[X^2] - E[X]^2.
shard_square_of_mean = tf.math.square(shard_mean)
shard_mean_of_square = shard_variance + shard_square_of_mean
group_mean = self._cross_replica_average(shard_mean, num_shards_per_group)
group_mean_of_square = self._cross_replica_average(
shard_mean_of_square, num_shards_per_group)
group_variance = group_mean_of_square - tf.math.square(group_mean)
return (group_mean, group_variance)
else:
return (shard_mean, shard_variance)
def get_batch_norm(batch_norm_type: str) -> tf.keras.layers.BatchNormalization:
"""A helper to create a batch normalization getter.
Args:
batch_norm_type: The type of batch normalization layer implementation. `tpu`
will use `TpuBatchNormalization`.
Returns:
An instance of `tf.keras.layers.BatchNormalization`.
"""
if batch_norm_type == 'tpu':
return TpuBatchNormalization
return tf.keras.layers.BatchNormalization # pytype: disable=bad-return-type # typed-keras
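# Example (illustrative): the returned value is a class, which the caller
# instantiates:
#   bn_cls = get_batch_norm('tpu')
#   bn = bn_cls(axis=-1, momentum=0.99, epsilon=1e-3)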
def count_params(model, trainable_only=True):
"""Returns the count of all model parameters, or just trainable ones."""
if not trainable_only:
return model.count_params()
else:
return int(np.sum([tf.keras.backend.count_params(p)
for p in model.trainable_weights]))
def load_weights(model: tf.keras.Model,
model_weights_path: str,
checkpoint_format: str = 'tf_checkpoint'):
"""Load model weights from the given file path.
Args:
model: the model to load weights into
model_weights_path: the path of the model weights
checkpoint_format: The source of checkpoint files. By default, we assume the
checkpoint is saved by tf.train.Checkpoint().save(). For legacy reasons,
      we can also restore a checkpoint saved by the Keras model.save_weights()
      method by setting checkpoint_format = 'keras_checkpoint'.
"""
if checkpoint_format == 'tf_checkpoint':
checkpoint_dict = {'model': model}
checkpoint = tf.train.Checkpoint(**checkpoint_dict)
checkpoint.restore(model_weights_path).assert_existing_objects_matched()
elif checkpoint_format == 'keras_checkpoint':
    # The assertion makes sure the load is successful.
model.load_weights(model_weights_path).assert_existing_objects_matched()
else:
raise ValueError(f'Unsupported checkpoint format {checkpoint_format}.')
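# Example usage (a minimal sketch; the checkpoint path is hypothetical):
#   load_weights(model, '/tmp/ckpt/model-1', checkpoint_format='tf_checkpoint')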
def normalize_images(
features: tf.Tensor,
num_channels: int = 3,
dtype: str = 'float32',
data_format: str = 'channels_last',
mean_rgb: Tuple[float, ...] = MEAN_RGB,
stddev_rgb: Tuple[float, ...] = STDDEV_RGB,
) -> tf.Tensor:
"""Normalizes the input image channels with the given mean and stddev.
Args:
features: `Tensor` representing decoded images in float format.
num_channels: the number of channels in the input image tensor.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
data_format: the format of the input image tensor ['channels_first',
'channels_last'].
mean_rgb: the mean of the channels to subtract.
stddev_rgb: the stddev of the channels to divide.
Returns:
A normalized image `Tensor`.
"""
if data_format == 'channels_first':
stats_shape = [num_channels, 1, 1]
else:
stats_shape = [1, 1, num_channels]
if dtype is not None:
if dtype == 'bfloat16':
features = tf.image.convert_image_dtype(features, dtype=tf.bfloat16)
if mean_rgb is not None:
mean_rgb = tf.constant(mean_rgb, shape=stats_shape, dtype=features.dtype)
mean_rgb = tf.broadcast_to(mean_rgb, tf.shape(features))
features = features - mean_rgb
if stddev_rgb is not None:
stddev_rgb = tf.constant(
stddev_rgb, shape=stats_shape, dtype=features.dtype)
stddev_rgb = tf.broadcast_to(stddev_rgb, tf.shape(features))
features = features / stddev_rgb
return features
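# Example (illustrative): with the default mean/stddev of 0.5 * 255 = 127.5,
# this maps uint8-range pixel values to roughly [-1, 1]:
#   images = tf.random.uniform([4, 224, 224, 3], maxval=255.0)
#   normalized = normalize_images(images)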
| 6,720 | 37.849711 | 93 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v1_model_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for MobilenetEdgeTPU image classification models."""
import dataclasses
import math
from typing import Any, Optional, Tuple, Union
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.projects.edgetpu.vision.modeling import common_modules
@dataclasses.dataclass
class BlockConfig(base_config.Config):
"""Config for a single MB Conv Block."""
input_filters: int = 0
output_filters: int = 0
kernel_size: int = 3
num_repeat: int = 1
expand_ratio: int = 1
strides: Tuple[int, int] = (1, 1)
se_ratio: Optional[float] = None
id_skip: bool = True
fused_conv: bool = False
conv_type: str = 'depthwise'
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""Default Config for MobilenetEdgeTPU."""
width_coefficient: float = 1.0
depth_coefficient: float = 1.0
resolution: Union[int, Tuple[int, int]] = 224
dropout_rate: float = 0.1
blocks: Tuple[BlockConfig, ...] = (
# (input_filters, output_filters, kernel_size, num_repeat,
# expand_ratio, strides, se_ratio, id_skip, fused_conv, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(32, 16, 3, 1, 1, (1, 1), conv_type='no_depthwise'),
BlockConfig.from_args(16, 32, 3, 1, 8, (2, 2), fused_conv=True),
BlockConfig.from_args(32, 32, 3, 3, 4, (1, 1), conv_type='no_depthwise'),
BlockConfig.from_args(32, 48, 3, 1, 8, (2, 2), fused_conv=True),
BlockConfig.from_args(48, 48, 3, 3, 4, (1, 1), conv_type='no_depthwise'),
BlockConfig.from_args(48, 96, 3, 1, 8, (2, 2)),
BlockConfig.from_args(96, 96, 3, 3, 4, (1, 1)),
BlockConfig.from_args(96, 96, 3, 1, 8, (1, 1), id_skip=False),
BlockConfig.from_args(96, 96, 3, 3, 4, (1, 1)),
BlockConfig.from_args(96, 160, 5, 1, 8, (2, 2)),
BlockConfig.from_args(160, 160, 5, 3, 4, (1, 1)),
BlockConfig.from_args(160, 192, 3, 1, 8, (1, 1)),
# pylint: enable=bad-whitespace
)
stem_base_filters: int = 32
top_base_filters: int = 1280
activation: str = 'relu'
batch_norm: str = 'default'
bn_momentum: float = 0.99
bn_epsilon: float = 1e-3
# While the original implementation used a weight decay of 1e-5,
# tf.nn.l2_loss divides it by 2, so we halve this to compensate in Keras
weight_decay: float = 5e-6
drop_connect_rate: float = 0.1
depth_divisor: int = 8
min_depth: Optional[int] = None
# No Squeeze/Excite for MobilenetEdgeTPU
use_se: bool = False
input_channels: int = 3
num_classes: int = 1001
model_name: str = 'mobilenet_edgetpu'
rescale_input: bool = False
data_format: str = 'channels_last'
dtype: str = 'float32'
backbone_only: bool = False
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# Note: this is a truncated normal distribution
'distribution': 'normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1 / 3.0,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
# TODO(longy): Reuse the utility functions for V1/V2 models.
def round_filters(filters: int,
config: ModelConfig) -> int:
"""Round number of filters based on width coefficient."""
width_coefficient = config.width_coefficient
min_depth = config.min_depth
divisor = config.depth_divisor
orig_filters = filters
if not width_coefficient:
return filters
filters *= width_coefficient
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
logging.info('round_filter input=%s output=%s', orig_filters, new_filters)
return int(new_filters)
def round_repeats(repeats: int, depth_coefficient: float) -> int:
"""Round number of repeats based on depth coefficient."""
return int(math.ceil(depth_coefficient * repeats))
def conv2d_block(inputs: tf.Tensor,
conv_filters: Optional[int],
config: ModelConfig,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
depthwise: bool = False,
name: Optional[str] = None):
"""A conv2d followed by batch norm and an activation."""
batch_norm = common_modules.get_batch_norm(config.batch_norm)
bn_momentum = config.bn_momentum
bn_epsilon = config.bn_epsilon
data_format = tf.keras.backend.image_data_format()
weight_decay = config.weight_decay
name = name or ''
# Collect args based on what kind of conv2d block is desired
init_kwargs = {
'kernel_size': kernel_size,
'strides': strides,
'use_bias': use_bias,
'padding': 'same',
'name': name + '_conv2d',
'kernel_regularizer': tf.keras.regularizers.l2(weight_decay),
'bias_regularizer': tf.keras.regularizers.l2(weight_decay),
}
if depthwise:
conv2d = tf.keras.layers.DepthwiseConv2D
init_kwargs.update({'depthwise_initializer': CONV_KERNEL_INITIALIZER})
else:
conv2d = tf.keras.layers.Conv2D
init_kwargs.update({'filters': conv_filters,
'kernel_initializer': CONV_KERNEL_INITIALIZER})
x = conv2d(**init_kwargs)(inputs)
if use_batch_norm:
bn_axis = 1 if data_format == 'channels_first' else -1
x = batch_norm(axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + '_bn')(x)
if activation is not None:
x = tf.keras.layers.Activation(activation,
name=name + '_activation')(x)
return x
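# Usage sketch (assumes a built `config` and a rank-4 `inputs` tensor; the name
# is illustrative): a 1x1 projection with batch norm and no activation is
# conv2d_block(inputs, 64, config, kernel_size=(1, 1), name='proj'); passing
# depthwise=True with conv_filters=None selects DepthwiseConv2D instead.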
def mb_conv_block(inputs: tf.Tensor,
block: BlockConfig,
config: ModelConfig,
prefix: Optional[str] = None):
"""Mobile Inverted Residual Bottleneck.
Args:
inputs: the Keras input to the block
block: BlockConfig, arguments to create a Block
config: ModelConfig, a set of model parameters
prefix: prefix for naming all layers
Returns:
the output of the block
"""
use_se = config.use_se
activation = tf_utils.get_activation(config.activation)
drop_connect_rate = config.drop_connect_rate
data_format = tf.keras.backend.image_data_format()
use_depthwise = block.conv_type == 'depthwise'
prefix = prefix or ''
filters = block.input_filters * block.expand_ratio
x = inputs
if block.fused_conv:
# If we use fused mbconv, skip expansion and use regular conv.
x = conv2d_block(x,
filters,
config,
kernel_size=block.kernel_size,
strides=block.strides,
activation=activation,
name=prefix + 'fused')
else:
if block.expand_ratio != 1:
# Expansion phase
kernel_size = (1, 1) if use_depthwise else (3, 3)
x = conv2d_block(x,
filters,
config,
kernel_size=kernel_size,
activation=activation,
name=prefix + 'expand')
# Depthwise Convolution
if use_depthwise:
x = conv2d_block(x,
conv_filters=None,
config=config,
kernel_size=block.kernel_size,
strides=block.strides,
activation=activation,
depthwise=True,
name=prefix + 'depthwise')
# Squeeze and Excitation phase
if use_se:
assert block.se_ratio is not None
assert 0 < block.se_ratio <= 1
num_reduced_filters = max(1, int(
block.input_filters * block.se_ratio
))
if data_format == 'channels_first':
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
se = tf.keras.layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x)
se = tf.keras.layers.Reshape(se_shape, name=prefix + 'se_reshape')(se)
se = conv2d_block(se,
num_reduced_filters,
config,
use_bias=True,
use_batch_norm=False,
activation=activation,
name=prefix + 'se_reduce')
se = conv2d_block(se,
filters,
config,
use_bias=True,
use_batch_norm=False,
activation='sigmoid',
name=prefix + 'se_expand')
x = tf.keras.layers.multiply([x, se], name=prefix + 'se_excite')
# Output phase
x = conv2d_block(x,
block.output_filters,
config,
activation=None,
name=prefix + 'project')
# Add identity so that quantization-aware training can insert quantization
# ops correctly.
x = tf.keras.layers.Activation('linear', name=prefix + 'id')(x)
if (block.id_skip
and all(s == 1 for s in block.strides)
and block.input_filters == block.output_filters):
if drop_connect_rate and drop_connect_rate > 0:
# Apply dropconnect
# The only difference between dropout and dropconnect in TF is scaling by
# drop_connect_rate during training. See:
# https://github.com/keras-team/keras/pull/9898#issuecomment-380577612
x = tf.keras.layers.Dropout(drop_connect_rate,
noise_shape=(None, 1, 1, 1),
name=prefix + 'drop')(x)
x = tf.keras.layers.add([x, inputs], name=prefix + 'add')
return x
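# Note on the residual branch above: the skip connection only applies when a
# block preserves both resolution and width. In the default config, for
# example, BlockConfig.from_args(96, 96, 3, 3, 4, (1, 1)) is skippable, while
# BlockConfig.from_args(96, 160, 5, 1, 8, (2, 2)) is not (stride 2 plus a
# filter-count change), and BlockConfig.from_args(96, 96, 3, 1, 8, (1, 1),
# id_skip=False) opts out explicitly.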
def mobilenet_edgetpu(image_input: tf.keras.layers.Input, config: ModelConfig): # pytype: disable=invalid-annotation # typed-keras
"""Creates a MobilenetEdgeTPU graph given the model parameters.
This function is wrapped by the `MobilenetEdgeTPU` class to make a
tf.keras.Model.
Args:
image_input: the input batch of images
config: the model config
Returns:
    The output of the classification model or, if the backbone is needed, a
    dictionary of backbone feature levels.
"""
depth_coefficient = config.depth_coefficient
blocks = config.blocks
stem_base_filters = config.stem_base_filters
top_base_filters = config.top_base_filters
activation = tf_utils.get_activation(config.activation)
dropout_rate = config.dropout_rate
drop_connect_rate = config.drop_connect_rate
num_classes = config.num_classes
input_channels = config.input_channels
rescale_input = config.rescale_input
data_format = tf.keras.backend.image_data_format()
dtype = config.dtype
weight_decay = config.weight_decay
x = image_input
if data_format == 'channels_first':
# Happens on GPU/TPU if available.
x = tf.keras.layers.Permute((3, 1, 2))(x)
if rescale_input:
x = common_modules.normalize_images(
x, num_channels=input_channels, dtype=dtype, data_format=data_format)
# Build stem
x = conv2d_block(x,
round_filters(stem_base_filters, config),
config,
kernel_size=[3, 3],
strides=[2, 2],
activation=activation,
name='stem')
# Build blocks
num_blocks_total = sum(block.num_repeat for block in blocks)
block_num = 0
backbone_levels = {}
for stack_idx, block in enumerate(blocks):
assert block.num_repeat > 0
# Update block input and output filters based on depth multiplier
block = block.replace(
input_filters=round_filters(block.input_filters, config),
output_filters=round_filters(block.output_filters, config),
num_repeat=round_repeats(block.num_repeat, depth_coefficient))
# The first block needs to take care of stride and filter size increase
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config = config.replace(drop_connect_rate=drop_rate)
block_prefix = 'stack_{}/block_0/'.format(stack_idx)
x = mb_conv_block(x, block, config, block_prefix)
block_num += 1
if block.num_repeat > 1:
block = block.replace(
input_filters=block.output_filters,
strides=[1, 1]
)
for block_idx in range(block.num_repeat - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config = config.replace(drop_connect_rate=drop_rate)
block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1)
x = mb_conv_block(x, block, config, prefix=block_prefix)
block_num += 1
backbone_levels[str(stack_idx)] = x
if config.backbone_only:
return backbone_levels
# Build top
x = conv2d_block(x,
round_filters(top_base_filters, config),
config,
activation=activation,
name='top')
# Build classifier
pool_size = (x.shape.as_list()[1], x.shape.as_list()[2])
x = tf.keras.layers.AveragePooling2D(pool_size, name='top_pool')(x)
if dropout_rate and dropout_rate > 0:
x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x)
x = tf.keras.layers.Conv2D(
num_classes,
1,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
bias_regularizer=tf.keras.regularizers.l2(weight_decay),
name='logits')(
x)
x = tf.keras.layers.Activation('softmax', name='probs')(x)
x = tf.squeeze(x, axis=[1, 2])
return x
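# Shape walk-through for the default config: a (N, 224, 224, 3) input passes
# the stride-2 stem and four stride-2 stacks to reach (N, 7, 7, 192); the top
# conv widens it to (N, 7, 7, 1280), 'top_pool' collapses it to
# (N, 1, 1, 1280), the 1x1 'logits' conv maps to (N, 1, 1, 1001), and the
# final squeeze yields (N, 1001).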
| 14,355 | 33.676329 | 132 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v1_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for MobilenetEdgeTPU image classification models."""
from typing import Any, Dict, Optional, Text
# Import libraries
from absl import logging
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model_blocks
ModelConfig = mobilenet_edgetpu_v1_model_blocks.ModelConfig
MODEL_CONFIGS = {
# (width, depth, resolution, dropout)
'mobilenet_edgetpu': ModelConfig.from_args(1.0, 1.0, 224, 0.1),
'mobilenet_edgetpu_dm0p75': ModelConfig.from_args(0.75, 1.0, 224, 0.1),
'mobilenet_edgetpu_dm1p25': ModelConfig.from_args(1.25, 1.0, 224, 0.1),
'mobilenet_edgetpu_dm1p5': ModelConfig.from_args(1.5, 1.0, 224, 0.1),
'mobilenet_edgetpu_dm1p75': ModelConfig.from_args(1.75, 1.0, 224, 0.1)
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobilenetEdgeTPU(tf.keras.Model):
"""Wrapper class for a MobilenetEdgeTPU Keras model.
Contains helper methods to build, manage, and save metadata about the model.
"""
def __init__(self,
config: Optional[ModelConfig] = None,
overrides: Optional[Dict[Text, Any]] = None):
"""Create a MobilenetEdgeTPU model.
Args:
config: (optional) the main model parameters to create the model
overrides: (optional) a dict containing keys that can override config
"""
overrides = overrides or {}
config = config or ModelConfig()
self.config = config.replace(**overrides)
input_channels = self.config.input_channels
model_name = self.config.model_name
if isinstance(self.config.resolution, tuple):
input_shape = (self.config.resolution[0], self.config.resolution[1],
input_channels)
else:
input_shape = (self.config.resolution, self.config.resolution,
input_channels)
image_input = tf.keras.layers.Input(shape=input_shape)
output = mobilenet_edgetpu_v1_model_blocks.mobilenet_edgetpu(
image_input, self.config)
if not isinstance(output, dict):
# Cast to float32 in case we have a different model dtype
output = tf.cast(output, tf.float32)
self._output_specs = output.get_shape()
else:
self._output_specs = {
feature: output[feature].get_shape() for feature in output
}
logging.info('Building model %s with params %s',
model_name,
self.config)
super(MobilenetEdgeTPU, self).__init__(
inputs=image_input, outputs=output, name=model_name)
@classmethod
def from_name(cls,
model_name: str,
model_weights_path: Optional[str] = None,
checkpoint_format: Optional[str] = 'tf_checkpoint',
overrides: Optional[Dict[str, Any]] = None):
"""Construct an MobilenetEdgeTPU model from a predefined model name.
E.g., `MobilenetEdgeTPU.from_name('mobilenet_edgetpu')`.
Args:
model_name: the predefined model name
model_weights_path: the path to the weights (h5 file or saved model dir)
checkpoint_format: the model weights format. One of 'tf_checkpoint' or
'keras_checkpoint'.
overrides: (optional) a dict containing keys that can override config
Returns:
      A constructed MobilenetEdgeTPU instance.
"""
model_configs = dict(MODEL_CONFIGS)
overrides = dict(overrides) if overrides else {}
# One can define their own custom models if necessary
model_configs.update(overrides.pop('model_config', {}))
if model_name not in model_configs:
raise ValueError('Unknown model name {}'.format(model_name))
config = model_configs[model_name]
model = cls(config=config, overrides=overrides)
if model_weights_path:
common_modules.load_weights(model,
model_weights_path,
checkpoint_format=checkpoint_format)
return model
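  # Usage sketch (the weights path is hypothetical):
  # model = MobilenetEdgeTPU.from_name(
  #     'mobilenet_edgetpu_dm1p25',
  #     model_weights_path='/tmp/mobilenet_edgetpu_dm1p25',
  #     overrides={'num_classes': 10})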
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
| 4,742 | 35.206107 | 87 | py |
models | models-master/official/projects/edgetpu/vision/modeling/custom_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized keras layers used in the EdgeTPU models."""
from collections.abc import MutableMapping
import inspect
from typing import Any, Optional, Union
import tensorflow as tf
from official.modeling import tf_utils
class GroupConv2D(tf.keras.layers.Conv2D):
"""2D group convolution as a Keras Layer."""
def __init__(self,
filters: int,
kernel_size: Union[int, tuple[int, int]],
groups: int,
strides: tuple[int, int] = (1, 1),
padding: str = 'valid',
data_format: str = 'channels_last',
dilation_rate: tuple[int, int] = (1, 1),
activation: Any = None,
use_bias: bool = True,
kernel_initializer: Any = 'glorot_uniform',
bias_initializer: Any = 'zeros',
kernel_regularizer: Any = None,
bias_regularizer: Any = None,
activity_regularizer: Any = None,
kernel_constraint: Any = None,
bias_constraint: Any = None,
batch_norm_layer: Optional[tf.keras.layers.Layer] = None,
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
**kwargs: Any) -> tf.keras.layers.Layer:
"""Creates a 2D group convolution keras layer.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the height
and width of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
groups: The number of input/output channel groups.
strides: An integer or tuple/list of n integers, specifying the stride
length of the convolution. Specifying any stride value != 1 is
incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: The ordering of the dimensions in the inputs. `channels_last`
corresponds to inputs with shape `(batch_size, height, width, channels)`
dilation_rate: an integer or tuple/list of 2 integers, specifying the
dilation rate to use for dilated convolution. Can be a single integer to
specify the same value for all spatial dimensions. Currently, specifying
any `dilation_rate` value != 1 is incompatible with specifying any
stride value != 1.
activation: Activation function to use. If you don't specify anything, no
activation is applied ( see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix ( see
`keras.initializers`).
bias_initializer: Initializer for the bias vector ( see
`keras.initializers`).
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector ( see
`keras.regularizers`).
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation") ( see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix ( see
`keras.constraints`).
bias_constraint: Constraint function applied to the bias vector ( see
`keras.constraints`).
batch_norm_layer: The batch normalization layer to use. This is typically
tf.keras.layer.BatchNormalization or a derived class.
bn_epsilon: Batch normalization epsilon.
bn_momentum: Momentum used for moving average in batch normalization.
**kwargs: Additional keyword arguments.
Input shape:
4D tensor with shape: `(batch_size, rows, cols, channels)`
Output shape:
4D tensor with shape: `(batch_size, new_rows, new_cols, filters)` `rows`
and `cols` values might have changed due to padding.
Returns:
A tensor of rank 4 representing
`activation(GroupConv2D(inputs, kernel) + bias)`.
Raises:
      ValueError: if groups <= 1 or groups > filters.
ValueError: if data_format is not "channels_last".
ValueError: if `padding` is not `same` or `valid`.
ValueError: if `batch_norm_layer` is not a callable when provided.
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
if groups <= 1 or groups > filters:
      raise ValueError(f'Number of groups {groups} should be greater than 1 and'
                       f' less than or equal to the output filters {filters}.')
self._groups = groups
if data_format != 'channels_last':
raise ValueError(
'GroupConv2D expects input to be in channels_last format.')
if padding.lower() not in ('same', 'valid'):
raise ValueError('Valid padding options are : same, or valid.')
self.use_batch_norm = False
if batch_norm_layer is not None:
if not inspect.isclass(batch_norm_layer):
raise ValueError('batch_norm_layer is not a class.')
self.use_batch_norm = True
self.bn_epsilon = bn_epsilon
self.bn_momentum = bn_momentum
self.batch_norm_layer = []
if self.use_batch_norm:
self.batch_norm_layer = [
batch_norm_layer(
axis=-1, momentum=self.bn_momentum, epsilon=self.bn_epsilon)
for i in range(self._groups)
]
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
groups=1,
**kwargs) # pytype: disable=bad-return-type # typed-keras
def build(self, input_shape: tuple[int, ...]) -> None:
"""Builds GroupConv2D layer as a collection of smaller Conv2D layers."""
input_shape = tf.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
if input_channel % self._groups != 0:
raise ValueError(
f'Number of input channels: {input_channel} are not divisible '
f'by number of groups: {self._groups}.')
self.group_input_channel = int(input_channel / self._groups)
self.group_output_channel = int(self.filters / self._groups)
self.group_kernel_shape = self.kernel_size + (self.group_input_channel,
self.group_output_channel)
self.kernel = []
self.bias = []
for g in range(self._groups):
self.kernel.append(
self.add_weight(
name='kernel_{}'.format(g),
shape=self.group_kernel_shape,
initializer=tf_utils.clone_initializer(self.kernel_initializer),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype))
if self.use_bias:
self.bias.append(
self.add_weight(
name='bias_{}'.format(g),
shape=(self.group_output_channel,),
initializer=tf_utils.clone_initializer(self.bias_initializer),
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype))
channel_axis = self._get_channel_axis()
self.input_spec = tf.keras.layers.InputSpec(
ndim=self.rank + 2, axes={channel_axis: input_channel})
self._build_conv_op_data_shape = input_shape[-(self.rank + 1):]
self._build_input_channel = input_channel
self._padding_op = self._get_padding_op()
# channels_last corresponds to 'NHWC' data format.
self._conv_op_data_format = 'NHWC'
self.bn_layers = []
if self.use_batch_norm:
for group_index in range(self._groups):
self.bn_layers.append(self.batch_norm_layer[group_index])
self.built = True
def call(self, inputs: Any, training: Optional[bool] = None) -> Any:
"""Performs the GroupConv2D operation on the inputs."""
input_slices = tf.split(inputs, num_or_size_splits=self._groups, axis=-1)
output_slices = []
for i in range(self._groups):
# Apply conv2d to each slice
output_slice = tf.nn.conv2d(
input_slices[i],
self.kernel[i],
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format,
dilations=self.dilation_rate)
if self.use_bias:
output_slice = tf.nn.bias_add(
output_slice, self.bias[i], data_format='NHWC')
# Apply batch norm after bias addition.
if self.use_batch_norm:
output_slice = self.bn_layers[i](output_slice, training=training)
if self.activation is not None:
output_slice = self.activation(output_slice)
output_slices.append(output_slice)
# Concat the outputs back along the channel dimension
outputs = tf.concat(output_slices, axis=-1)
return outputs
def get_config(self) -> MutableMapping[str, Any]:
"""Enables serialization for the group convolution layer."""
config = super().get_config()
config['groups'] = self._groups
config['batch_norm_layer'] = self.batch_norm_layer
config['bn_epsilon'] = self.bn_epsilon
config['bn_momentum'] = self.bn_momentum
return config
@classmethod
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`, capable of instantiating the
same layer from the config dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
    Also, since `get_config` returns a config whose `batch_norm_layer` field is
    a list, we need to convert it to either None or the batch norm class.
Arguments:
config: A Python dictionary, typically the output of get_config.
Returns:
A layer instance.
"""
if not config['batch_norm_layer']:
config['batch_norm_layer'] = None
else:
config['batch_norm_layer'] = type(config['batch_norm_layer'][0])
return cls(**config)
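# Illustrative usage sketch (not part of the original API surface): a 2-group
# convolution over an 8-channel NHWC input. Each group convolves 4 input
# channels into 8 output channels, so the concatenated output has 16 channels.
def _group_conv2d_usage_example() -> tf.Tensor:
  gconv = GroupConv2D(
      filters=16,
      kernel_size=(3, 3),
      groups=2,
      padding='same',
      batch_norm_layer=tf.keras.layers.BatchNormalization)
  # SAME padding with stride 1 preserves the spatial dims: (1, 32, 32, 16).
  return gconv(tf.ones([1, 32, 32, 8]))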
class GroupConv2DKerasModel(tf.keras.Model):
"""2D group convolution as a keras model."""
def __init__(self,
filters: int,
kernel_size: tuple[int, int],
groups: int,
batch_norm_layer: Optional[tf.keras.layers.Layer] = None,
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
data_format: str = 'channels_last',
padding: str = 'valid',
**kwargs: Any) -> tf.keras.Model:
"""Creates a 2D group convolution layer as a keras model.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the height
and width of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
groups: The number of input/output channel groups.
batch_norm_layer: The batch normalization layer to use. This is typically
tf.keras.layer.BatchNormalization or a derived class.
bn_epsilon: Batch normalization epsilon.
bn_momentum: Momentum used for moving average in batch normalization.
data_format: The ordering of the dimensions in the inputs. `channels_last`
corresponds to inputs with shape `(batch_size, height, width, channels)`
padding: one of `"valid"` or `"same"` (case-insensitive).
**kwargs: Additional keyword arguments passed to the underlying conv
layers.
Raises:
      ValueError: if groups <= 1 or groups >= filters.
      ValueError: if `batch_norm_layer` is not a callable when provided.
      ValueError: if `data_format` is not channels_last.
ValueError: if `padding` is not `same` or `valid`.
"""
super().__init__()
self.conv_layers = []
self.bn_layers = []
per_conv_filter_size = filters / groups
if groups <= 1 or groups >= filters:
raise ValueError('Number of groups should be greater than 1 and less '
'than the output filters.')
self.batch_norm_layer = batch_norm_layer
self.use_batch_norm = False
if self.batch_norm_layer is not None:
if not inspect.isclass(self.batch_norm_layer): # pytype: disable=not-supported-yet
raise ValueError('batch_norm_layer is not a class.')
self.use_batch_norm = True
if 'activation' in kwargs.keys():
self.activation = tf.keras.activations.get(kwargs['activation'])
kwargs.pop('activation')
else:
self.activation = None
if data_format != 'channels_last':
raise ValueError(
'GroupConv2D expects input to be in channels_last format.')
if padding.lower() not in ('same', 'valid'):
raise ValueError('Valid padding options are : same, or valid.')
self._groups = groups
for _ in range(self._groups):
# Override the activation so that batchnorm can be applied after the conv.
self.conv_layers.append(
tf.keras.layers.Conv2D(per_conv_filter_size, kernel_size, **kwargs))
if self.use_batch_norm:
for _ in range(self._groups):
self.bn_layers.append(
self.batch_norm_layer(
axis=-1, momentum=bn_momentum, epsilon=bn_epsilon)) # pytype: disable=bad-return-type # typed-keras
def call(self, inputs: Any) -> Any: # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""Applies 2d group convolution on the inputs."""
input_shape = inputs.get_shape().as_list()
if input_shape[-1] % self._groups != 0:
raise ValueError(
f'Number of input channels: {input_shape[-1]} are not divisible '
f'by number of groups: {self._groups}.')
input_slices = tf.split(inputs, num_or_size_splits=self._groups, axis=-1)
output_slices = []
for g in range(self._groups):
output_slice = self.conv_layers[g](input_slices[g])
if self.use_batch_norm:
output_slice = self.bn_layers[g](output_slice)
output_slice = self.activation(output_slice)
output_slices.append(output_slice)
outputs = tf.concat(output_slices, axis=-1)
return outputs
def _nnapi_scalar(value, dtype):
# Resolves "Scalar operand should be constant" at cost of broadcasting
return tf.constant(value, dtype=dtype, shape=(1,))
def _fqop(x, min_val=-128, max_val=127):
"""Wraps an op x with fake quant op and given min/max."""
return tf.quantization.fake_quant_with_min_max_args(
x, min=min_val, max=max_val)
def argmax(input_tensor,
axis=-1,
output_type: tf.DType = tf.dtypes.float32,
name: Optional[str] = None,
keepdims: bool = False,
epsilon: Optional[float] = None):
"""Returns the index with the largest value across axes of a tensor.
  An approximation of tf.compat.v1.argmax, but not an exact equivalent: if
  arithmetic allows a value to be anomalously close to the maximum without
  being equal to it, the behavior is undefined.
Args:
input_tensor: A Tensor.
axis: A Value. Must be in the range [-rank(input), rank(input)). Describes
which axis of the input Tensor to reduce across. For vectors, use axis =
0.
output_type: An optional tf.DType. Note that default is different from
tflite (int64) to make default behavior compatible with darwinn.
name: Optional name for operations.
keepdims: If true, retains reduced dimensions with length 1.
epsilon: Optional small number which is intended to be always below
quantization threshold, used to distinguish equal and not equal numbers.
Returns:
A Tensor of type output_type.
"""
fqop = _fqop if output_type.is_floating else tf.identity
safe_axis = axis
if safe_axis < 0:
safe_axis = len(input_tensor.shape) + safe_axis
reduction_size = input_tensor.shape[axis]
axis_max = tf.math.reduce_max(input_tensor, axis=axis, keepdims=True)
zero_if_max = tf.subtract(axis_max, input_tensor)
eps = epsilon if epsilon else 1e-6
if input_tensor.dtype.is_floating:
zero_if_max_else_eps = tf.math.minimum(
_nnapi_scalar(eps, input_tensor.dtype), zero_if_max)
zero_if_max_else_one = zero_if_max_else_eps * _nnapi_scalar(
1 / eps, input_tensor.dtype)
elif input_tensor.dtype.is_integer:
zero_if_max_else_one = tf.math.minimum(
_nnapi_scalar(1, input_tensor.dtype), zero_if_max)
else:
raise ValueError('Please specify epsilon for unknown input data type')
# Input type ends here, output type starts here
zero_if_max_else_one = tf.cast(zero_if_max_else_one, dtype=output_type)
zero_if_max_else_one = fqop(zero_if_max_else_one)
one_if_max_else_zero = fqop(
tf.math.subtract(
fqop(_nnapi_scalar(1, output_type)), zero_if_max_else_one))
rev_index = tf.range(reduction_size, 0, -1, dtype=output_type)
for index in range(safe_axis + 1, len(input_tensor.shape)):
rev_index = tf.expand_dims(rev_index, axis=index - safe_axis)
rev_index = fqop(rev_index)
rev_index_if_max_else_zero = fqop(
tf.math.multiply(one_if_max_else_zero, rev_index))
reverse_argmax = fqop(
tf.math.reduce_max(
rev_index_if_max_else_zero, axis=axis, keepdims=keepdims, name=name))
  # The final operation obtains the name of the argmax layer, if provided.
return fqop(
tf.math.subtract(
fqop(_nnapi_scalar(reduction_size, output_type)),
reverse_argmax,
name=name))
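# Worked example of the trick above: with int32 input [3, 7, 5], axis_max is 7
# and zero_if_max is [4, 0, 2], so zero_if_max_else_one = [1, 0, 1] and
# one_if_max_else_zero = [0, 1, 0]. With rev_index = [3, 2, 1],
# rev_index_if_max_else_zero = [0, 2, 0] and reverse_argmax = 2, giving
# reduction_size - reverse_argmax = 3 - 2 = 1, which matches
# tf.argmax([3, 7, 5]).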
class ArgmaxKerasLayer(tf.keras.layers.Layer):
"""Implements argmax as a keras model."""
def __init__(self,
axis=-1,
name=None,
output_type=tf.dtypes.int32,
**kwargs: Any) -> tf.keras.Model:
"""Implements argmax as a keras model.
Args:
axis: A Value. Must be in the range [-rank(input), rank(input)). Describes
which axis of the input Tensor to reduce across. For vectors, use axis =
0.
name: Optional name for operations.
output_type: An optional tf.DType.
**kwargs: Other arguments passed to model constructor.
Returns:
A Tensor of type output_type.
"""
super().__init__(name=name, **kwargs)
self.axis = axis
self.output_type = output_type # pytype: disable=bad-return-type # typed-keras
def call(self, inputs: Any) -> Any:
"""Applies argmax on the inputs."""
return argmax(
input_tensor=inputs,
axis=self.axis,
output_type=self.output_type,
name=self.name)
| 19,671 | 39.898129 | 117 | py |
models | models-master/official/projects/edgetpu/vision/modeling/optimized_multiheadattention_layer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for optimized_multiheadattention_layer."""
import numpy as np
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import optimized_multiheadattention_layer
_BATCH_SIZE = 32
_SEQ_LEN = 4
_EMBEDDING_SIZE = 8
_NUM_HEADS = 2
_KEY_DIM = 2
class OptimizedMultiheadattentionLayerTest(tf.test.TestCase):
def test_same_output(self):
"""Tests that OptimizedMultiHeadAttention returns the expected outputs."""
input_tensor_1 = tf.random.uniform((_BATCH_SIZE, _SEQ_LEN, _EMBEDDING_SIZE))
input_tensor_2 = tf.random.uniform((_BATCH_SIZE, _SEQ_LEN, _EMBEDDING_SIZE))
# Instantiate layer and call with inputs to build.
orig_layer = tf.keras.layers.MultiHeadAttention(
num_heads=_NUM_HEADS, key_dim=_KEY_DIM)
_ = orig_layer(input_tensor_1, input_tensor_2)
opt_layer = optimized_multiheadattention_layer.OptimizedMultiHeadAttention(
num_heads=_NUM_HEADS, key_dim=_KEY_DIM)
_ = opt_layer(input_tensor_1, input_tensor_2)
# Set the weights of the two layers to be the same.
query_dense_weights = np.random.uniform(
size=(_EMBEDDING_SIZE, _NUM_HEADS, _KEY_DIM))
query_dense_bias = np.random.uniform(size=(_NUM_HEADS, _KEY_DIM))
key_dense_weights = np.random.uniform(
size=(_EMBEDDING_SIZE, _NUM_HEADS, _KEY_DIM))
key_dense_bias = np.random.uniform(size=(_NUM_HEADS, _KEY_DIM))
value_dense_weights = np.random.uniform(
size=(_EMBEDDING_SIZE, _NUM_HEADS, _KEY_DIM))
value_dense_bias = np.random.uniform(size=(_NUM_HEADS, _KEY_DIM))
attention_output_dense_weights = np.random.uniform(
size=(_NUM_HEADS, _KEY_DIM, _EMBEDDING_SIZE))
attention_output_dense_bias = np.random.uniform(size=(_EMBEDDING_SIZE,))
orig_layer._query_dense.set_weights([query_dense_weights, query_dense_bias])
orig_layer._key_dense.set_weights([key_dense_weights, key_dense_bias])
orig_layer._value_dense.set_weights([value_dense_weights, value_dense_bias])
orig_layer._output_dense.set_weights(
[attention_output_dense_weights, attention_output_dense_bias])
opt_layer._query_dense.set_weights([query_dense_weights, query_dense_bias])
opt_layer._key_dense.set_weights([key_dense_weights, key_dense_bias])
opt_layer._value_dense.set_weights([value_dense_weights, value_dense_bias])
opt_layer._output_dense.set_weights(
[attention_output_dense_weights, attention_output_dense_bias])
# Calculate two sets of attention outputs and scores and compare.
orig_attn_output, orig_attn_score = orig_layer(
input_tensor_1, input_tensor_2, return_attention_scores=True)
opt_attn_output, opt_attn_score = opt_layer(
input_tensor_1, input_tensor_2, return_attention_scores=True)
self.assertAllClose(orig_attn_output, opt_attn_output)
self.assertAllClose(orig_attn_score, opt_attn_score)
if __name__ == '__main__':
tf.test.main()
| 3,516 | 41.890244 | 88 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for MobilenetEdgeTPUV2 image classification models."""
from typing import Any, Mapping, Optional
from absl import logging
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model_blocks
ModelConfig = mobilenet_edgetpu_v2_model_blocks.ModelConfig
MODEL_CONFIGS = {
'mobilenet_edgetpu_v2':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_s(),
'mobilenet_edgetpu_v2_tiny':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_tiny(),
'mobilenet_edgetpu_v2_xs':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_xs(),
'mobilenet_edgetpu_v2_s':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_s(),
'mobilenet_edgetpu_v2_m':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_m(),
'mobilenet_edgetpu_v2_l':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_l(),
'autoseg_edgetpu_backbone_xs':
mobilenet_edgetpu_v2_model_blocks.autoseg_edgetpu_backbone_xs(),
'autoseg_edgetpu_backbone_s':
mobilenet_edgetpu_v2_model_blocks.autoseg_edgetpu_backbone_s(),
'autoseg_edgetpu_backbone_m':
mobilenet_edgetpu_v2_model_blocks.autoseg_edgetpu_backbone_m(),
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobilenetEdgeTPUV2(tf.keras.Model):
"""Wrapper class for a MobilenetEdgeTPUV2 Keras model.
Contains helper methods to build, manage, and save metadata about the model.
"""
def __init__(self,
model_config_name: Optional[str] = None,
overrides: Optional[Mapping[str, Any]] = None,
**kwargs):
"""Creates a MobilenetEdgeTPUV2 model.
Args:
model_config_name: (optional) the model parameters to create the model.
overrides: (optional) a dict containing keys that can override config.
**kwargs: All the rest model arguments in a dictionary.
"""
self.model_config_name = model_config_name
self._self_setattr_tracking = False
self.overrides = overrides or {}
if model_config_name is None:
model_config = ModelConfig()
else:
if model_config_name not in MODEL_CONFIGS:
supported_model_list = list(MODEL_CONFIGS.keys())
        raise ValueError(f'Unknown model name {model_config_name}. Only '
                         f'supports model configs in {supported_model_list}.')
model_config = MODEL_CONFIGS[model_config_name]
self.config = model_config.replace(**self.overrides)
input_channels = self.config.input_channels
model_name = self.config.model_name
if isinstance(self.config.resolution, tuple):
input_shape = (self.config.resolution[0], self.config.resolution[1],
input_channels)
else:
input_shape = (self.config.resolution, self.config.resolution,
input_channels)
image_input = tf.keras.layers.Input(shape=input_shape)
output = mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2(
image_input, self.config)
if not isinstance(output, list):
# Cast to float32 in case we have a different model dtype
output = tf.cast(output, tf.float32)
self._output_specs = output.get_shape()
else:
if self.config.features_as_dict:
# Dict output is required for the decoder ASPP module.
self._output_specs = {
str(i): output[i].get_shape() for i in range(len(output))
}
output = {str(i): output[i] for i in range(len(output))}
else:
# edgetpu/tasks/segmentation assumes features as list.
self._output_specs = [feat.get_shape() for feat in output]
logging.info('Building model %s with params %s',
model_name,
self.config)
super(MobilenetEdgeTPUV2, self).__init__(
inputs=image_input, outputs=output, **kwargs)
self._self_setattr_tracking = True
@classmethod
def from_name(cls,
model_name: str,
model_weights_path: Optional[str] = None,
checkpoint_format: Optional[str] = 'tf_checkpoint',
overrides: Optional[Mapping[str, Any]] = None):
"""Constructs an MobilenetEdgeTPUV2 model from a predefined model name.
E.g., `MobilenetEdgeTPUV2.from_name('mobilenet_edgetpu_v2_s')`.
Args:
model_name: the predefined model name
model_weights_path: the path to the weights (h5 file or saved model dir)
checkpoint_format: the model weights format. One of 'tf_checkpoint' or
'keras_checkpoint'.
overrides: (optional) a dict containing keys that can override config
Returns:
      A constructed MobilenetEdgeTPUV2 instance.
"""
overrides = dict(overrides) if overrides else {}
# One can define their own custom models if necessary
MODEL_CONFIGS.update(overrides.pop('model_config', {}))
model = cls(model_config_name=model_name, overrides=overrides)
if model_weights_path:
common_modules.load_weights(model,
model_weights_path,
checkpoint_format=checkpoint_format)
return model
def get_config(self):
config = {'model_config_name': self.model_config_name,
'overrides': self.overrides}
keras_model_config = super().get_config()
return dict(list(config.items()) + list(keras_model_config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(model_config_name=config['model_config_name'],
overrides=config['overrides'])
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
| 6,403 | 37.812121 | 87 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v1_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mobilenet_edgetpu model."""
import os
import tensorflow as tf
from official.legacy.image_classification import preprocessing
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model_blocks
# TODO(b/151324383): Enable once training is supported for mobilenet-edgetpu
EXAMPLE_IMAGE = ('third_party/tensorflow_models/official/vision/'
'image_classification/testdata/panda.jpg')
CKPTS = 'gs://**/efficientnets'
def _copy_recursively(src: str, dst: str) -> None:
"""Recursively copy directory."""
for src_dir, _, src_files in tf.io.gfile.walk(src):
dst_dir = os.path.join(dst, os.path.relpath(src_dir, src))
if not tf.io.gfile.exists(dst_dir):
tf.io.gfile.makedirs(dst_dir)
for src_file in src_files:
tf.io.gfile.copy(
os.path.join(src_dir, src_file),
os.path.join(dst_dir, src_file),
overwrite=True)
class MobilenetEdgeTPUBlocksTest(tf.test.TestCase):
def setUp(self):
super(tf.test.TestCase, self).setUp()
# Ensure no model duplicates
tf.keras.backend.clear_session()
def test_bottleneck_block(self):
"""Test for creating a model with bottleneck block arguments."""
images = tf.zeros((4, 224, 224, 3), dtype=tf.float32)
tf.keras.backend.set_image_data_format('channels_last')
blocks = [
mobilenet_edgetpu_v1_model_blocks.BlockConfig.from_args(
input_filters=3,
output_filters=6,
kernel_size=3,
num_repeat=3,
expand_ratio=6,
strides=(2, 2),
fused_conv=False,
)
]
config = mobilenet_edgetpu_v1_model.ModelConfig.from_args(
blocks=blocks,
num_classes=10,
use_se=False,
)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU(config)
outputs = model(images, training=True)
self.assertEqual((4, 10), outputs.shape)
ref_var_names = set([
'stem_conv2d/kernel:0',
'stem_bn/gamma:0',
'stem_bn/beta:0',
'stack_0/block_0/expand_conv2d/kernel:0',
'stack_0/block_0/expand_bn/gamma:0',
'stack_0/block_0/expand_bn/beta:0',
'stack_0/block_0/depthwise_conv2d/depthwise_kernel:0',
'stack_0/block_0/depthwise_bn/gamma:0',
'stack_0/block_0/depthwise_bn/beta:0',
'stack_0/block_0/project_conv2d/kernel:0',
'stack_0/block_0/project_bn/gamma:0',
'stack_0/block_0/project_bn/beta:0',
'stack_0/block_1/expand_conv2d/kernel:0',
'stack_0/block_1/expand_bn/gamma:0',
'stack_0/block_1/expand_bn/beta:0',
'stack_0/block_1/depthwise_conv2d/depthwise_kernel:0',
'stack_0/block_1/depthwise_bn/gamma:0',
'stack_0/block_1/depthwise_bn/beta:0',
'stack_0/block_1/project_conv2d/kernel:0',
'stack_0/block_1/project_bn/gamma:0',
'stack_0/block_1/project_bn/beta:0',
'stack_0/block_2/expand_conv2d/kernel:0',
'stack_0/block_2/expand_bn/gamma:0',
'stack_0/block_2/expand_bn/beta:0',
'stack_0/block_2/depthwise_conv2d/depthwise_kernel:0',
'stack_0/block_2/depthwise_bn/gamma:0',
'stack_0/block_2/depthwise_bn/beta:0',
'stack_0/block_2/project_conv2d/kernel:0',
'stack_0/block_2/project_bn/gamma:0',
'stack_0/block_2/project_bn/beta:0',
'top_conv2d/kernel:0',
'top_bn/gamma:0',
'top_bn/beta:0',
'logits/kernel:0',
'logits/bias:0'
])
var_names = set([var.name for var in model.trainable_variables])
self.assertEqual(var_names, ref_var_names)
def test_fused_bottleneck_block(self):
"""Test for creating a model with fused bottleneck block arguments."""
images = tf.zeros((4, 224, 224, 3), dtype=tf.float32)
tf.keras.backend.set_image_data_format('channels_last')
blocks = [
mobilenet_edgetpu_v1_model_blocks.BlockConfig.from_args(
input_filters=3,
output_filters=6,
kernel_size=3,
num_repeat=3,
expand_ratio=6,
strides=(2, 2),
fused_conv=True,
)
]
config = mobilenet_edgetpu_v1_model.ModelConfig.from_args(
blocks=blocks,
num_classes=10,
use_se=False,
)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU(config)
outputs = model(images, training=True)
self.assertEqual((4, 10), outputs.shape)
var_names = {var.name for var in model.trainable_variables}
ref_var_names = [
'stack_0/block_0/fused_conv2d/kernel:0',
'stack_0/block_1/fused_conv2d/kernel:0',
'stack_0/block_2/fused_conv2d/kernel:0',
]
for ref_var_name in ref_var_names:
self.assertIn(ref_var_name, var_names)
def test_variables(self):
"""Test for variables in blocks to be included in `model.variables`."""
images = tf.zeros((4, 224, 224, 3), dtype=tf.float32)
tf.keras.backend.set_image_data_format('channels_last')
blocks = [
mobilenet_edgetpu_v1_model_blocks.BlockConfig.from_args(
input_filters=3,
output_filters=6,
kernel_size=3,
num_repeat=3,
expand_ratio=6,
id_skip=False,
strides=(2, 2),
se_ratio=0.8,
fused_conv=False,
)
]
config = mobilenet_edgetpu_v1_model.ModelConfig.from_args(
blocks=blocks,
num_classes=10,
use_se=True,
)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU(config)
_ = model(images, training=True)
var_names = {var.name for var in model.variables}
self.assertIn('stack_0/block_0/depthwise_conv2d/depthwise_kernel:0',
var_names)
class MobilenetEdgeTPUBuildTest(tf.test.TestCase):
def setUp(self):
super(tf.test.TestCase, self).setUp()
# Ensure no model duplicates
tf.keras.backend.clear_session()
def test_create_mobilenet_edgetpu(self):
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU()
self.assertEqual(common_modules.count_params(model), 4092713)
class MobilenetEdgeTPUPredictTest(tf.test.TestCase):
def setUp(self):
super(tf.test.TestCase, self).setUp()
# Ensure no model duplicates
tf.keras.backend.clear_session()
def _copy_saved_model_to_local(self, model_ckpt):
# Copy saved model to local first for speed
tmp_path = '/tmp/saved_model'
_copy_recursively(model_ckpt, tmp_path)
return tmp_path
def _test_prediction(self, model_name, image_size):
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name(model_name)
# Predict image filled with zeros
images = tf.zeros((4, image_size, image_size, 3), dtype=tf.float32)
pred = model(images, training=False)
self.assertEqual(pred.shape, (4, 1000))
# Predict image with loaded weights
images = preprocessing.load_eval_image(EXAMPLE_IMAGE, image_size)
images = tf.expand_dims(images, axis=0)
model_ckpt = os.path.join(CKPTS, model_name)
model_ckpt = self._copy_saved_model_to_local(model_ckpt)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name(
model_name, model_weights_path=model_ckpt)
pred = model(images, training=False)
pred = pred[0].numpy()
pred_idx, pred_prob = pred.argmax(), pred.max()
# 388 is 'giant panda' (see labels_map_file)
self.assertEqual(pred_idx, 388)
self.assertGreater(pred_prob, 0.75)
def test_mobilenet_edgetpu_image_shape(self):
self.skipTest(
'TODO(b/151324383): Enable once training is supported for mobilenet-edgetpu'
)
params = dict(input_channels=5, num_classes=20, rescale_input=False)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name(
'mobilenet_edgetpu', overrides=params)
images = tf.zeros((6, 100, 38, 5), dtype=tf.float32)
pred = model(images, training=False)
self.assertEqual(pred.shape, (6, 20))
def test_mobilenet_edgetpu_predict(self):
self.skipTest(
'TODO(b/151324383): Enable once training is supported for mobilenet-edgetpu'
)
self._test_prediction('mobilenet_edgetpu', 224)
if __name__ == '__main__':
tf.test.main()
| 8,911 | 32.757576 | 87 | py |
models | models-master/official/projects/edgetpu/vision/modeling/backbones/mobilenet_edgetpu_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for MobileNet."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.edgetpu.vision.modeling.backbones import mobilenet_edgetpu
class TestInputSpec:
def __init__(self, shape):
self.shape = shape
class TestBackboneConfig:
def __init__(self, model_id):
self.model_id = model_id
self.freeze_large_filters = 99
self.pretrained_checkpoint_path = None
self.type = 'mobilenet_edgetpu'
def get(self):
return self
class MobileNetEdgeTPUTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('mobilenet_edgetpu_v2_s', (1, 512, 512, 3)),
('mobilenet_edgetpu_v2_l', (1, None, None, 3)),
('mobilenet_edgetpu', (1, 512, 512, 3)),
('mobilenet_edgetpu_dm1p25', (1, None, None, 3)),
)
def test_mobilenet_creation(self, model_id, input_shape):
"""Test creation of MobileNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
test_model = mobilenet_edgetpu.build_mobilenet_edgetpu(
input_specs=TestInputSpec(input_shape),
backbone_config=TestBackboneConfig(model_id))
self.assertGreater(len(test_model.outputs), 1)
if __name__ == '__main__':
tf.test.main()
| 1,864 | 28.603175 | 81 | py |
models | models-master/official/projects/edgetpu/vision/modeling/backbones/mobilenet_edgetpu.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of mobilenet_edgetpu_v2 Networks."""
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.edgetpu.vision.modeling.mobilenet_edgetpu_v1_model import MobilenetEdgeTPU
from official.projects.edgetpu.vision.modeling.mobilenet_edgetpu_v2_model import MobilenetEdgeTPUV2
from official.vision.modeling.backbones import factory
layers = tf.keras.layers
# MobileNet-EdgeTPU-V2 configs.
MOBILENET_EDGETPU_V2_CONFIGS = frozenset([
'mobilenet_edgetpu_v2_tiny',
'mobilenet_edgetpu_v2_xs',
'mobilenet_edgetpu_v2_s',
'mobilenet_edgetpu_v2_m',
'mobilenet_edgetpu_v2_l',
'autoseg_edgetpu_backbone_xs',
'autoseg_edgetpu_backbone_s',
'autoseg_edgetpu_backbone_m',
])
# MobileNet-EdgeTPU-V1 configs.
MOBILENET_EDGETPU_CONFIGS = frozenset([
'mobilenet_edgetpu',
'mobilenet_edgetpu_dm0p75',
'mobilenet_edgetpu_dm1p25',
'mobilenet_edgetpu_dm1p5',
'mobilenet_edgetpu_dm1p75',
])
def freeze_large_filters(model: tf.keras.Model, threshold: int):
"""Freezes layer with large number of filters."""
for layer in model.layers:
if isinstance(layer.output_shape, tuple):
filter_size = layer.output_shape[-1]
if filter_size >= threshold:
logging.info('Freezing layer: %s', layer.name)
layer.trainable = False
@factory.register_backbone_builder('mobilenet_edgetpu')
def build_mobilenet_edgetpu(input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
**unused_kwargs) -> tf.keras.Model:
"""Builds MobileNetEdgeTpu backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'mobilenet_edgetpu', (f'Inconsistent backbone type '
f'{backbone_type}')
if backbone_cfg.model_id in MOBILENET_EDGETPU_V2_CONFIGS:
model = MobilenetEdgeTPUV2.from_name(
model_name=backbone_cfg.model_id,
overrides={
'batch_norm': 'tpu',
'rescale_input': False,
'resolution': input_specs.shape[1:3],
'backbone_only': True,
'features_as_dict': True,
'dtype': 'bfloat16'
},
model_weights_path=backbone_cfg.pretrained_checkpoint_path)
if backbone_cfg.freeze_large_filters:
freeze_large_filters(model, backbone_cfg.freeze_large_filters)
return model
elif backbone_cfg.model_id in MOBILENET_EDGETPU_CONFIGS:
model = MobilenetEdgeTPU.from_name(
model_name=backbone_cfg.model_id,
overrides={
'batch_norm': 'tpu',
'rescale_input': False,
'resolution': input_specs.shape[1:3],
'backbone_only': True,
'dtype': 'bfloat16'
},
model_weights_path=backbone_cfg.pretrained_checkpoint_path)
if backbone_cfg.freeze_large_filters:
freeze_large_filters(model, backbone_cfg.freeze_large_filters)
return model
else:
    raise ValueError(f'Unsupported model id {backbone_cfg.model_id}.')
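# Usage sketch (the input shape is illustrative): callers reach this builder
# through the 'mobilenet_edgetpu' registration above, e.g.
# build_mobilenet_edgetpu(
#     input_specs=tf.keras.layers.InputSpec(shape=[None, 512, 512, 3]),
#     backbone_config=backbone_config)
# returns a backbone whose outputs are per-level feature maps.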
| 3,755 | 35.823529 | 99 | py |
models | models-master/official/projects/edgetpu/vision/modeling/heads/bifpn_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the definitions of Bi-Directional Feature Pyramid Networks (BiFPN)."""
import functools
import itertools
from typing import Text, Optional
# Import libraries
from absl import logging
import numpy as np
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
def activation_fn(features: tf.Tensor, act_type: Text):
"""Customized non-linear activation type."""
if act_type in ('silu', 'swish'):
return tf.nn.swish(features)
elif act_type == 'swish_native':
return features * tf.sigmoid(features)
elif act_type == 'hswish':
return features * tf.nn.relu6(features + 3) / 6
elif act_type == 'relu':
return tf.nn.relu(features)
elif act_type == 'relu6':
return tf.nn.relu6(features)
else:
raise ValueError('Unsupported act_type {}'.format(act_type))
def build_batch_norm(is_training_bn: bool,
beta_initializer: Text = 'zeros',
gamma_initializer: Text = 'ones',
data_format: Text = 'channels_last',
momentum: float = 0.99,
epsilon: float = 1e-3,
strategy: Optional[Text] = None,
name: Text = 'tpu_batch_normalization'):
"""Builds a batch normalization layer.
Args:
is_training_bn: `bool` for whether the model is training.
beta_initializer: `str`, beta initializer.
gamma_initializer: `str`, gamma initializer.
data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
    momentum: `float`, momentum of batch norm.
    epsilon: `float`, small value for numerical stability.
    strategy: `str`, whether to use the tpu, gpu, or default version of batch
      norm.
name: the name of the batch normalization layer
Returns:
    A batch normalization layer configured for the given strategy.
"""
axis = 1 if data_format == 'channels_first' else -1
if is_training_bn:
batch_norm_class = common_modules.get_batch_norm(strategy)
else:
batch_norm_class = tf.keras.layers.BatchNormalization
bn_layer = batch_norm_class(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
name=name)
return bn_layer
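# Usage sketch (the layer name is illustrative): the layer is constructed once
# and then applied like any Keras layer, e.g.
# bn = build_batch_norm(is_training_bn=True, strategy='tpu', name='p3_bn')
# feat = bn(feat, training=True)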
def bifpn_config(min_level, max_level):
"""A dynamic bifpn config that can adapt to different min/max levels."""
p = {}
# Node id starts from the input features and monotonically increase whenever
# a new node is added. Here is an example for level P3 - P7:
# P7 (4) P7" (12)
# P6 (3) P6' (5) P6" (11)
# P5 (2) P5' (6) P5" (10)
# P4 (1) P4' (7) P4" (9)
# P3 (0) P3" (8)
# So output would be like:
# [
# {'feat_level': 6, 'inputs_offsets': [3, 4]}, # for P6'
# {'feat_level': 5, 'inputs_offsets': [2, 5]}, # for P5'
# {'feat_level': 4, 'inputs_offsets': [1, 6]}, # for P4'
# {'feat_level': 3, 'inputs_offsets': [0, 7]}, # for P3"
# {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}, # for P4"
# {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}, # for P5"
# {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}, # for P6"
# {'feat_level': 7, 'inputs_offsets': [4, 11]}, # for P7"
# ]
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
id_cnt = itertools.count(num_levels)
p['nodes'] = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path.
p['nodes'].append({
'feat_level': i,
'inputs_offsets': [level_last_id(i),
level_last_id(i + 1)]
})
node_ids[i].append(next(id_cnt))
for i in range(min_level + 1, max_level + 1):
# bottom-up path.
p['nodes'].append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)]
})
node_ids[i].append(next(id_cnt))
return p
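# For instance, bifpn_config(3, 7) yields the 8 fusion nodes enumerated above
# (one top-down and one bottom-up node per level transition), while
# bifpn_config(3, 5) yields 4.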
def get_conv_op(conv_type):
"""Gets convlution op."""
kernel_size = int(conv_type.split('_')[-1])
if conv_type.startswith('sep'):
conv_op = functools.partial(
tf.keras.layers.SeparableConv2D,
depth_multiplier=1,
kernel_size=(kernel_size, kernel_size))
elif conv_type.startswith('conv'):
conv_op = functools.partial(
tf.keras.layers.Conv2D, kernel_size=(kernel_size, kernel_size))
else:
raise ValueError('Unknown conv type: {}'.format(conv_type))
return conv_op
def add_n(nodes):
"""A customized add_n to add up a list of tensors."""
# tf.add_n is not supported by EdgeTPU, while tf.reduce_sum is not supported
# by GPU and runs slow on EdgeTPU because of the 5-dimension op.
with tf.name_scope('add_n'):
new_node = nodes[0]
for n in nodes[1:]:
new_node = new_node + n
return new_node
def resize_nearest_neighbor(data, height_scale, width_scale):
"""Nearest neighbor upsampling implementation."""
with tf.name_scope('nearest_upsampling'):
bs, h, w, c = data.get_shape().as_list()
bs = -1 if bs is None else bs
# Use reshape to quickly upsample the input. The nearest pixel is selected
# implicitly via broadcasting.
data = tf.reshape(data, [bs, h, 1, w, 1, c]) * tf.ones(
[1, 1, height_scale, 1, width_scale, 1], dtype=data.dtype)
return tf.reshape(data, [bs, h * height_scale, w * width_scale, c])
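# For example, a (1, 2, 2, 1) feature map with height_scale = width_scale = 2
# becomes (1, 4, 4, 1): the reshape to (1, 2, 1, 2, 1, 1), broadcast against
# the ones tensor, repeats each source pixel into a 2x2 tile.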
def resize(feat,
target_height,
target_width,
strategy,
training=False,
method='bilinear'):
"""Resizes the spitial dimensions."""
dtype = feat.dtype
feat_shape = feat.get_shape()
if method == 'bilinear':
if strategy == 'tpu' and training:
if dtype == tf.bfloat16:
feat = tf.cast(feat, tf.float32)
feat = tf.image.resize(feat, [target_height, target_width])
feat = tf.cast(feat, dtype)
elif feat_shape.is_fully_defined():
# Batch dimension is known. Mimic resize[h,w] with
# resize[h,1]+resize[1,w] to reduce HBM padding.
b, h, w, c = feat_shape.as_list()
feat = tf.reshape(feat, [b, h, 1, -1])
feat = tf.image.resize(feat, [target_height, 1])
feat = tf.reshape(feat, [-1, 1, w, c])
feat = tf.image.resize(feat, [1, target_width])
feat = tf.reshape(feat, [b, target_height, target_width, c])
else:
feat = tf.image.resize(feat, [target_height, target_width])
else:
feat = tf.image.resize(feat, [target_height, target_width])
elif method == 'nearest':
_, h, w, _ = feat_shape.as_list()
if training and target_height % h == 0 and target_width % w == 0:
feat = resize_nearest_neighbor(feat, target_height // h,
target_width // w)
else:
feat = tf.cast(feat, tf.float32)
feat = tf.image.resize(feat, [target_height, target_width],
tf.image.ResizeMethod.NEAREST_NEIGHBOR)
else:
raise ValueError('Upsampling type {} is not supported.'.format(method))
return tf.cast(feat, dtype)
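# Illustrative usage (a sketch): bilinear resize to 2x spatial size on a
# non-TPU strategy; the TPU-specific reshape path above only triggers for
# strategy='tpu' during training.
def _example_resize():
  feat = tf.zeros([2, 8, 8, 16], dtype=tf.float32)
  return resize(feat, target_height=16, target_width=16, strategy='gpu')
  # Shape [2, 16, 16, 16], cast back to the input dtype.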
class ResampleFeatureMap(tf.keras.layers.Layer):
"""Resamples feature map for downsampling or upsampling."""
def __init__(self,
feat_level,
target_num_channels,
apply_bn=False,
is_training_bn=None,
conv_after_downsample=False,
strategy=None,
data_format=None,
pooling_type=None,
upsampling_type=None,
name='resample_p0'):
super().__init__(name=name)
self.apply_bn = apply_bn
self.is_training_bn = is_training_bn
self.data_format = data_format
self.target_num_channels = target_num_channels
self.feat_level = feat_level
self.strategy = strategy
self.conv_after_downsample = conv_after_downsample
self.pooling_type = pooling_type or 'max'
self.upsampling_type = upsampling_type or 'nearest'
def _pool2d(self, inputs, height, width, target_height, target_width):
"""Pools the inputs to target height and width."""
height_stride_size = int((height - 1) // target_height + 1)
width_stride_size = int((width - 1) // target_width + 1)
if self.pooling_type == 'max':
return tf.keras.layers.MaxPooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(
inputs)
if self.pooling_type == 'avg':
return tf.keras.layers.AveragePooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(
inputs)
raise ValueError('Unsupported pooling type {}.'.format(self.pooling_type))
def _upsample2d(self, inputs, target_height, target_width, training):
return resize(inputs, target_height, target_width, self.strategy, training,
self.upsampling_type)
def _maybe_apply_1x1(self, feat, training, num_channels):
"""Applies 1x1 conv to change layer width if necessary."""
target_num_channels = self.target_num_channels
if target_num_channels is None or num_channels != target_num_channels:
feat = self.conv2d(feat)
if self.apply_bn:
feat = self.bn(feat, training=training)
return feat
def build(self, feat_shape):
num_channels = self.target_num_channels or feat_shape[-1]
self.conv2d = tf.keras.layers.Conv2D(
num_channels, (1, 1),
padding='same',
data_format=self.data_format,
name='conv2d')
self.bn = build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
strategy=self.strategy,
name='bn')
self.built = True
super().build(feat_shape)
def call(self, feat, training, all_feats):
hwc_idx = (2, 3, 1) if self.data_format == 'channels_first' else (1, 2, 3)
height, width, num_channels = [feat.shape.as_list()[i] for i in hwc_idx]
if all_feats:
target_feat_shape = all_feats[self.feat_level].shape.as_list()
target_height, target_width, _ = [target_feat_shape[i] for i in hwc_idx]
else:
# Default to downsampling if all_feats is empty.
target_height, target_width = (height + 1) // 2, (width + 1) // 2
    # A 1x1 conv adjusts the channel count when downsampling; it runs before
    # pooling by default, or after pooling if conv_after_downsample is True,
    # which is cheaper on the smaller feature map.
if height > target_height and width > target_width:
if not self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
feat = self._pool2d(feat, height, width, target_height, target_width)
if self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
elif height <= target_height and width <= target_width:
feat = self._maybe_apply_1x1(feat, training, num_channels)
if height < target_height or width < target_width:
feat = self._upsample2d(feat, target_height, target_width, training)
else:
raise ValueError(
          'Incompatible resampling: feat shape {}x{}, target shape: {}x{}'
.format(height, width, target_height, target_width))
return feat
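# Illustrative usage (a sketch with assumed NHWC shapes): downsample a
# 64x64x40 feature to match the 32x32 target from `all_feats` while
# projecting it to 64 channels via the 1x1 conv.
def _example_resample_feature_map():
  p3 = tf.zeros([1, 64, 64, 40])
  p4 = tf.zeros([1, 32, 32, 112])
  layer = ResampleFeatureMap(
      feat_level=1,
      target_num_channels=64,
      is_training_bn=False,
      data_format='channels_last')
  return layer(p3, training=False, all_feats=[p3, p4])  # [1, 32, 32, 64].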
class FNode(tf.keras.layers.Layer):
"""A Keras Layer implementing BiFPN Node."""
def __init__(self,
feat_level,
inputs_offsets,
fpn_num_filters,
apply_bn_for_resampling,
is_training_bn,
conv_after_downsample,
conv_bn_act_pattern,
conv_type,
act_type,
strategy,
weight_method,
data_format,
pooling_type,
upsampling_type,
name='fnode'):
super().__init__(name=name)
self.feat_level = feat_level
self.inputs_offsets = inputs_offsets
self.fpn_num_filters = fpn_num_filters
self.apply_bn_for_resampling = apply_bn_for_resampling
self.conv_type = conv_type
self.act_type = act_type
self.is_training_bn = is_training_bn
self.conv_after_downsample = conv_after_downsample
self.strategy = strategy
self.data_format = data_format
self.weight_method = weight_method
self.conv_bn_act_pattern = conv_bn_act_pattern
self.pooling_type = pooling_type
self.upsampling_type = upsampling_type
self.resample_layers = []
self.vars = []
def fuse_features(self, nodes):
"""Fuses features from different resolutions and return a weighted sum.
Args:
nodes: a list of tensorflow features at different levels
Returns:
A tensor denoting the fused feature.
"""
dtype = nodes[0].dtype
if self.weight_method == 'attn':
edge_weights = [tf.cast(var, dtype=dtype) for var in self.vars]
normalized_weights = tf.nn.softmax(tf.stack(edge_weights))
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'fastattn':
edge_weights = [
tf.nn.relu(tf.cast(var, dtype=dtype)) for var in self.vars
]
weights_sum = add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = add_n(nodes)
elif self.weight_method == 'channel_attn':
edge_weights = [tf.cast(var, dtype=dtype) for var in self.vars]
normalized_weights = tf.nn.softmax(tf.stack(edge_weights, -1), axis=-1)
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'channel_fastattn':
edge_weights = [
tf.nn.relu(tf.cast(var, dtype=dtype)) for var in self.vars
]
weights_sum = add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = add_n(nodes)
elif self.weight_method == 'sum':
new_node = add_n(nodes)
else:
raise ValueError('unknown weight_method %s' % self.weight_method)
return new_node
def _add_wsm(self, initializer, shape=None):
for i, _ in enumerate(self.inputs_offsets):
name = 'WSM' + ('' if i == 0 else '_' + str(i))
self.vars.append(
self.add_weight(initializer=initializer, name=name, shape=shape))
def build(self, feats_shape):
for i, input_offset in enumerate(self.inputs_offsets):
name = 'resample_{}_{}_{}'.format(i, input_offset, len(feats_shape))
self.resample_layers.append(
ResampleFeatureMap(
self.feat_level,
self.fpn_num_filters,
self.apply_bn_for_resampling,
self.is_training_bn,
self.conv_after_downsample,
strategy=self.strategy,
data_format=self.data_format,
pooling_type=self.pooling_type,
upsampling_type=self.upsampling_type,
name=name))
if self.weight_method == 'attn':
self._add_wsm('ones')
elif self.weight_method == 'fastattn':
self._add_wsm('ones')
elif self.weight_method == 'channel_attn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(tf.ones, num_filters)
elif self.weight_method == 'channel_fastattn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(tf.ones, num_filters)
self.op_after_combine = OpAfterCombine(
self.is_training_bn,
self.conv_bn_act_pattern,
self.conv_type,
self.fpn_num_filters,
self.act_type,
self.data_format,
self.strategy,
name='op_after_combine{}'.format(len(feats_shape)))
self.built = True
super().build(feats_shape)
def call(self, feats, training):
nodes = []
for i, input_offset in enumerate(self.inputs_offsets):
input_node = feats[input_offset]
input_node = self.resample_layers[i](input_node, training, feats)
nodes.append(input_node)
new_node = self.fuse_features(nodes)
new_node = self.op_after_combine(new_node)
return feats + [new_node]
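# Standalone sketch of the 'fastattn' fusion used above: per-input scalar
# weights are ReLU-ed, then each node is scaled by its (approximately)
# normalized weight before summation.
def _example_fastattn_fusion(nodes, raw_weights):
  edge_weights = [tf.nn.relu(w) for w in raw_weights]
  weights_sum = add_n(edge_weights)
  return add_n([node * w / (weights_sum + 0.0001)
                for node, w in zip(nodes, edge_weights)])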
class OpAfterCombine(tf.keras.layers.Layer):
"""Operation after combining input features during feature fusiong."""
def __init__(self,
is_training_bn,
conv_bn_act_pattern,
conv_type,
fpn_num_filters,
act_type,
data_format,
strategy,
name='op_after_combine'):
super().__init__(name=name)
self.conv_bn_act_pattern = conv_bn_act_pattern
self.fpn_num_filters = fpn_num_filters
self.act_type = act_type
self.data_format = data_format
self.strategy = strategy
self.is_training_bn = is_training_bn
self.conv_op = get_conv_op(conv_type)(
filters=fpn_num_filters,
padding='same',
use_bias=not self.conv_bn_act_pattern,
data_format=self.data_format,
name='conv')
self.bn = build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
strategy=self.strategy,
name='bn')
def call(self, new_node, training):
if not self.conv_bn_act_pattern:
new_node = activation_fn(new_node, self.act_type)
new_node = self.conv_op(new_node)
new_node = self.bn(new_node, training=training)
if self.conv_bn_act_pattern:
new_node = activation_fn(new_node, self.act_type)
return new_node
class FPNCells(tf.keras.layers.Layer):
"""FPN cells."""
def __init__(self,
min_level=3,
max_level=8,
fpn_num_filters=96,
apply_bn_for_resampling=True,
is_training_bn=True,
conv_after_downsample=True,
conv_bn_act_pattern=True,
conv_type='sep_3',
act_type='swish',
strategy='tpu',
fpn_weight_method='sum',
data_format='channels_last',
pooling_type='avg',
upsampling_type='bilinear',
fpn_name='bifpn',
fpn_cell_repeats=4,
**kwargs):
super(FPNCells, self).__init__(**kwargs)
self.min_level = min_level
self.max_level = max_level
if fpn_name != 'bifpn':
raise ValueError('Only bifpn config is supported.')
self.fpn_config = bifpn_config(min_level, max_level)
self.cells = [
FPNCell( # pylint: disable=g-complex-comprehension
min_level=min_level,
max_level=max_level,
fpn_num_filters=fpn_num_filters,
apply_bn_for_resampling=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
conv_bn_act_pattern=conv_bn_act_pattern,
conv_type=conv_type,
act_type=act_type,
strategy=strategy,
fpn_weight_method=fpn_weight_method,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
fpn_name=fpn_name,
name='cell_%d' % rep) for rep in range(fpn_cell_repeats)
]
def call(self, feats, training):
"""Model call function."""
for cell in self.cells:
cell_feats = cell(feats, training)
min_level = self.min_level
max_level = self.max_level
feats = []
for level in range(min_level, max_level + 1):
for i, fnode in enumerate(reversed(self.fpn_config['nodes'])):
if fnode['feat_level'] == level:
feats.append(cell_feats[-1 - i])
break
return feats
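# Worked example (illustrative) of the output selection above: for levels
# P3 - P7, bifpn_config yields eight nodes with feat_levels
# [6, 5, 4, 3, 4, 5, 6, 7], so scanning the node list in reverse picks the
# final fusion node for each output level.
def _example_output_feat_levels():
  return [n['feat_level'] for n in bifpn_config(3, 7)['nodes']]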
class FPNCell(tf.keras.layers.Layer):
"""A single FPN cell."""
def __init__(self,
min_level=3,
max_level=7,
fpn_num_filters=80,
apply_bn_for_resampling=True,
is_training_bn=True,
conv_after_downsample=True,
conv_bn_act_pattern=True,
conv_type='sep_3',
act_type='swish',
strategy='tpu',
fpn_weight_method='sum',
data_format='channels_last',
pooling_type='avg',
upsampling_type='bilinear',
fpn_name='bifpn',
name='fpn_cell',
**kwargs):
super(FPNCell, self).__init__(**kwargs)
if fpn_name != 'bifpn':
      raise ValueError('Only bifpn config is supported.')
self.fpn_config = bifpn_config(min_level, max_level)
self.fnodes = []
for i, fnode_cfg in enumerate(self.fpn_config['nodes']):
logging.info('fnode %d : %s', i, fnode_cfg)
fnode = FNode(
fnode_cfg['feat_level'] - min_level,
fnode_cfg['inputs_offsets'],
fpn_num_filters=fpn_num_filters,
apply_bn_for_resampling=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
conv_bn_act_pattern=conv_bn_act_pattern,
conv_type=conv_type,
act_type=act_type,
strategy=strategy,
weight_method=fpn_weight_method,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
name='fnode%d' % i)
self.fnodes.append(fnode)
def call(self, feats, training):
def _call(feats):
for fnode in self.fnodes:
feats = fnode(feats, training)
return feats
return _call(feats)
class SegClassNet(tf.keras.layers.Layer):
"""Segmentation class prediction network."""
def __init__(self,
min_level=3,
max_level=7,
output_filters=256,
apply_bn_for_resampling=True,
is_training_bn=True,
conv_after_downsample=True,
conv_bn_act_pattern=True,
head_conv_type='sep_3',
act_type='swish',
strategy='tpu',
output_weight_method='attn',
data_format='channels_last',
pooling_type='avg',
upsampling_type='bilinear',
fullres_output=False,
fullres_skip_connections=False,
num_classes=32,
name='seg_class_net'):
"""Initialize the SegClassNet.
Args:
min_level: minimum feature level to use in the head.
max_level: maximum feature level to use in the head.
output_filters: output filter size.
apply_bn_for_resampling:
whether to apply batch normalization for resampling.
is_training_bn: is training mode.
conv_after_downsample: whether to apply conv after downsample.
conv_bn_act_pattern: conv batch norm activation pattern.
head_conv_type: head convolution type.
act_type: activation type.
strategy: device strategy, eg. tpu.
output_weight_method: output weight method.
data_format: data format.
pooling_type: pooling type.
      upsampling_type: upsampling type.
fullres_output: full resolution output.
fullres_skip_connections: full resolution skip connection.
num_classes: number of classes.
name: the name of this layer.
"""
super().__init__(name=name)
conv2d_layer = get_conv_op(head_conv_type)
self.min_level = min_level
self.max_level = max_level
self.fullres_output = fullres_output
self.fullres_skip_connections = fullres_skip_connections
self.fnode = FNode(
0, # Always use the first level with highest resolution.
list(range(max_level - min_level + 1)),
output_filters,
apply_bn_for_resampling,
is_training_bn,
conv_after_downsample,
conv_bn_act_pattern,
head_conv_type,
act_type,
strategy,
output_weight_method,
data_format,
pooling_type,
upsampling_type,
name='seg_class_fusion')
if fullres_output:
self.fullres_conv_transpose = {}
self.fullres_conv = {}
for i in reversed(range(min_level)):
num_filters = min(num_classes * 2**(i + 1),
output_filters)
self.fullres_conv[str(i)] = conv2d_layer(
filters=num_filters,
data_format=data_format,
kernel_size=3,
strides=1,
padding='same',
activation=act_type,
name='fullres_conv_%d' % i)
self.fullres_conv_transpose[str(i)] = tf.keras.layers.Conv2DTranspose(
filters=num_filters,
data_format=data_format,
kernel_size=3,
strides=2,
padding='same',
activation=act_type,
name='fullres_conv_transpose_%d' % i)
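    # The bias below follows the focal-loss prior: -log((1 - pi) / pi) with
    # pi = 0.01, so initial predicted class probabilities start near 0.01.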
self.classes = conv2d_layer(
num_classes,
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
padding='same',
name='seg-class-predict')
def call(self, inputs, backbone_feats, training):
"""Call SegClassNet."""
seg_output = self.fnode(inputs, training)
net = seg_output[-1]
if self.fullres_output:
for i in reversed(range(self.min_level)):
if self.fullres_skip_connections:
net = tf.keras.layers.Concatenate()([net, backbone_feats[i + 1]])
net = self.fullres_conv[str(i)](net)
net = self.fullres_conv_transpose[str(i)](net)
class_outputs = self.classes(net)
return class_outputs
| 26,355 | 34.809783 | 82 | py |
models | models-master/official/projects/edgetpu/vision/tasks/semantic_segmentation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image segmentation task definition."""
from typing import Any, Mapping, Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import task_factory
from official.projects.edgetpu.vision.configs import semantic_segmentation_config as exp_cfg
from official.projects.edgetpu.vision.configs import semantic_segmentation_searched_config as searched_cfg
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model
from official.projects.edgetpu.vision.modeling.backbones import mobilenet_edgetpu # pylint: disable=unused-import
from official.projects.edgetpu.vision.modeling.heads import bifpn_head
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import segmentation_input
from official.vision.dataloaders import tfds_factory
from official.vision.ops import preprocess_ops
from official.vision.tasks import semantic_segmentation
class ClassMappingParser(segmentation_input.Parser):
"""Same parser but maps classes max_class+1... to class 0."""
max_class = 31
def _prepare_image_and_label(self, data):
"""Prepare normalized image and label."""
image = tf.io.decode_image(data['image/encoded'], channels=3)
label = tf.io.decode_image(data['image/segmentation/class/encoded'],
channels=1)
height = data['image/height']
width = data['image/width']
image = tf.reshape(image, (height, width, 3))
label = tf.reshape(label, (1, height, width))
label = tf.where(
tf.math.greater(label, self.max_class), tf.zeros_like(label), label)
label = tf.where(tf.math.equal(label, 0), tf.ones_like(label)*255, label)
label = tf.cast(label, tf.float32)
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=[0.5, 0.5, 0.5], scale=[0.5, 0.5, 0.5])
return image, label
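# Standalone sketch of the remapping above: raw labels beyond `max_class`
# collapse to background (0), and background is then remapped to the ignore
# value 255, so only classes 1..max_class contribute to training.
def _example_label_remapping(label: tf.Tensor, max_class: int = 31):
  label = tf.where(
      tf.math.greater(label, max_class), tf.zeros_like(label), label)
  return tf.where(tf.math.equal(label, 0), tf.ones_like(label) * 255, label)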
@task_factory.register_task_cls(exp_cfg.CustomSemanticSegmentationTaskConfig)
class CustomSemanticSegmentationTask(
semantic_segmentation.SemanticSegmentationTask):
"""A task for semantic segmentation."""
def build_inputs(self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds classification input."""
ignore_label = self.task_config.losses.ignore_label
if params.tfds_name:
decoder = tfds_factory.get_segmentation_decoder(params.tfds_name)
else:
decoder = segmentation_input.Decoder()
parser = ClassMappingParser(
output_size=params.output_size,
crop_size=params.crop_size,
ignore_label=ignore_label,
resize_eval_groundtruth=params.resize_eval_groundtruth,
groundtruth_padded_size=params.groundtruth_padded_size,
aug_scale_min=params.aug_scale_min,
aug_scale_max=params.aug_scale_max,
aug_rand_hflip=params.aug_rand_hflip,
dtype=params.dtype)
parser.max_class = self.task_config.model.num_classes-1
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
class AutosegEdgeTPU(tf.keras.Model):
"""Segmentation keras network without pre/post-processing."""
def __init__(self,
model_params,
min_level=3,
max_level=8,
output_filters=96,
model_config=None,
use_original_backbone_features=False,
is_training_bn=True,
strategy='tpu',
data_format='channels_last',
pooling_type='avg',
fpn_num_filters=96,
apply_bn_for_resampling=True,
conv_after_downsample=True,
upsampling_type='bilinear',
conv_bn_act_pattern=True,
conv_type='sep_3',
head_conv_type='sep_3',
act_type='relu6',
fpn_weight_method='sum',
output_weight_method='sum',
fullres_output=False,
num_classes=32,
name='autoseg_edgetpu'):
"""Initialize model."""
super().__init__()
self.min_level = min_level
self.max_level = max_level
self.use_original_backbone_features = use_original_backbone_features
self.strategy = strategy
self.data_format = data_format
model_name = model_params['model_name']
self.backbone = get_models()[model_name](**model_params)
# Feature network.
self.resample_layers = [] # additional resampling layers.
if use_original_backbone_features:
start_level = 6
else:
      # Not using the original backbone features will (1) use convolutions to
      # process all backbone features before feeding them into the FPN, and
      # (2) use an extra convolution to get higher-level features, while
      # preserving the channel size from the last layer of the backbone.
start_level = min_level
self.downsample_layers = []
for level in range(start_level, max_level + 1):
self.downsample_layers.append(
bifpn_head.ResampleFeatureMap(
feat_level=(level - min_level),
target_num_channels=fpn_num_filters,
is_training_bn=is_training_bn,
strategy=strategy,
data_format=data_format,
pooling_type=pooling_type,
name='downsample_p%d' % level,
))
for level in range(start_level, max_level + 1):
# Adds a coarser level by downsampling the last feature map.
self.resample_layers.append(
bifpn_head.ResampleFeatureMap(
feat_level=(level - min_level),
target_num_channels=fpn_num_filters,
apply_bn=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
strategy=strategy,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
name='resample_p%d' % level,
))
self.fpn_cells = bifpn_head.FPNCells(
min_level=min_level,
max_level=max_level,
fpn_num_filters=fpn_num_filters,
apply_bn_for_resampling=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
conv_bn_act_pattern=conv_bn_act_pattern,
conv_type=conv_type,
act_type=act_type,
strategy=strategy,
fpn_weight_method=fpn_weight_method,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
fpn_name='bifpn')
self.seg_class_net = bifpn_head.SegClassNet(
min_level=min_level,
max_level=max_level,
output_filters=output_filters,
apply_bn_for_resampling=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
conv_bn_act_pattern=conv_bn_act_pattern,
head_conv_type=head_conv_type,
act_type=act_type,
strategy=strategy,
output_weight_method=output_weight_method,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
fullres_output=fullres_output,
num_classes=num_classes)
def call(self, inputs, training): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
# call backbone network.
all_feats = self.backbone(inputs, training=training)
if self.use_original_backbone_features:
feats = all_feats[self.min_level:self.max_level + 1]
for resample_layer in self.resample_layers:
feats.append(resample_layer(feats[-1], training, None))
else:
feats = []
for downsample_layer in self.downsample_layers:
all_feats.append(downsample_layer(all_feats[-1], training, None))
for level in range(self.min_level - 1, self.max_level):
feats.append(self.resample_layers[level - self.min_level + 1](
all_feats[level], training, all_feats[self.min_level - 1:]))
# call feature network.
feats = self.fpn_cells(feats, training)
    # call the segmentation output network.
class_outputs = self.seg_class_net(feats, all_feats, training)
return class_outputs
def get_models() -> Mapping[str, tf.keras.Model]:
"""Returns the mapping from model type name to Keras model."""
model_mapping = {}
def add_models(name: str, constructor: Any):
if name in model_mapping:
raise ValueError(f'Model {name} already exists in the mapping.')
model_mapping[name] = constructor
for model in mobilenet_edgetpu_v1_model.MODEL_CONFIGS.keys():
add_models(model, mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name)
for model in mobilenet_edgetpu_v2_model.MODEL_CONFIGS.keys():
add_models(model, mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2.from_name)
return model_mapping
@task_factory.register_task_cls(searched_cfg.AutosegEdgeTPUTaskConfig)
class AutosegEdgeTPUTask(semantic_segmentation.SemanticSegmentationTask):
"""A task for training the AutosegEdgeTPU models."""
def build_model(self):
"""Builds model for training task."""
model_config = self.task_config.model
model_params = model_config.model_params.as_dict()
model = AutosegEdgeTPU(
model_params,
min_level=model_config.head.min_level,
max_level=model_config.head.max_level,
fpn_num_filters=model_config.head.fpn_num_filters,
num_classes=model_config.num_classes)
logging.info(model_params)
return model
# TODO(suyoggupta): Dedup this function across tasks.
def build_inputs(self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds inputs for the segmentation task."""
ignore_label = self.task_config.losses.ignore_label
if params.tfds_name:
decoder = tfds_factory.get_segmentation_decoder(params.tfds_name)
else:
decoder = segmentation_input.Decoder()
parser = ClassMappingParser(
output_size=params.output_size,
crop_size=params.crop_size,
ignore_label=ignore_label,
resize_eval_groundtruth=params.resize_eval_groundtruth,
groundtruth_padded_size=params.groundtruth_padded_size,
aug_scale_min=params.aug_scale_min,
aug_scale_max=params.aug_scale_max,
aug_rand_hflip=params.aug_rand_hflip,
dtype=params.dtype)
parser.max_class = self.task_config.model.num_classes - 1
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
| 11,868 | 38.171617 | 114 | py |
models | models-master/official/projects/edgetpu/vision/tasks/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task definition."""
import os
import tempfile
from typing import Any, List, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.modeling import tf_utils
from official.projects.edgetpu.vision.configs import mobilenet_edgetpu_config as edgetpu_cfg
from official.projects.edgetpu.vision.dataloaders import classification_input
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model
from official.vision.configs import image_classification as base_cfg
from official.vision.dataloaders import input_reader_factory
def _copy_recursively(src: str, dst: str) -> None:
"""Recursively copy directory."""
for src_dir, _, src_files in tf.io.gfile.walk(src):
dst_dir = os.path.join(dst, os.path.relpath(src_dir, src))
if not tf.io.gfile.exists(dst_dir):
tf.io.gfile.makedirs(dst_dir)
for src_file in src_files:
tf.io.gfile.copy(
os.path.join(src_dir, src_file),
os.path.join(dst_dir, src_file),
overwrite=True)
def get_models() -> Mapping[str, tf.keras.Model]:
"""Returns the mapping from model type name to Keras model."""
model_mapping = {}
def add_models(name: str, constructor: Any):
if name in model_mapping:
raise ValueError(f'Model {name} already exists in the mapping.')
model_mapping[name] = constructor
for model in mobilenet_edgetpu_v1_model.MODEL_CONFIGS.keys():
add_models(model, mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name)
for model in mobilenet_edgetpu_v2_model.MODEL_CONFIGS.keys():
add_models(model, mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2.from_name)
return model_mapping
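# Illustrative usage (a sketch): constructors are keyed by model name and, as
# in build_model below, are called with keyword arguments that include
# 'model_name' itself.
def _example_model_lookup():
  registered = get_models()
  name = sorted(registered)[0]  # Any registered MobileNet-EdgeTPU variant.
  return registered[name](model_name=name)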
def load_searched_model(saved_model_path: str) -> tf.keras.Model:
"""Loads saved model from file.
  Besides loading MobileNet-EdgeTPU-V1/V2 models, we can also load a searched
  model directly from its saved model path by changing the model path in
  mobilenet_edgetpu_search (defined in mobilenet_edgetpu_config.py).
Args:
saved_model_path: Directory path for the saved searched model.
Returns:
Loaded keras model.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
if tf.io.gfile.isdir(saved_model_path):
_copy_recursively(saved_model_path, tmp_dir)
load_path = tmp_dir
else:
raise ValueError('Saved model path is invalid.')
load_options = tf.saved_model.LoadOptions(
experimental_io_device='/job:localhost')
model = tf.keras.models.load_model(load_path, options=load_options)
return model
@task_factory.register_task_cls(edgetpu_cfg.MobilenetEdgeTPUTaskConfig)
class EdgeTPUTask(base_task.Task):
"""A task for training MobileNet-EdgeTPU models."""
def build_model(self):
"""Builds model for MobileNet-EdgeTPU Task."""
model_config = self.task_config.model
model_params = model_config.model_params.as_dict()
model_name = model_params['model_name']
registered_models = get_models()
if model_name in registered_models:
logging.info('Load MobileNet-EdgeTPU-V1/V2 model.')
logging.info(model_params)
model = registered_models[model_name](**model_params)
elif model_name == 'mobilenet_edgetpu_search':
if self.task_config.saved_model_path is None:
        raise ValueError('If using MobileNet-EdgeTPU-Search model, please '
                         'specify the saved model path via the '
                         '--params_override flag.')
logging.info('Load saved model (model from search) directly.')
model = load_searched_model(self.task_config.saved_model_path)
else:
      raise ValueError('Model has to be a mobilenet-edgetpu model or a '
                       'searched model with a given saved model path.')
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all' or 'backbone' can be used to initialize the model.")
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(
self,
params: base_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Builds classification input."""
num_classes = self.task_config.model.num_classes
input_size = self.task_config.model.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
is_multilabel = self.task_config.train_data.is_multilabel
if params.tfds_name:
raise ValueError('TFDS {} is not supported'.format(params.tfds_name))
else:
decoder = classification_input.Decoder(
image_field_key=image_field_key, label_field_key=label_field_key,
is_multilabel=is_multilabel)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_type=params.aug_type,
is_multilabel=is_multilabel,
dtype=params.dtype)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: tf.Tensor,
model_outputs: tf.Tensor,
aux_losses: Optional[Any] = None) -> tf.Tensor:
"""Builds sparse categorical cross entropy loss.
Args:
labels: Input groundtruth labels.
model_outputs: Output logits of the classifier.
      aux_losses: The auxiliary loss tensors, i.e. `losses` in tf.keras.Model.
Returns:
The total loss tensor.
"""
losses_config = self.task_config.losses
is_multilabel = self.task_config.train_data.is_multilabel
if not is_multilabel:
if losses_config.one_hot:
total_loss = tf.keras.losses.categorical_crossentropy(
labels,
model_outputs,
from_logits=False,
label_smoothing=losses_config.label_smoothing)
else:
total_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, model_outputs, from_logits=True)
else:
# Multi-label weighted binary cross entropy loss.
total_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=model_outputs)
total_loss = tf.reduce_sum(total_loss, axis=-1)
total_loss = tf_utils.safe_mean(total_loss)
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self,
training: bool = True) -> List[tf.keras.metrics.Metric]:
"""Gets streaming metrics for training/validation."""
is_multilabel = self.task_config.train_data.is_multilabel
if not is_multilabel:
k = self.task_config.evaluation.top_k
if self.task_config.losses.one_hot:
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))]
else:
metrics = []
    # These metrics destabilize the training if included in training. The
    # jobs fail due to OOM.
    # TODO(arashwan): Investigate adding the following metrics to training.
if not training:
metrics = [
tf.keras.metrics.AUC(
name='globalPR-AUC',
curve='PR',
multi_label=False,
from_logits=True),
tf.keras.metrics.AUC(
name='meanPR-AUC',
curve='PR',
multi_label=True,
num_labels=self.task_config.model.num_classes,
from_logits=True),
]
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
optimizer: The optimizer for this training step.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
is_multilabel = self.task_config.train_data.is_multilabel
if self.task_config.losses.one_hot and not is_multilabel:
labels = tf.one_hot(labels, self.task_config.model.num_classes)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Runs validatation step.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
is_multilabel = self.task_config.train_data.is_multilabel
if self.task_config.losses.one_hot and not is_multilabel:
labels = tf.one_hot(labels, self.task_config.model.num_classes)
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = self.build_losses(model_outputs=outputs, labels=labels,
aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def inference_step(self, inputs: tf.Tensor, model: tf.keras.Model):
"""Performs the forward step."""
return model(inputs, training=False)
| 13,218 | 36.768571 | 92 | py |
models | models-master/official/projects/volumetric_models/evaluation/segmentation_metrics.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for segmentation."""
from typing import Optional
import tensorflow as tf
from official.projects.volumetric_models.losses import segmentation_losses
class DiceScore:
"""Dice score metric for semantic segmentation.
This class follows the same function interface as tf.keras.metrics.Metric but
  does not derive from tf.keras.metrics.Metric or utilize its functions. The
  reason is that a tf.keras.metrics.Metric object created on GPU does not run
  well on CPU when running with MirroredStrategy. The same interface allows
  for minimal change to the upstream tasks.
Attributes:
name: The name of the metric.
dtype: The dtype of the metric, for example, tf.float32.
"""
def __init__(self,
num_classes: int,
metric_type: Optional[str] = None,
per_class_metric: bool = False,
name: Optional[str] = None,
dtype: Optional[str] = None):
"""Constructs segmentation evaluator class.
Args:
num_classes: The number of classes.
      metric_type: An optional `str` specifying the type of dice score.
      per_class_metric: Whether to report per-class metrics.
      name: A `str`, name of the metric instance.
      dtype: The data type of the metric result.
"""
self._num_classes = num_classes
self._per_class_metric = per_class_metric
self._dice_op_overall = segmentation_losses.SegmentationLossDiceScore(
metric_type=metric_type)
self._dice_scores_overall = tf.Variable(0.0)
self._count = tf.Variable(0.0)
if self._per_class_metric:
# Always use raw dice score for per-class metrics, so metric_type is None
# by default.
self._dice_op_per_class = segmentation_losses.SegmentationLossDiceScore()
self._dice_scores_per_class = [
tf.Variable(0.0) for _ in range(num_classes)
]
self._count_per_class = [tf.Variable(0.0) for _ in range(num_classes)]
self.name = name
self.dtype = dtype
def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor):
"""Updates metric state.
Args:
y_true: The true labels of size [batch, width, height, volume,
num_classes].
y_pred: The prediction of size [batch, width, height, volume,
num_classes].
Raises:
ValueError: If number of classes from groundtruth label does not equal to
`num_classes`.
"""
if self._num_classes != y_true.get_shape()[-1]:
raise ValueError(
'The number of classes from groundtruth labels and `num_classes` '
'should equal, but they are {0} and {1}.'.format(
self._num_classes,
y_true.get_shape()[-1]))
# If both y_pred and y_true are all 0s, we skip computing the metrics;
# otherwise the averaged metrics will be erroneously lower.
if tf.reduce_sum(y_true) != 0 or tf.reduce_sum(y_pred) != 0:
self._count.assign_add(1.)
self._dice_scores_overall.assign_add(
1 - self._dice_op_overall(y_pred, y_true))
if self._per_class_metric:
for class_id in range(self._num_classes):
if tf.reduce_sum(y_true[..., class_id]) != 0 or tf.reduce_sum(
y_pred[..., class_id]) != 0:
self._count_per_class[class_id].assign_add(1.)
self._dice_scores_per_class[class_id].assign_add(
1 - self._dice_op_per_class(y_pred[...,
class_id], y_true[...,
class_id]))
def result(self) -> tf.Tensor:
"""Computes and returns the metric.
    The first element is the `generalized` or `adaptive` overall dice score,
    depending on `metric_type`. If `per_class_metric` is True, `num_classes`
    elements are also appended to the overall metric, as the per-class raw
    dice scores.
Returns:
The resulting dice scores.
"""
if self._per_class_metric:
dice_scores = [
tf.math.divide_no_nan(self._dice_scores_overall, self._count)
]
for class_id in range(self._num_classes):
dice_scores.append(
tf.math.divide_no_nan(self._dice_scores_per_class[class_id],
self._count_per_class[class_id]))
return tf.stack(dice_scores)
else:
return tf.math.divide_no_nan(self._dice_scores_overall, self._count)
def reset_states(self):
"""Resets the metrcis to the initial state."""
self._count = tf.Variable(0.0)
self._dice_scores_overall = tf.Variable(0.0)
if self._per_class_metric:
for class_id in range(self._num_classes):
self._dice_scores_per_class[class_id] = tf.Variable(0.0)
self._count_per_class[class_id] = tf.Variable(0.0)
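# Illustrative usage (a sketch): accumulate over batches, then read the
# averaged scores. Inputs are assumed to have the rank-5 shape documented in
# update_state; with per_class_metric=True the result stacks the overall
# score followed by one raw dice score per class.
def _example_dice_score(y_true: tf.Tensor, y_pred: tf.Tensor,
                        num_classes: int = 2) -> tf.Tensor:
  metric = DiceScore(num_classes=num_classes, per_class_metric=True,
                     name='dice_score', dtype='float32')
  metric.update_state(y_true, y_pred)
  return metric.result()  # Shape [1 + num_classes].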
| 5,352 | 38.072993 | 80 | py |
models | models-master/official/projects/volumetric_models/serving/semantic_segmentation_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D semantic segmentation input and model functions for serving/inference."""
from typing import Mapping
import tensorflow as tf
# pylint: disable=unused-import
from official.projects.volumetric_models.modeling import backbones
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.modeling import factory
from official.vision.serving import export_base
class SegmentationModule(export_base.ExportModule):
"""Segmentation Module."""
def _build_model(self) -> tf.keras.Model:
"""Builds and returns a segmentation model."""
num_channels = self.params.task.model.num_channels
input_specs = tf.keras.layers.InputSpec(
shape=[self._batch_size] + self._input_image_size + [num_channels])
return factory.build_segmentation_model_3d(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
def serve(
self, images: tf.Tensor) -> Mapping[str, tf.Tensor]:
"""Casts an image tensor to float and runs inference.
Args:
images: A uint8 tf.Tensor of shape [batch_size, None, None, None,
num_channels].
Returns:
A dictionary holding segmentation outputs.
"""
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
outputs = self.inference_step(images)
output_key = 'logits' if self.params.task.model.head.output_logits else 'probs'
return {output_key: outputs['logits']}
| 2,089 | 33.833333 | 83 | py |
models | models-master/official/projects/volumetric_models/modeling/nn_blocks_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for neural networks."""
from typing import Sequence, Union
# Import libraries
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.modeling.layers import nn_layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class BasicBlock3DVolume(tf.keras.layers.Layer):
"""A basic 3d convolution block."""
def __init__(self,
filters: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]],
kernel_size: Union[int, Sequence[int]],
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
bias_regularizer: tf.keras.regularizers.Regularizer = None,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_batch_normalization: bool = False, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Creates a basic 3d convolution block applying one or more convolutions.
Args:
filters: A list of `int` numbers or an `int` number of filters. Given an
`int` input, a single convolution is applied; otherwise a series of
convolutions are applied.
strides: An integer or tuple/list of 3 integers, specifying the strides of
the convolution along each spatial dimension. Can be a single integer to
specify the same value for all spatial dimensions.
kernel_size: An integer or tuple/list of 3 integers, specifying the depth,
height and width of the 3D convolution window. Can be a single integer
to specify the same value for all spatial dimensions.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
      norm_epsilon: `float` small float added to variance to avoid dividing by
        zero.
      use_batch_normalization: Whether to use batch normalization or not.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
if isinstance(filters, int):
self._filters = [filters]
else:
self._filters = filters
self._strides = strides
self._kernel_size = kernel_size
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation = activation
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._use_batch_normalization = use_batch_normalization
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape: tf.TensorShape):
"""Builds the basic 3d convolution block."""
self._convs = []
self._norms = []
for filters in self._filters:
self._convs.append(
tf.keras.layers.Conv3D(
filters=filters,
kernel_size=self._kernel_size,
strides=self._strides,
padding='same',
data_format=tf.keras.backend.image_data_format(),
activation=None))
self._norms.append(
self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon))
super(BasicBlock3DVolume, self).build(input_shape)
def get_config(self):
"""Returns the config of the basic 3d convolution block."""
config = {
'filters': self._filters,
'strides': self._strides,
'kernel_size': self._kernel_size,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'use_batch_normalization': self._use_batch_normalization
}
base_config = super(BasicBlock3DVolume, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: tf.Tensor, training: bool = None) -> tf.Tensor:
"""Runs forward pass on the input tensor."""
x = inputs
for conv, norm in zip(self._convs, self._norms):
x = conv(x)
if self._use_batch_normalization:
x = norm(x)
x = self._activation_fn(x)
return x
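# Illustrative usage (a sketch): two stacked 3x3x3 convolutions at stride 1
# preserve the spatial volume and end with the last filter count.
def _example_basic_block():
  block = BasicBlock3DVolume(filters=[16, 32], strides=1, kernel_size=3)
  return block(tf.zeros([1, 8, 8, 8, 4]))  # Shape [1, 8, 8, 8, 32].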
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResidualBlock3DVolume(tf.keras.layers.Layer):
"""A residual 3d block."""
def __init__(self,
filters,
strides,
use_projection=False,
se_ratio=None,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""A residual 3d block with BN after convolutions.
Args:
      filters: `int` number of filters for the two convolutions in this block.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
se_ratio: `float` or None. Ratio of the Squeeze-and-Excitation layer.
stochastic_depth_drop_rate: `float` or None. if not None, drop rate for
the stochastic depth layer.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._filters = filters
self._strides = strides
self._use_projection = use_projection
self._se_ratio = se_ratio
self._use_sync_bn = use_sync_bn
self._activation = activation
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
if self._use_projection:
self._shortcut = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv1 = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=3,
strides=self._strides,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv2 = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=3,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
self._squeeze_excitation = nn_layers.SqueezeExcitation(
in_filters=self._filters,
out_filters=self._filters,
se_ratio=self._se_ratio,
use_3d_input=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
self._squeeze_excitation = None
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
super(ResidualBlock3DVolume, self).build(input_shape)
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'use_projection': self._use_projection,
'se_ratio': self._se_ratio,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(ResidualBlock3DVolume, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
shortcut = inputs
if self._use_projection:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._conv2(x)
x = self._norm2(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
return self._activation_fn(x + shortcut)
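# Illustrative usage (a sketch): a stride-2 block needs use_projection=True
# so the shortcut matches the downsampled, re-widened main path.
def _example_residual_block():
  block = ResidualBlock3DVolume(filters=32, strides=2, use_projection=True)
  return block(tf.zeros([1, 8, 8, 8, 16]))  # Shape [1, 4, 4, 4, 32].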
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlock3DVolume(tf.keras.layers.Layer):
"""A standard bottleneck block."""
def __init__(self,
filters,
strides,
dilation_rate=1,
use_projection=False,
se_ratio=None,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""A standard bottleneck 3d block with BN after convolutions.
Args:
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
dilation_rate: `int` dilation_rate of convolutions. Default to 1.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
se_ratio: `float` or None. Ratio of the Squeeze-and-Excitation layer.
stochastic_depth_drop_rate: `float` or None. if not None, drop rate for
the stochastic depth layer.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._filters = filters
self._strides = strides
self._dilation_rate = dilation_rate
self._use_projection = use_projection
self._se_ratio = se_ratio
self._use_sync_bn = use_sync_bn
self._activation = activation
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
if self._use_projection:
self._shortcut = tf.keras.layers.Conv3D(
filters=self._filters * 4,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv1 = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv2 = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=3,
strides=self._strides,
dilation_rate=self._dilation_rate,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv3 = tf.keras.layers.Conv3D(
filters=self._filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm3 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
self._squeeze_excitation = nn_layers.SqueezeExcitation(
in_filters=self._filters * 4,
out_filters=self._filters * 4,
se_ratio=self._se_ratio,
use_3d_input=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
self._squeeze_excitation = None
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
super(BottleneckBlock3DVolume, self).build(input_shape)
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'dilation_rate': self._dilation_rate,
'use_projection': self._use_projection,
'se_ratio': self._se_ratio,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(BottleneckBlock3DVolume, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
shortcut = inputs
if self._use_projection:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._conv2(x)
x = self._norm2(x)
x = self._activation_fn(x)
x = self._conv3(x)
x = self._norm3(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
return self._activation_fn(x + shortcut)
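# A minimal smoke-test sketch (added for illustration; mirrors the shapes
# exercised in nn_blocks_3d_test.py). With `use_projection=True` and
# `strides=2`, the block halves each spatial/volume dimension and outputs
# 4 * `filters` channels.
if __name__ == '__main__':
  _x = tf.random.uniform([1, 32, 32, 16, 8])
  _block = BottleneckBlock3DVolume(filters=2, strides=2, use_projection=True)
  print(_block(_x).shape)  # Expected: (1, 16, 16, 8, 8)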
| 19,158 | 36.714567 | 112 | py |
models | models-master/official/projects/volumetric_models/modeling/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
from absl.testing import parameterized
import tensorflow as tf
# pylint: disable=unused-import
from official.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg
from official.projects.volumetric_models.modeling import backbones
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.modeling import factory
class SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(((128, 128, 128), 5e-5, True),
((64, 64, 64), None, False))
def test_unet3d_builder(self, input_size, weight_decay, use_bn):
num_classes = 3
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], input_size[2], 3])
model_config = exp_cfg.SemanticSegmentationModel3D(num_classes=num_classes)
model_config.head.use_batch_normalization = use_bn
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
model = factory.build_segmentation_model_3d(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
self.assertIsInstance(
model, tf.keras.Model,
'Output should be a tf.keras.Model instance but got %s' % type(model))
if __name__ == '__main__':
tf.test.main()
| 1,994 | 38.9 | 91 | py |
models | models-master/official/projects/volumetric_models/modeling/nn_blocks_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for 3D volumeric convoluion blocks."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.volumetric_models.modeling import nn_blocks_3d
class NNBlocks3DTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((128, 128, 32, 1), (256, 256, 16, 2))
def test_bottleneck_block_3d_volume_creation(self, spatial_size, volume_size,
filters, strides):
inputs = tf.keras.Input(
shape=(spatial_size, spatial_size, volume_size, filters * 4),
batch_size=1)
block = nn_blocks_3d.BottleneckBlock3DVolume(
filters=filters,
strides=strides,
use_projection=True,
se_ratio=0.2,
stochastic_depth_drop_rate=0.2)
features = block(inputs)
self.assertAllEqual([
1, spatial_size // strides, spatial_size // strides,
volume_size // strides, filters * 4
], features.shape.as_list())
@parameterized.parameters((128, 128, 32, 1), (256, 256, 64, 2))
def test_residual_block_3d_volume_creation(self, spatial_size, volume_size,
filters, strides):
inputs = tf.keras.Input(
shape=(spatial_size, spatial_size, volume_size, filters), batch_size=1)
block = nn_blocks_3d.ResidualBlock3DVolume(
filters=filters,
strides=strides,
use_projection=True,
se_ratio=0.2,
stochastic_depth_drop_rate=0.2)
features = block(inputs)
self.assertAllEqual([
1, spatial_size // strides, spatial_size // strides,
volume_size // strides, filters
], features.shape.as_list())
@parameterized.parameters((128, 128, 64, 1, 3), (256, 256, 128, 2, 1))
def test_basic_block_3d_volume_creation(self, spatial_size, volume_size,
filters, strides, kernel_size):
inputs = tf.keras.Input(
shape=(spatial_size, spatial_size, volume_size, filters), batch_size=1)
block = nn_blocks_3d.BasicBlock3DVolume(
filters=filters, strides=strides, kernel_size=kernel_size)
features = block(inputs)
self.assertAllEqual([
1, spatial_size // strides, spatial_size // strides,
volume_size // strides, filters
], features.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 2,976 | 34.86747 | 79 | py |
models | models-master/official/projects/volumetric_models/modeling/segmentation_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.volumetric_models.modeling import backbones
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.modeling.heads import segmentation_heads_3d
from official.vision.modeling import segmentation_model
class SegmentationNetworkUNet3DTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([32, 32], 4),
([64, 64], 4),
([64, 64], 2),
([128, 64], 2),
)
def test_segmentation_network_unet3d_creation(self, input_size, depth):
"""Test for creation of a segmentation network."""
num_classes = 2
inputs = np.random.rand(2, input_size[0], input_size[0], input_size[1], 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.UNet3D(model_id=depth)
decoder = decoders.UNet3DDecoder(
model_id=depth, input_specs=backbone.output_specs)
head = segmentation_heads_3d.SegmentationHead3D(
num_classes, level=1, num_convs=0)
model = segmentation_model.SegmentationModel(
backbone=backbone, decoder=decoder, head=head)
outputs = model(inputs)
self.assertAllEqual(
[2, input_size[0], input_size[0], input_size[1], num_classes],
outputs['logits'].numpy().shape)
def test_serialize_deserialize(self):
"""Validate the network can be serialized and deserialized."""
num_classes = 3
backbone = backbones.UNet3D(model_id=4)
decoder = decoders.UNet3DDecoder(
model_id=4, input_specs=backbone.output_specs)
head = segmentation_heads_3d.SegmentationHead3D(
num_classes, level=1, num_convs=0)
model = segmentation_model.SegmentationModel(
backbone=backbone, decoder=decoder, head=head)
config = model.get_config()
new_model = segmentation_model.SegmentationModel.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,856 | 36.103896 | 84 | py |
models | models-master/official/projects/volumetric_models/modeling/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to build models."""
from typing import Sequence, Union
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.volumetric_models.modeling.decoders import factory as decoder_factory
from official.projects.volumetric_models.modeling.heads import segmentation_heads_3d
from official.vision.modeling import segmentation_model
from official.vision.modeling.backbones import factory as backbone_factory
def build_segmentation_model_3d(
input_specs: Union[tf.keras.layers.InputSpec,
Sequence[tf.keras.layers.InputSpec]],
model_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds Segmentation model."""
norm_activation_config = model_config.norm_activation
backbone = backbone_factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
decoder = decoder_factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
head_config = model_config.head
head = segmentation_heads_3d.SegmentationHead3D(
num_classes=model_config.num_classes,
level=head_config.level,
num_convs=head_config.num_convs,
num_filters=head_config.num_filters,
upsample_factor=head_config.upsample_factor,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
use_batch_normalization=head_config.use_batch_normalization,
kernel_regularizer=l2_regularizer,
output_logits=head_config.output_logits)
model = segmentation_model.SegmentationModel(backbone, decoder, head)
return model
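# Usage sketch (added for illustration; mirrors factory_test.py):
#   from official.projects.volumetric_models.configs import (
#       semantic_segmentation_3d as exp_cfg)
#   input_specs = tf.keras.layers.InputSpec(shape=[None, 64, 64, 64, 3])
#   model_config = exp_cfg.SemanticSegmentationModel3D(num_classes=3)
#   model = build_segmentation_model_3d(input_specs, model_config)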
| 2,620 | 39.323077 | 92 | py |
models | models-master/official/projects/volumetric_models/modeling/decoders/unet_3d_decoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for 3D UNet decoder."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.volumetric_models.modeling.backbones import unet_3d
from official.projects.volumetric_models.modeling.decoders import unet_3d_decoder
class UNet3DDecoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([128, 64], 4),
([256, 128], 6),
)
def test_network_creation(self, input_size, model_id):
"""Test creation of UNet3D family models."""
tf.keras.backend.set_image_data_format('channels_last')
# `input_size` consists of [spatial size, volume size].
inputs = tf.keras.Input(
shape=(input_size[0], input_size[0], input_size[1], 3), batch_size=1)
backbone = unet_3d.UNet3D(model_id=model_id)
network = unet_3d_decoder.UNet3DDecoder(
model_id=model_id, input_specs=backbone.output_specs)
endpoints = backbone(inputs)
feats = network(endpoints)
self.assertIn('1', feats)
self.assertAllEqual([1, input_size[0], input_size[0], input_size[1], 64],
feats['1'].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=4,
input_specs=unet_3d.UNet3D(model_id=4).output_specs,
pool_size=(2, 2, 2),
kernel_size=(3, 3, 3),
kernel_regularizer=None,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
use_sync_bn=False,
use_batch_normalization=True,
use_deconvolution=True)
network = unet_3d_decoder.UNet3DDecoder(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = unet_3d_decoder.UNet3DDecoder.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,822 | 33.851852 | 81 | py |
models | models-master/official/projects/volumetric_models/modeling/decoders/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoder registers and factory method.
One can register a new decoder model by the following two steps:
1 Import the factory and register the build in the decoder file.
2 Import the decoder class and add a build in __init__.py.
```
# my_decoder.py
from modeling.decoders import factory
class MyDecoder():
...
@factory.register_decoder_builder('my_decoder')
def build_my_decoder():
return MyDecoder()
# decoders/__init__.py adds import
from modeling.decoders.my_decoder import MyDecoder
```
If one wants the MyDecoder class to be used only by a specific binary, then
don't import the decoder module in decoders/__init__.py; instead, import it
in the place that uses it.
"""
from typing import Union, Mapping, Optional
# Import libraries
import tensorflow as tf
from official.core import registry
from official.modeling import hyperparams
_REGISTERED_DECODER_CLS = {}
def register_decoder_builder(key: str):
"""Decorates a builder of decoder class.
The builder should be a Callable (a class or a function).
This decorator supports registration of decoder builder as follows:
```
class MyDecoder(tf.keras.Model):
pass
@register_decoder_builder('mydecoder')
def builder(input_specs, config, l2_reg):
return MyDecoder(...)
# Builds a MyDecoder object.
  my_decoder = build_decoder(input_specs, config, l2_reg)
```
Args:
key: A `str` of key to look up the builder.
Returns:
    A callable for use as a decorator that registers the decorated builder
    under `key` for later lookup by `build_decoder`.
"""
return registry.register(_REGISTERED_DECODER_CLS, key)
@register_decoder_builder('identity')
def build_identity(
input_specs: Optional[Mapping[str, tf.TensorShape]] = None,
model_config: Optional[hyperparams.Config] = None,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None) -> None:
del input_specs, model_config, l2_regularizer # Unused by identity decoder.
return None
def build_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None,
**kwargs) -> Union[None, tf.keras.Model, tf.keras.layers.Layer]: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds decoder from a config.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A `OneOfConfig` of model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` object. Default to
None.
**kwargs: Additional keyword args to be passed to decoder builder.
Returns:
An instance of the decoder.
"""
decoder_builder = registry.lookup(_REGISTERED_DECODER_CLS,
model_config.decoder.type)
return decoder_builder(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer,
**kwargs)
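# Usage sketch (added for illustration; the decoder key below is
# hypothetical):
#   @register_decoder_builder('my_decoder_3d')
#   def build_my_decoder_3d(input_specs, model_config, l2_regularizer=None):
#     del input_specs, model_config, l2_regularizer
#     return tf.keras.layers.Lambda(lambda features: features)
# A later build_decoder(...) call whose `model_config.decoder.type` equals
# 'my_decoder_3d' is then dispatched to this builder.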
| 3,565 | 29.478632 | 127 | py |
models | models-master/official/projects/volumetric_models/modeling/decoders/unet_3d_decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of 3D UNet Model decoder part.
[1] Özgün Çiçek, Ahmed Abdulkadir, Soeren S. Lienkamp, Thomas Brox, Olaf
Ronneberger. 3D U-Net: Learning Dense Volumetric Segmentation from Sparse
Annotation. arXiv:1606.06650.
"""
from typing import Any, Dict, Mapping, Optional, Sequence
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.volumetric_models.modeling import nn_blocks_3d
from official.projects.volumetric_models.modeling.decoders import factory
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class UNet3DDecoder(tf.keras.Model):
"""Class to build 3D UNet decoder."""
def __init__(self,
model_id: int,
input_specs: Mapping[str, tf.TensorShape],
pool_size: Sequence[int] = (2, 2, 2),
kernel_size: Sequence[int] = (3, 3, 3),
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
activation: str = 'relu',
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_sync_bn: bool = False,
use_batch_normalization: bool = False,
use_deconvolution: bool = False, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""3D UNet decoder initialization function.
Args:
model_id: The depth of UNet3D backbone model. The greater the depth, the
more max pooling layers will be added to the model. Lowering the depth
may reduce the amount of memory required for training.
input_specs: The input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
pool_size: The pooling size for the max pooling operations.
kernel_size: The kernel size for 3D convolution.
      kernel_regularizer: A tf.keras.regularizers.Regularizer object for
        Conv3D. Default to None.
Default to None.
activation: The name of the activation function.
norm_momentum: The normalization momentum for the moving average.
norm_epsilon: A float added to variance to avoid dividing by zero.
use_sync_bn: If True, use synchronized batch normalization.
use_batch_normalization: If set to True, use batch normalization after
convolution and before activation. Default to False.
use_deconvolution: If set to True, the model will use transpose
        convolution (deconvolution) instead of up-sampling. This increases the
        amount of memory required during training. Default to False.
**kwargs: Keyword arguments to be passed.
"""
self._config_dict = {
'model_id': model_id,
'input_specs': input_specs,
'pool_size': pool_size,
'kernel_size': kernel_size,
'kernel_regularizer': kernel_regularizer,
'activation': activation,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'use_sync_bn': use_sync_bn,
'use_batch_normalization': use_batch_normalization,
'use_deconvolution': use_deconvolution
}
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
self._use_batch_normalization = use_batch_normalization
if tf.keras.backend.image_data_format() == 'channels_last':
channel_dim = -1
else:
channel_dim = 1
# Build 3D UNet.
inputs = self._build_input_pyramid(input_specs, model_id) # pytype: disable=wrong-arg-types # dynamic-method-lookup
# Add levels with up-convolution or up-sampling.
x = inputs[str(model_id)]
for layer_depth in range(model_id - 1, 0, -1):
# Apply deconvolution or upsampling.
if use_deconvolution:
x = layers.Conv3DTranspose(
filters=x.get_shape().as_list()[channel_dim],
kernel_size=pool_size,
strides=(2, 2, 2))(
x)
else:
x = layers.UpSampling3D(size=pool_size)(x)
# Concatenate upsampled features with input features from one layer up.
x = tf.concat([x, tf.cast(inputs[str(layer_depth)], dtype=x.dtype)],
axis=channel_dim)
filter_num = inputs[str(layer_depth)].get_shape().as_list()[channel_dim]
x = nn_blocks_3d.BasicBlock3DVolume(
filters=[filter_num, filter_num],
strides=(1, 1, 1),
kernel_size=kernel_size,
kernel_regularizer=kernel_regularizer,
activation=activation,
use_sync_bn=use_sync_bn,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
use_batch_normalization=use_batch_normalization)(
x)
feats = {'1': x}
self._output_specs = {l: feats[l].get_shape() for l in feats}
super(UNet3DDecoder, self).__init__(inputs=inputs, outputs=feats, **kwargs)
def _build_input_pyramid(self, input_specs: Dict[str, tf.TensorShape],
depth: int) -> Dict[str, tf.Tensor]:
"""Builds input pyramid features."""
assert isinstance(input_specs, dict)
    if len(input_specs.keys()) != depth:
      raise ValueError(
          'Backbone depth should be equal to 3D UNet decoder\'s depth.')
inputs = {}
for level, spec in input_specs.items():
inputs[level] = tf.keras.Input(shape=spec[1:])
return inputs
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any], custom_objects=None):
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_decoder_builder('unet_3d_decoder')
def build_unet_3d_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds UNet3D decoder from a config.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A OneOfConfig. Model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to
None.
Returns:
A `tf.keras.Model` instance of the UNet3D decoder.
"""
decoder_type = model_config.decoder.type
decoder_cfg = model_config.decoder.get()
assert decoder_type == 'unet_3d_decoder', (f'Inconsistent decoder type '
f'{decoder_type}')
norm_activation_config = model_config.norm_activation
return UNet3DDecoder(
model_id=decoder_cfg.model_id,
input_specs=input_specs,
pool_size=decoder_cfg.pool_size,
kernel_regularizer=l2_regularizer,
activation=norm_activation_config.activation,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
use_sync_bn=norm_activation_config.use_sync_bn,
use_batch_normalization=decoder_cfg.use_batch_normalization,
use_deconvolution=decoder_cfg.use_deconvolution)
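# Usage sketch (added for illustration; mirrors unet_3d_decoder_test.py). The
# decoder consumes the backbone's multilevel endpoints and emits a single
# level-'1' feature map at the input resolution with 64 channels:
#   from official.projects.volumetric_models.modeling.backbones import unet_3d
#   backbone = unet_3d.UNet3D(model_id=4)
#   decoder = UNet3DDecoder(model_id=4, input_specs=backbone.output_specs)
#   feats = decoder(backbone(tf.keras.Input((64, 64, 64, 3), batch_size=1)))
#   feats['1'].shape  # -> (1, 64, 64, 64, 64)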
| 7,760 | 39.005155 | 121 | py |
models | models-master/official/projects/volumetric_models/modeling/backbones/unet_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for 3D UNet backbone."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.volumetric_models.modeling.backbones import unet_3d
class UNet3DTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([128, 64], 4),
([256, 128], 6),
)
def test_network_creation(self, input_size, model_id):
"""Test creation of UNet3D family models."""
tf.keras.backend.set_image_data_format('channels_last')
network = unet_3d.UNet3D(model_id=model_id)
inputs = tf.keras.Input(
shape=(input_size[0], input_size[0], input_size[1], 3), batch_size=1)
endpoints = network(inputs)
for layer_depth in range(model_id):
self.assertAllEqual([
1, input_size[0] / 2**layer_depth, input_size[0] / 2**layer_depth,
input_size[1] / 2**layer_depth, 64 * 2**layer_depth
], endpoints[str(layer_depth + 1)].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=4,
pool_size=(2, 2, 2),
kernel_size=(3, 3, 3),
activation='relu',
base_filters=32,
kernel_regularizer=None,
norm_momentum=0.99,
norm_epsilon=0.001,
use_sync_bn=False,
use_batch_normalization=True)
network = unet_3d.UNet3D(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = unet_3d.UNet3D.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,530 | 33.202703 | 79 | py |
models | models-master/official/projects/volumetric_models/modeling/backbones/unet_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of 3D UNet Model encoder part.
[1] Özgün Çiçek, Ahmed Abdulkadir, Soeren S. Lienkamp, Thomas Brox, Olaf
Ronneberger. 3D U-Net: Learning Dense Volumetric Segmentation from Sparse
Annotation. arXiv:1606.06650.
"""
from typing import Any, Mapping, Sequence
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.volumetric_models.modeling import nn_blocks_3d
from official.vision.modeling.backbones import factory
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class UNet3D(tf.keras.Model):
"""Class to build 3D UNet backbone."""
def __init__(
self,
model_id: int,
      input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
          shape=[None, None, None, None, 3]),
pool_size: Sequence[int] = (2, 2, 2),
kernel_size: Sequence[int] = (3, 3, 3),
base_filters: int = 32,
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
activation: str = 'relu',
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_sync_bn: bool = False,
use_batch_normalization: bool = False, # type: ignore # typed-keras
**kwargs):
"""3D UNet backbone initialization function.
Args:
model_id: The depth of UNet3D backbone model. The greater the depth, the
more max pooling layers will be added to the model. Lowering the depth
may reduce the amount of memory required for training.
input_specs: The specs of the input tensor. It specifies a 5D input of
[batch, height, width, volume, channel] for `channel_last` data format
or [batch, channel, height, width, volume] for `channel_first` data
format.
pool_size: The pooling size for the max pooling operations.
kernel_size: The kernel size for 3D convolution.
base_filters: The number of filters that the first layer in the
convolution network will have. Following layers will contain a multiple
of this number. Lowering this number will likely reduce the amount of
memory required to train the model.
      kernel_regularizer: A tf.keras.regularizers.Regularizer object for
        Conv3D. Default to None.
Default to None.
activation: The name of the activation function.
norm_momentum: The normalization momentum for the moving average.
norm_epsilon: A float added to variance to avoid dividing by zero.
use_sync_bn: If True, use synchronized batch normalization.
use_batch_normalization: If set to True, use batch normalization after
convolution and before activation. Default to False.
**kwargs: Keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._pool_size = pool_size
self._kernel_size = kernel_size
self._activation = activation
self._base_filters = base_filters
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._use_sync_bn = use_sync_bn
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
self._kernel_regularizer = kernel_regularizer
self._use_batch_normalization = use_batch_normalization
# Build 3D UNet.
inputs = tf.keras.Input(
shape=input_specs.shape[1:], dtype=input_specs.dtype)
x = inputs
endpoints = {}
# Add levels with max pooling to downsample input.
for layer_depth in range(model_id):
      # Two convolutions are applied sequentially without downsampling.
filter_num = base_filters * (2**layer_depth)
x2 = nn_blocks_3d.BasicBlock3DVolume(
filters=[filter_num, filter_num * 2],
strides=(1, 1, 1),
kernel_size=self._kernel_size,
kernel_regularizer=self._kernel_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
use_batch_normalization=self._use_batch_normalization)(
x)
if layer_depth < model_id - 1:
x = layers.MaxPool3D(
pool_size=pool_size,
strides=(2, 2, 2),
padding='valid',
data_format=tf.keras.backend.image_data_format())(
x2)
else:
x = x2
endpoints[str(layer_depth + 1)] = x2
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(UNet3D, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def get_config(self) -> Mapping[str, Any]:
return {
'model_id': self._model_id,
'pool_size': self._pool_size,
'kernel_size': self._kernel_size,
'activation': self._activation,
'base_filters': self._base_filters,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'kernel_regularizer': self._kernel_regularizer,
'use_batch_normalization': self._use_batch_normalization
}
@classmethod
def from_config(cls, config: Mapping[str, Any], custom_objects=None):
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
"""Returns a dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('unet_3d')
def build_unet3d(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds 3D UNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'unet_3d', (f'Inconsistent backbone type '
f'{backbone_type}')
return UNet3D(
model_id=backbone_cfg.model_id,
input_specs=input_specs,
pool_size=backbone_cfg.pool_size,
base_filters=backbone_cfg.base_filters,
kernel_regularizer=l2_regularizer,
activation=norm_activation_config.activation,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
use_sync_bn=norm_activation_config.use_sync_bn,
use_batch_normalization=backbone_cfg.use_batch_normalization)
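# A minimal smoke-test sketch (added for illustration; mirrors
# unet_3d_test.py). Endpoint str(l) halves the input resolution (l - 1) times
# and carries 64 * 2**(l - 1) channels with the default base_filters of 32.
if __name__ == '__main__':
  _network = UNet3D(model_id=4)
  _endpoints = _network(tf.keras.Input((128, 128, 64, 3), batch_size=1))
  for _level, _feat in _endpoints.items():
    # '1': (1, 128, 128, 64, 64) ... '4': (1, 16, 16, 8, 512)
    print(_level, _feat.shape)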
| 7,065 | 38.920904 | 139 | py |
models | models-master/official/projects/volumetric_models/modeling/heads/segmentation_heads_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segmentation heads."""
from typing import Any, Union, Sequence, Mapping, Tuple
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class SegmentationHead3D(tf.keras.layers.Layer):
"""Segmentation head for 3D input."""
def __init__(self,
num_classes: int,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
upsample_factor: int = 1,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_batch_normalization: bool = False,
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
bias_regularizer: tf.keras.regularizers.Regularizer = None,
output_logits: bool = True, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initialize params to build segmentation head.
Args:
num_classes: `int` number of mask classification categories. The number of
classes does not include background class.
level: `int` or `str`, level to use to build segmentation head.
num_convs: `int` number of stacked convolution before the last prediction
layer.
num_filters: `int` number to specify the number of filters used. Default
is 256.
upsample_factor: `int` number to specify the upsampling factor to generate
finer mask. Default 1 means no upsampling is applied.
activation: `string`, indicating which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: `bool`, whether to use synchronized batch normalization
across different replicas.
norm_momentum: `float`, the momentum parameter of the normalization
layers.
norm_epsilon: `float`, the epsilon parameter of the normalization layers.
use_batch_normalization: A bool of whether to use batch normalization or
not.
kernel_regularizer: `tf.keras.regularizers.Regularizer` object for layer
kernel.
bias_regularizer: `tf.keras.regularizers.Regularizer` object for bias.
output_logits: A `bool` of whether to output logits or not. Default
is True. If set to False, output softmax.
**kwargs: other keyword arguments passed to Layer.
"""
super(SegmentationHead3D, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'level': level,
'num_convs': num_convs,
'num_filters': num_filters,
'upsample_factor': upsample_factor,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'use_batch_normalization': use_batch_normalization,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'output_logits': output_logits
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation, use_keras_layer=True)
def build(self, input_shape: Union[tf.TensorShape, Sequence[tf.TensorShape]]):
"""Creates the variables of the segmentation head."""
conv_op = tf.keras.layers.Conv3D
conv_kwargs = {
'kernel_size': (3, 3, 3),
'padding': 'same',
'use_bias': False,
'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=0.01),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
}
final_kernel_size = (1, 1, 1)
bn_op = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn'] else
tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
# Segmentation head layers.
self._convs = []
self._norms = []
for i in range(self._config_dict['num_convs']):
conv_name = 'segmentation_head_conv_{}'.format(i)
self._convs.append(
conv_op(
name=conv_name,
filters=self._config_dict['num_filters'],
**conv_kwargs))
norm_name = 'segmentation_head_norm_{}'.format(i)
if self._config_dict['use_batch_normalization']:
self._norms.append(bn_op(name=norm_name, **bn_kwargs))
self._classifier = conv_op(
name='segmentation_output',
filters=self._config_dict['num_classes'],
kernel_size=final_kernel_size,
padding='valid',
activation=None,
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
super(SegmentationHead3D, self).build(input_shape)
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]]):
"""Forward pass of the segmentation head.
    It supports either a tuple of 2 tensors or a tuple of 2 dictionaries. The
    first is backbone endpoints, and the second is decoder endpoints. When
    inputs are tensors, they are from a single level of feature maps. When
    inputs are dictionaries, they contain multiple levels of feature maps,
    where the key is the index of the feature map.
    Args:
      inputs: A tuple of 2 feature map tensors of shape
        [batch, height_l, width_l, volume_l, channels] or 2 dictionaries of
        tensors:
        - key: A `str` of the level of the multilevel features.
        - values: A `tf.Tensor` of the feature map tensors, whose shape is
          [batch, height_l, width_l, volume_l, channels].
        The first is backbone endpoints, and the second is decoder endpoints.
Returns:
segmentation prediction mask: A `tf.Tensor` of the segmentation mask
scores predicted from input features.
"""
decoder_output = inputs[1]
x = decoder_output[str(self._config_dict['level'])] if isinstance(
decoder_output, dict) else decoder_output
for i, conv in enumerate(self._convs):
x = conv(x)
if self._norms:
x = self._norms[i](x)
x = self._activation(x)
x = tf.keras.layers.UpSampling3D(size=self._config_dict['upsample_factor'])(
x)
x = self._classifier(x)
return x if self._config_dict['output_logits'] else tf.keras.layers.Softmax(
dtype='float32')(
x)
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]):
return cls(**config)
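# Usage sketch (added for illustration; the shapes below are assumptions).
# The head takes a (backbone_output, decoder_output) tuple and predicts
# per-voxel class scores:
#   head = SegmentationHead3D(num_classes=2, level=1, num_convs=0)
#   feats = {'1': tf.random.uniform([1, 32, 32, 32, 64])}
#   logits = head((feats, feats))  # -> shape [1, 32, 32, 32, 2]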
| 7,501 | 39.333333 | 101 | py |
models | models-master/official/projects/volumetric_models/tasks/semantic_segmentation_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image segmentation task definition."""
from typing import Any, Dict, Mapping, Optional, Sequence, Union
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg
from official.projects.volumetric_models.dataloaders import segmentation_input_3d
from official.projects.volumetric_models.evaluation import segmentation_metrics
from official.projects.volumetric_models.losses import segmentation_losses
from official.projects.volumetric_models.modeling import factory
@task_factory.register_task_cls(exp_cfg.SemanticSegmentation3DTask)
class SemanticSegmentation3DTask(base_task.Task):
"""A task for semantic segmentation."""
def build_model(self) -> tf.keras.Model:
"""Builds segmentation model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size +
[self.task_config.model.num_channels],
dtype=self.task_config.train_data.dtype)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
model = factory.build_segmentation_model_3d(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
# Create a dummy input and call model instance to initialize the model. This
# is needed when launching multiple experiments using the same model
# directory. Since there is already a trained model, forward pass will not
# run and the model will never be built. This is only done when spatial
# partitioning is not enabled; otherwise it will fail with OOM due to
# extremely large input.
if (not self.task_config.train_input_partition_dims) and (
not self.task_config.eval_input_partition_dims):
dummy_input = tf.random.uniform(shape=[1] + list(input_specs.shape[1:]))
_ = model(dummy_input)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if 'all' in self.task_config.init_checkpoint_modules:
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self, params, input_context=None) -> tf.data.Dataset:
"""Builds classification input."""
decoder = segmentation_input_3d.Decoder(
image_field_key=params.image_field_key,
label_field_key=params.label_field_key)
parser = segmentation_input_3d.Parser(
input_size=params.input_size,
num_classes=params.num_classes,
num_channels=params.num_channels,
image_field_key=params.image_field_key,
label_field_key=params.label_field_key,
dtype=params.dtype,
label_dtype=params.label_dtype)
reader = input_reader.InputReader(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: tf.Tensor,
model_outputs: tf.Tensor,
aux_losses=None) -> tf.Tensor:
"""Segmentation loss.
Args:
labels: labels.
model_outputs: Output logits of the classifier.
aux_losses: auxiliarly loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
segmentation_loss_fn = segmentation_losses.SegmentationLossDiceScore(
metric_type='adaptive')
total_loss = segmentation_loss_fn(model_outputs, labels)
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self,
training: bool = True) -> Sequence[tf.keras.metrics.Metric]:
"""Gets streaming metrics for training/validation."""
metrics = []
num_classes = self.task_config.model.num_classes
if training:
metrics.extend([
tf.keras.metrics.CategoricalAccuracy(
name='train_categorical_accuracy', dtype=tf.float32)
])
else:
self.metrics = [
segmentation_metrics.DiceScore(
num_classes=num_classes,
metric_type='generalized',
per_class_metric=self.task_config.evaluation
.report_per_class_metric,
name='val_generalized_dice',
dtype=tf.float32)
]
return metrics
def train_step(
self,
inputs,
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[Sequence[tf.keras.metrics.Metric]] = None
) -> Dict[Any, Any]:
"""Does forward and backward.
Args:
      inputs: a tuple of input feature and label tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
input_partition_dims = self.task_config.train_input_partition_dims
if input_partition_dims:
strategy = tf.distribute.get_strategy()
features = strategy.experimental_split_to_logical_devices(
features, input_partition_dims)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
      # Casting the output to float32 is necessary when mixed_precision is
      # mixed_float16 or mixed_bfloat16 to ensure the output is cast to float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
outputs = outputs['logits']
if self.task_config.model.head.output_logits:
outputs = tf.nn.softmax(outputs)
# Computes per-replica loss.
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
# Compute all metrics within strategy scope for training.
if metrics:
labels = tf.cast(labels, tf.float32)
outputs = tf.cast(outputs, tf.float32)
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
return logs
def validation_step(
self,
inputs,
model: tf.keras.Model,
metrics: Optional[Sequence[tf.keras.metrics.Metric]] = None
) -> Dict[Any, Any]:
"""Validatation step.
Args:
      inputs: a tuple of input feature and label tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
input_partition_dims = self.task_config.eval_input_partition_dims
if input_partition_dims:
strategy = tf.distribute.get_strategy()
features = strategy.experimental_split_to_logical_devices(
features, input_partition_dims)
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
outputs = outputs['logits']
if self.task_config.model.head.output_logits:
outputs = tf.nn.softmax(outputs)
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
# Compute dice score metrics on CPU.
for metric in self.metrics:
labels = tf.cast(labels, tf.float32)
logits = tf.cast(outputs, tf.float32)
logs.update({metric.name: (labels, logits)})
return logs
def inference_step(self, inputs, model: tf.keras.Model) -> tf.Tensor:
"""Performs the forward step."""
return model(inputs, training=False)
def aggregate_logs(
self,
state: Optional[Sequence[Union[segmentation_metrics.DiceScore,
tf.keras.metrics.Metric]]] = None,
step_outputs: Optional[Mapping[str, Any]] = None
) -> Sequence[tf.keras.metrics.Metric]:
"""Aggregates statistics to compute metrics over training.
Args:
state: A sequence of tf.keras.metrics.Metric objects. Each element records
a metric.
step_outputs: A dictionary of [metric_name, (labels, output)] from a step.
Returns:
An updated sequence of tf.keras.metrics.Metric objects.
"""
if state is None:
for metric in self.metrics:
metric.reset_states()
state = self.metrics
for metric in self.metrics:
labels = step_outputs[metric.name][0]
predictions = step_outputs[metric.name][1]
# If `step_output` is distributed, it contains a tuple of Tensors instead
# of a single Tensor, so we need to concatenate them along the batch
# dimension in this case to have a single Tensor.
if isinstance(labels, tuple):
labels = tf.concat(list(labels), axis=0)
if isinstance(predictions, tuple):
predictions = tf.concat(list(predictions), axis=0)
labels = tf.cast(labels, tf.float32)
predictions = tf.cast(predictions, tf.float32)
metric.update_state(labels, predictions)
return state
def reduce_aggregated_logs(
self,
aggregated_logs: Optional[Mapping[str, Any]] = None,
global_step: Optional[tf.Tensor] = None) -> Mapping[str, float]:
"""Reduces logs to obtain per-class metrics if needed.
Args:
aggregated_logs: An optional dictionary containing aggregated logs.
global_step: An optional `tf.Tensor` of current global training steps.
Returns:
The reduced logs containing per-class metrics and overall metrics.
Raises:
ValueError: If `self.metrics` does not contain exactly 1 metric object.
"""
result = {}
if len(self.metrics) != 1:
      raise ValueError('Exactly one metric must be present, but {0} are '
                       'present.'.format(len(self.metrics)))
metric = self.metrics[0].result().numpy()
if self.task_config.evaluation.report_per_class_metric:
for i, metric_val in enumerate(metric):
metric_name = self.metrics[0].name + '/class_{0}'.format(
i - 1) if i > 0 else self.metrics[0].name
result.update({metric_name: metric_val})
else:
result.update({self.metrics[0].name: metric})
return result
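# Usage sketch (added for illustration; assumes the experiment config has a
# populated model spec and data paths):
#   config = exp_cfg.SemanticSegmentation3DTask()
#   task = SemanticSegmentation3DTask(config)
#   model = task.build_model()
#   dataset = task.build_inputs(config.train_data)
#   metrics = task.build_metrics(training=True)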
| 12,897 | 35.851429 | 91 | py |
models | models-master/official/projects/volumetric_models/losses/segmentation_losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for segmentation models."""
from typing import Optional, Sequence
import tensorflow as tf
class SegmentationLossDiceScore(object):
"""Semantic segmentation loss using generalized dice score.
Dice score (DSC) is a similarity measure that equals twice the number of
elements common to both sets divided by the sum of the number of elements
in each set. It is commonly used to evaluate segmentation performance to
measure the overlap of predicted and groundtruth regions.
(https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient)
Generalized dice score is the dice score weighted by the volume of groundtruth
  labels per class. Adaptive dice score additionally weights each per-class
  dice score by exp(-dice), so lower dice scores receive larger weights and
  wrong predictions contribute more to the total loss. The model is then
  trained to focus more on these hard examples.
"""
def __init__(self,
metric_type: Optional[str] = None,
axis: Optional[Sequence[int]] = (1, 2, 3)):
"""Initializes dice score loss object.
Args:
metric_type: An optional `str` specifying the type of the dice score to
compute. Compute generalized or adaptive dice score if metric type is
`generalized` or `adaptive`; otherwise compute original dice score.
axis: An optional sequence of `int` specifying the axis to perform reduce
ops for raw dice score.
"""
self._dice_score = 0
self._metric_type = metric_type
self._axis = axis
def __call__(self, logits: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:
"""Computes and returns a loss based on 1 - dice score.
Args:
logits: A Tensor of the prediction.
labels: A Tensor of the groundtruth label.
Returns:
The loss value of (1 - dice score).
"""
labels = tf.cast(labels, logits.dtype)
if labels.get_shape().ndims < 2 or logits.get_shape().ndims < 2:
raise ValueError('The labels and logits must be at least rank 2.')
epsilon = tf.keras.backend.epsilon()
keep_label_axis = list(range(len(logits.shape) - 1))
keep_batch_axis = list(range(1, len(logits.shape)))
# Compute sample mask to filter out samples with both all-0's labels and
# predictions because such samples should not contribute to mean dice score
# in this batch.
sample_mask = tf.logical_or(
tf.cast(tf.reduce_sum(labels, axis=keep_batch_axis), dtype=tf.bool),
tf.cast(tf.reduce_sum(logits, axis=keep_batch_axis), dtype=tf.bool))
labels = tf.boolean_mask(labels, sample_mask)
logits = tf.boolean_mask(logits, sample_mask)
# If all samples are filtered out, return 0 as the loss so this batch does
# not contribute.
if labels.shape[0] == 0:
return tf.convert_to_tensor(0.0)
# Calculate intersections and unions per class.
intersection = tf.reduce_sum(labels * logits, axis=keep_label_axis)
union = tf.reduce_sum(labels + logits, axis=keep_label_axis)
if self._metric_type == 'generalized':
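      # Reference formula (comment added for clarity; cf. Sudre et al. 2017):
      # with per-class weights w_c = 1 / (sum of groundtruth voxels of c)^2,
      #   GDS = 2 * sum_c(w_c * intersection_c) / sum_c(w_c * union_c),
      # and the value returned below is 1 - GDS.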
# Calculate the volume of groundtruth labels.
w = tf.math.reciprocal(
tf.square(tf.reduce_sum(labels, axis=keep_label_axis)) + epsilon)
# Calculate the weighted dice score and normalizer.
dice = 2 * tf.reduce_sum(w * intersection)
normalizer = tf.reduce_sum(w * union)
if normalizer == 0:
return tf.convert_to_tensor(1.0)
dice = tf.cast(dice, dtype=tf.float32)
normalizer = tf.cast(normalizer, dtype=tf.float32)
return 1 - tf.reduce_mean(dice / normalizer)
elif self._metric_type == 'adaptive':
dice = 2.0 * intersection / (union + epsilon)
# Calculate weights based on Dice scores.
weights = tf.exp(-1.0 * dice)
# Multiply weights by corresponding scores and get sum.
weighted_dice = tf.reduce_sum(weights * dice)
# Calculate normalization factor.
normalizer = tf.cast(tf.size(input=dice), dtype=tf.float32) * tf.exp(-1.0)
if normalizer == 0:
return tf.convert_to_tensor(1.0)
weighted_dice = tf.cast(weighted_dice, dtype=tf.float32)
return 1 - tf.reduce_mean(weighted_dice / normalizer)
else:
summation = tf.reduce_sum(
labels, axis=self._axis) + tf.reduce_sum(
logits, axis=self._axis)
dice = (2 * tf.reduce_sum(labels * logits, axis=self._axis)) / (
summation + epsilon)
return 1 - tf.reduce_mean(dice)
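# Hedged usage sketch (shapes and values below are illustrative assumptions,
# not part of the original loss definition): computes all three variants on a
# random 4D batch of per-pixel class probabilities.
if __name__ == '__main__':
  batch, height, width, num_classes = 2, 8, 8, 3
  probs = tf.random.uniform((batch, height, width, num_classes))
  labels = tf.one_hot(
      tf.random.uniform(
          (batch, height, width), maxval=num_classes, dtype=tf.int32),
      depth=num_classes)
  for metric_type in (None, 'generalized', 'adaptive'):
    loss_fn = SegmentationLossDiceScore(metric_type=metric_type)
    print(metric_type, float(loss_fn(probs, labels)))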
| 5,068 | 39.552 | 80 | py |
models | models-master/official/projects/triviaqa/modeling.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modeling for TriviaQA."""
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.configs import encoders
class TriviaQaHead(tf.keras.layers.Layer):
"""Computes logits given token and global embeddings."""
def __init__(self,
intermediate_size,
intermediate_activation=tf_utils.get_activation('gelu'),
dropout_rate=0.0,
attention_dropout_rate=0.0,
**kwargs):
super(TriviaQaHead, self).__init__(**kwargs)
self._attention_dropout = tf.keras.layers.Dropout(attention_dropout_rate)
self._intermediate_dense = tf.keras.layers.Dense(intermediate_size)
self._intermediate_activation = tf.keras.layers.Activation(
intermediate_activation)
self._output_dropout = tf.keras.layers.Dropout(dropout_rate)
self._output_layer_norm = tf.keras.layers.LayerNormalization()
self._logits_dense = tf.keras.layers.Dense(2)
def build(self, input_shape):
output_shape = input_shape['token_embeddings'][-1]
self._output_dense = tf.keras.layers.Dense(output_shape)
super(TriviaQaHead, self).build(input_shape)
def call(self, inputs, training=None):
token_embeddings = inputs['token_embeddings']
token_ids = inputs['token_ids']
question_lengths = inputs['question_lengths']
x = self._attention_dropout(token_embeddings, training=training)
intermediate_outputs = self._intermediate_dense(x)
intermediate_outputs = self._intermediate_activation(intermediate_outputs)
outputs = self._output_dense(intermediate_outputs)
outputs = self._output_dropout(outputs, training=training)
outputs = self._output_layer_norm(outputs + token_embeddings)
logits = self._logits_dense(outputs)
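    # Push logits at padding tokens and question-prefix tokens to a large
    # negative value so those positions are never predicted as answer spans.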
logits -= tf.expand_dims(
tf.cast(tf.equal(token_ids, 0), tf.float32) + tf.sequence_mask(
question_lengths, logits.shape[-2], dtype=tf.float32), -1) * 1e6
return logits
class TriviaQaModel(tf.keras.Model):
"""Model for TriviaQA."""
def __init__(self, model_config: encoders.EncoderConfig, sequence_length: int,
**kwargs):
inputs = dict(
token_ids=tf.keras.Input((sequence_length,), dtype=tf.int32),
question_lengths=tf.keras.Input((), dtype=tf.int32))
encoder = encoders.build_encoder(model_config)
x = encoder(
dict(
input_word_ids=inputs['token_ids'],
input_mask=tf.cast(inputs['token_ids'] > 0, tf.int32),
input_type_ids=1 -
tf.sequence_mask(inputs['question_lengths'], sequence_length,
tf.int32)))['sequence_output']
logits = TriviaQaHead(
model_config.get().intermediate_size,
dropout_rate=model_config.get().dropout_rate,
attention_dropout_rate=model_config.get().attention_dropout_rate)(
dict(
token_embeddings=x,
token_ids=inputs['token_ids'],
question_lengths=inputs['question_lengths']))
super(TriviaQaModel, self).__init__(inputs, logits, **kwargs)
self._encoder = encoder
@property
def encoder(self):
return self._encoder
class SpanOrCrossEntropyLoss(tf.keras.losses.Loss):
"""Cross entropy loss for multiple correct answers.
See https://arxiv.org/abs/1710.10723.
"""
def call(self, y_true, y_pred):
y_pred_masked = y_pred - tf.cast(y_true < 0.5, tf.float32) * 1e6
or_cross_entropy = (
tf.math.reduce_logsumexp(y_pred, axis=-2) -
tf.math.reduce_logsumexp(y_pred_masked, axis=-2))
return tf.math.reduce_sum(or_cross_entropy, -1)
def smooth_labels(label_smoothing, labels, question_lengths, token_ids):
mask = 1. - (
tf.cast(tf.equal(token_ids, 0), tf.float32) +
tf.sequence_mask(question_lengths, labels.shape[-2], dtype=tf.float32))
num_classes = tf.expand_dims(tf.math.reduce_sum(mask, -1, keepdims=True), -1)
labels = (1. - label_smoothing) * labels + (label_smoothing / num_classes)
return labels * tf.expand_dims(mask, -1)
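# Hedged toy example (batch size, sequence length and span positions below are
# illustrative assumptions): smooths one-hot begin/end labels and scores random
# logits with the span-or cross entropy above.
if __name__ == '__main__':
  token_ids = tf.constant([[5, 6, 7, 8, 9, 0, 0, 0],
                           [5, 6, 7, 8, 9, 10, 11, 0]])
  question_lengths = tf.constant([2, 3])
  # Channel 0 marks span begins, channel 1 marks span ends.
  labels = tf.tensor_scatter_nd_update(
      tf.zeros((2, 8, 2)),
      [[0, 3, 0], [0, 4, 1], [1, 4, 0], [1, 5, 1]],
      [1., 1., 1., 1.])
  smoothed = smooth_labels(0.1, labels, question_lengths, token_ids)
  logits = tf.random.normal((2, 8, 2))
  print(float(SpanOrCrossEntropyLoss()(smoothed, logits)))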
| 4,629 | 39.26087 | 80 | py |
models | models-master/official/projects/triviaqa/predict.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TriviaQA script for inference."""
import collections
import contextlib
import functools
import json
import operator
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_datasets as tfds
import sentencepiece as spm
from official.nlp.configs import encoders # pylint: disable=unused-import
from official.projects.triviaqa import evaluation
from official.projects.triviaqa import inputs
from official.projects.triviaqa import prediction
flags.DEFINE_string('data_dir', None, 'TensorFlow Datasets directory.')
flags.DEFINE_enum('split', None,
[tfds.Split.TRAIN, tfds.Split.VALIDATION, tfds.Split.TEST],
'For which split to generate predictions.')
flags.DEFINE_string('predictions_path', None, 'Output for predictions.')
flags.DEFINE_string('sentencepiece_model_path', None,
'Path to sentence piece model.')
flags.DEFINE_integer('bigbird_block_size', 64,
'Size of blocks for sparse block attention.')
flags.DEFINE_string('saved_model_dir', None,
'Path from which to initialize model and weights.')
flags.DEFINE_integer('sequence_length', 4096, 'Maximum number of tokens.')
flags.DEFINE_integer('global_sequence_length', 320,
'Maximum number of global tokens.')
flags.DEFINE_integer('batch_size', 32, 'Size of batch.')
flags.DEFINE_string('master', '', 'Address of the TPU master.')
flags.DEFINE_integer('decode_top_k', 8,
'Maximum number of tokens to consider for begin/end.')
flags.DEFINE_integer('decode_max_size', 16,
'Maximum number of sentence pieces in an answer.')
FLAGS = flags.FLAGS
@contextlib.contextmanager
def worker_context():
if FLAGS.master:
with tf.device('/job:worker') as d:
yield d
else:
yield
def read_sentencepiece_model(path):
with tf.io.gfile.GFile(path, 'rb') as file:
processor = spm.SentencePieceProcessor()
processor.LoadFromSerializedProto(file.read())
return processor
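# Hedged usage note (the path below is a placeholder, not a real asset): the
# processor is typically loaded once and queried for the special-token ids
# used throughout this script, e.g.
#   sp = read_sentencepiece_model('/path/to/spiece.model')
#   pad_id = sp.PieceToId('<pad>')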
def predict(sp_processor, features_map_fn, logits_fn, decode_logits_fn,
split_and_pad_fn, distribute_strategy, dataset):
"""Make predictions."""
predictions = collections.defaultdict(list)
for _, features in dataset.enumerate():
token_ids = features['token_ids']
x = split_and_pad_fn(features_map_fn(features))
logits = tf.concat(
distribute_strategy.experimental_local_results(logits_fn(x)), 0)
logits = logits[:features['token_ids'].shape[0]]
end_limit = token_ids.row_lengths() - 1 # inclusive
begin, end, scores = decode_logits_fn(logits, end_limit)
answers = prediction.decode_answer(features['context'], begin, end,
features['token_offsets'],
end_limit).numpy()
for j, (qid, token_id, offset, score, answer) in enumerate(
zip(features['qid'].numpy(),
tf.gather(features['token_ids'], begin, batch_dims=1).numpy(),
tf.gather(features['token_offsets'], begin, batch_dims=1).numpy(),
scores, answers)):
if not answer:
logging.info('%s: %s | NO_ANSWER, %f',
features['id'][j].numpy().decode('utf-8'),
features['question'][j].numpy().decode('utf-8'), score)
continue
if sp_processor.IdToPiece(int(token_id)).startswith('▁') and offset > 0:
answer = answer[1:]
logging.info('%s: %s | %s, %f', features['id'][j].numpy().decode('utf-8'),
features['question'][j].numpy().decode('utf-8'),
answer.decode('utf-8'), score)
predictions[qid.decode('utf-8')].append((score, answer.decode('utf-8')))
predictions = {
qid: evaluation.normalize_answer(
sorted(answers, key=operator.itemgetter(0), reverse=True)[0][1])
for qid, answers in predictions.items()
}
return predictions
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Configure input processing.
sp_processor = read_sentencepiece_model(FLAGS.sentencepiece_model_path)
features_map_fn = tf.function(
functools.partial(
inputs.features_map_fn,
local_radius=FLAGS.bigbird_block_size,
relative_pos_max_distance=24,
use_hard_g2l_mask=True,
sequence_length=FLAGS.sequence_length,
global_sequence_length=FLAGS.global_sequence_length,
padding_id=sp_processor.PieceToId('<pad>'),
eos_id=sp_processor.PieceToId('</s>'),
null_id=sp_processor.PieceToId('<empty>'),
cls_id=sp_processor.PieceToId('<ans>'),
sep_id=sp_processor.PieceToId('<sep_0>')),
autograph=False)
# Connect to TPU cluster.
if FLAGS.master:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(FLAGS.master)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
strategy = tf.distribute.MirroredStrategy()
# Initialize datasets.
with worker_context():
_ = tf.random.get_global_generator()
dataset = inputs.read_batches(
FLAGS.data_dir, FLAGS.split, FLAGS.batch_size, include_answers=False)
# Initialize model and compile.
with strategy.scope():
model = tf.keras.models.load_model(FLAGS.saved_model_dir, compile=False)
logging.info('Model initialized. Beginning prediction loop.')
logits_fn = tf.function(
functools.partial(prediction.distributed_logits_fn, model))
decode_logits_fn = tf.function(
functools.partial(prediction.decode_logits, FLAGS.decode_top_k,
FLAGS.decode_max_size))
split_and_pad_fn = tf.function(
functools.partial(prediction.split_and_pad, strategy, FLAGS.batch_size))
# Prediction strategy.
predict_fn = functools.partial(
predict,
sp_processor=sp_processor,
features_map_fn=features_map_fn,
logits_fn=logits_fn,
decode_logits_fn=decode_logits_fn,
split_and_pad_fn=split_and_pad_fn,
distribute_strategy=strategy,
dataset=dataset)
with worker_context():
predictions = predict_fn()
with tf.io.gfile.GFile(FLAGS.predictions_path, 'w') as f:
json.dump(predictions, f)
if __name__ == '__main__':
flags.mark_flags_as_required(['split', 'predictions_path', 'saved_model_dir'])
app.run(main)
| 7,085 | 37.096774 | 80 | py |
models | models-master/official/projects/triviaqa/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TriviaQA training script."""
import collections
import contextlib
import functools
import json
import operator
import os
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
import tensorflow_datasets as tfds
import sentencepiece as spm
from official.nlp import optimization as nlp_optimization
from official.nlp.configs import encoders
from official.projects.triviaqa import evaluation
from official.projects.triviaqa import inputs
from official.projects.triviaqa import modeling
from official.projects.triviaqa import prediction
flags.DEFINE_string('data_dir', None, 'Data directory for TensorFlow Datasets.')
flags.DEFINE_string(
'validation_gold_path', None,
'Path to golden validation. Usually, the wikipedia-dev.json file.')
flags.DEFINE_string('model_dir', None,
'Directory for checkpoints and summaries.')
flags.DEFINE_string('model_config_path', None,
                    'JSON file containing model configuration.')
flags.DEFINE_string('sentencepiece_model_path', None,
'Path to sentence piece model.')
flags.DEFINE_enum('encoder', 'bigbird',
['bert', 'bigbird', 'albert', 'mobilebert'],
'Which transformer encoder model to use.')
flags.DEFINE_integer('bigbird_block_size', 64,
'Size of blocks for sparse block attention.')
flags.DEFINE_string('init_checkpoint_path', None,
'Path from which to initialize weights.')
flags.DEFINE_integer('train_sequence_length', 4096,
'Maximum number of tokens for training.')
flags.DEFINE_integer('train_global_sequence_length', 320,
'Maximum number of global tokens for training.')
flags.DEFINE_integer('validation_sequence_length', 4096,
'Maximum number of tokens for validation.')
flags.DEFINE_integer('validation_global_sequence_length', 320,
'Maximum number of global tokens for validation.')
flags.DEFINE_integer('batch_size', 32, 'Size of batch.')
flags.DEFINE_string('master', '', 'Address of the TPU master.')
flags.DEFINE_integer('decode_top_k', 8,
'Maximum number of tokens to consider for begin/end.')
flags.DEFINE_integer('decode_max_size', 16,
'Maximum number of sentence pieces in an answer.')
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate for hidden layers.')
flags.DEFINE_float('attention_dropout_rate', 0.3,
'Dropout rate for attention layers.')
flags.DEFINE_float('label_smoothing', 1e-1, 'Degree of label smoothing.')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override the values set in the config files')
FLAGS = flags.FLAGS
@contextlib.contextmanager
def worker_context():
if FLAGS.master:
with tf.device('/job:worker') as d:
yield d
else:
yield
def read_sentencepiece_model(path):
with tf.io.gfile.GFile(path, 'rb') as file:
processor = spm.SentencePieceProcessor()
processor.LoadFromSerializedProto(file.read())
return processor
# Rename old BERT v1 configuration parameters.
_MODEL_CONFIG_REPLACEMENTS = {
'num_hidden_layers': 'num_layers',
'attention_probs_dropout_prob': 'attention_dropout_rate',
'hidden_dropout_prob': 'dropout_rate',
'hidden_act': 'hidden_activation',
'window_size': 'block_size',
}
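# Illustration (keys shown are assumptions): a legacy config fragment such as
# {"num_hidden_layers": 12, "hidden_act": "gelu"} is renamed by the mapping
# above to {"num_layers": 12, "hidden_activation": "gelu"} before being merged
# into the encoder config below.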
def read_model_config(encoder,
path,
bigbird_block_size=None) -> encoders.EncoderConfig:
"""Merges the JSON configuration into the encoder configuration."""
with tf.io.gfile.GFile(path) as f:
model_config = json.load(f)
for key, value in _MODEL_CONFIG_REPLACEMENTS.items():
if key in model_config:
model_config[value] = model_config.pop(key)
model_config['attention_dropout_rate'] = FLAGS.attention_dropout_rate
model_config['dropout_rate'] = FLAGS.dropout_rate
model_config['block_size'] = bigbird_block_size
encoder_config = encoders.EncoderConfig(type=encoder)
# Override the default config with those loaded from the JSON file.
encoder_config_keys = encoder_config.get().as_dict().keys()
overrides = {}
for key, value in model_config.items():
if key in encoder_config_keys:
overrides[key] = value
else:
logging.warning('Ignoring config parameter %s=%s', key, value)
encoder_config.get().override(overrides)
return encoder_config
@gin.configurable(denylist=[
'model',
'strategy',
'train_dataset',
'model_dir',
'init_checkpoint_path',
'evaluate_fn',
])
def fit(model,
strategy,
train_dataset,
model_dir,
init_checkpoint_path=None,
evaluate_fn=None,
learning_rate=1e-5,
learning_rate_polynomial_decay_rate=1.,
weight_decay_rate=1e-1,
num_warmup_steps=5000,
num_decay_steps=51000,
num_epochs=6):
"""Train and evaluate."""
hparams = dict(
learning_rate=learning_rate,
num_decay_steps=num_decay_steps,
num_warmup_steps=num_warmup_steps,
num_epochs=num_epochs,
weight_decay_rate=weight_decay_rate,
dropout_rate=FLAGS.dropout_rate,
attention_dropout_rate=FLAGS.attention_dropout_rate,
label_smoothing=FLAGS.label_smoothing)
logging.info(hparams)
learning_rate_schedule = nlp_optimization.WarmUp(
learning_rate,
tf.keras.optimizers.schedules.PolynomialDecay(
learning_rate,
num_decay_steps,
end_learning_rate=0.,
power=learning_rate_polynomial_decay_rate), num_warmup_steps)
with strategy.scope():
optimizer = nlp_optimization.AdamWeightDecay(
learning_rate_schedule,
weight_decay_rate=weight_decay_rate,
epsilon=1e-6,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
model.compile(optimizer, loss=modeling.SpanOrCrossEntropyLoss())
def init_fn(init_checkpoint_path):
ckpt = tf.train.Checkpoint(encoder=model.encoder)
ckpt.restore(init_checkpoint_path).assert_existing_objects_matched()
with worker_context():
ckpt_manager = tf.train.CheckpointManager(
tf.train.Checkpoint(model=model, optimizer=optimizer),
model_dir,
max_to_keep=None,
init_fn=(functools.partial(init_fn, init_checkpoint_path)
if init_checkpoint_path else None))
with strategy.scope():
ckpt_manager.restore_or_initialize()
val_summary_writer = tf.summary.create_file_writer(
os.path.join(model_dir, 'val'))
best_exact_match = 0.
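  # Resume at the epoch implied by the number of saved checkpoints, so that a
  # restarted job skips epochs that already completed.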
for epoch in range(len(ckpt_manager.checkpoints), num_epochs):
model.fit(
train_dataset,
callbacks=[
tf.keras.callbacks.TensorBoard(model_dir, write_graph=False),
])
ckpt_path = ckpt_manager.save()
if evaluate_fn is None:
continue
metrics = evaluate_fn()
logging.info('Epoch %d: %s', epoch + 1, metrics)
if best_exact_match < metrics['exact_match']:
best_exact_match = metrics['exact_match']
model.save(os.path.join(model_dir, 'export'), include_optimizer=False)
logging.info('Exporting %s as SavedModel.', ckpt_path)
with val_summary_writer.as_default():
for name, data in metrics.items():
tf.summary.scalar(name, data, epoch + 1)
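# Hedged note on the schedule built in fit() (numbers reuse its defaults and
# are illustrative): the learning rate ramps up linearly for num_warmup_steps
# and then follows the polynomial decay, e.g.
#   schedule = nlp_optimization.WarmUp(
#       1e-5, tf.keras.optimizers.schedules.PolynomialDecay(
#           1e-5, 51000, end_learning_rate=0., power=1.), 5000)
#   schedule(tf.constant(2500))  # Mid-warmup: 1e-5 * 2500 / 5000 = 5e-6.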
def evaluate(sp_processor, features_map_fn, labels_map_fn, logits_fn,
decode_logits_fn, split_and_pad_fn, distribute_strategy,
validation_dataset, ground_truth):
"""Run evaluation."""
loss_metric = tf.keras.metrics.Mean()
@tf.function
def update_loss(y, logits):
loss_fn = modeling.SpanOrCrossEntropyLoss(
reduction=tf.keras.losses.Reduction.NONE)
return loss_metric(loss_fn(y, logits))
predictions = collections.defaultdict(list)
for _, (features, labels) in validation_dataset.enumerate():
token_ids = features['token_ids']
y = labels_map_fn(token_ids, labels)
x = split_and_pad_fn(features_map_fn(features))
logits = tf.concat(
distribute_strategy.experimental_local_results(logits_fn(x)), 0)
logits = logits[:features['token_ids'].shape[0]]
update_loss(y, logits)
end_limit = token_ids.row_lengths() - 1 # inclusive
begin, end, scores = decode_logits_fn(logits, end_limit)
answers = prediction.decode_answer(features['context'], begin, end,
features['token_offsets'],
end_limit).numpy()
for _, (qid, token_id, offset, score, answer) in enumerate(
zip(features['qid'].numpy(),
tf.gather(features['token_ids'], begin, batch_dims=1).numpy(),
tf.gather(features['token_offsets'], begin, batch_dims=1).numpy(),
scores, answers)):
if not answer:
continue
if sp_processor.IdToPiece(int(token_id)).startswith('▁') and offset > 0:
answer = answer[1:]
predictions[qid.decode('utf-8')].append((score, answer.decode('utf-8')))
predictions = {
qid: evaluation.normalize_answer(
sorted(answers, key=operator.itemgetter(0), reverse=True)[0][1])
for qid, answers in predictions.items()
}
metrics = evaluation.evaluate_triviaqa(ground_truth, predictions, mute=True)
metrics['loss'] = loss_metric.result().numpy()
return metrics
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
gin.parse_config(FLAGS.gin_bindings)
model_config = read_model_config(
FLAGS.encoder,
FLAGS.model_config_path,
bigbird_block_size=FLAGS.bigbird_block_size)
logging.info(model_config.get().as_dict())
# Configure input processing.
sp_processor = read_sentencepiece_model(FLAGS.sentencepiece_model_path)
features_map_fn = functools.partial(
inputs.features_map_fn,
local_radius=FLAGS.bigbird_block_size,
relative_pos_max_distance=24,
use_hard_g2l_mask=True,
padding_id=sp_processor.PieceToId('<pad>'),
eos_id=sp_processor.PieceToId('</s>'),
null_id=sp_processor.PieceToId('<empty>'),
cls_id=sp_processor.PieceToId('<ans>'),
sep_id=sp_processor.PieceToId('<sep_0>'))
train_features_map_fn = tf.function(
functools.partial(
features_map_fn,
sequence_length=FLAGS.train_sequence_length,
global_sequence_length=FLAGS.train_global_sequence_length),
autograph=False)
train_labels_map_fn = tf.function(
functools.partial(
inputs.labels_map_fn, sequence_length=FLAGS.train_sequence_length))
# Connect to TPU cluster.
if FLAGS.master:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(FLAGS.master)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
strategy = tf.distribute.MirroredStrategy()
# Initialize datasets.
with worker_context():
_ = tf.random.get_global_generator()
train_dataset = inputs.read_batches(
FLAGS.data_dir,
tfds.Split.TRAIN,
FLAGS.batch_size,
shuffle=True,
drop_final_batch=True)
validation_dataset = inputs.read_batches(FLAGS.data_dir,
tfds.Split.VALIDATION,
FLAGS.batch_size)
def train_map_fn(x, y):
features = train_features_map_fn(x)
labels = modeling.smooth_labels(FLAGS.label_smoothing,
train_labels_map_fn(x['token_ids'], y),
features['question_lengths'],
features['token_ids'])
return features, labels
train_dataset = train_dataset.map(train_map_fn, 16).prefetch(16)
# Initialize model and compile.
with strategy.scope():
model = modeling.TriviaQaModel(model_config, FLAGS.train_sequence_length)
logits_fn = tf.function(
functools.partial(prediction.distributed_logits_fn, model))
decode_logits_fn = tf.function(
functools.partial(prediction.decode_logits, FLAGS.decode_top_k,
FLAGS.decode_max_size))
split_and_pad_fn = tf.function(
functools.partial(prediction.split_and_pad, strategy, FLAGS.batch_size))
# Evaluation strategy.
with tf.io.gfile.GFile(FLAGS.validation_gold_path) as f:
ground_truth = {
datum['QuestionId']: datum['Answer'] for datum in json.load(f)['Data']
}
validation_features_map_fn = tf.function(
functools.partial(
features_map_fn,
sequence_length=FLAGS.validation_sequence_length,
global_sequence_length=FLAGS.validation_global_sequence_length),
autograph=False)
validation_labels_map_fn = tf.function(
functools.partial(
inputs.labels_map_fn,
sequence_length=FLAGS.validation_sequence_length))
evaluate_fn = functools.partial(
evaluate,
sp_processor=sp_processor,
features_map_fn=validation_features_map_fn,
labels_map_fn=validation_labels_map_fn,
logits_fn=logits_fn,
decode_logits_fn=decode_logits_fn,
split_and_pad_fn=split_and_pad_fn,
distribute_strategy=strategy,
validation_dataset=validation_dataset,
ground_truth=ground_truth)
logging.info('Model initialized. Beginning training fit loop.')
fit(model, strategy, train_dataset, FLAGS.model_dir,
FLAGS.init_checkpoint_path, evaluate_fn)
if __name__ == '__main__':
flags.mark_flags_as_required([
'model_config_path', 'model_dir', 'sentencepiece_model_path',
'validation_gold_path'
])
app.run(main)
| 14,285 | 36.106494 | 80 | py |
models | models-master/official/projects/pixel/modeling/pixel.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pixel models."""
import tensorflow as tf
from official.vision.modeling.backbones import vit
layers = tf.keras.layers
class ViTEncoder(vit.Encoder):
"""ViT Encoder.
The original vit implementation in official/vision/modeling/backbones/vit.py
  does not support attention masks. This version accepts a tuple of
  (inputs, attention_mask) in call, where the mask is a dense attention mask
  broadcastable to (batch, seq_len, seq_len).
"""
def call(self, inputs, training=None):
x, mask = inputs
if self._add_pos_embed:
x = self._pos_embed(x, inputs_positions=self._inputs_positions)
x = self._dropout(x, training=training)
for encoder_layer in self._encoder_layers:
x = encoder_layer((x, mask), training=training)
x = self._norm(x)
return x
class VisionTransformer(tf.keras.layers.Layer):
"""ViT backbone."""
def __init__(
self,
patch_h,
patch_w,
filters,
num_layers,
mlp_dim,
num_heads,
dropout_rate,
attention_dropout_rate,
init_stochastic_depth_rate,
**kwargs
):
super().__init__(**kwargs)
self.patch_h = patch_h
self.patch_w = patch_w
self.filters = filters
self.num_layers = num_layers
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.dropout_rate = dropout_rate
self.attention_dropout_rate = attention_dropout_rate
self.init_stochastic_depth_rate = init_stochastic_depth_rate
def build(self, input_shape):
self.patch_to_embed = tf.keras.layers.Conv2D(
filters=self.filters,
kernel_size=(self.patch_h, self.patch_w),
strides=(self.patch_h, self.patch_w),
padding='valid',
kernel_initializer='lecun_normal',
)
self.encoder = ViTEncoder(
num_layers=self.num_layers,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout_rate=self.dropout_rate,
attention_dropout_rate=self.attention_dropout_rate,
init_stochastic_depth_rate=self.init_stochastic_depth_rate,
add_pos_embed=True,
)
self.token_cls = vit.TokenLayer()
super().build(input_shape)
def to_embed(self, patches):
return self.patch_to_embed(patches)
def insert_cls(self, patch_embeds):
return self.token_cls(patch_embeds)
def call(self, inputs): # pylint:disable=signature-mismatch
if isinstance(inputs, dict):
images = inputs.get('pixel_values', None)
attention_mask = inputs.get('attention_mask', None)
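      # Prepend a mask entry for the CLS token, then expand the per-position
      # mask into a 2D attention mask via an outer product with itself.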
attention_mask = tf.transpose(
tf.concat(
values=[
tf.ones((1, tf.shape(attention_mask)[0]), tf.float32),
tf.transpose(attention_mask),
],
axis=0,
)
)
attention_mask = tf.einsum('ij,ik->ijk', attention_mask, attention_mask)
attention_mask = tf.cast(attention_mask, tf.int32)
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
images = tf.transpose(images, perm=[0, 2, 3, 1])
patch_embeds = self.to_embed(images)
patch_shape = tf.shape(patch_embeds)
patch_embeds = tf.reshape(
patch_embeds, (patch_shape[0], -1, patch_shape[-1])
)
patch_embeds = self.insert_cls(patch_embeds)
return self.encoder((patch_embeds, attention_mask))
class PixelClassifier(tf.keras.layers.Layer):
"""Pixel classifier for finetuning. Uses the cls token."""
def __init__(self, encoder, num_classes, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.linear = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
)
def call(self, inputs):
encoded = self.encoder(inputs)
return self.linear(encoded[:, 0])
class PixelLinearClassifier(tf.keras.layers.Layer):
"""Pixel classifier for finetuning.
  This is a layer with an additional layer norm and linear layer in the
  classification head. It uses the average of all token representations.
"""
def __init__(self, encoder, num_classes, num_filters, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.num_filters = num_filters
self.linear_clas = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
)
self.norm = tf.keras.layers.LayerNormalization(
name='classification_layer_norm',
axis=-1,
epsilon=1e-6,
dtype=tf.float32,
)
self.linear_trans = tf.keras.layers.Dense(
num_filters,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
)
self.activation = tf.keras.layers.Activation('gelu')
self.dropout = tf.keras.layers.Dropout(0.1)
def call(self, inputs, training=False):
attention_mask = inputs.get('attention_mask')
mask_lengths = tf.expand_dims(tf.reduce_sum(attention_mask, axis=1), 1)
attention_mask = tf.tile(
tf.expand_dims(attention_mask, 2), [1, 1, self.num_filters]
)
encoded = self.encoder(inputs)
encoded = self.norm(self.activation(self.linear_trans(encoded)))
encoded = self.dropout(encoded, training=training)
mean_pooling = (
tf.reduce_sum(encoded[:, 1:, :] * attention_mask, axis=1) / mask_lengths
)
return self.linear_clas(mean_pooling)
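# Hedged smoke test (all sizes below are illustrative assumptions; with 16x16
# patches on a 16x4096 input there are 256 patch positions plus one CLS
# token):
if __name__ == '__main__':
  vit_encoder = VisionTransformer(
      patch_h=16, patch_w=16, filters=128, num_layers=2, mlp_dim=512,
      num_heads=4, dropout_rate=0.1, attention_dropout_rate=0.1,
      init_stochastic_depth_rate=0.0)
  classifier = PixelClassifier(vit_encoder, num_classes=2)
  logits = classifier({
      'pixel_values': tf.zeros((1, 3, 16, 4096)),
      'attention_mask': tf.ones((1, 256)),
  })
  print(logits.shape)  # Expected: (1, 2)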
| 5,897 | 30.37234 | 80 | py |
models | models-master/official/projects/pixel/tasks/classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Text classification task with ViT."""
import dataclasses
from typing import Tuple
import numpy as np
from scipy import stats
from sklearn import metrics as sklearn_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.data import data_loader_factory
from official.projects.pixel.modeling import pixel
@dataclasses.dataclass
class PixelModelConfig(base_config.Config):
"""The model configuration."""
filters: int = 768
num_layers: int = 12
mlp_dim: int = 3072
num_heads: int = 12
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
init_stochastic_depth_rate: float = 0.0
@dataclasses.dataclass
class PixelConfig(cfg.TaskConfig):
"""The task configuration."""
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
patch_h: int = 16
patch_w: int = 16
num_classes: int = 2
num_channels: int = 3
input_size: Tuple[int, int] = (16, 4096)
model: PixelModelConfig = PixelModelConfig()
@task_factory.register_task_cls(PixelConfig)
class PixelClassificationTask(base_task.Task):
"""Text classificaiton with Pixel and load checkpoint if exists."""
label_field: str = 'label'
metric_type: str = 'accuracy'
def build_model(self) -> tf.keras.Model:
encoder = pixel.VisionTransformer(
self.task_config.patch_h,
self.task_config.patch_w,
self.task_config.model.filters,
self.task_config.model.num_layers,
self.task_config.model.mlp_dim,
self.task_config.model.num_heads,
self.task_config.model.dropout_rate,
self.task_config.model.attention_dropout_rate,
self.task_config.model.init_stochastic_depth_rate,
)
model = pixel.PixelLinearClassifier(
encoder, self.task_config.num_classes, self.task_config.model.filters
)
h, w = self.task_config.input_size
positions = h // self.task_config.patch_h * w // self.task_config.patch_w
model({
'label': tf.zeros((1,)),
'pixel_values': tf.zeros((1, self.task_config.num_channels, h, w)),
'attention_mask': tf.zeros((1, positions)),
})
return model
def build_inputs(self, params, input_context=None):
return data_loader_factory.get_data_loader(params).load(input_context)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
label_ids = labels[self.label_field]
if self.task_config.num_classes == 1:
loss = tf.keras.losses.mean_squared_error(label_ids, model_outputs)
else:
loss = tf.keras.losses.sparse_categorical_crossentropy(
label_ids, tf.cast(model_outputs, tf.float32), from_logits=True
)
if aux_losses:
loss += tf.add_n(aux_losses)
return tf_utils.safe_mean(loss)
def initialize(self, model: tf.keras.Model):
"""Load encoder if checkpoint exists.
Args:
model: The keras.Model built or used by this task.
"""
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
return
ckpt = tf.train.Checkpoint(encoder=model.encoder)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
def build_metrics(self, training=None):
del training
if self.task_config.num_classes == 1:
metrics = [tf.keras.metrics.MeanSquaredError()]
elif self.task_config.num_classes == 2:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
tf.keras.metrics.AUC(name='auc', curve='PR'),
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
for metric in metrics:
if metric.name == 'auc':
        # Convert the logit to probability and extract the probability of True.
metric.update_state(
labels[self.label_field],
tf.expand_dims(tf.nn.softmax(model_outputs)[:, 1], axis=1),
)
if metric.name == 'cls_accuracy':
metric.update_state(labels[self.label_field], model_outputs)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
compiled_metrics.update_state(labels[self.label_field], model_outputs)
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
features, labels = inputs, inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses
)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
if model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics or []})
logs.update({m.name: m.result() for m in model.metrics})
if self.metric_type == 'matthews_corrcoef':
logs.update({
'sentence_prediction': (
tf.expand_dims( # Ensure one prediction along batch dimension.
tf.math.argmax(outputs, axis=1), axis=1
)
),
'labels': labels[self.label_field],
})
else:
logs.update({
'sentence_prediction': outputs,
'labels': labels[self.label_field],
})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if self.metric_type == 'accuracy':
return None
if state is None:
state = {'sentence_prediction': [], 'labels': []}
state['sentence_prediction'].append(
np.concatenate(
[v.numpy() for v in step_outputs['sentence_prediction']], axis=0
)
)
state['labels'].append(
np.concatenate([v.numpy() for v in step_outputs['labels']], axis=0)
)
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
if self.metric_type == 'accuracy':
return None
preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0)
labels = np.concatenate(aggregated_logs['labels'], axis=0)
if self.metric_type == 'f1':
preds = np.argmax(preds, axis=1)
return {self.metric_type: sklearn_metrics.f1_score(labels, preds)}
elif self.metric_type == 'matthews_corrcoef':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
return {
self.metric_type: sklearn_metrics.matthews_corrcoef(preds, labels)
}
elif self.metric_type == 'pearson_spearman_corr':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
pearson_corr = stats.pearsonr(preds, labels)[0]
spearman_corr = stats.spearmanr(preds, labels)[0]
corr_metric = (pearson_corr + spearman_corr) / 2
return {self.metric_type: corr_metric}
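# Hedged sketch (uses the task defaults above, which build a full-size ViT;
# intended only to show the expected input/output contract):
if __name__ == '__main__':
  task = PixelClassificationTask(PixelConfig())
  model = task.build_model()
  h, w = task.task_config.input_size
  positions = h // task.task_config.patch_h * w // task.task_config.patch_w
  logits = model({
      'pixel_values': tf.zeros((2, task.task_config.num_channels, h, w)),
      'attention_mask': tf.ones((2, positions)),
  })
  print(logits.shape)  # Expected: (2, num_classes) == (2, 2)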
| 7,720 | 34.255708 | 80 | py |
models | models-master/official/projects/simclr/modeling/multitask_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-task image multi-taskSimCLR model definition."""
from typing import Dict, Text
from absl import logging
import tensorflow as tf
from official.modeling.multitask import base_model
from official.projects.simclr.configs import multitask_config as simclr_multitask_config
from official.projects.simclr.heads import simclr_head
from official.projects.simclr.modeling import simclr_model
from official.vision.modeling import backbones
PROJECTION_OUTPUT_KEY = 'projection_outputs'
SUPERVISED_OUTPUT_KEY = 'supervised_outputs'
class SimCLRMTModel(base_model.MultiTaskBaseModel):
"""A multi-task SimCLR model that does both pretrain and finetune."""
def __init__(self, config: simclr_multitask_config.SimCLRMTModelConfig,
**kwargs):
self._config = config
# Build shared backbone.
self._input_specs = tf.keras.layers.InputSpec(shape=[None] +
config.input_size)
l2_weight_decay = config.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
self._l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
self._backbone = backbones.factory.build_backbone(
input_specs=self._input_specs,
backbone_config=config.backbone,
norm_activation_config=config.norm_activation,
l2_regularizer=self._l2_regularizer)
# Build the shared projection head
norm_activation_config = self._config.norm_activation
projection_head_config = self._config.projection_head
self._projection_head = simclr_head.ProjectionHead(
proj_output_dim=projection_head_config.proj_output_dim,
num_proj_layers=projection_head_config.num_proj_layers,
ft_proj_idx=projection_head_config.ft_proj_idx,
kernel_regularizer=self._l2_regularizer,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon)
super().__init__(**kwargs)
def _instantiate_sub_tasks(self) -> Dict[Text, tf.keras.Model]:
tasks = {}
for model_config in self._config.heads:
# Build supervised head
supervised_head_config = model_config.supervised_head
if supervised_head_config:
if supervised_head_config.zero_init:
s_kernel_initializer = 'zeros'
else:
s_kernel_initializer = 'random_uniform'
supervised_head = simclr_head.ClassificationHead(
num_classes=supervised_head_config.num_classes,
kernel_initializer=s_kernel_initializer,
kernel_regularizer=self._l2_regularizer)
else:
supervised_head = None
tasks[model_config.task_name] = simclr_model.SimCLRModel(
input_specs=self._input_specs,
backbone=self._backbone,
projection_head=self._projection_head,
supervised_head=supervised_head,
mode=model_config.mode,
backbone_trainable=self._config.backbone_trainable)
return tasks
def initialize(self):
"""Loads the multi-task SimCLR model with a pretrained checkpoint."""
ckpt_dir_or_file = self._config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
return
logging.info('Loading pretrained %s', self._config.init_checkpoint_modules)
if self._config.init_checkpoint_modules == 'backbone':
pretrained_items = dict(backbone=self._backbone)
elif self._config.init_checkpoint_modules == 'backbone_projection':
pretrained_items = dict(
backbone=self._backbone, projection_head=self._projection_head)
else:
raise ValueError(
"Only 'backbone_projection' or 'backbone' can be used to "
'initialize the model.')
ckpt = tf.train.Checkpoint(**pretrained_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self._backbone, projection_head=self._projection_head)
| 5,142 | 39.496063 | 88 | py |
models | models-master/official/projects/simclr/modeling/simclr_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build simclr models."""
from typing import Optional
from absl import logging
import tensorflow as tf
layers = tf.keras.layers
PRETRAIN = 'pretrain'
FINETUNE = 'finetune'
PROJECTION_OUTPUT_KEY = 'projection_outputs'
SUPERVISED_OUTPUT_KEY = 'supervised_outputs'
class SimCLRModel(tf.keras.Model):
"""A classification model based on SimCLR framework."""
def __init__(self,
backbone: tf.keras.models.Model,
projection_head: tf.keras.layers.Layer,
supervised_head: Optional[tf.keras.layers.Layer] = None,
input_specs=layers.InputSpec(shape=[None, None, None, 3]),
mode: str = PRETRAIN,
backbone_trainable: bool = True,
**kwargs):
"""A classification model based on SimCLR framework.
Args:
backbone: a backbone network.
projection_head: a projection head network.
supervised_head: a head network for supervised learning, e.g.
classification head.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
mode: `str` indicates mode of training to be executed.
backbone_trainable: `bool` whether the backbone is trainable or not.
**kwargs: keyword arguments to be passed.
"""
super(SimCLRModel, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'projection_head': projection_head,
'supervised_head': supervised_head,
'input_specs': input_specs,
'mode': mode,
'backbone_trainable': backbone_trainable,
}
self._input_specs = input_specs
self._backbone = backbone
self._projection_head = projection_head
self._supervised_head = supervised_head
self._mode = mode
self._backbone_trainable = backbone_trainable
# Set whether the backbone is trainable
self._backbone.trainable = backbone_trainable
def call(self, inputs, training=None, **kwargs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
model_outputs = {}
if training and self._mode == PRETRAIN:
num_transforms = 2
# Split channels, and optionally apply extra batched augmentation.
# (bsz, h, w, c*num_transforms) -> [(bsz, h, w, c), ....]
features_list = tf.split(
inputs, num_or_size_splits=num_transforms, axis=-1)
# (num_transforms * bsz, h, w, c)
features = tf.concat(features_list, 0)
else:
num_transforms = 1
features = inputs
# Base network forward pass.
endpoints = self._backbone(
features, training=training and self._backbone_trainable)
features = endpoints[max(endpoints.keys())]
projection_inputs = layers.GlobalAveragePooling2D()(features)
# Add heads.
projection_outputs, supervised_inputs = self._projection_head(
projection_inputs, training)
if self._supervised_head is not None:
if self._mode == PRETRAIN:
logging.info('Ignoring gradient from supervised outputs !')
# When performing pretraining and supervised_head together, we do not
# want information from supervised evaluation flowing back into
# pretraining network. So we put a stop_gradient.
supervised_outputs = self._supervised_head(
tf.stop_gradient(supervised_inputs), training)
else:
supervised_outputs = self._supervised_head(supervised_inputs, training)
else:
supervised_outputs = None
model_outputs.update({
PROJECTION_OUTPUT_KEY: projection_outputs,
SUPERVISED_OUTPUT_KEY: supervised_outputs
})
return model_outputs
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
if self._supervised_head is not None:
items = dict(
backbone=self.backbone,
projection_head=self.projection_head,
supervised_head=self.supervised_head)
else:
items = dict(backbone=self.backbone, projection_head=self.projection_head)
return items
@property
def backbone(self):
return self._backbone
@property
def projection_head(self):
return self._projection_head
@property
def supervised_head(self):
return self._supervised_head
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
self._mode = value
@property
def backbone_trainable(self):
return self._backbone_trainable
@backbone_trainable.setter
def backbone_trainable(self, value):
self._backbone_trainable = value
self._backbone.trainable = value
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
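# Hedged smoke test (backbone, head sizes and image size are illustrative
# assumptions): in PRETRAIN mode with training=True the model expects the two
# augmented views stacked along the channel axis, i.e. (bsz, h, w, 3 * 2).
if __name__ == '__main__':
  from official.projects.simclr.heads import simclr_head
  from official.vision.modeling import backbones
  input_specs = layers.InputSpec(shape=[None, 64, 64, 3])
  model = SimCLRModel(
      backbone=backbones.ResNet(model_id=18, input_specs=input_specs),
      projection_head=simclr_head.ProjectionHead(
          proj_output_dim=128, num_proj_layers=3, ft_proj_idx=1),
      supervised_head=simclr_head.ClassificationHead(num_classes=10),
      input_specs=input_specs,
      mode=PRETRAIN)
  outputs = model(tf.random.normal((2, 64, 64, 6)), training=True)
  print(outputs[PROJECTION_OUTPUT_KEY].shape)  # (4, 128): both views stacked.
  print(outputs[SUPERVISED_OUTPUT_KEY].shape)  # (4, 10)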
| 5,335 | 31.536585 | 125 | py |
models | models-master/official/projects/simclr/modeling/simclr_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for SimCLR model."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.simclr.heads import simclr_head
from official.projects.simclr.modeling import simclr_model
from official.vision.modeling import backbones
class SimCLRModelTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 3, 0),
(128, 3, 1),
(128, 1, 0),
(128, 1, 1),
)
def test_model_creation(self, project_dim, num_proj_layers, ft_proj_idx):
input_size = 224
inputs = np.random.rand(2, input_size, input_size, 3)
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3])
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.ResNet(model_id=50, activation='relu',
input_specs=input_specs)
projection_head = simclr_head.ProjectionHead(
proj_output_dim=project_dim,
num_proj_layers=num_proj_layers,
ft_proj_idx=ft_proj_idx
)
num_classes = 10
supervised_head = simclr_head.ClassificationHead(
        num_classes=num_classes
)
model = simclr_model.SimCLRModel(
input_specs=input_specs,
backbone=backbone,
projection_head=projection_head,
supervised_head=supervised_head,
mode=simclr_model.PRETRAIN
)
outputs = model(inputs)
projection_outputs = outputs[simclr_model.PROJECTION_OUTPUT_KEY]
supervised_outputs = outputs[simclr_model.SUPERVISED_OUTPUT_KEY]
self.assertAllEqual(projection_outputs.shape.as_list(),
[2, project_dim])
self.assertAllEqual([2, num_classes],
supervised_outputs.numpy().shape)
if __name__ == '__main__':
tf.test.main()
| 2,398 | 32.319444 | 75 | py |
models | models-master/official/projects/simclr/modeling/layers/nn_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
from official.projects.simclr.modeling.layers import nn_blocks
class DenseBNTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(64, True, True),
(64, True, False),
(64, False, True),
)
def test_pass_through(self, output_dim, use_bias, use_normalization):
test_layer = nn_blocks.DenseBN(
output_dim=output_dim,
use_bias=use_bias,
use_normalization=use_normalization
)
x = tf.keras.Input(shape=(64,))
out_x = test_layer(x)
self.assertAllEqual(out_x.shape.as_list(), [None, output_dim])
# kernel of the dense layer
train_var_len = 1
if use_normalization:
if use_bias:
# batch norm introduce two trainable variables
train_var_len += 2
else:
# center is set to False if not use bias
train_var_len += 1
else:
if use_bias:
# bias of dense layer
train_var_len += 1
self.assertLen(test_layer.trainable_variables, train_var_len)
if __name__ == '__main__':
tf.test.main()
| 1,723 | 28.220339 | 74 | py |
models | models-master/official/projects/simclr/modeling/layers/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for simclr neural networks."""
from typing import Text, Optional
import tensorflow as tf
from official.modeling import tf_utils
regularizers = tf.keras.regularizers
class DenseBN(tf.keras.layers.Layer):
"""Modified Dense layer to help build simclr system.
The layer is a standards combination of Dense, BatchNorm and Activation.
"""
def __init__(
self,
output_dim: int,
use_bias: bool = True,
use_normalization: bool = False,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
activation: Optional[Text] = 'relu',
kernel_initializer: Text = 'VarianceScaling',
kernel_regularizer: Optional[regularizers.Regularizer] = None,
bias_regularizer: Optional[regularizers.Regularizer] = None,
name='linear_layer',
**kwargs):
"""Customized Dense layer.
Args:
output_dim: `int` size of output dimension.
      use_bias: if True, use bias in the dense layer.
use_normalization: if True, use batch normalization.
use_sync_bn: if True, use synchronized batch normalization.
norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
activation: `str` name of the activation function.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
name: `str`, name of the layer.
**kwargs: keyword arguments to be passed.
"""
# Note: use_bias is ignored for the dense layer when use_bn=True.
# However, it is still used for batch norm.
super(DenseBN, self).__init__(**kwargs)
self._output_dim = output_dim
self._use_bias = use_bias
self._use_normalization = use_normalization
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._activation = activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._name = name
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
if activation:
self._activation_fn = tf_utils.get_activation(activation)
else:
self._activation_fn = None
def get_config(self):
config = {
'output_dim': self._output_dim,
'use_bias': self._use_bias,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'use_normalization': self._use_normalization,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
base_config = super(DenseBN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self._dense0 = tf.keras.layers.Dense(
self._output_dim,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_bias=self._use_bias and not self._use_normalization)
if self._use_normalization:
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
center=self._use_bias,
scale=True)
super(DenseBN, self).build(input_shape)
def call(self, inputs, training=None):
assert inputs.shape.ndims == 2, inputs.shape
x = self._dense0(inputs)
if self._use_normalization:
x = self._norm0(x)
if self._activation:
x = self._activation_fn(x)
return x
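# Hedged usage sketch (dimensions are illustrative assumptions): a two-layer
# projection MLP assembled from DenseBN blocks.
if __name__ == '__main__':
  x = tf.random.normal((4, 2048))
  hidden = DenseBN(output_dim=256, use_normalization=True)(x, training=True)
  out = DenseBN(
      output_dim=128, use_normalization=True, activation=None)(
          hidden, training=True)
  print(out.shape)  # (4, 128)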
| 4,823 | 35 | 78 | py |
models | models-master/official/projects/simclr/tasks/simclr.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image SimCLR task definition.
SimCLR training supports two different modes:
- pretrain
- fine-tuning
For the above two different modes, the following components are different in
the task definition:
- training data format
- training loss
- projection_head and/or supervised_head
"""
from typing import Dict, Optional
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions
from official.core import input_reader
from official.core import task_factory
from official.modeling import optimization
from official.modeling import performance
from official.modeling import tf_utils
from official.projects.simclr.configs import simclr as exp_cfg
from official.projects.simclr.dataloaders import simclr_input
from official.projects.simclr.heads import simclr_head
from official.projects.simclr.losses import contrastive_losses
from official.projects.simclr.modeling import simclr_model
from official.vision.modeling import backbones
OptimizationConfig = optimization.OptimizationConfig
RuntimeConfig = config_definitions.RuntimeConfig
@task_factory.register_task_cls(exp_cfg.SimCLRPretrainTask)
class SimCLRPretrainTask(base_task.Task):
"""A task for image classification."""
def create_optimizer(self,
optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None):
"""Creates an TF optimizer from configurations.
Args:
optimizer_config: the parameters of the Optimization settings.
runtime_config: the parameters of the runtime.
Returns:
A tf.optimizers.Optimizer object.
"""
if (optimizer_config.optimizer.type == 'lars' and
self.task_config.loss.l2_weight_decay > 0.0):
raise ValueError('The l2_weight_decay cannot be used together with lars '
'optimizer. Please set it to 0.')
opt_factory = optimization.OptimizerFactory(optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
    # Configuring optimizer when loss_scale is set in runtime config. This
    # helps avoid overflow/underflow for float16 computations.
if runtime_config and runtime_config.loss_scale:
optimizer = performance.configure_optimizer(
optimizer,
use_float16=runtime_config.mixed_precision_dtype == 'float16',
loss_scale=runtime_config.loss_scale)
return optimizer
def build_model(self):
model_config = self.task_config.model
input_specs = tf.keras.layers.InputSpec(shape=[None] +
model_config.input_size)
l2_weight_decay = self.task_config.loss.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
# Build backbone
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=model_config.norm_activation,
l2_regularizer=l2_regularizer)
# Build projection head
norm_activation_config = model_config.norm_activation
projection_head_config = model_config.projection_head
projection_head = simclr_head.ProjectionHead(
proj_output_dim=projection_head_config.proj_output_dim,
num_proj_layers=projection_head_config.num_proj_layers,
ft_proj_idx=projection_head_config.ft_proj_idx,
kernel_regularizer=l2_regularizer,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon)
# Build supervised head
supervised_head_config = model_config.supervised_head
if supervised_head_config:
if supervised_head_config.zero_init:
s_kernel_initializer = 'zeros'
else:
s_kernel_initializer = 'random_uniform'
supervised_head = simclr_head.ClassificationHead(
num_classes=supervised_head_config.num_classes,
kernel_initializer=s_kernel_initializer,
kernel_regularizer=l2_regularizer)
else:
supervised_head = None
model = simclr_model.SimCLRModel(
input_specs=input_specs,
backbone=backbone,
projection_head=projection_head,
supervised_head=supervised_head,
mode=model_config.mode,
backbone_trainable=model_config.backbone_trainable)
logging.info(model.get_config())
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all' or 'backbone' can be used to initialize the model.")
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self, params, input_context=None):
input_size = self.task_config.model.input_size
if params.tfds_name:
decoder = simclr_input.TFDSDecoder(params.decoder.decode_label)
else:
decoder = simclr_input.Decoder(params.decoder.decode_label)
parser = simclr_input.Parser(
output_size=input_size[:2],
aug_rand_crop=params.parser.aug_rand_crop,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_color_distort=params.parser.aug_color_distort,
aug_color_jitter_strength=params.parser.aug_color_jitter_strength,
aug_color_jitter_impl=params.parser.aug_color_jitter_impl,
aug_rand_blur=params.parser.aug_rand_blur,
parse_label=params.parser.parse_label,
test_crop=params.parser.test_crop,
mode=params.parser.mode,
dtype=params.dtype)
reader = input_reader.InputReader(
params,
dataset_fn=tf.data.TFRecordDataset,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels,
model_outputs,
aux_losses=None) -> Dict[str, tf.Tensor]:
# Compute contrastive relative loss
con_losses_obj = contrastive_losses.ContrastiveLoss(
projection_norm=self.task_config.loss.projection_norm,
temperature=self.task_config.loss.temperature)
    # The projection outputs from the model have shape
    # (2 * bsz, proj_output_dim).
projection_outputs = model_outputs[simclr_model.PROJECTION_OUTPUT_KEY]
projection1, projection2 = tf.split(projection_outputs, 2, 0)
contrast_loss, (contrast_logits, contrast_labels) = con_losses_obj(
projection1=projection1, projection2=projection2)
contrast_accuracy = tf.equal(
tf.argmax(contrast_labels, axis=1), tf.argmax(contrast_logits, axis=1))
contrast_accuracy = tf.reduce_mean(tf.cast(contrast_accuracy, tf.float32))
contrast_prob = tf.nn.softmax(contrast_logits)
contrast_entropy = -tf.reduce_mean(
tf.reduce_sum(contrast_prob * tf.math.log(contrast_prob + 1e-8), -1))
model_loss = contrast_loss
losses = {
'contrast_loss': contrast_loss,
'contrast_accuracy': contrast_accuracy,
'contrast_entropy': contrast_entropy
}
if self.task_config.model.supervised_head is not None:
outputs = model_outputs[simclr_model.SUPERVISED_OUTPUT_KEY]
labels = tf.concat([labels, labels], 0)
if self.task_config.evaluation.one_hot:
sup_loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels,
outputs)
else:
sup_loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels,
outputs)
sup_loss = tf.reduce_mean(sup_loss)
      # `labels` are one-hot when one_hot evaluation is enabled; otherwise
      # they are sparse class ids and can be compared to the argmax directly.
      if self.task_config.evaluation.one_hot:
        label_acc = tf.equal(
            tf.argmax(labels, axis=1), tf.argmax(outputs, axis=1))
      else:
        label_acc = tf.equal(
            tf.cast(labels, tf.int64), tf.argmax(outputs, axis=1))
      label_acc = tf.reduce_mean(tf.cast(label_acc, tf.float32))
model_loss = contrast_loss + sup_loss
losses.update({
'accuracy': label_acc,
'supervised_loss': sup_loss,
})
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss = model_loss + reg_loss
losses['total_loss'] = total_loss
return losses
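  # A minimal shape sketch for `build_losses` (the batch size 2 and projection
  # dim 128 are illustrative, not from any config):
  #   projection_outputs = tf.random.normal([4, 128])  # 2 * bsz rows
  #   p1, p2 = tf.split(projection_outputs, 2, 0)      # each (2, 128)
  # Row i of `p1` and row i of `p2` are the two augmented views of example i,
  # which is why the supervised branch duplicates `labels` via tf.concat.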
def build_metrics(self, training=True):
if training:
metrics = []
metric_names = [
'total_loss', 'contrast_loss', 'contrast_accuracy', 'contrast_entropy'
]
if self.task_config.model.supervised_head:
metric_names.extend(['supervised_loss', 'accuracy'])
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
else:
k = self.task_config.evaluation.top_k
if self.task_config.evaluation.one_hot:
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
return metrics
def train_step(self, inputs, model, optimizer, metrics=None):
features, labels = inputs
# To do a sanity check that we absolutely use no labels when pretraining, we
# can set the labels here to zero.
if self.task_config.train_data.input_set_label_to_zero:
labels *= 0
if (self.task_config.model.supervised_head is not None and
self.task_config.evaluation.one_hot):
num_classes = self.task_config.model.supervised_head.num_classes
labels = tf.one_hot(labels, num_classes)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
      # Casting the model outputs to float32 is necessary when mixed_precision
      # is mixed_float16 or mixed_bfloat16, so the loss is computed in float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
losses = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
logging.info('Trainable variables:')
for var in tvars:
logging.info(var.name)
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: losses['total_loss']}
for m in metrics:
m.update_state(losses[m.name])
logs.update({m.name: m.result()})
return logs
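  # Sketch of the loss-scaling round trip in `train_step` (the inner SGD
  # optimizer is illustrative; any optimizer can be wrapped):
  #   opt = tf.keras.mixed_precision.LossScaleOptimizer(
  #       tf.keras.optimizers.SGD(learning_rate=0.1))
  #   scaled = opt.get_scaled_loss(loss)          # loss * loss_scale
  #   grads = tape.gradient(scaled, tvars)
  #   grads = opt.get_unscaled_gradients(grads)   # grads / loss_scale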
def validation_step(self, inputs, model, metrics=None):
if self.task_config.model.supervised_head is None:
      raise ValueError(
          'Cannot evaluate during pretraining without a supervised head.')
features, labels = inputs
if self.task_config.evaluation.one_hot:
num_classes = self.task_config.model.supervised_head.num_classes
labels = tf.one_hot(labels, num_classes)
outputs = model(
features, training=False)[simclr_model.SUPERVISED_OUTPUT_KEY]
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
logs = {self.loss: 0}
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
@task_factory.register_task_cls(exp_cfg.SimCLRFinetuneTask)
class SimCLRFinetuneTask(base_task.Task):
"""A task for image classification."""
def create_optimizer(self,
optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None):
"""Creates an TF optimizer from configurations.
Args:
optimizer_config: the parameters of the Optimization settings.
runtime_config: the parameters of the runtime.
Returns:
A tf.optimizers.Optimizer object.
"""
if (optimizer_config.optimizer.type == 'lars' and
self.task_config.loss.l2_weight_decay > 0.0):
raise ValueError('The l2_weight_decay cannot be used together with lars '
'optimizer. Please set it to 0.')
opt_factory = optimization.OptimizerFactory(optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
    # Configures the optimizer when loss_scale is set in the runtime config.
    # This helps avoid overflow/underflow for float16 computations.
if runtime_config and runtime_config.loss_scale:
optimizer = performance.configure_optimizer(
optimizer,
use_float16=runtime_config.mixed_precision_dtype == 'float16',
loss_scale=runtime_config.loss_scale)
return optimizer
def build_model(self):
model_config = self.task_config.model
input_specs = tf.keras.layers.InputSpec(shape=[None] +
model_config.input_size)
l2_weight_decay = self.task_config.loss.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=model_config.norm_activation,
l2_regularizer=l2_regularizer)
norm_activation_config = model_config.norm_activation
projection_head_config = model_config.projection_head
projection_head = simclr_head.ProjectionHead(
proj_output_dim=projection_head_config.proj_output_dim,
num_proj_layers=projection_head_config.num_proj_layers,
ft_proj_idx=projection_head_config.ft_proj_idx,
kernel_regularizer=l2_regularizer,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon)
supervised_head_config = model_config.supervised_head
if supervised_head_config.zero_init:
s_kernel_initializer = 'zeros'
else:
s_kernel_initializer = 'random_uniform'
supervised_head = simclr_head.ClassificationHead(
num_classes=supervised_head_config.num_classes,
kernel_initializer=s_kernel_initializer,
kernel_regularizer=l2_regularizer)
model = simclr_model.SimCLRModel(
input_specs=input_specs,
backbone=backbone,
projection_head=projection_head,
supervised_head=supervised_head,
mode=model_config.mode,
backbone_trainable=model_config.backbone_trainable)
logging.info(model.get_config())
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone_projection':
ckpt = tf.train.Checkpoint(
backbone=model.backbone, projection_head=model.projection_head)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
      raise ValueError(
          "Only 'all', 'backbone_projection' or 'backbone' can be used to "
          "initialize the model.")
    # If the checkpoint is from pretraining, reset the backbone trainability.
model.backbone_trainable = self.task_config.model.backbone_trainable
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self, params, input_context=None):
input_size = self.task_config.model.input_size
if params.tfds_name:
decoder = simclr_input.TFDSDecoder(params.decoder.decode_label)
else:
decoder = simclr_input.Decoder(params.decoder.decode_label)
parser = simclr_input.Parser(
output_size=input_size[:2],
parse_label=params.parser.parse_label,
test_crop=params.parser.test_crop,
mode=params.parser.mode,
dtype=params.dtype)
reader = input_reader.InputReader(
params,
dataset_fn=tf.data.TFRecordDataset,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, labels, model_outputs, aux_losses=None):
"""Sparse categorical cross entropy loss.
Args:
      labels: The ground-truth labels.
model_outputs: Output logits of the classifier.
      aux_losses: auxiliary loss tensors, i.e., `losses` in keras.Model.
Returns:
The total loss tensor.
"""
losses_config = self.task_config.loss
if losses_config.one_hot:
total_loss = tf.keras.losses.categorical_crossentropy(
labels,
model_outputs,
from_logits=True,
label_smoothing=losses_config.label_smoothing)
else:
total_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, model_outputs, from_logits=True)
total_loss = tf_utils.safe_mean(total_loss)
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self, training=True):
"""Gets streaming metrics for training/validation."""
k = self.task_config.evaluation.top_k
if self.task_config.evaluation.one_hot:
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))
]
return metrics
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
if self.task_config.loss.one_hot:
num_classes = self.task_config.model.supervised_head.num_classes
labels = tf.one_hot(labels, num_classes)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(
features, training=True)[simclr_model.SUPERVISED_OUTPUT_KEY]
      # Casting the model outputs to float32 is necessary when mixed_precision
      # is mixed_float16 or mixed_bfloat16, so the loss is computed in float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
logging.info('Trainable variables:')
for var in tvars:
logging.info(var.name)
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
if self.task_config.loss.one_hot:
num_classes = self.task_config.model.supervised_head.num_classes
labels = tf.one_hot(labels, num_classes)
outputs = self.inference_step(features,
model)[simclr_model.SUPERVISED_OUTPUT_KEY]
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
| 24,422 | 37.400943 | 80 | py |
models | models-master/official/projects/simclr/heads/simclr_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SimCLR prediction heads."""
from typing import Optional, Text
import tensorflow as tf
from official.projects.simclr.modeling.layers import nn_blocks
regularizers = tf.keras.regularizers
layers = tf.keras.layers
class ProjectionHead(tf.keras.layers.Layer):
"""Projection head."""
def __init__(
self,
num_proj_layers: int = 3,
proj_output_dim: Optional[int] = None,
ft_proj_idx: int = 0,
kernel_initializer: Text = 'VarianceScaling',
kernel_regularizer: Optional[regularizers.Regularizer] = None,
bias_regularizer: Optional[regularizers.Regularizer] = None,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
**kwargs):
"""The projection head used during pretraining of SimCLR.
Args:
num_proj_layers: `int` number of Dense layers used.
proj_output_dim: `int` output dimension of projection head, i.e., output
dimension of the final layer.
ft_proj_idx: `int` index of layer to use during fine-tuning. 0 means no
projection head during fine tuning, -1 means the final layer.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super(ProjectionHead, self).__init__(**kwargs)
assert proj_output_dim is not None or num_proj_layers == 0
assert ft_proj_idx <= num_proj_layers, (num_proj_layers, ft_proj_idx)
self._proj_output_dim = proj_output_dim
self._num_proj_layers = num_proj_layers
self._ft_proj_idx = ft_proj_idx
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._layers = []
def get_config(self):
config = {
'proj_output_dim': self._proj_output_dim,
'num_proj_layers': self._num_proj_layers,
'ft_proj_idx': self._ft_proj_idx,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(ProjectionHead, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self._layers = []
if self._num_proj_layers > 0:
intermediate_dim = int(input_shape[-1])
for j in range(self._num_proj_layers):
if j != self._num_proj_layers - 1:
# for the middle layers, use bias and relu for the output.
layer = nn_blocks.DenseBN(
output_dim=intermediate_dim,
use_bias=True,
use_normalization=True,
activation='relu',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
name='nl_%d' % j)
else:
# for the final layer, neither bias nor relu is used.
layer = nn_blocks.DenseBN(
output_dim=self._proj_output_dim,
use_bias=False,
use_normalization=True,
activation=None,
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
name='nl_%d' % j)
self._layers.append(layer)
super(ProjectionHead, self).build(input_shape)
def call(self, inputs, training=None):
hiddens_list = [tf.identity(inputs, 'proj_head_input')]
if self._num_proj_layers == 0:
proj_head_output = inputs
proj_finetune_output = inputs
else:
for j in range(self._num_proj_layers):
hiddens = self._layers[j](hiddens_list[-1], training)
hiddens_list.append(hiddens)
proj_head_output = tf.identity(
hiddens_list[-1], 'proj_head_output')
proj_finetune_output = tf.identity(
hiddens_list[self._ft_proj_idx], 'proj_finetune_output')
# The first element is the output of the projection head.
# The second element is the input of the finetune head.
return proj_head_output, proj_finetune_output
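# A minimal usage sketch for ProjectionHead (all dimensions illustrative):
#   head = ProjectionHead(
#       num_proj_layers=3, proj_output_dim=128, ft_proj_idx=1)
#   proj_out, ft_in = head(tf.ones([2, 64]))
#   # proj_out: (2, 128), fed to the contrastive loss.
#   # ft_in: (2, 64), the output of the first projection layer, fed to the
#   # fine-tuning head.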
class ClassificationHead(tf.keras.layers.Layer):
"""Classification Head."""
def __init__(
self,
num_classes: int,
kernel_initializer: Text = 'random_uniform',
kernel_regularizer: Optional[regularizers.Regularizer] = None,
bias_regularizer: Optional[regularizers.Regularizer] = None,
name: Text = 'head_supervised',
**kwargs):
"""The classification head used during pretraining or fine tuning.
Args:
num_classes: `int` size of the output dimension or number of classes
for classification task.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
name: `str`, name of the layer.
**kwargs: keyword arguments to be passed.
"""
super(ClassificationHead, self).__init__(name=name, **kwargs)
self._num_classes = num_classes
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._name = name
def get_config(self):
config = {
'num_classes': self._num_classes,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
base_config = super(ClassificationHead, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self._dense0 = layers.Dense(
units=self._num_classes,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=None)
super(ClassificationHead, self).build(input_shape)
def call(self, inputs, training=None):
inputs = self._dense0(inputs)
return inputs
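# A minimal usage sketch for ClassificationHead (num_classes illustrative):
#   head = ClassificationHead(num_classes=10)
#   logits = head(tf.ones([2, 64]))  # -> (2, 10), no activation applied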
| 7,666 | 37.527638 | 78 | py |
models | models-master/official/projects/simclr/heads/simclr_head_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.simclr.heads import simclr_head
class ProjectionHeadTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(0, None),
(1, 128),
(2, 128),
)
def test_head_creation(self, num_proj_layers, proj_output_dim):
test_layer = simclr_head.ProjectionHead(
num_proj_layers=num_proj_layers,
proj_output_dim=proj_output_dim)
input_dim = 64
x = tf.keras.Input(shape=(input_dim,))
proj_head_output, proj_finetune_output = test_layer(x)
proj_head_output_dim = input_dim
if num_proj_layers > 0:
proj_head_output_dim = proj_output_dim
self.assertAllEqual(proj_head_output.shape.as_list(),
[None, proj_head_output_dim])
if num_proj_layers > 0:
proj_finetune_output_dim = input_dim
self.assertAllEqual(proj_finetune_output.shape.as_list(),
[None, proj_finetune_output_dim])
@parameterized.parameters(
(0, None, 0),
(1, 128, 0),
(2, 128, 1),
(2, 128, 2),
)
def test_outputs(self, num_proj_layers, proj_output_dim, ft_proj_idx):
test_layer = simclr_head.ProjectionHead(
num_proj_layers=num_proj_layers,
proj_output_dim=proj_output_dim,
ft_proj_idx=ft_proj_idx
)
input_dim = 64
batch_size = 2
inputs = np.random.rand(batch_size, input_dim)
proj_head_output, proj_finetune_output = test_layer(inputs)
if num_proj_layers == 0:
self.assertAllClose(inputs, proj_head_output)
self.assertAllClose(inputs, proj_finetune_output)
else:
self.assertAllEqual(proj_head_output.shape.as_list(),
[batch_size, proj_output_dim])
if ft_proj_idx == 0:
self.assertAllClose(inputs, proj_finetune_output)
elif ft_proj_idx < num_proj_layers:
self.assertAllEqual(proj_finetune_output.shape.as_list(),
[batch_size, input_dim])
else:
self.assertAllEqual(proj_finetune_output.shape.as_list(),
[batch_size, proj_output_dim])
class ClassificationHeadTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
10, 20
)
def test_head_creation(self, num_classes):
test_layer = simclr_head.ClassificationHead(num_classes=num_classes)
input_dim = 64
x = tf.keras.Input(shape=(input_dim,))
out_x = test_layer(x)
self.assertAllEqual(out_x.shape.as_list(),
[None, num_classes])
if __name__ == '__main__':
tf.test.main()
| 3,253 | 30.901961 | 74 | py |
models | models-master/official/projects/teams/teams_task_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for teams_task."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.projects.teams import teams
from official.projects.teams import teams_task
class TeamsPretrainTaskTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters((1, 1), (0, 1), (0, 0), (1, 0))
def test_task(self, num_shared_hidden_layers,
num_task_agnostic_layers):
config = teams_task.TeamsPretrainTaskConfig(
model=teams.TeamsPretrainerConfig(
generator=encoders.BertEncoderConfig(
vocab_size=30522, num_layers=2),
discriminator=encoders.BertEncoderConfig(
vocab_size=30522, num_layers=2),
num_shared_generator_hidden_layers=num_shared_hidden_layers,
num_discriminator_task_agnostic_layers=num_task_agnostic_layers,
),
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path="dummy",
max_predictions_per_seq=20,
seq_length=128,
global_batch_size=1))
task = teams_task.TeamsPretrainTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
if __name__ == "__main__":
tf.test.main()
| 2,184 | 37.333333 | 76 | py |
models | models-master/official/projects/teams/teams_pretrainer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TEAMS pre trainer network."""
import tensorflow as tf
from official.modeling import activations
from official.nlp.modeling.networks import encoder_scaffold
from official.nlp.modeling.networks import packed_sequence_embedding
from official.projects.teams import teams_pretrainer
class TeamsPretrainerTest(tf.test.TestCase):
# Build a transformer network to use within the TEAMS trainer.
def _get_network(self, vocab_size):
sequence_length = 512
hidden_size = 50
embedding_cfg = {
'vocab_size': vocab_size,
'type_vocab_size': 1,
'hidden_size': hidden_size,
'embedding_width': hidden_size,
'max_seq_length': sequence_length,
'initializer': tf.keras.initializers.TruncatedNormal(stddev=0.02),
'dropout_rate': 0.1,
}
embedding_inst = packed_sequence_embedding.PackedSequenceEmbedding(
**embedding_cfg)
hidden_cfg = {
'num_attention_heads':
2,
'intermediate_size':
3072,
'intermediate_activation':
activations.gelu,
'dropout_rate':
0.1,
'attention_dropout_rate':
0.1,
'kernel_initializer':
tf.keras.initializers.TruncatedNormal(stddev=0.02),
}
return encoder_scaffold.EncoderScaffold(
num_hidden_instances=2,
pooled_output_dim=hidden_size,
embedding_cfg=embedding_cfg,
embedding_cls=embedding_inst,
hidden_cfg=hidden_cfg,
dict_outputs=True)
def test_teams_pretrainer(self):
"""Validate that the Keras object can be created."""
vocab_size = 100
test_generator_network = self._get_network(vocab_size)
test_discriminator_network = self._get_network(vocab_size)
# Create a TEAMS trainer with the created network.
candidate_size = 3
teams_trainer_model = teams_pretrainer.TeamsPretrainer(
generator_network=test_generator_network,
discriminator_mws_network=test_discriminator_network,
num_discriminator_task_agnostic_layers=1,
vocab_size=vocab_size,
candidate_size=candidate_size)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
num_token_predictions = 2
sequence_length = 128
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
lm_positions = tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32)
lm_ids = tf.keras.Input(shape=(num_token_predictions,), dtype=tf.int32)
inputs = {
'input_word_ids': word_ids,
'input_mask': mask,
'input_type_ids': type_ids,
'masked_lm_positions': lm_positions,
'masked_lm_ids': lm_ids
}
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = teams_trainer_model(inputs)
lm_outs = outputs['lm_outputs']
disc_rtd_logits = outputs['disc_rtd_logits']
disc_rtd_label = outputs['disc_rtd_label']
disc_mws_logits = outputs['disc_mws_logits']
disc_mws_label = outputs['disc_mws_label']
# Validate that the outputs are of the expected shape.
expected_lm_shape = [None, num_token_predictions, vocab_size]
expected_disc_rtd_logits_shape = [None, sequence_length]
expected_disc_rtd_label_shape = [None, sequence_length]
expected_disc_disc_mws_logits_shape = [
None, num_token_predictions, candidate_size
]
expected_disc_disc_mws_label_shape = [None, num_token_predictions]
self.assertAllEqual(expected_lm_shape, lm_outs.shape.as_list())
self.assertAllEqual(expected_disc_rtd_logits_shape,
disc_rtd_logits.shape.as_list())
self.assertAllEqual(expected_disc_rtd_label_shape,
disc_rtd_label.shape.as_list())
self.assertAllEqual(expected_disc_disc_mws_logits_shape,
disc_mws_logits.shape.as_list())
self.assertAllEqual(expected_disc_disc_mws_label_shape,
disc_mws_label.shape.as_list())
def test_teams_trainer_tensor_call(self):
"""Validate that the Keras object can be invoked."""
vocab_size = 100
test_generator_network = self._get_network(vocab_size)
test_discriminator_network = self._get_network(vocab_size)
# Create a TEAMS trainer with the created network.
teams_trainer_model = teams_pretrainer.TeamsPretrainer(
generator_network=test_generator_network,
discriminator_mws_network=test_discriminator_network,
num_discriminator_task_agnostic_layers=2,
vocab_size=vocab_size,
candidate_size=2)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1, 1], [1, 0, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32)
lm_positions = tf.constant([[0, 1], [0, 2]], dtype=tf.int32)
lm_ids = tf.constant([[10, 20], [20, 30]], dtype=tf.int32)
inputs = {
'input_word_ids': word_ids,
'input_mask': mask,
'input_type_ids': type_ids,
'masked_lm_positions': lm_positions,
'masked_lm_ids': lm_ids
}
# Invoke the trainer model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
_ = teams_trainer_model(inputs)
def test_serialize_deserialize(self):
"""Validate that the TEAMS trainer can be serialized and deserialized."""
vocab_size = 100
test_generator_network = self._get_network(vocab_size)
test_discriminator_network = self._get_network(vocab_size)
# Create a TEAMS trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
teams_trainer_model = teams_pretrainer.TeamsPretrainer(
generator_network=test_generator_network,
discriminator_mws_network=test_discriminator_network,
num_discriminator_task_agnostic_layers=2,
vocab_size=vocab_size,
candidate_size=2)
# Create another TEAMS trainer via serialization and deserialization.
config = teams_trainer_model.get_config()
new_teams_trainer_model = teams_pretrainer.TeamsPretrainer.from_config(
config)
# Validate that the config can be forced to JSON.
_ = new_teams_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(teams_trainer_model.get_config(),
new_teams_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 7,436 | 39.2 | 80 | py |
models | models-master/official/projects/teams/teams_pretrainer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer network for TEAMS models."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import models
_LOGIT_PENALTY_MULTIPLIER = 10000
class ReplacedTokenDetectionHead(tf.keras.layers.Layer):
"""Replaced token detection discriminator head.
Arguments:
encoder_cfg: Encoder config, used to create hidden layers and head.
num_task_agnostic_layers: Number of task agnostic layers in the
discriminator.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
encoder_cfg,
num_task_agnostic_layers,
output='logits',
name='rtd',
**kwargs):
super(ReplacedTokenDetectionHead, self).__init__(name=name, **kwargs)
self.num_task_agnostic_layers = num_task_agnostic_layers
self.hidden_size = encoder_cfg['embedding_cfg']['hidden_size']
self.num_hidden_instances = encoder_cfg['num_hidden_instances']
self.hidden_cfg = encoder_cfg['hidden_cfg']
self.activation = self.hidden_cfg['intermediate_activation']
self.initializer = self.hidden_cfg['kernel_initializer']
self.hidden_layers = []
for i in range(self.num_task_agnostic_layers, self.num_hidden_instances):
self.hidden_layers.append(
layers.Transformer(
num_attention_heads=self.hidden_cfg['num_attention_heads'],
intermediate_size=self.hidden_cfg['intermediate_size'],
intermediate_activation=self.activation,
dropout_rate=self.hidden_cfg['dropout_rate'],
attention_dropout_rate=self.hidden_cfg['attention_dropout_rate'],
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='transformer/layer_%d_rtd' % i))
self.dense = tf.keras.layers.Dense(
self.hidden_size,
activation=self.activation,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='transform/rtd_dense')
self.rtd_head = tf.keras.layers.Dense(
units=1,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='transform/rtd_head')
if output not in ('predictions', 'logits'):
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
self._output_type = output
def call(self, sequence_data, input_mask):
"""Compute inner-products of hidden vectors with sampled element embeddings.
Args:
sequence_data: A [batch_size, seq_length, num_hidden] tensor.
input_mask: A [batch_size, seq_length] binary mask to separate the input
from the padding.
Returns:
A [batch_size, seq_length] tensor.
"""
attention_mask = layers.SelfAttentionMask()([sequence_data, input_mask])
data = sequence_data
    for hidden_layer in self.hidden_layers:
      data = hidden_layer([data, attention_mask])
rtd_logits = self.rtd_head(self.dense(data))
return tf.squeeze(rtd_logits, axis=-1)
class MultiWordSelectionHead(tf.keras.layers.Layer):
"""Multi-word selection discriminator head.
Arguments:
embedding_table: The embedding table.
activation: The activation, if any, for the dense layer.
    initializer: The initializer for the dense layer. Defaults to a Glorot
uniform initializer.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
embedding_table,
activation=None,
initializer='glorot_uniform',
output='logits',
name='mws',
**kwargs):
super(MultiWordSelectionHead, self).__init__(name=name, **kwargs)
self.embedding_table = embedding_table
self.activation = activation
self.initializer = tf.keras.initializers.get(initializer)
self._vocab_size, self.embed_size = self.embedding_table.shape
self.dense = tf.keras.layers.Dense(
self.embed_size,
activation=self.activation,
kernel_initializer=self.initializer,
name='transform/mws_dense')
self.layer_norm = tf.keras.layers.LayerNormalization(
axis=-1, epsilon=1e-12, name='transform/mws_layernorm')
if output not in ('predictions', 'logits'):
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
self._output_type = output
def call(self, sequence_data, masked_positions, candidate_sets):
"""Compute inner-products of hidden vectors with sampled element embeddings.
Args:
sequence_data: A [batch_size, seq_length, num_hidden] tensor.
masked_positions: A [batch_size, num_prediction] tensor.
candidate_sets: A [batch_size, num_prediction, k] tensor.
Returns:
A [batch_size, num_prediction, k] tensor.
"""
# Gets shapes for later usage
candidate_set_shape = tf_utils.get_shape_list(candidate_sets)
num_prediction = candidate_set_shape[1]
# Gathers hidden vectors -> (batch_size, num_prediction, 1, embed_size)
masked_lm_input = self._gather_indexes(sequence_data, masked_positions)
lm_data = self.dense(masked_lm_input)
lm_data = self.layer_norm(lm_data)
lm_data = tf.expand_dims(
tf.reshape(lm_data, [-1, num_prediction, self.embed_size]), 2)
# Gathers embeddings -> (batch_size, num_prediction, embed_size, k)
flat_candidate_sets = tf.reshape(candidate_sets, [-1])
candidate_embeddings = tf.gather(self.embedding_table, flat_candidate_sets)
candidate_embeddings = tf.reshape(
candidate_embeddings,
tf.concat([tf.shape(candidate_sets), [self.embed_size]], axis=0)
)
candidate_embeddings.set_shape(
candidate_sets.shape.as_list() + [self.embed_size])
candidate_embeddings = tf.transpose(candidate_embeddings, [0, 1, 3, 2])
# matrix multiplication + squeeze -> (batch_size, num_prediction, k)
logits = tf.matmul(lm_data, candidate_embeddings)
logits = tf.squeeze(logits, 2)
if self._output_type == 'logits':
return logits
return tf.nn.log_softmax(logits)
def _gather_indexes(self, sequence_tensor, positions):
"""Gathers the vectors at the specific positions.
Args:
sequence_tensor: Sequence output of shape
(`batch_size`, `seq_length`, `num_hidden`) where `num_hidden` is
number of hidden units.
positions: Positions ids of tokens in batched sequences.
Returns:
Sequence tensor of shape (batch_size * num_predictions,
num_hidden).
"""
sequence_shape = tf_utils.get_shape_list(
sequence_tensor, name='sequence_output_tensor')
batch_size, seq_length, width = sequence_shape
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
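# Toy walkthrough of the flat-offset gather in `_gather_indexes` (values
# illustrative): with batch_size=2, seq_length=3 and
# positions=[[0, 2], [1, 2]], flat_offsets is [[0], [3]], so flat_positions
# becomes [0, 2, 4, 5] -- row indices into the (6, width) flattened sequence
# tensor selecting tokens (0, 0), (0, 2), (1, 1) and (1, 2).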
@tf.keras.utils.register_keras_serializable(package='Text')
class TeamsPretrainer(tf.keras.Model):
"""TEAMS network training model.
This is an implementation of the network structure described in "Training
ELECTRA Augmented with Multi-word Selection"
(https://arxiv.org/abs/2106.00139).
The TeamsPretrainer allows a user to pass in two transformer encoders, one
  for the generator and one for the discriminator (multi-word selection). The
pretrainer then instantiates the masked language model (at generator side) and
classification networks (including both multi-word selection head and replaced
token detection head) that are used to create the training objectives.
*Note* that the model is constructed by Keras Subclass API, where layers are
defined inside `__init__` and `call()` implements the computation.
Args:
generator_network: A transformer encoder for generator, this network should
output a sequence output.
discriminator_mws_network: A transformer encoder for multi-word selection
discriminator, this network should output a sequence output.
num_discriminator_task_agnostic_layers: Number of layers shared between
      multi-word selection and replaced token detection discriminators.
vocab_size: Size of generator output vocabulary
candidate_size: Candidate size for multi-word selection task,
including the correct word.
mlm_activation: The activation (if any) to use in the masked LM and
classification networks. If None, no activation will be used.
mlm_initializer: The initializer (if any) to use in the masked LM and
classification networks. Defaults to a Glorot uniform initializer.
output_type: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
generator_network,
discriminator_mws_network,
num_discriminator_task_agnostic_layers,
vocab_size,
candidate_size=5,
mlm_activation=None,
mlm_initializer='glorot_uniform',
output_type='logits',
**kwargs):
super().__init__()
self._config = {
'generator_network':
generator_network,
'discriminator_mws_network':
discriminator_mws_network,
'num_discriminator_task_agnostic_layers':
num_discriminator_task_agnostic_layers,
'vocab_size':
vocab_size,
'candidate_size':
candidate_size,
'mlm_activation':
mlm_activation,
'mlm_initializer':
mlm_initializer,
'output_type':
output_type,
}
for k, v in kwargs.items():
self._config[k] = v
self.generator_network = generator_network
self.discriminator_mws_network = discriminator_mws_network
self.vocab_size = vocab_size
self.candidate_size = candidate_size
self.mlm_activation = mlm_activation
self.mlm_initializer = mlm_initializer
self.output_type = output_type
self.masked_lm = layers.MaskedLM(
embedding_table=self.generator_network.embedding_network
.get_embedding_table(),
activation=mlm_activation,
initializer=mlm_initializer,
output=output_type,
name='generator_masked_lm')
discriminator_cfg = self.discriminator_mws_network.get_config()
self.num_task_agnostic_layers = num_discriminator_task_agnostic_layers
self.discriminator_rtd_head = ReplacedTokenDetectionHead(
encoder_cfg=discriminator_cfg,
num_task_agnostic_layers=self.num_task_agnostic_layers,
output=output_type,
name='discriminator_rtd')
hidden_cfg = discriminator_cfg['hidden_cfg']
self.discriminator_mws_head = MultiWordSelectionHead(
embedding_table=self.discriminator_mws_network.embedding_network
.get_embedding_table(),
activation=hidden_cfg['intermediate_activation'],
initializer=hidden_cfg['kernel_initializer'],
output=output_type,
name='discriminator_mws')
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""TEAMS forward pass.
Args:
inputs: A dict of all inputs, same as the standard BERT model.
Returns:
outputs: A dict of pretrainer model outputs, including
(1) lm_outputs: A `[batch_size, num_token_predictions, vocab_size]`
tensor indicating logits on masked positions.
(2) disc_rtd_logits: A `[batch_size, sequence_length]` tensor indicating
logits for discriminator replaced token detection task.
(3) disc_rtd_label: A `[batch_size, sequence_length]` tensor indicating
target labels for discriminator replaced token detection task.
(4) disc_mws_logits: A `[batch_size, num_token_predictions,
candidate_size]` tensor indicating logits for discriminator multi-word
selection task.
      (5) disc_mws_label: A `[batch_size, num_token_predictions]` tensor
indicating target labels for discriminator multi-word selection task.
"""
input_word_ids = inputs['input_word_ids']
input_mask = inputs['input_mask']
input_type_ids = inputs['input_type_ids']
masked_lm_positions = inputs['masked_lm_positions']
# Runs generator.
sequence_output = self.generator_network(
[input_word_ids, input_mask, input_type_ids])['sequence_output']
lm_outputs = self.masked_lm(sequence_output, masked_lm_positions)
# Samples tokens from generator.
fake_data = self._get_fake_data(inputs, lm_outputs)
# Runs discriminator.
disc_input = fake_data['inputs']
disc_rtd_label = fake_data['is_fake_tokens']
disc_mws_candidates = fake_data['candidate_set']
mws_sequence_outputs = self.discriminator_mws_network([
disc_input['input_word_ids'], disc_input['input_mask'],
disc_input['input_type_ids']
])['encoder_outputs']
# Applies replaced token detection with input selected based on
# self.num_discriminator_task_agnostic_layers
disc_rtd_logits = self.discriminator_rtd_head(
mws_sequence_outputs[self.num_task_agnostic_layers - 1], input_mask)
# Applies multi-word selection.
disc_mws_logits = self.discriminator_mws_head(mws_sequence_outputs[-1],
masked_lm_positions,
disc_mws_candidates)
disc_mws_label = tf.zeros_like(masked_lm_positions, dtype=tf.int32)
outputs = {
'lm_outputs': lm_outputs,
'disc_rtd_logits': disc_rtd_logits,
'disc_rtd_label': disc_rtd_label,
'disc_mws_logits': disc_mws_logits,
'disc_mws_label': disc_mws_label,
}
return outputs
def _get_fake_data(self, inputs, mlm_logits):
"""Generate corrupted data for discriminator.
Note it is poosible for sampled token to be the same as the correct one.
Args:
inputs: A dict of all inputs, same as the input of `call()` function
      mlm_logits: The generator's output logits.
Returns:
A dict of generated fake data
"""
inputs = models.electra_pretrainer.unmask(inputs, duplicate=True)
# Samples replaced token.
sampled_tokens = tf.stop_gradient(
models.electra_pretrainer.sample_from_softmax(
mlm_logits, disallow=None))
sampled_tokids = tf.argmax(sampled_tokens, axis=-1, output_type=tf.int32)
# Prepares input and label for replaced token detection task.
updated_input_ids, masked = models.electra_pretrainer.scatter_update(
inputs['input_word_ids'], sampled_tokids, inputs['masked_lm_positions'])
rtd_labels = masked * (1 - tf.cast(
tf.equal(updated_input_ids, inputs['input_word_ids']), tf.int32))
updated_inputs = models.electra_pretrainer.get_updated_inputs(
inputs, duplicate=True, input_word_ids=updated_input_ids)
    # Samples (candidate_size - 1) negatives and concatenates them with the
    # true token.
disallow = tf.one_hot(
inputs['masked_lm_ids'], depth=self.vocab_size, dtype=tf.float32)
sampled_candidates = tf.stop_gradient(
sample_k_from_softmax(mlm_logits, k=self.candidate_size-1,
disallow=disallow))
true_token_id = tf.expand_dims(inputs['masked_lm_ids'], -1)
candidate_set = tf.concat([true_token_id, sampled_candidates], -1)
return {
'inputs': updated_inputs,
'is_fake_tokens': rtd_labels,
'sampled_tokens': sampled_tokens,
'candidate_set': candidate_set
}
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.discriminator_mws_network)
return items
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def sample_k_from_softmax(logits, k, disallow=None, use_topk=False):
"""Implement softmax sampling using gumbel softmax trick to select k items.
Args:
logits: A [batch_size, num_token_predictions, vocab_size] tensor indicating
the generator output logits for each masked position.
    k: Number of samples.
disallow: If `None`, we directly sample tokens from the logits. Otherwise,
this is a tensor of size [batch_size, num_token_predictions, vocab_size]
indicating the true word id in each masked position.
    use_topk: Whether to use tf.nn.top_k or an iterative approach; the latter
      is empirically faster.
Returns:
sampled_tokens: A [batch_size, num_token_predictions, k] tensor indicating
the sampled word id in each masked position.
"""
if use_topk:
if disallow is not None:
logits -= _LOGIT_PENALTY_MULTIPLIER * disallow
uniform_noise = tf.random.uniform(
tf_utils.get_shape_list(logits), minval=0, maxval=1)
gumbel_noise = -tf.math.log(-tf.math.log(uniform_noise + 1e-9) + 1e-9)
_, sampled_tokens = tf.nn.top_k(logits + gumbel_noise, k=k, sorted=False)
else:
sampled_tokens_list = []
vocab_size = tf_utils.get_shape_list(logits)[-1]
if disallow is not None:
logits -= _LOGIT_PENALTY_MULTIPLIER * disallow
uniform_noise = tf.random.uniform(
tf_utils.get_shape_list(logits), minval=0, maxval=1)
gumbel_noise = -tf.math.log(-tf.math.log(uniform_noise + 1e-9) + 1e-9)
logits += gumbel_noise
for _ in range(k):
token_ids = tf.argmax(logits, -1, output_type=tf.int32)
sampled_tokens_list.append(token_ids)
logits -= _LOGIT_PENALTY_MULTIPLIER * tf.one_hot(
token_ids, depth=vocab_size, dtype=tf.float32)
sampled_tokens = tf.stack(sampled_tokens_list, -1)
return sampled_tokens
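# A minimal sketch of calling the sampler (shapes illustrative):
#   logits = tf.random.normal([2, 4, 30522])  # (bsz, num_preds, vocab_size)
#   ids = sample_k_from_softmax(logits, k=4)  # -> (2, 4, 4) int32 token ids
# Adding Gumbel noise -log(-log(u)) to the logits and taking an argmax draws
# one sample from softmax(logits); repeating k times while penalizing the ids
# already drawn yields k distinct samples per position.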
| 18,683 | 39.267241 | 100 | py |
models | models-master/official/projects/teams/teams.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TEAMS model configurations and instantiation methods."""
import dataclasses
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.modeling import layers
from official.nlp.modeling import networks
@dataclasses.dataclass
class TeamsPretrainerConfig(base_config.Config):
"""Teams pretrainer configuration."""
# Candidate size for multi-word selection task, including the correct word.
candidate_size: int = 5
# Weight for the generator masked language model task.
generator_loss_weight: float = 1.0
# Weight for the replaced token detection task.
discriminator_rtd_loss_weight: float = 5.0
# Weight for the multi-word selection task.
discriminator_mws_loss_weight: float = 2.0
  # Whether to share the embedding network between generator and discriminator.
tie_embeddings: bool = True
# Number of bottom layers shared between generator and discriminator.
# Non-positive value implies no sharing.
num_shared_generator_hidden_layers: int = 3
# Number of bottom layers shared between different discriminator tasks.
num_discriminator_task_agnostic_layers: int = 11
generator: encoders.BertEncoderConfig = dataclasses.field(
default_factory=encoders.BertEncoderConfig
)
discriminator: encoders.BertEncoderConfig = dataclasses.field(
default_factory=encoders.BertEncoderConfig
)
class TeamsEncoderConfig(encoders.BertEncoderConfig):
pass
@gin.configurable
@base_config.bind(TeamsEncoderConfig)
def get_encoder(bert_config: TeamsEncoderConfig,
embedding_network=None,
hidden_layers=None):
"""Gets a 'EncoderScaffold' object.
Args:
    bert_config: A 'TeamsEncoderConfig'.
embedding_network: Embedding network instance.
hidden_layers: List of hidden layer instances.
Returns:
A encoder object.
"""
embedding_cfg = dict(
vocab_size=bert_config.vocab_size,
type_vocab_size=bert_config.type_vocab_size,
hidden_size=bert_config.hidden_size,
embedding_width=bert_config.embedding_size,
max_seq_length=bert_config.max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range),
dropout_rate=bert_config.dropout_rate,
)
hidden_cfg = dict(
num_attention_heads=bert_config.num_attention_heads,
intermediate_size=bert_config.intermediate_size,
intermediate_activation=tf_utils.get_activation(
bert_config.hidden_activation),
dropout_rate=bert_config.dropout_rate,
attention_dropout_rate=bert_config.attention_dropout_rate,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range),
)
if embedding_network is None:
embedding_network = networks.PackedSequenceEmbedding
if hidden_layers is None:
hidden_layers = layers.Transformer
kwargs = dict(
embedding_cfg=embedding_cfg,
embedding_cls=embedding_network,
hidden_cls=hidden_layers,
hidden_cfg=hidden_cfg,
num_hidden_instances=bert_config.num_layers,
pooled_output_dim=bert_config.hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range),
dict_outputs=True)
# Relies on gin configuration to define the Transformer encoder arguments.
return networks.EncoderScaffold(**kwargs)
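# A minimal usage sketch (vocab/layer sizes illustrative):
#   cfg = TeamsEncoderConfig(vocab_size=30522, num_layers=2)
#   encoder = get_encoder(cfg)
#   outputs = encoder([word_ids, input_mask, type_ids])
#   # `outputs` is a dict with 'sequence_output', 'pooled_output' and
#   # 'encoder_outputs' (see dict_outputs=True above).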
| 4,096 | 36.245455 | 77 | py |
models | models-master/official/projects/teams/teams_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TEAMS pretraining task (Joint Masked LM, Replaced Token Detection and )."""
import dataclasses
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.data import pretrain_dataloader
from official.nlp.modeling import layers
from official.projects.teams import teams
from official.projects.teams import teams_pretrainer
@dataclasses.dataclass
class TeamsPretrainTaskConfig(cfg.TaskConfig):
"""The model config."""
model: teams.TeamsPretrainerConfig = dataclasses.field(
default_factory=teams.TeamsPretrainerConfig
)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
def _get_generator_hidden_layers(discriminator_network, num_hidden_layers,
num_shared_layers):
if num_shared_layers <= 0:
num_shared_layers = 0
hidden_layers = []
else:
hidden_layers = discriminator_network.hidden_layers[:num_shared_layers]
for _ in range(num_shared_layers, num_hidden_layers):
hidden_layers.append(layers.Transformer)
return hidden_layers
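# Sharing sketch (illustrative): with num_hidden_layers=4 and
# num_shared_layers=3, the returned list holds the discriminator's bottom
# three layer *instances* followed by one `layers.Transformer` *class*; the
# encoder scaffold instantiates the class, so only the generator's top layer
# gets its own fresh weights.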
def _build_pretrainer(
config: teams.TeamsPretrainerConfig) -> teams_pretrainer.TeamsPretrainer:
"""Instantiates TeamsPretrainer from the config."""
generator_encoder_cfg = config.generator
discriminator_encoder_cfg = config.discriminator
discriminator_network = teams.get_encoder(discriminator_encoder_cfg)
# Copy discriminator's embeddings to generator for easier model serialization.
hidden_layers = _get_generator_hidden_layers(
discriminator_network, generator_encoder_cfg.num_layers,
config.num_shared_generator_hidden_layers)
if config.tie_embeddings:
generator_network = teams.get_encoder(
generator_encoder_cfg,
embedding_network=discriminator_network.embedding_network,
hidden_layers=hidden_layers)
else:
generator_network = teams.get_encoder(
generator_encoder_cfg, hidden_layers=hidden_layers)
return teams_pretrainer.TeamsPretrainer(
generator_network=generator_network,
discriminator_mws_network=discriminator_network,
num_discriminator_task_agnostic_layers=config
.num_discriminator_task_agnostic_layers,
vocab_size=generator_encoder_cfg.vocab_size,
candidate_size=config.candidate_size,
mlm_activation=tf_utils.get_activation(
generator_encoder_cfg.hidden_activation),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=generator_encoder_cfg.initializer_range))
@task_factory.register_task_cls(TeamsPretrainTaskConfig)
class TeamsPretrainTask(base_task.Task):
"""TEAMS Pretrain Task (Masked LM + RTD + MWS)."""
def build_model(self):
return _build_pretrainer(self.task_config.model)
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> tf.Tensor:
with tf.name_scope('TeamsPretrainTask/losses'):
metrics = dict([(metric.name, metric) for metric in metrics])
# Generator MLM loss.
lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
tf.cast(model_outputs['lm_outputs'], tf.float32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = tf.reduce_sum(lm_prediction_losses * lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['masked_lm_loss'].update_state(mlm_loss)
weight = self.task_config.model.generator_loss_weight
total_loss = weight * mlm_loss
# Discriminator RTD loss.
rtd_logits = model_outputs['disc_rtd_logits']
rtd_labels = tf.cast(model_outputs['disc_rtd_label'], tf.float32)
input_mask = tf.cast(labels['input_mask'], tf.float32)
rtd_ind_loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=rtd_logits, labels=rtd_labels)
rtd_numerator = tf.reduce_sum(input_mask * rtd_ind_loss)
rtd_denominator = tf.reduce_sum(input_mask)
rtd_loss = tf.math.divide_no_nan(rtd_numerator, rtd_denominator)
metrics['replaced_token_detection_loss'].update_state(rtd_loss)
weight = self.task_config.model.discriminator_rtd_loss_weight
total_loss = total_loss + weight * rtd_loss
# Discriminator MWS loss.
mws_logits = model_outputs['disc_mws_logits']
mws_labels = model_outputs['disc_mws_label']
mws_loss = tf.keras.losses.sparse_categorical_crossentropy(
mws_labels, mws_logits, from_logits=True)
mws_numerator_loss = tf.reduce_sum(mws_loss * lm_label_weights)
mws_denominator_loss = tf.reduce_sum(lm_label_weights)
mws_loss = tf.math.divide_no_nan(mws_numerator_loss, mws_denominator_loss)
metrics['multiword_selection_loss'].update_state(mws_loss)
weight = self.task_config.model.discriminator_mws_loss_weight
total_loss = total_loss + weight * mws_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
metrics['total_loss'].update_state(total_loss)
return total_loss
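# Worked example of the weighting above (numbers purely illustrative): with
# generator_loss_weight=1.0, discriminator_rtd_loss_weight=5.0 and
# discriminator_mws_loss_weight=2.0, per-step losses mlm=2.0, rtd=0.1 and
# mws=1.5 combine to total = 1.0 * 2.0 + 5.0 * 0.1 + 2.0 * 1.5 = 5.5
# (before any aux_losses are added).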
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return pretrain_dataloader.BertPretrainDataLoader(params).load(
input_context)
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
tf.keras.metrics.Mean(name='masked_lm_loss'),
tf.keras.metrics.SparseCategoricalAccuracy(
name='replaced_token_detection_accuracy'),
tf.keras.metrics.Mean(name='replaced_token_detection_loss'),
tf.keras.metrics.SparseCategoricalAccuracy(
name='multiword_selection_accuracy'),
tf.keras.metrics.Mean(name='multiword_selection_loss'),
tf.keras.metrics.Mean(name='total_loss'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
with tf.name_scope('TeamsPretrainTask/process_metrics'):
metrics = dict([(metric.name, metric) for metric in metrics])
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(labels['masked_lm_ids'],
model_outputs['lm_outputs'],
labels['masked_lm_weights'])
if 'replaced_token_detection_accuracy' in metrics:
rtd_logits_expanded = tf.expand_dims(model_outputs['disc_rtd_logits'],
-1)
rtd_full_logits = tf.concat(
[-1.0 * rtd_logits_expanded, rtd_logits_expanded], -1)
metrics['replaced_token_detection_accuracy'].update_state(
model_outputs['disc_rtd_label'], rtd_full_logits,
labels['input_mask'])
if 'multiword_selection_accuracy' in metrics:
metrics['multiword_selection_accuracy'].update_state(
model_outputs['disc_mws_label'], model_outputs['disc_mws_logits'],
labels['masked_lm_weights'])
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
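# e.g. with num_replicas_in_sync=8 each replica back-propagates loss / 8,
# so the cross-replica SUM of gradients matches the gradient of the mean.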
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: tf.keras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = model(inputs, training=False)
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
| 10,289 | 39.352941 | 80 | py |
models | models-master/official/projects/roformer/roformer_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roformer attention layer."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
EinsumDense = tf.keras.layers.EinsumDense
MultiHeadAttention = tf.keras.layers.MultiHeadAttention
def _build_trig_vector(length, key_dim):
"""Builds the trig vector."""
tf_dtype = tf.keras.mixed_precision.global_policy().compute_dtype
position_ids = tf.cast(tf.range(length), dtype=tf_dtype)
position_ids = tf.expand_dims(position_ids, axis=0)
steps = key_dim // 2
# The paper's exponent 2(i - 1) / key_dim for i = 1..steps equals i / steps
# with zero-based i, since key_dim = 2 * steps.
wavenumber_exponent = -tf.cast(tf.range(steps), dtype=tf_dtype) / steps
wavenumbers = tf.pow(
tf.constant(10000.0, dtype=tf_dtype), wavenumber_exponent
)
vec = tf.einsum('bl,d->bld', position_ids, wavenumbers)
sin_vec = tf.repeat(tf.sin(vec), repeats=2, axis=-1)
cos_vec = tf.repeat(tf.cos(vec), repeats=2, axis=-1)
sin_vec, cos_vec = tf.expand_dims(sin_vec, 2), tf.expand_dims(cos_vec, 2)
return sin_vec, cos_vec
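# Shape sketch (illustrative): for length=4, key_dim=8 this returns sin/cos
# tensors of shape [1, 4, 1, 8]; position p and pair index i share the angle
# p * 10000**(-2 * i / key_dim), and each angle is repeated twice along the
# last axis so it lines up with the interleaved (even, odd) feature pairs.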
@tf.keras.utils.register_keras_serializable(package='Text')
class RoformerAttention(tf.keras.layers.MultiHeadAttention):
"""Roformer Attention."""
def __init__(self,
q_max_sequence_length,
kv_max_sequence_length,
output_range=None,
**kwargs):
"""Instantiates a roformer attention layer.
Roformer paper: https://arxiv.org/abs/2104.09864
Args:
q_max_sequence_length: maximum length in input for the query
kv_max_sequence_length: maximum length in input for key and value, can be
different from q_max_sequence_length
output_range: length of the query tensor to consider.
**kwargs: other keyword arguments.
"""
super().__init__(**kwargs)
self._q_max_sequence_length = q_max_sequence_length
self._kv_max_sequence_length = kv_max_sequence_length
assert self._key_dim % 2 == 0, 'key_dim must be even for rotary pairs.'
q_sin_vec, q_cos_vec = _build_trig_vector(self._q_max_sequence_length,
self._key_dim)
k_sin_vec, k_cos_vec = _build_trig_vector(self._kv_max_sequence_length,
self._key_dim)
# pylint:disable=g-long-ternary
self.q_sin_vec, self.q_cos_vec = (q_sin_vec,
q_cos_vec) if output_range is None else (
q_sin_vec[:, 0:output_range, ...],
q_cos_vec[:, 0:output_range, ...])
# pylint:enable=g-long-ternary
self.k_sin_vec, self.k_cos_vec = (k_sin_vec, k_cos_vec)
def roformer_recompute_qkv(self, q, k, v):
q_shape = tf.shape(q)
q_len = q_shape[1]
k_shape = tf.shape(k)
k_len = k_shape[1]
q2 = tf.stack([-q[..., 1::2], q[..., ::2]], axis=4)
q2 = tf.reshape(q2, q_shape)
k2 = tf.stack([-k[..., 1::2], k[..., ::2]], axis=4)
k2 = tf.reshape(k2, k_shape)
ret_q = q * self.q_cos_vec[:, 0:q_len,
...] + q2 * self.q_sin_vec[:, 0:q_len, ...]
ret_k = (k * self.k_cos_vec[:, 0:k_len, ...] +
         k2 * self.k_sin_vec[:, 0:k_len, ...])
return ret_q, ret_k, v
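# The stack/negate trick above applies the RoPE rotation to each (even, odd)
# feature pair:
#   q'[2i]   = q[2i]   * cos(theta_i) - q[2i+1] * sin(theta_i)
#   q'[2i+1] = q[2i+1] * cos(theta_i) + q[2i]   * sin(theta_i)
# so dot products q' . k' depend only on the relative position of q and k.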
def call(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
query,
value,
key=None,
attention_mask=None,
return_attention_scores=False,
training=None):
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
query = self._query_dense(query)
key = self._key_dense(key)
value = self._value_dense(value)
query, key, value = self.roformer_recompute_qkv(query, key, value)
attention_output, attention_scores = self._compute_attention(
query, key, value, attention_mask, training)
attention_output = self._output_dense(attention_output)
if return_attention_scores:
return attention_output, attention_scores
return attention_output
| 4,638 | 38.313559 | 91 | py |
models | models-master/official/projects/roformer/roformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roformer model configurations and instantiation methods."""
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.roformer import roformer_encoder
class RoformerEncoderConfig(encoders.BertEncoderConfig):
pass
@base_config.bind(RoformerEncoderConfig)
def get_encoder(encoder_cfg: RoformerEncoderConfig):
"""Gets a 'RoformerEncoder' object.
Args:
encoder_cfg: A 'RoformerEncoderConfig'.
Returns:
An encoder object.
"""
return roformer_encoder.RoformerEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first)
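# Illustrative use (a sketch; the vocab size below is an assumption):
#
#   cfg = RoformerEncoderConfig(vocab_size=21128)
#   encoder = get_encoder(cfg)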
| 2,002 | 36.092593 | 74 | py |
models | models-master/official/projects/roformer/roformer_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer-based bert encoder network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.roformer import roformer_encoder
class RoformerEncoderTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(RoformerEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
def test_network_creation(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = roformer_encoder.RoformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, 3)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_all_encoder_outputs_network_creation(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = roformer_encoder.RoformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_network_creation_with_float16_dtype(self):
hidden_size = 32
sequence_length = 21
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a small BertEncoder for testing.
test_network = roformer_encoder.RoformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float16, pooled.dtype)
@parameterized.named_parameters(
("all_sequence", None, 21),
("output_range", 1, 1),
)
def test_network_invocation(self, output_range, out_seq_len):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
# Create a small BertEncoder for testing.
test_network = roformer_encoder.RoformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
output_range=output_range)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], out_seq_len)
# Creates a BertEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = roformer_encoder.RoformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], sequence_length)
# Creates a BertEncoder with embedding_width != hidden_size
test_network = roformer_encoder.RoformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
embedding_width=16)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[-1], hidden_size)
self.assertTrue(hasattr(test_network, "_embedding_projection"))
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
inner_dim=512,
inner_activation="relu",
output_dropout=0.05,
attention_dropout=0.22,
initializer="glorot_uniform",
output_range=-1,
embedding_width=16,
embedding_layer=None,
norm_first=False)
network = roformer_encoder.RoformerEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["inner_activation"] = tf.keras.activations.serialize(
tf.keras.activations.get(expected_config["inner_activation"]))
expected_config["initializer"] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = roformer_encoder.RoformerEncoder.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
# Tests model saving/loading.
model_path = self.get_temp_dir() + "/model"
network.save(model_path)
_ = tf.keras.models.load_model(model_path)
if __name__ == "__main__":
tf.test.main()
| 9,514 | 40.190476 | 80 | py |
models | models-master/official/projects/roformer/roformer_encoder_block.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roformer TransformerEncoder block layer."""
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.roformer import roformer_attention
@tf.keras.utils.register_keras_serializable(package="Text")
class RoformerEncoderBlock(tf.keras.layers.Layer):
"""RoformerEncoderBlock layer."""
def __init__(self,
num_attention_heads,
inner_dim,
inner_activation,
q_max_sequence_length=512,
kv_max_sequence_length=512,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
**kwargs):
"""Initializes `RoformerEncoderBlock`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
q_max_sequence_length: The maximum sequence length of queries.
kv_max_sequence_length: The maximum sequence length of keys and values.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
**kwargs: keyword arguments.
"""
super().__init__(**kwargs)
if inner_dim % 2 != 0:
raise ValueError(f"The inner_dim of f{self.__class__} must be an even "
f"integer. However, inner_dim is f{inner_dim}")
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
self._q_max_sequence_length = q_max_sequence_length
self._kv_max_sequence_length = kv_max_sequence_length
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = roformer_attention.RoformerAttention(
q_max_sequence_length=self._q_max_sequence_length,
kv_max_sequence_length=self._kv_max_sequence_length,
output_range=self._output_range,
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
attention_axes=self._attention_axes,
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super(RoformerEncoderBlock, self).build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"output_dropout":
self._output_dropout_rate,
"attention_dropout":
self._attention_dropout_rate,
"output_range":
self._output_range,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"use_bias":
self._use_bias,
"norm_first":
self._norm_first,
"norm_epsilon":
self._norm_epsilon,
"inner_dropout":
self._inner_dropout,
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer),
"attention_axes":
self._attention_axes,
}
base_config = super(RoformerEncoderBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors. `input tensor` as the single
sequence of embeddings. [`input tensor`, `attention mask`] to have the
additional attention mask. [`query tensor`, `key value tensor`,
`attention mask`] to have separate input streams for the query, and
key/value to the multi-head attention.
Returns:
An output tensor with the same dimensions as input/query tensor.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError("Unexpected inputs to %s with length at %d" %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if self._output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:self._output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:self._output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:self._output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(target_tensor +
attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output
# During mixed precision training, layer norm output is always fp32 for now.
# Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(layer_output + attention_output)
| 13,507 | 42.434084 | 80 | py |
models | models-master/official/projects/roformer/roformer_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Roformer encoder network."""
# pylint: disable=g-classes-have-attributes
import collections
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.projects.roformer import roformer_encoder_block
@tf.keras.utils.register_keras_serializable(package='Text')
class RoformerEncoder(tf.keras.Model):
"""Bi-directional Transformer-based encoder network with Roformer.
Roformer paper: https://arxiv.org/abs/2104.09864
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size,
hidden_size=768, # FIXME: hidden_size per head should be even!
num_layers=12,
num_attention_heads=12,
max_sequence_length=512,
type_vocab_size=16,
inner_dim=3072,
inner_activation=lambda x: tf.keras.activations.gelu(x, approximate=True),
output_dropout=0.1,
attention_dropout=0.1,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
output_range=None,
embedding_width=None,
embedding_layer=None,
norm_first=False,
**kwargs):
if 'intermediate_size' in kwargs:
inner_dim = kwargs['intermediate_size']
del kwargs['intermediate_size']
if 'activation' in kwargs:
inner_activation = kwargs['activation']
del kwargs['activation']
if 'dropout_rate' in kwargs:
output_dropout = kwargs['dropout_rate']
del kwargs['dropout_rate']
if 'attention_dropout_rate' in kwargs:
attention_dropout = kwargs['attention_dropout_rate']
del kwargs['attention_dropout_rate']
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
embedding_layer_inst = layers.on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
else:
embedding_layer_inst = embedding_layer
word_embeddings = embedding_layer_inst(word_ids)
# Roformer does not need a position embedding layer
type_embedding_layer = layers.on_device_embedding.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')
type_embeddings = type_embedding_layer(type_ids)
# Roformer does not have absolute position embedding
embeddings = tf.keras.layers.Add()([word_embeddings, type_embeddings])
embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
embeddings = embedding_norm_layer(embeddings)
embeddings = (tf.keras.layers.Dropout(rate=output_dropout)(embeddings))
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
if embedding_width != hidden_size:
embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')
embeddings = embedding_projection(embeddings)
else:
embedding_projection = None
transformer_layers = []
data = embeddings
attention_mask = layers.SelfAttentionMask()(data, mask)
encoder_outputs = []
for i in range(num_layers):
if i == num_layers - 1 and output_range is not None:
transformer_output_range = output_range
else:
transformer_output_range = None
layer = roformer_encoder_block.RoformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
q_max_sequence_length=max_sequence_length,
kv_max_sequence_length=max_sequence_length,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
output_range=transformer_output_range,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='roformer/layer_%d' % i)
transformer_layers.append(layer)
data = layer([data, attention_mask])
encoder_outputs.append(data)
last_encoder_output = encoder_outputs[-1]
# Applying a tf.slice op (through subscript notation) to a Keras tensor
# like this will create a SliceOpLambda layer. This is better than a Lambda
# layer with Python code, because that is fundamentally less portable.
first_token_tensor = last_encoder_output[:, 0, :]
pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='pooler_transform')
cls_output = pooler_layer(first_token_tensor)
outputs = dict(
sequence_output=encoder_outputs[-1],
pooled_output=cls_output,
encoder_outputs=encoder_outputs,
)
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super(RoformerEncoder, self).__init__(
inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
config_dict = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self._pooler_layer = pooler_layer
self._transformer_layers = transformer_layers
self._embedding_norm_layer = embedding_norm_layer
self._embedding_layer = embedding_layer_inst
# Roformer encodes positions rotationally, so there is no absolute
# position embedding layer to track.
self._position_embedding_layer = None
self._type_embedding_layer = type_embedding_layer
if embedding_projection is not None:
self._embedding_projection = embedding_projection
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config._asdict())
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
logging.warning(warn_string)
return cls(**config)
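# Minimal usage sketch (sizes illustrative; hidden_size divided by
# num_attention_heads must be even for the rotary feature pairs):
#
#   encoder = RoformerEncoder(vocab_size=30522, hidden_size=256,
#                             num_layers=4, num_attention_heads=4)
#   outputs = encoder([word_ids, mask, type_ids])
#   outputs['sequence_output']  # shape: [batch, seq_len, 256]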
| 11,374 | 40.514599 | 80 | py |
models | models-master/official/projects/roformer/roformer_encoder_block_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.roformer import roformer_encoder_block
@parameterized.named_parameters(
('base', roformer_encoder_block.RoformerEncoderBlock))
class RoformerEncoderBlockTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(RoformerEncoderBlockTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer(input_data)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer(input_data)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_invocation_with_float16_dtype(self, transformer_cls):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_separate_qkv(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
output = test_layer(inputs)
self.assertEqual(output.shape, q_tensor.shape)
class RoformerArgumentTest(tf.test.TestCase, parameterized.TestCase):
def test_raises(self):
num_attention_heads = 2
with self.assertRaisesRegex(ValueError, 'The inner_dim of.*'):
_ = roformer_encoder_block.RoformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=31,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
def test_use_bias_norm_first(self):
num_attention_heads = 2
hidden_size = 16
encoder_block = roformer_encoder_block.RoformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
# Forward path.
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_mask]
output = encoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_get_config(self):
num_attention_heads = 2
encoder_block = roformer_encoder_block.RoformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
encoder_block_config = encoder_block.get_config()
new_encoder_block = roformer_encoder_block.RoformerEncoderBlock.from_config(
encoder_block_config)
self.assertEqual(encoder_block_config, new_encoder_block.get_config())
@parameterized.parameters({'attention_axes': None}, {'attention_axes': [1]},
{'attention_axes': [2]}, {'attention_axes': [1, 2]})
def test_several_attention_axes(self, attention_axes):
test_layer = roformer_encoder_block.RoformerEncoderBlock(
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
num_attention_heads=10,
attention_axes=attention_axes)
seq_len = 21
dimensions = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(seq_len, dimensions))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 12,909 | 38.96904 | 80 | py |
models | models-master/official/projects/yt8m/modeling/yt8m_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YT8M prediction model definition."""
import functools
from typing import Any, Optional
from absl import logging
import tensorflow as tf
from official.projects.yt8m.configs import yt8m as yt8m_cfg
from official.projects.yt8m.modeling import backbones # pylint: disable=unused-import
from official.projects.yt8m.modeling import heads
from official.vision.modeling.backbones import factory
layers = tf.keras.layers
class VideoClassificationModel(tf.keras.Model):
"""A video classification model class builder.
The model consists of a backbone (dbof) and a classification head.
The dbof backbone projects features for each frame into a higher dimensional
'clustering' space, pools across frames in that space, and then
uses a configurable video-level model to classify the now aggregated features.
The model will randomly sample either frames or sequences of frames during
training to speed up convergence.
"""
def __init__(
self,
params: yt8m_cfg.VideoClassificationModel,
backbone: Optional[tf.keras.Model] = None,
num_classes: int = 3862,
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, 1152]
),
l2_weight_decay: Optional[float] = None,
**kwargs,
):
"""YT8M video classification model initialization function.
Args:
params: Model configuration parameters.
backbone: Optional backbone model. Will build a backbone if None.
num_classes: `int` number of classes in dataset.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
[batch_size x num_frames x num_features]
l2_weight_decay: An optional `float` of kernel regularizer weight decay.
**kwargs: keyword arguments to be passed.
"""
super().__init__()
self._params = params
self._num_classes = num_classes
self._input_specs = input_specs
self._l2_weight_decay = l2_weight_decay
self._config_dict = {
"params": params,
"input_specs": input_specs,
"num_classes": num_classes,
"l2_weight_decay": l2_weight_decay,
}
if backbone is None:
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay / 2.0)
if l2_weight_decay
else None
)
backbone = factory.build_backbone(
input_specs=input_specs,
backbone_config=params.backbone,
norm_activation_config=params.norm_activation,
l2_regularizer=l2_regularizer,
**kwargs,
)
self.backbone = backbone
self.build_head()
def build_head(self):
logging.info("Build DbofModel with %s.", self._params.head.type)
head_cfg = self._params.head.get()
if self._params.head.type == "moe":
normalizer_params = dict(
synchronized=self._params.norm_activation.use_sync_bn,
momentum=self._params.norm_activation.norm_momentum,
epsilon=self._params.norm_activation.norm_epsilon,
)
aggregation_head = functools.partial(
heads.MoeModel, normalizer_params=normalizer_params
)
elif self._params.head.type == "logistic":
aggregation_head = heads.LogisticModel
else:
      logging.warning("Skip building head type: %s", self._params.head.type)
return
l2_regularizer = (
tf.keras.regularizers.l2(self._l2_weight_decay / 2.0)
if self._l2_weight_decay
else None
)
self.head = aggregation_head(
input_specs=layers.InputSpec(
shape=[None, self._params.backbone.get().hidden_size]
),
vocab_size=self._num_classes,
l2_regularizer=l2_regularizer,
**head_cfg.as_dict(),
)
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
def call(
self, inputs: tf.Tensor, training: Any = None, mask: Any = None
) -> dict[str, tf.Tensor]:
features = self.backbone(inputs)
outputs = self.head(features)
return outputs
@property
def checkpoint_items(self) -> dict[str, Any]:
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone, head=self.head)
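# Minimal construction sketch (added for illustration; not part of the
# original module): builds the model from the default task config and runs a
# random batch. 1152 = 1024 RGB + 128 audio features per frame; the batch
# and frame counts below are arbitrary.
if __name__ == "__main__":
  demo_model = VideoClassificationModel(params=yt8m_cfg.YT8MTask().model)
  demo_probs = demo_model(tf.random.uniform([2, 24, 1152]))["predictions"]
  print(demo_probs.shape)  # (2, 3862) with the default number of classes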
| 5,030 | 33.22449 | 86 | py |
models | models-master/official/projects/yt8m/modeling/yt8m_model_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a collection of util functions for model construction."""
from typing import Any, Dict, Optional, Union
import tensorflow as tf
def frame_pooling(frames, method):
"""Pools over the frames of a video.
Args:
frames: tensor of shape [batch_size, num_frames, feature_size].
    method: string indicating pooling method, one of: "average", "max", or
      "none".
  Returns:
    tensor of shape [batch_size, feature_size] for "average" or "max"
    pooling, and of shape [batch_size*num_frames, feature_size] for "none"
    pooling.
  Raises:
    ValueError: if method is other than "average", "max", or "none".
"""
if method == "average":
reduced = tf.reduce_mean(frames, 1)
elif method == "max":
reduced = tf.reduce_max(frames, 1)
elif method == "none":
    feature_size = frames.shape.as_list()[2]
reduced = tf.reshape(frames, [-1, feature_size])
else:
raise ValueError("Unrecognized pooling method: %s" % method)
return reduced
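# The following small sketch (added for illustration; not part of the
# original module) shows how frame_pooling collapses the time axis under
# each supported method; the tensor shapes are arbitrary.
def _frame_pooling_example():
  frames = tf.random.uniform([2, 8, 16])  # [batch, num_frames, features]
  averaged = frame_pooling(frames, "average")  # -> [2, 16]
  maxed = frame_pooling(frames, "max")  # -> [2, 16]
  flattened = frame_pooling(frames, "none")  # -> [16, 16]
  return averaged, maxed, flattened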
def context_gate(
input_features,
normalizer_fn=None,
normalizer_params: Optional[Dict[str, Any]] = None,
    kernel_initializer: Union[
        str, tf.keras.initializers.Initializer] = "glorot_uniform",
    kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
    bias_initializer: Union[str, tf.keras.initializers.Initializer] = "zeros",
hidden_layer_size: int = 0,
pooling_method: Optional[str] = None,
additive_residual: bool = False):
"""Context Gating.
More details: https://arxiv.org/pdf/1706.06905.pdf.
Args:
input_features: a tensor of at least rank 2.
    normalizer_fn: Normalization layer class to use instead of `biases` (e.g.
      tf.keras.layers.BatchNormalization). If None, bias is added.
    normalizer_params: Normalization layer constructor parameters.
    kernel_initializer: Weight initializer to use instead of Xavier (e.g.
      tf.keras.initializers.VarianceScaling).
    kernel_regularizer: Weight regularizer to use instead of None (e.g.
      tf.keras.regularizers.l2(l2_penalty)).
    bias_initializer: Biases initializer to use (default "zeros").
    hidden_layer_size: Dimensionality of the context gating hidden layer, if
      any. If set to 0, applies a single fully-connected context gating layer
      of shape [input_size x input_size]. If set to an int N >= 2, factorizes
      the context gating layer into [input_size x N] x [N x input_size] as in
      the squeeze-and-excitation block from
      https://arxiv.org/pdf/1709.01507.pdf.
pooling_method: Whether to perform global pooling of the local features
before applying the context gating layer. This is relevant only if the
input_features tensor has rank > 2, e.g., it's a sequence of frame
features, [batch_size, num_frames, feature_dim], or spatial convolution
features, [batch_size*num_frames, h, w, feature_dim]. If the inputs are a
set of local features and pooling_method is not None, will pool features
across all but the batch_size dimension using the specified pooling
method, and pass the aggregated features as context to the gating layer.
For a list of pooling methods, see the frame_pooling() function.
    additive_residual: If True, uses ReLU6-activated (additive) residual
      connections instead of sigmoid-activated (multiplicative) connections
      when combining the input_features with the context gating branch.
Returns:
A tensor with the same shape as input_features.
"""
if normalizer_params is None:
normalizer_params = {}
with tf.name_scope("ContextGating"):
num_dimensions = len(input_features.shape.as_list())
feature_size = input_features.shape.as_list()[-1]
if pooling_method:
assert num_dimensions > 2
# Collapse the inner axes of the original features shape into a 3D tensor
original_shape = tf.shape(input_features)
# The last dimension will change after concatenating the context
new_shape = tf.concat(
[original_shape[:-1],
tf.constant([2 * feature_size])], 0)
batch_size = original_shape[0]
reshaped_features = tf.reshape(input_features,
[batch_size, -1, feature_size])
num_features = tf.shape(reshaped_features)[1]
# Pool the feature channels across the inner axes to get global context
context_features = frame_pooling(reshaped_features, pooling_method)
context_features = tf.expand_dims(context_features, 1)
# Replicate the global context features and concat to the local features.
context_features = tf.tile(context_features, [1, num_features, 1])
context_features = tf.concat([reshaped_features, context_features], 2)
context_features = tf.reshape(context_features, shape=new_shape)
else:
context_features = input_features
if hidden_layer_size >= 2:
gates_bottleneck = tf.keras.layers.Dense(
hidden_layer_size,
activation="relu6",
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
)(context_features)
if normalizer_fn:
gates_bottleneck = normalizer_fn(**normalizer_params)(gates_bottleneck)
else:
gates_bottleneck = context_features
activation_fn = (tf.nn.relu6 if additive_residual else tf.nn.sigmoid)
gates = tf.keras.layers.Dense(
feature_size,
activation=activation_fn,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
)(gates_bottleneck)
if normalizer_fn:
gates = normalizer_fn(**normalizer_params)(gates)
if additive_residual:
input_features += tf.cast(gates, input_features.dtype)
else:
input_features *= tf.cast(gates, input_features.dtype)
return input_features
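# The following sketch (added for illustration; not part of the original
# module) gates a batch of frame features with average-pooled global
# context; the output keeps the input shape. The shapes and normalizer
# parameters are arbitrary choices for the demo.
def _context_gate_example():
  features = tf.random.uniform([2, 8, 16])  # [batch, num_frames, features]
  gated = context_gate(
      features,
      normalizer_fn=tf.keras.layers.BatchNormalization,
      normalizer_params={"momentum": 0.999, "epsilon": 1e-3},
      hidden_layer_size=4,
      pooling_method="average")
  return gated  # same shape as `features`: [2, 8, 16]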
| 6,547 | 41.519481 | 80 | py |
models | models-master/official/projects/yt8m/modeling/yt8m_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yt8m network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.yt8m.configs import yt8m as yt8m_cfg
from official.projects.yt8m.modeling import yt8m_model
class YT8MNetworkTest(parameterized.TestCase, tf.test.TestCase):
"""Class for testing yt8m network."""
# test_yt8m_network_creation arbitrary params
@parameterized.parameters((32, 1152), (24, 1152)) # 1152 = 1024 + 128
def test_yt8m_network_creation(self, num_frames, feature_dims):
"""Test for creation of a YT8M Model.
Args:
num_frames: number of frames.
feature_dims: indicates total dimension size of the features.
"""
input_specs = tf.keras.layers.InputSpec(shape=[None, None, feature_dims])
num_classes = 3862
model = yt8m_model.VideoClassificationModel(
params=yt8m_cfg.YT8MTask().model,
num_classes=num_classes,
input_specs=input_specs,
)
# batch = 2 -> arbitrary value for test.
inputs = np.random.rand(2, num_frames, feature_dims)
predictions = model(inputs)['predictions']
self.assertAllEqual([2, num_classes], predictions.numpy().shape)
def test_serialize_deserialize(self):
model = yt8m_model.VideoClassificationModel(
params=yt8m_cfg.YT8MTask().model
)
config = model.get_config()
new_model = yt8m_model.VideoClassificationModel.from_config(config)
# If the serialization was successful,
# the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,225 | 32.727273 | 77 | py |
models | models-master/official/projects/yt8m/modeling/backbones/dbof.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dbof model definitions."""
import functools
from typing import Optional
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.projects.yt8m.configs import yt8m as yt8m_cfg
from official.projects.yt8m.modeling import yt8m_model_utils as utils
from official.vision.configs import common
from official.vision.modeling.backbones import factory
layers = tf.keras.layers
class Dbof(tf.keras.Model):
"""A YT8M model class builder.
Creates a Deep Bag of Frames model.
The model projects the features for each frame into a higher dimensional
'clustering' space, pools across frames in that space, and then
uses a configurable video-level model to classify the now aggregated features.
The model will randomly sample either frames or sequences of frames during
training to speed up convergence.
"""
def __init__(
self,
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, 1152]
),
params: yt8m_cfg.DbofModel = yt8m_cfg.DbofModel(),
norm_activation: common.NormActivation = common.NormActivation(),
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs,
):
"""YT8M initialization function.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
[batch_size x num_frames x num_features].
params: model configuration parameters.
norm_activation: Model normalization and activation configs.
l2_regularizer: An optional kernel weight regularizer.
**kwargs: keyword arguments to be passed.
"""
self._self_setattr_tracking = False
self._input_specs = input_specs
self._params = params
self._norm_activation = norm_activation
self._act_fn = tf_utils.get_activation(self._norm_activation.activation)
self._norm = functools.partial(
layers.BatchNormalization,
momentum=self._norm_activation.norm_momentum,
epsilon=self._norm_activation.norm_epsilon,
synchronized=self._norm_activation.use_sync_bn,
)
# [batch_size x num_frames x num_features]
feature_size = input_specs.shape[-1]
# shape 'excluding' batch_size
model_input = tf.keras.Input(shape=self._input_specs.shape[1:])
# normalize input features
input_data = tf.nn.l2_normalize(model_input, -1)
tf.summary.histogram("input_hist", input_data)
# configure model
if params.add_batch_norm:
input_data = self._norm(name="input_bn")(input_data)
# activation = reshaped input * cluster weights
if params.cluster_size > 0:
activation = layers.Dense(
params.cluster_size,
kernel_regularizer=l2_regularizer,
kernel_initializer=tf.random_normal_initializer(
stddev=1 / tf.sqrt(tf.cast(feature_size, tf.float32))
),
)(input_data)
else:
activation = input_data
if params.add_batch_norm:
activation = self._norm(name="cluster_bn")(activation)
else:
cluster_biases = tf.Variable(
          tf.random_normal_initializer(
              stddev=1 / tf.math.sqrt(tf.cast(feature_size, tf.float32)))(
shape=[params.cluster_size]),
name="cluster_biases")
tf.summary.histogram("cluster_biases", cluster_biases)
activation += cluster_biases
activation = self._act_fn(activation)
tf.summary.histogram("cluster_output", activation)
if params.use_context_gate_cluster_layer:
pooling_method = None
norm_args = dict(name="context_gate_bn")
activation = utils.context_gate(
activation,
normalizer_fn=self._norm,
normalizer_params=norm_args,
pooling_method=pooling_method,
hidden_layer_size=params.context_gate_cluster_bottleneck_size,
kernel_regularizer=l2_regularizer)
activation = utils.frame_pooling(activation, params.pooling_method)
# activation = activation * hidden1_weights
activation = layers.Dense(
params.hidden_size,
kernel_regularizer=l2_regularizer,
kernel_initializer=tf.random_normal_initializer(
stddev=1 / tf.sqrt(tf.cast(params.cluster_size, tf.float32))))(
activation)
if params.add_batch_norm:
activation = self._norm(name="hidden1_bn")(activation)
else:
hidden1_biases = tf.Variable(
tf.random_normal_initializer(stddev=0.01)(shape=[params.hidden_size]),
name="hidden1_biases")
tf.summary.histogram("hidden1_biases", hidden1_biases)
activation += hidden1_biases
activation = self._act_fn(activation)
tf.summary.histogram("hidden1_output", activation)
super().__init__(inputs=model_input, outputs=activation, **kwargs)
@factory.register_backbone_builder("dbof")
def build_dbof(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs,
) -> tf.keras.Model:
"""Builds a dbof backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == "dbof", f"Inconsistent backbone type {backbone_type}"
return Dbof(
input_specs=input_specs,
params=backbone_cfg,
norm_activation=norm_activation_config,
l2_regularizer=l2_regularizer,
**kwargs,
)
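# Minimal construction sketch (added for illustration; not part of the
# original module): builds the Dbof backbone with its default config and
# pools a random batch of frame features down to the configured hidden size.
if __name__ == "__main__":
  demo_backbone = Dbof()
  demo_pooled = demo_backbone(tf.random.uniform([2, 24, 1152]))
  print(demo_pooled.shape)  # (2, hidden_size)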
| 6,032 | 34.280702 | 80 | py |
models | models-master/official/projects/yt8m/modeling/heads/logistic.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logistic model definitions."""
from typing import Optional
import tensorflow as tf
layers = tf.keras.layers
class LogisticModel(tf.keras.Model):
"""Logistic prediction head model with L2 regularization."""
def __init__(
self,
input_specs: layers.InputSpec = layers.InputSpec(shape=[None, 128]),
vocab_size: int = 3862,
return_logits: bool = False,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs,
):
"""Creates a logistic model.
Args:
      input_specs: `tf.keras.layers.InputSpec` describing the 'batch' x
        'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
return_logits: if True also return logits.
l2_regularizer: An optional L2 weight regularizer.
**kwargs: extra key word args.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
inputs = tf.keras.Input(shape=input_specs.shape[1:])
logits = layers.Dense(vocab_size, kernel_regularizer=l2_regularizer)(inputs)
outputs = {"predictions": tf.nn.sigmoid(logits)}
if return_logits:
outputs.update({"logits": logits})
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
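# Minimal usage sketch (added for illustration; not part of the original
# module): the head maps pooled video-level features to per-class sigmoid
# probabilities. The feature size and vocab size below are arbitrary.
if __name__ == "__main__":
  demo_head = LogisticModel(
      input_specs=layers.InputSpec(shape=[None, 128]), vocab_size=10)
  demo_outputs = demo_head(tf.random.uniform([4, 128]))
  print(demo_outputs["predictions"].shape)  # (4, 10)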
| 1,926 | 32.224138 | 80 | py |
models | models-master/official/projects/yt8m/modeling/heads/moe.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MoE model definitions."""
from typing import Any, Optional
import tensorflow as tf
from official.projects.yt8m.modeling import yt8m_model_utils as utils
layers = tf.keras.layers
class MoeModel(tf.keras.Model):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def __init__(
self,
input_specs: layers.InputSpec = layers.InputSpec(shape=[None, 128]),
vocab_size: int = 3862,
num_mixtures: int = 2,
use_input_context_gate: bool = False,
use_output_context_gate: bool = False,
normalizer_params: Optional[dict[str, Any]] = None,
vocab_as_last_dim: bool = False,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs,
):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers
in the mixture is not trained, and always predicts 0.
Args:
      input_specs: `tf.keras.layers.InputSpec` describing the 'batch_size' x
        'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
use_input_context_gate: if True apply context gate layer to the input.
use_output_context_gate: if True apply context gate layer to the output.
normalizer_params: parameters of the batch normalization.
vocab_as_last_dim: if True reshape `activations` and make `vocab_size` as
the last dimension to avoid small `num_mixtures` as the last dimension.
XLA pads up the dimensions of tensors: typically the last dimension will
be padded to 128, and the second to last will be padded to 8.
l2_regularizer: An optional L2 weight regularizer.
**kwargs: extra key word args.
Returns:
A dictionary with a tensor containing the probability predictions
of the model in the 'predictions' key. The dimensions of the tensor
are batch_size x num_classes.
"""
inputs = tf.keras.Input(shape=input_specs.shape[1:])
model_input = inputs
if use_input_context_gate:
model_input = utils.context_gate(
model_input,
normalizer_fn=layers.BatchNormalization,
normalizer_params=normalizer_params,
)
gate_activations = layers.Dense(
vocab_size * (num_mixtures + 1),
activation=None,
bias_initializer=None,
kernel_regularizer=l2_regularizer)(
model_input)
expert_activations = layers.Dense(
vocab_size * num_mixtures,
activation=None,
kernel_regularizer=l2_regularizer)(
model_input)
if vocab_as_last_dim:
# Batch x (num_mixtures + 1) x #Labels
gate_activations = tf.reshape(
gate_activations, [-1, num_mixtures + 1, vocab_size])
# Batch x num_mixtures x #Labels
expert_activations = tf.reshape(
expert_activations, [-1, num_mixtures, vocab_size])
else:
# (Batch * #Labels) x (num_mixtures + 1)
gate_activations = tf.reshape(gate_activations, [-1, num_mixtures + 1])
# (Batch * #Labels) x num_mixtures
expert_activations = tf.reshape(expert_activations, [-1, num_mixtures])
gating_distribution = tf.nn.softmax(gate_activations, axis=1)
expert_distribution = tf.nn.sigmoid(expert_activations)
final_probabilities = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, axis=1)
if not vocab_as_last_dim:
final_probabilities = tf.reshape(final_probabilities, [-1, vocab_size])
if use_output_context_gate:
final_probabilities = utils.context_gate(
final_probabilities,
normalizer_fn=layers.BatchNormalization,
normalizer_params=normalizer_params,
)
outputs = {"predictions": final_probabilities}
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
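# Minimal usage sketch (added for illustration; not part of the original
# module). With num_mixtures=2 the gating softmax runs over 3 logits per
# class (two trained experts plus the implicit always-zero expert), and the
# prediction is the gate-weighted sum of the two sigmoid experts.
if __name__ == "__main__":
  demo_head = MoeModel(
      input_specs=layers.InputSpec(shape=[None, 128]),
      vocab_size=10,
      num_mixtures=2)
  demo_outputs = demo_head(tf.random.uniform([4, 128]))
  print(demo_outputs["predictions"].shape)  # (4, 10)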
| 4,618 | 37.491667 | 80 | py |
models | models-master/official/projects/yt8m/tasks/yt8m_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video classification task definition."""
from typing import Dict, List, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.modeling import tf_utils
from official.projects.yt8m.configs import yt8m as yt8m_cfg
from official.projects.yt8m.dataloaders import yt8m_input
from official.projects.yt8m.eval_utils import eval_util
from official.projects.yt8m.modeling import yt8m_model
@task_factory.register_task_cls(yt8m_cfg.YT8MTask)
class YT8MTask(base_task.Task):
"""A task for video classification."""
def build_model(self):
"""Builds model for YT8M Task."""
train_cfg = self.task_config.train_data
common_input_shape = [None, sum(train_cfg.feature_sizes)]
# [batch_size x num_frames x num_features]
input_specs = tf.keras.layers.InputSpec(shape=[None] + common_input_shape)
logging.info('Build model input %r', common_input_shape)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Model configuration.
model_config = self.task_config.model
model = yt8m_model.VideoClassificationModel(
params=model_config,
input_specs=input_specs,
num_classes=train_cfg.num_classes,
l2_weight_decay=l2_weight_decay)
non_trainable_batch_norm_variables = []
non_trainable_extra_variables = []
for var in model.non_trainable_variables:
if 'moving_mean' in var.name or 'moving_variance' in var.name:
non_trainable_batch_norm_variables.append(var)
else:
non_trainable_extra_variables.append(var)
logging.info(
'Trainable model variables:\n%s',
'\n'.join(
[f'{var.name}\t{var.shape}' for var in model.trainable_variables]
),
)
logging.info(
(
'Non-trainable batch norm variables (get updated in training'
' mode):\n%s'
),
'\n'.join(
[
f'{var.name}\t{var.shape}'
for var in non_trainable_batch_norm_variables
]
),
)
logging.info(
'Non-trainable frozen model variables:\n%s',
'\n'.join(
[
f'{var.name}\t{var.shape}'
for var in non_trainable_extra_variables
]
),
)
return model
def build_inputs(self, params: yt8m_cfg.DataConfig, input_context=None):
"""Builds input.
Args:
params: configuration for input data
input_context: indicates information about the compute replicas and input
pipelines
Returns:
dataset: dataset fetched from reader
"""
decoder = yt8m_input.Decoder(input_params=params)
decoder_fn = decoder.decode
parser = yt8m_input.Parser(input_params=params)
parser_fn = parser.parse_fn(params.is_training)
postprocess = yt8m_input.PostBatchProcessor(input_params=params)
postprocess_fn = postprocess.post_fn
transform_batch = yt8m_input.TransformBatcher(input_params=params)
batch_fn = transform_batch.batch_fn
reader = input_reader.InputReader(
params,
dataset_fn=tf.data.TFRecordDataset,
decoder_fn=decoder_fn,
parser_fn=parser_fn,
postprocess_fn=postprocess_fn,
transform_and_batch_fn=batch_fn)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels,
model_outputs,
label_weights=None,
aux_losses=None):
"""Sigmoid Cross Entropy.
Args:
labels: tensor containing truth labels.
model_outputs: output probabilities of the classifier.
label_weights: optional tensor of label weights.
      aux_losses: tensor containing auxiliary loss tensors, i.e. `losses` in
keras.Model.
Returns:
A dict of tensors contains total loss, model loss tensors.
"""
losses_config = self.task_config.losses
model_loss = tf.keras.losses.binary_crossentropy(
tf.expand_dims(labels, axis=-1),
tf.expand_dims(model_outputs, axis=-1),
from_logits=losses_config.from_logits,
label_smoothing=losses_config.label_smoothing,
axis=-1)
if label_weights is None:
model_loss = tf_utils.safe_mean(model_loss)
else:
model_loss = model_loss * label_weights
      # Manually compute the weighted mean loss.
total_loss = tf.reduce_sum(model_loss)
total_weight = tf.cast(
tf.reduce_sum(label_weights), dtype=total_loss.dtype)
model_loss = tf.math.divide_no_nan(total_loss, total_weight)
total_loss = model_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
return {'total_loss': total_loss, 'model_loss': model_loss}
def build_metrics(self, training=True):
"""Gets streaming metrics for training/validation.
metric: mAP/gAP
top_k: A positive integer specifying how many predictions are considered
per video.
    top_n: A positive integer specifying the average precision at n, or None
to use all provided data points.
Args:
training: Bool value, true for training mode, false for eval/validation.
Returns:
A list of metrics to be used.
"""
metrics = []
metric_names = ['total_loss', 'model_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if (
self.task_config.evaluation.average_precision is not None
and not training
):
# Cannot run in train step.
num_classes = self.task_config.validation_data.num_classes
top_k = self.task_config.evaluation.average_precision.top_k
top_n = self.task_config.evaluation.average_precision.top_n
self.avg_prec_metric = eval_util.EvaluationMetrics(
num_classes, top_k=top_k, top_n=top_n)
return metrics
def process_metrics(
self,
metrics: List[tf.keras.metrics.Metric],
labels: tf.Tensor,
outputs: tf.Tensor,
model_losses: Optional[Dict[str, tf.Tensor]] = None,
label_weights: Optional[tf.Tensor] = None,
training: bool = True,
**kwargs,
) -> Dict[str, Tuple[tf.Tensor, ...]]:
"""Updates metrics.
Args:
metrics: Evaluation metrics to be updated.
labels: A tensor containing truth labels.
outputs: Model output logits of the classifier.
model_losses: An optional dict of model losses.
label_weights: Optional label weights, can be broadcast into shape of
outputs/labels.
training: Bool indicates if in training mode.
**kwargs: Additional input arguments.
Returns:
Updated dict of metrics log.
"""
if model_losses is None:
model_losses = {}
logs = {}
if (
self.task_config.evaluation.average_precision is not None
and not training
):
logs.update({self.avg_prec_metric.name: (labels, outputs)})
for m in metrics:
if m.name in model_losses:
m.update_state(model_losses[m.name])
logs[m.name] = m.result()
return logs
def _preprocess_model_inputs(self,
inputs: dict[str, tf.Tensor],
training: bool = True):
"""Preprocesses input tensors before model on device."""
del training
return inputs['video_matrix']
def _preprocess_labels(self,
inputs: dict[str, tf.Tensor],
training: bool = True):
"""Preprocesses labels."""
del training # training is unused in _preprocess_labels in YT8M.
labels = inputs['labels']
label_weights = inputs.get('label_weights', None)
return labels, label_weights
def _postprocess_outputs(self,
outputs,
labels,
label_weights,
training: bool = True):
"""Postprocess model outputs (inputs / labels / label_weights)."""
if not training and self.task_config.validation_data.segment_labels:
# workaround to ignore the unrated labels.
outputs *= label_weights
# remove padding
outputs = outputs[~tf.reduce_all(labels == -1, axis=1)]
labels = labels[~tf.reduce_all(labels == -1, axis=1)]
return outputs, labels, label_weights
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors. output_dict = { "video_ids":
batch_video_ids, "video_matrix": batch_video_matrix, "labels":
batch_labels, "num_frames": batch_frames, }
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
a dictionary of logs.
"""
model_inputs = self._preprocess_model_inputs(inputs, training=True)
labels, label_weights = self._preprocess_labels(inputs, training=True)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(model_inputs, training=True)['predictions']
      # Casting the output layer to float32 is necessary when mixed_precision
      # is mixed_float16 or mixed_bfloat16, to ensure the output is cast to
      # float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
# Post-process model / label outputs.
outputs, labels, label_weights = self._postprocess_outputs(
outputs, labels, label_weights, training=True)
# Computes per-replica loss
all_losses = self.build_losses(
model_outputs=outputs,
labels=labels,
label_weights=label_weights,
aux_losses=model.losses)
loss = all_losses['total_loss']
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
# Apply gradient clipping.
if self.task_config.gradient_clip_norm > 0:
grads, _ = tf.clip_by_global_norm(grads,
self.task_config.gradient_clip_norm)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
logs.update(
self.process_metrics(
metrics,
labels=labels,
outputs=outputs,
model_losses=all_losses,
label_weights=label_weights,
training=True))
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors. output_dict = { "video_ids":
batch_video_ids, "video_matrix": batch_video_matrix, "labels":
batch_labels, "num_frames": batch_frames}.
model: the model, forward definition.
metrics: a nested structure of metrics objects.
Returns:
a dictionary of logs.
"""
model_inputs = self._preprocess_model_inputs(inputs, training=False)
labels, label_weights = self._preprocess_labels(inputs, training=False)
outputs = self.inference_step(model_inputs, model)['predictions']
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
outputs, labels, label_weights = self._postprocess_outputs(
outputs, labels, label_weights, training=False)
all_losses = self.build_losses(
labels=labels,
model_outputs=outputs,
label_weights=label_weights,
aux_losses=model.losses)
logs = {self.loss: all_losses['total_loss']}
logs.update(
self.process_metrics(
metrics,
labels=labels,
outputs=outputs,
model_losses=all_losses,
label_weights=inputs.get('label_weights', None),
training=False))
return logs
def inference_step(self, inputs, model):
"""Performs the forward step."""
return model(inputs, training=False)
def aggregate_logs(self, state=None, step_logs=None):
if self.task_config.evaluation.average_precision is not None:
if state is None:
state = self.avg_prec_metric
self.avg_prec_metric.accumulate(
labels=step_logs[self.avg_prec_metric.name][0],
predictions=step_logs[self.avg_prec_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
if self.task_config.evaluation.average_precision is not None:
avg_prec_metrics = self.avg_prec_metric.get(
self.task_config.evaluation.average_precision.return_per_class_ap)
self.avg_prec_metric.clear()
return avg_prec_metrics
return None
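# Standalone sketch of the weighted reduction in `build_losses` (added for
# illustration; not part of the original module): with label weights, the
# per-entry binary cross entropy is averaged over the total weight rather
# than the element count, so zero-weight entries are ignored.
if __name__ == '__main__':
  demo_labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])
  demo_probs = tf.constant([[0.9, 0.2], [0.3, 0.7]])
  demo_weights = tf.constant([[1.0, 1.0], [0.0, 1.0]])  # drop one entry
  demo_bce = tf.keras.losses.binary_crossentropy(
      tf.expand_dims(demo_labels, axis=-1),
      tf.expand_dims(demo_probs, axis=-1))
  demo_loss = tf.math.divide_no_nan(
      tf.reduce_sum(demo_bce * demo_weights), tf.reduce_sum(demo_weights))
  print(float(demo_loss))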
| 13,880 | 34.141772 | 80 | py |
models | models-master/official/projects/s3d/modeling/inception_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
from official.projects.s3d.modeling import inception_utils
class InceptionUtilsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((1.0, 3, {'Conv2d_1a_7x7', 'Conv2d_2c_3x3'}),
(0.5, 5, {'Conv2d_1a_7x7', 'Conv2d_2c_3x3'}),
(0.25, 7, {'Conv2d_1a_7x7', 'Conv2d_2c_3x3'}))
def test_s3d_stem_cells(self, depth_multiplier, first_temporal_kernel_size,
temporal_conv_endpoints):
batch_size = 1
num_frames = 64
height, width = 224, 224
inputs = tf.keras.layers.Input(
shape=(num_frames, height, width, 3), batch_size=batch_size)
outputs, output_endpoints = inception_utils.inception_v1_stem_cells(
inputs,
depth_multiplier,
'Mixed_5c',
temporal_conv_endpoints=temporal_conv_endpoints,
self_gating_endpoints={'Conv2d_2c_3x3'},
first_temporal_kernel_size=first_temporal_kernel_size)
self.assertListEqual(outputs.shape.as_list(),
[batch_size, 32, 28, 28, int(192 * depth_multiplier)])
expected_endpoints = {
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3'
}
self.assertSetEqual(expected_endpoints, set(output_endpoints.keys()))
@parameterized.parameters(
('3d', True, True, True),
('2d', False, False, True),
('1+2d', True, False, False),
('2+1d', False, True, False),
)
def test_inception_v1_cell_endpoint_match(self, conv_type,
swap_pool_and_1x1x1,
use_self_gating_on_branch,
use_self_gating_on_cell):
batch_size = 5
num_frames = 32
channels = 128
height, width = 28, 28
inputs = tf.keras.layers.Input(
shape=(num_frames, height, width, channels), batch_size=batch_size)
inception_v1_cell_layer = inception_utils.InceptionV1CellLayer(
[[64], [96, 128], [16, 32], [32]],
conv_type=conv_type,
swap_pool_and_1x1x1=swap_pool_and_1x1x1,
use_self_gating_on_branch=use_self_gating_on_branch,
use_self_gating_on_cell=use_self_gating_on_cell,
name='test')
outputs = inception_v1_cell_layer(inputs)
self.assertListEqual(outputs.shape.as_list(),
[batch_size, 32, 28, 28, 256])
if __name__ == '__main__':
tf.test.main()
| 3,194 | 36.588235 | 79 | py |
models | models-master/official/projects/s3d/modeling/s3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for S3D model."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.s3d.modeling import s3d
class S3dTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(7, 224, 224, 3),
(7, 128, 128, 3),
(7, 256, 256, 3),
(7, 192, 192, 3),
(64, 224, 224, 3),
(32, 224, 224, 3),
(64, 224, 224, 11),
(32, 224, 224, 11),
)
def test_build(self, num_frames, height, width, first_temporal_kernel_size):
batch_size = 5
input_shape = [batch_size, num_frames, height, width, 3]
input_specs = tf.keras.layers.InputSpec(shape=input_shape)
network = s3d.S3D(
input_specs=input_specs
)
inputs = tf.keras.Input(shape=input_shape[1:], batch_size=input_shape[0])
endpoints = network(inputs)
temporal_1a = (num_frames - 1)//2 + 1
expected_shapes = {
'Conv2d_1a_7x7': [5, temporal_1a, height//2, width//2, 64],
'Conv2d_2b_1x1': [5, temporal_1a, height//4, width//4, 64],
        'Conv2d_2c_3x3': [5, temporal_1a, height//4, width//4, 192],
        'MaxPool_2a_3x3': [5, temporal_1a, height//4, width//4, 64],
'MaxPool_3a_3x3': [5, temporal_1a, height//8, width//8, 192],
'Mixed_3b': [5, temporal_1a, height//8, width//8, 256],
'Mixed_3c': [5, temporal_1a, height//8, width//8, 480],
'MaxPool_4a_3x3': [5, temporal_1a//2, height//16, width//16, 480],
'Mixed_4b': [5, temporal_1a//2, height//16, width//16, 512],
'Mixed_4c': [5, temporal_1a//2, height//16, width//16, 512],
'Mixed_4d': [5, temporal_1a//2, height//16, width//16, 512],
'Mixed_4e': [5, temporal_1a//2, height//16, width//16, 528],
'Mixed_4f': [5, temporal_1a//2, height//16, width//16, 832],
'MaxPool_5a_2x2': [5, temporal_1a//4, height//32, width//32, 832],
'Mixed_5b': [5, temporal_1a//4, height//32, width//32, 832],
'Mixed_5c': [5, temporal_1a//4, height//32, width//32, 1024],
}
output_shapes = dict()
for end_point, output_tensor in endpoints.items():
output_shapes[end_point] = output_tensor.shape.as_list()
self.assertDictEqual(output_shapes, expected_shapes)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
input_specs=tf.keras.layers.InputSpec(shape=(5, 64, 224, 224, 3)),
final_endpoint='Mixed_5c',
first_temporal_kernel_size=3,
temporal_conv_start_at='Conv2d_2c_3x3',
gating_start_at='Conv2d_2c_3x3',
swap_pool_and_1x1x1=True,
gating_style='CELL',
use_sync_bn=False,
norm_momentum=0.999,
norm_epsilon=0.001,
temporal_conv_initializer=tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
temporal_conv_type='2+1d',
kernel_initializer='truncated_normal',
kernel_regularizer='l2',
depth_multiplier=1.0
)
network = s3d.S3D(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = s3d.S3D.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,127 | 37.579439 | 79 | py |
models | models-master/official/projects/s3d/modeling/s3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the Tensorflow 2 version definition of S3D model.
S3D model is described in the following paper:
https://arxiv.org/abs/1712.04851.
"""
from typing import Any, Dict, Mapping, Optional, Sequence, Text, Tuple, Union
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.s3d.configs import s3d as cfg
from official.projects.s3d.modeling import inception_utils
from official.projects.s3d.modeling import net_utils
from official.vision.modeling import factory_3d as model_factory
from official.vision.modeling.backbones import factory as backbone_factory
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
class S3D(tf.keras.Model):
"""Class to build S3D family model."""
def __init__(self,
input_specs: tf.keras.layers.InputSpec,
final_endpoint: Text = 'Mixed_5c',
first_temporal_kernel_size: int = 3,
temporal_conv_start_at: Text = 'Conv2d_2c_3x3',
gating_start_at: Text = 'Conv2d_2c_3x3',
swap_pool_and_1x1x1: bool = True,
gating_style: Text = 'CELL',
use_sync_bn: bool = False,
norm_momentum: float = 0.999,
norm_epsilon: float = 0.001,
temporal_conv_initializer: Union[
Text,
initializers.Initializer] = initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
temporal_conv_type: Text = '2+1d',
kernel_initializer: Union[
Text,
initializers.Initializer] = initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2',
depth_multiplier: float = 1.0,
**kwargs):
"""Constructor.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
final_endpoint: Specifies the endpoint to construct the network up to.
first_temporal_kernel_size: Temporal kernel size of the first convolution
layer.
      temporal_conv_start_at: Specifies the endpoint where to start performing
        temporal convolution from.
      gating_start_at: Specifies the endpoint where to start performing self
        gating from.
swap_pool_and_1x1x1: A boolean flag indicates that whether to swap the
order of convolution and max pooling in Branch_3 of inception v1 cell.
gating_style: A string that specifies self gating to be applied after each
branch and/or after each cell. It can be one of ['BRANCH', 'CELL',
'BRANCH_AND_CELL'].
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
temporal_conv_initializer: Weight initializer for temporal convolutional
layers.
temporal_conv_type: The type of parameterized convolution. Currently, we
support '2d', '3d', '2+1d', '1+2d'.
kernel_initializer: Weight initializer for convolutional layers other than
temporal convolution.
kernel_regularizer: Weight regularizer for all convolutional layers.
depth_multiplier: A float to reduce/increase number of channels.
**kwargs: keyword arguments to be passed.
"""
self._input_specs = input_specs
self._final_endpoint = final_endpoint
self._first_temporal_kernel_size = first_temporal_kernel_size
self._temporal_conv_start_at = temporal_conv_start_at
self._gating_start_at = gating_start_at
self._swap_pool_and_1x1x1 = swap_pool_and_1x1x1
self._gating_style = gating_style
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._temporal_conv_initializer = temporal_conv_initializer
self._temporal_conv_type = temporal_conv_type
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._depth_multiplier = depth_multiplier
self._temporal_conv_endpoints = net_utils.make_set_from_start_endpoint(
temporal_conv_start_at, inception_utils.INCEPTION_V1_CONV_ENDPOINTS)
self._self_gating_endpoints = net_utils.make_set_from_start_endpoint(
gating_start_at, inception_utils.INCEPTION_V1_CONV_ENDPOINTS)
inputs = tf.keras.Input(shape=input_specs.shape[1:])
net, end_points = inception_utils.inception_v1_stem_cells(
inputs,
depth_multiplier,
final_endpoint,
temporal_conv_endpoints=self._temporal_conv_endpoints,
self_gating_endpoints=self._self_gating_endpoints,
temporal_conv_type=self._temporal_conv_type,
first_temporal_kernel_size=self._first_temporal_kernel_size,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
temporal_conv_initializer=self._temporal_conv_initializer,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
parameterized_conv_layer=self._get_parameterized_conv_layer_impl(),
layer_naming_fn=self._get_layer_naming_fn(),
)
for end_point, filters in inception_utils.INCEPTION_V1_ARCH_SKELETON:
net, end_points = self._s3d_cell(net, end_point, end_points, filters)
if end_point == final_endpoint:
break
if final_endpoint not in end_points:
raise ValueError(
'Unrecognized final endpoint %s (available endpoints: %s).' %
(final_endpoint, end_points.keys()))
    # Record output specs so that the `output_specs` property is populated.
    self._output_specs = {k: v.get_shape() for k, v in end_points.items()}
    super(S3D, self).__init__(inputs=inputs, outputs=end_points, **kwargs)
def _s3d_cell(
self,
net: tf.Tensor,
end_point: Text,
end_points: Dict[Text, tf.Tensor],
filters: Union[int, Sequence[Any]],
non_local_block: Optional[tf.keras.layers.Layer] = None,
attention_cell: Optional[tf.keras.layers.Layer] = None,
attention_cell_super_graph: Optional[tf.keras.layers.Layer] = None
) -> Tuple[tf.Tensor, Dict[Text, tf.Tensor]]:
if end_point.startswith('Mixed'):
conv_type = (
self._temporal_conv_type
if end_point in self._temporal_conv_endpoints else '2d')
use_self_gating_on_branch = (
end_point in self._self_gating_endpoints and
(self._gating_style == 'BRANCH' or
self._gating_style == 'BRANCH_AND_CELL'))
use_self_gating_on_cell = (
end_point in self._self_gating_endpoints and
(self._gating_style == 'CELL' or
self._gating_style == 'BRANCH_AND_CELL'))
net = self._get_inception_v1_cell_layer_impl()(
branch_filters=net_utils.apply_depth_multiplier(
filters, self._depth_multiplier),
conv_type=conv_type,
temporal_dilation_rate=1,
swap_pool_and_1x1x1=self._swap_pool_and_1x1x1,
use_self_gating_on_branch=use_self_gating_on_branch,
use_self_gating_on_cell=use_self_gating_on_cell,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
kernel_initializer=self._kernel_initializer,
temporal_conv_initializer=self._temporal_conv_initializer,
kernel_regularizer=self._kernel_regularizer,
parameterized_conv_layer=self._get_parameterized_conv_layer_impl(),
name=self._get_layer_naming_fn()(end_point))(
net)
else:
net = tf.keras.layers.MaxPool3D(
pool_size=filters[0],
strides=filters[1],
padding='same',
name=self._get_layer_naming_fn()(end_point))(
net)
end_points[end_point] = net
if non_local_block:
# TODO(b/182299420): Implement non local block in TF2.
raise NotImplementedError('Non local block is not implemented yet.')
if attention_cell:
# TODO(b/182299420): Implement attention cell in TF2.
raise NotImplementedError('Attention cell is not implemented yet.')
if attention_cell_super_graph:
# TODO(b/182299420): Implement attention cell super graph in TF2.
raise NotImplementedError('Attention cell super graph is not implemented'
' yet.')
return net, end_points
def get_config(self):
config_dict = {
'input_specs': self._input_specs,
'final_endpoint': self._final_endpoint,
'first_temporal_kernel_size': self._first_temporal_kernel_size,
'temporal_conv_start_at': self._temporal_conv_start_at,
'gating_start_at': self._gating_start_at,
'swap_pool_and_1x1x1': self._swap_pool_and_1x1x1,
'gating_style': self._gating_style,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'temporal_conv_initializer': self._temporal_conv_initializer,
'temporal_conv_type': self._temporal_conv_type,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'depth_multiplier': self._depth_multiplier
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
def _get_inception_v1_cell_layer_impl(self):
return inception_utils.InceptionV1CellLayer
def _get_parameterized_conv_layer_impl(self):
return net_utils.ParameterizedConvLayer
def _get_layer_naming_fn(self):
return lambda end_point: None
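# The following sketch (added for illustration; not part of the original
# module) builds the S3D backbone with default arguments and collects the
# endpoint shapes for a small random clip. The clip size is arbitrary.
def _s3d_backbone_example():
  backbone = S3D(
      input_specs=tf.keras.layers.InputSpec(shape=[None, 32, 224, 224, 3]))
  endpoints = backbone(tf.random.uniform([1, 32, 224, 224, 3]))
  return {name: tensor.shape for name, tensor in endpoints.items()}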
class S3DModel(tf.keras.Model):
"""An S3D model builder."""
def __init__(self,
backbone: tf.keras.Model,
num_classes: int,
input_specs: Mapping[Text, tf.keras.layers.InputSpec],
final_endpoint: Text = 'Mixed_5c',
dropout_rate: float = 0.0,
**kwargs):
"""Constructor.
Args:
backbone: S3D backbone Keras Model.
num_classes: `int` number of possible classes for video classification.
      input_specs: `tf.keras.layers.InputSpec` specs of the input
        tensor.
final_endpoint: Specifies the endpoint to construct the network up to.
dropout_rate: `float` between 0 and 1. Fraction of the input units to
drop. Note that dropout_rate = 1.0 - dropout_keep_prob.
**kwargs: keyword arguments to be passed.
"""
self._self_setattr_tracking = False
self._backbone = backbone
self._num_classes = num_classes
self._input_specs = input_specs
self._final_endpoint = final_endpoint
self._dropout_rate = dropout_rate
self._config_dict = {
'backbone': backbone,
'num_classes': num_classes,
'input_specs': input_specs,
'final_endpoint': final_endpoint,
'dropout_rate': dropout_rate,
}
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
streams = self._backbone(inputs['image'])
pool = tf.math.reduce_mean(streams[self._final_endpoint], axis=[1, 2, 3])
fc = tf.keras.layers.Dropout(dropout_rate)(pool)
logits = tf.keras.layers.Dense(**self._build_dense_layer_params())(fc)
super(S3DModel, self).__init__(inputs=inputs, outputs=logits, **kwargs)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self):
return self._backbone
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def _build_dense_layer_params(self):
return dict(units=self._num_classes, kernel_regularizer='l2')
@backbone_factory.register_backbone_builder('s3d')
def build_s3d(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds S3D backbone."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 's3d'
del norm_activation_config
backbone = S3D(
input_specs=input_specs,
final_endpoint=backbone_cfg.final_endpoint,
first_temporal_kernel_size=backbone_cfg.first_temporal_kernel_size,
temporal_conv_start_at=backbone_cfg.temporal_conv_start_at,
gating_start_at=backbone_cfg.gating_start_at,
swap_pool_and_1x1x1=backbone_cfg.swap_pool_and_1x1x1,
gating_style=backbone_cfg.gating_style,
use_sync_bn=backbone_cfg.use_sync_bn,
norm_momentum=backbone_cfg.norm_momentum,
norm_epsilon=backbone_cfg.norm_epsilon,
temporal_conv_type=backbone_cfg.temporal_conv_type,
kernel_regularizer=l2_regularizer,
depth_multiplier=backbone_cfg.depth_multiplier)
return backbone
@model_factory.register_model_builder('s3d')
def build_s3d_model(
input_specs: tf.keras.layers.InputSpec,
model_config: cfg.S3DModel,
num_classes: int,
l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds S3D model with classification layer."""
input_specs_dict = {'image': input_specs}
backbone = build_s3d(input_specs, model_config.backbone,
model_config.norm_activation, l2_regularizer)
model = S3DModel(
backbone,
num_classes=num_classes,
input_specs=input_specs_dict,
dropout_rate=model_config.dropout_rate)
return model
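# Minimal end-to-end sketch (added for illustration; not part of the
# original module): builds the full classifier from a default `cfg.S3DModel`
# config; the clip size and number of classes below are arbitrary.
if __name__ == '__main__':
  demo_model = build_s3d_model(
      input_specs=tf.keras.layers.InputSpec(shape=[None, 32, 224, 224, 3]),
      model_config=cfg.S3DModel(),
      num_classes=101)
  demo_logits = demo_model({'image': tf.random.uniform([1, 32, 224, 224, 3])})
  print(demo_logits.shape)  # (1, 101)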
| 14,486 | 39.579832 | 80 | py |
models | models-master/official/projects/s3d/modeling/inception_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains modules related to Inception networks."""
from typing import Callable, Dict, Optional, Sequence, Set, Text, Tuple, Type, Union
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.s3d.modeling import net_utils
from official.vision.modeling.layers import nn_blocks_3d
INCEPTION_V1_CONV_ENDPOINTS = [
'Conv2d_1a_7x7', 'Conv2d_2c_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'Mixed_5b', 'Mixed_5c'
]
# Mapping from endpoint to branch filters. The endpoint shapes below are
# specific for input 64x224x224.
INCEPTION_V1_ARCH_SKELETON = [
('Mixed_3b', [[64], [96, 128], [16, 32], [32]]), # 32x28x28x256
('Mixed_3c', [[128], [128, 192], [32, 96], [64]]), # 32x28x28x480
('MaxPool_4a_3x3', [[3, 3, 3], [2, 2, 2]]), # 16x14x14x480
('Mixed_4b', [[192], [96, 208], [16, 48], [64]]), # 16x14x14x512
('Mixed_4c', [[160], [112, 224], [24, 64], [64]]), # 16x14x14x512
('Mixed_4d', [[128], [128, 256], [24, 64], [64]]), # 16x14x14x512
('Mixed_4e', [[112], [144, 288], [32, 64], [64]]), # 16x14x14x528
('Mixed_4f', [[256], [160, 320], [32, 128], [128]]), # 16x14x14x832
('MaxPool_5a_2x2', [[2, 2, 2], [2, 2, 2]]), # 8x7x7x832
('Mixed_5b', [[256], [160, 320], [32, 128], [128]]), # 8x7x7x832
('Mixed_5c', [[384], [192, 384], [48, 128], [128]]), # 8x7x7x1024
]
INCEPTION_V1_LOCAL_SKELETON = [
('MaxPool_5a_2x2_local', [[2, 2, 2], [2, 2, 2]]), # 8x7x7x832
('Mixed_5b_local', [[256], [160, 320], [32, 128], [128]]), # 8x7x7x832
('Mixed_5c_local', [[384], [192, 384], [48, 128], [128]]), # 8x7x7x1024
]
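# Small reading aid (added for illustration; not part of the original
# module): each skeleton entry pairs an endpoint name with its parameters.
# 'Mixed' cells carry the four inception branch filter groups, while
# 'MaxPool' entries carry [pool_size, strides].
def _describe_skeleton_entry(entry):
  name, params = entry
  if name.startswith('Mixed'):
    return '%s: inception cell with branch filters %s' % (name, params)
  return '%s: max pool with pool size %s and strides %s' % (
      name, params[0], params[1])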
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
def inception_v1_stem_cells(
inputs: tf.Tensor,
depth_multiplier: float,
final_endpoint: Text,
temporal_conv_endpoints: Optional[Set[Text]] = None,
self_gating_endpoints: Optional[Set[Text]] = None,
temporal_conv_type: Text = '3d',
first_temporal_kernel_size: int = 7,
use_sync_bn: bool = False,
norm_momentum: float = 0.999,
norm_epsilon: float = 0.001,
temporal_conv_initializer: Union[
Text, initializers.Initializer] = initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
kernel_initializer: Union[Text,
initializers.Initializer] = 'truncated_normal',
kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2',
parameterized_conv_layer: Type[
net_utils.ParameterizedConvLayer] = net_utils.ParameterizedConvLayer,
layer_naming_fn: Callable[[Text], Text] = lambda end_point: None,
) -> Tuple[tf.Tensor, Dict[Text, tf.Tensor]]:
"""Stem cells used in the original I3D/S3D model.
Args:
inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
channels].
depth_multiplier: A float to reduce/increase number of channels.
final_endpoint: Specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3'].
    temporal_conv_endpoints: Specifies the endpoints at which to perform
      temporal convolution.
    self_gating_endpoints: Specifies the endpoints at which to perform self
      gating.
temporal_conv_type: '3d' for I3D model and '2+1d' for S3D model.
first_temporal_kernel_size: temporal kernel size of the first convolution
layer.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
temporal_conv_initializer: Weight initializer for temporal convolution
inside the cell. It only applies to 2+1d and 1+2d cases.
kernel_initializer: Weight initializer for convolutional layers other than
temporal convolution.
kernel_regularizer: Weight regularizer for all convolutional layers.
parameterized_conv_layer: class for parameterized conv layer.
layer_naming_fn: function to customize conv / pooling layer names given
      endpoint name of the block. This is mainly used to create models that
      are compatible with TF1 checkpoints.
Returns:
    A tuple of the output tensor of the final endpoint and a dictionary from
    each constructed network component (endpoint name) to its activation.
"""
if temporal_conv_endpoints is None:
temporal_conv_endpoints = set()
if self_gating_endpoints is None:
self_gating_endpoints = set()
if use_sync_bn:
batch_norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
batch_norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
end_points = {}
# batch_size x 32 x 112 x 112 x 64
end_point = 'Conv2d_1a_7x7'
net = tf.keras.layers.Conv3D(
filters=net_utils.apply_depth_multiplier(64, depth_multiplier),
kernel_size=[first_temporal_kernel_size, 7, 7],
strides=[2, 2, 2],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
kernel_regularizer=kernel_regularizer,
name=layer_naming_fn(end_point))(
inputs)
net = batch_norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
scale=False,
gamma_initializer='ones',
name=layer_naming_fn(end_point + '/BatchNorm'))(
net)
net = tf.nn.relu(net)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'MaxPool_2a_3x3'
net = tf.keras.layers.MaxPool3D(
pool_size=[1, 3, 3],
strides=[1, 2, 2],
padding='same',
name=layer_naming_fn(end_point))(
net)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = tf.keras.layers.Conv3D(
filters=net_utils.apply_depth_multiplier(64, depth_multiplier),
strides=[1, 1, 1],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
kernel_regularizer=kernel_regularizer,
name=layer_naming_fn(end_point))(
net)
net = batch_norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
scale=False,
gamma_initializer='ones',
name=layer_naming_fn(end_point + '/BatchNorm'))(
net)
net = tf.nn.relu(net)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 192
end_point = 'Conv2d_2c_3x3'
if end_point not in temporal_conv_endpoints:
temporal_conv_type = '2d'
net = parameterized_conv_layer(
conv_type=temporal_conv_type,
kernel_size=3,
filters=net_utils.apply_depth_multiplier(192, depth_multiplier),
strides=[1, 1, 1],
rates=[1, 1, 1],
use_sync_bn=use_sync_bn,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
temporal_conv_initializer=temporal_conv_initializer,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
kernel_regularizer=kernel_regularizer,
name=layer_naming_fn(end_point))(
net)
if end_point in self_gating_endpoints:
net = nn_blocks_3d.SelfGating(
filters=net_utils.apply_depth_multiplier(192, depth_multiplier),
name=layer_naming_fn(end_point + '/self_gating'))(
net)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 28 x 28 x 192
end_point = 'MaxPool_3a_3x3'
net = tf.keras.layers.MaxPool3D(
pool_size=[1, 3, 3],
strides=[1, 2, 2],
padding='same',
name=layer_naming_fn(end_point))(
net)
end_points[end_point] = net
return net, end_points
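# Illustrative usage, not part of the original library; the input sizes below
# are assumptions picked for a quick shape check. Running the stem on a
# 16-frame 64x64 RGB clip up to 'MaxPool_3a_3x3' halves the temporal axis once
# (the stride-2 first conv) and the spatial axes three times, giving a
# [2, 8, 8, 8, 192] feature map plus the intermediate endpoint activations.
def _stem_cells_shape_example():
  videos = tf.keras.Input(shape=(16, 64, 64, 3), batch_size=2)
  net, end_points = inception_v1_stem_cells(
      videos, depth_multiplier=1.0, final_endpoint='MaxPool_3a_3x3')
  assert net.shape.as_list() == [2, 8, 8, 8, 192]
  assert 'Conv2d_2c_3x3' in end_points
  return net, end_points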
def _construct_branch_3_layers(
channels: int,
swap_pool_and_1x1x1: bool,
pool_type: Text,
batch_norm_layer: tf.keras.layers.Layer,
kernel_initializer: Union[Text, initializers.Initializer],
kernel_regularizer: Union[Text, regularizers.Regularizer],
):
"""Helper function for Branch 3 inside Inception module."""
kernel_size = [1, 3, 3] if pool_type == '2d' else [3] * 3
conv = tf.keras.layers.Conv3D(
filters=channels,
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)
activation = tf.keras.layers.Activation('relu')
pool = tf.keras.layers.MaxPool3D(
pool_size=kernel_size, strides=[1, 1, 1], padding='same')
if swap_pool_and_1x1x1:
branch_3_layers = [conv, batch_norm_layer, activation, pool]
else:
branch_3_layers = [pool, conv, batch_norm_layer, activation]
return branch_3_layers
class InceptionV1CellLayer(tf.keras.layers.Layer):
"""A single Tensorflow 2 cell used in the original I3D/S3D model."""
def __init__(
self,
branch_filters: Sequence[Sequence[int]],
conv_type: Text = '3d',
temporal_dilation_rate: int = 1,
swap_pool_and_1x1x1: bool = False,
use_self_gating_on_branch: bool = False,
use_self_gating_on_cell: bool = False,
use_sync_bn: bool = False,
norm_momentum: float = 0.999,
norm_epsilon: float = 0.001,
temporal_conv_initializer: Union[
Text, initializers.Initializer] = initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
kernel_initializer: Union[Text,
initializers.Initializer] = 'truncated_normal',
kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2',
parameterized_conv_layer: Type[
net_utils.ParameterizedConvLayer] = net_utils.ParameterizedConvLayer,
**kwargs):
"""A cell structure inspired by Inception V1.
Args:
branch_filters: Specifies the number of filters in four branches
(Branch_0, Branch_1, Branch_2, Branch_3). Single number for Branch_0 and
Branch_3. For Branch_1 and Branch_2, each need to specify two numbers,
one for 1x1x1 and one for 3x3x3.
conv_type: The type of parameterized convolution. Currently, we support
'2d', '3d', '2+1d', '1+2d'.
temporal_dilation_rate: The dilation rate for temporal convolution.
swap_pool_and_1x1x1: A boolean flag indicates that whether to swap the
order of convolution and max pooling in Branch_3.
use_self_gating_on_branch: Whether or not to apply self gating on each
branch of the inception cell.
use_self_gating_on_cell: Whether or not to apply self gating on each cell
after the concatenation of all branches.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
temporal_conv_initializer: Weight initializer for temporal convolution
inside the cell. It only applies to 2+1d and 1+2d cases.
kernel_initializer: Weight initializer for convolutional layers other than
temporal convolution.
kernel_regularizer: Weight regularizer for all convolutional layers.
parameterized_conv_layer: class for parameterized conv layer.
**kwargs: keyword arguments to be passed.
    When called, the layer returns:
out_tensor: A 5-D float tensor of size [batch_size, num_frames, height,
width, channels].
"""
super(InceptionV1CellLayer, self).__init__(**kwargs)
self._branch_filters = branch_filters
self._conv_type = conv_type
self._temporal_dilation_rate = temporal_dilation_rate
self._swap_pool_and_1x1x1 = swap_pool_and_1x1x1
self._use_self_gating_on_branch = use_self_gating_on_branch
self._use_self_gating_on_cell = use_self_gating_on_cell
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._temporal_conv_initializer = temporal_conv_initializer
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._parameterized_conv_layer = parameterized_conv_layer
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._channel_axis = -1
else:
self._channel_axis = 1
def _build_branch_params(self):
branch_0_params = [
# Conv3D
dict(
filters=self._branch_filters[0][0],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
# norm
dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones'),
# relu
dict(),
]
branch_1_params = [
# Conv3D
dict(
filters=self._branch_filters[1][0],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
# norm
dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones'),
# relu
dict(),
# ParameterizedConvLayer
dict(
conv_type=self._conv_type,
kernel_size=3,
filters=self._branch_filters[1][1],
strides=[1, 1, 1],
rates=[self._temporal_dilation_rate, 1, 1],
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
temporal_conv_initializer=self._temporal_conv_initializer,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
]
branch_2_params = [
# Conv3D
dict(
filters=self._branch_filters[2][0],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
# norm
dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones'),
# relu
dict(),
# ParameterizedConvLayer
dict(
conv_type=self._conv_type,
kernel_size=3,
filters=self._branch_filters[2][1],
strides=[1, 1, 1],
rates=[self._temporal_dilation_rate, 1, 1],
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
temporal_conv_initializer=self._temporal_conv_initializer,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer)
]
branch_3_params = [
# Conv3D
dict(
filters=self._branch_filters[3][0],
kernel_size=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer),
# norm
dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones'),
# relu
dict(),
# pool
dict(
pool_size=([1, 3, 3] if self._conv_type == '2d' else [3] * 3),
strides=[1, 1, 1],
padding='same')
]
if self._use_self_gating_on_branch:
branch_0_params.append(dict(filters=self._branch_filters[0][0]))
branch_1_params.append(dict(filters=self._branch_filters[1][1]))
branch_2_params.append(dict(filters=self._branch_filters[2][1]))
branch_3_params.append(dict(filters=self._branch_filters[3][0]))
out_gating_params = []
if self._use_self_gating_on_cell:
out_channels = (
self._branch_filters[0][0] + self._branch_filters[1][1] +
self._branch_filters[2][1] + self._branch_filters[3][0])
out_gating_params.append(dict(filters=out_channels))
return [
branch_0_params, branch_1_params, branch_2_params, branch_3_params,
out_gating_params
]
def build(self, input_shape):
branch_params = self._build_branch_params()
self._branch_0_layers = [
tf.keras.layers.Conv3D(**branch_params[0][0]),
self._norm(**branch_params[0][1]),
tf.keras.layers.Activation('relu', **branch_params[0][2]),
]
self._branch_1_layers = [
tf.keras.layers.Conv3D(**branch_params[1][0]),
self._norm(**branch_params[1][1]),
tf.keras.layers.Activation('relu', **branch_params[1][2]),
self._parameterized_conv_layer(**branch_params[1][3]),
]
self._branch_2_layers = [
tf.keras.layers.Conv3D(**branch_params[2][0]),
self._norm(**branch_params[2][1]),
tf.keras.layers.Activation('relu', **branch_params[2][2]),
self._parameterized_conv_layer(**branch_params[2][3])
]
if self._swap_pool_and_1x1x1:
self._branch_3_layers = [
tf.keras.layers.Conv3D(**branch_params[3][0]),
self._norm(**branch_params[3][1]),
tf.keras.layers.Activation('relu', **branch_params[3][2]),
tf.keras.layers.MaxPool3D(**branch_params[3][3]),
]
else:
self._branch_3_layers = [
tf.keras.layers.MaxPool3D(**branch_params[3][3]),
tf.keras.layers.Conv3D(**branch_params[3][0]),
self._norm(**branch_params[3][1]),
tf.keras.layers.Activation('relu', **branch_params[3][2]),
]
if self._use_self_gating_on_branch:
self._branch_0_layers.append(
nn_blocks_3d.SelfGating(**branch_params[0][-1]))
self._branch_1_layers.append(
nn_blocks_3d.SelfGating(**branch_params[1][-1]))
self._branch_2_layers.append(
nn_blocks_3d.SelfGating(**branch_params[2][-1]))
self._branch_3_layers.append(
nn_blocks_3d.SelfGating(**branch_params[3][-1]))
if self._use_self_gating_on_cell:
self.cell_self_gating = nn_blocks_3d.SelfGating(**branch_params[4][0])
super(InceptionV1CellLayer, self).build(input_shape)
def call(self, inputs):
x = inputs
for layer in self._branch_0_layers:
x = layer(x)
branch_0 = x
x = inputs
for layer in self._branch_1_layers:
x = layer(x)
branch_1 = x
x = inputs
for layer in self._branch_2_layers:
x = layer(x)
branch_2 = x
x = inputs
for layer in self._branch_3_layers:
x = layer(x)
branch_3 = x
out_tensor = tf.concat([branch_0, branch_1, branch_2, branch_3],
axis=self._channel_axis)
if self._use_self_gating_on_cell:
out_tensor = self.cell_self_gating(out_tensor)
return out_tensor
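# Minimal sketch with made-up input sizes: a 'Mixed_3b'-style cell whose
# branch filters are [[64], [96, 128], [16, 32], [32]] concatenates its four
# branches into 64 + 128 + 32 + 32 = 256 output channels while keeping the
# spatio-temporal resolution (all strides inside the cell are 1).
def _inception_cell_shape_example():
  cell = InceptionV1CellLayer(
      branch_filters=[[64], [96, 128], [16, 32], [32]], conv_type='3d')
  features = cell(tf.zeros([1, 4, 8, 8, 192]))
  assert features.shape.as_list() == [1, 4, 8, 8, 256]
  return features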
| 20,360 | 36.916201 | 84 | py |
models | models-master/official/projects/s3d/modeling/net_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
from absl.testing import parameterized
import tensorflow as tf
from official.projects.s3d.modeling import net_utils
class Tf2NetUtilsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('3d', [2, 1, 1], [5, 16, 28, 28, 256]),
('3d', [2, 2, 2], [5, 16, 14, 14, 256]),
('3d', [1, 2, 1], [5, 32, 14, 28, 256]),
('2d', [2, 2, 2], [5, 32, 14, 14, 256]),
('2d', [1, 1, 2], [5, 32, 28, 14, 256]),
('1+2d', [2, 2, 2], [5, 16, 14, 14, 256]),
('1+2d', [2, 1, 1], [5, 16, 28, 28, 256]),
('1+2d', [1, 1, 1], [5, 32, 28, 28, 256]),
('1+2d', [1, 1, 2], [5, 32, 28, 14, 256]),
('2+1d', [2, 2, 2], [5, 16, 14, 14, 256]),
('2+1d', [1, 1, 1], [5, 32, 28, 28, 256]),
('2+1d', [2, 1, 2], [5, 16, 28, 14, 256]),
('1+1+1d', [2, 2, 2], [5, 16, 14, 14, 256]),
('1+1+1d', [1, 1, 1], [5, 32, 28, 28, 256]),
('1+1+1d', [2, 1, 2], [5, 16, 28, 14, 256]),
)
def test_parameterized_conv_layer_creation(self, conv_type, strides,
expected_shape):
batch_size = 5
temporal_size = 32
spatial_size = 28
channels = 128
kernel_size = 3
filters = 256
rates = [1, 1, 1]
name = 'ParameterizedConv'
inputs = tf.keras.Input(
shape=(temporal_size, spatial_size, spatial_size, channels),
batch_size=batch_size)
parameterized_conv_layer = net_utils.ParameterizedConvLayer(
conv_type, kernel_size, filters, strides, rates, name=name)
features = parameterized_conv_layer(inputs)
logging.info(features.shape.as_list())
logging.info([w.name for w in parameterized_conv_layer.weights])
self.assertAllEqual(features.shape.as_list(), expected_shape)
if __name__ == '__main__':
tf.test.main()
| 2,429 | 34.217391 | 74 | py |
models | models-master/official/projects/s3d/modeling/net_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commonly used TensorFlow 2 network blocks."""
from typing import Any, Text, Sequence, Union
import tensorflow as tf
from official.modeling import tf_utils
WEIGHT_INITIALIZER = {
'Xavier': tf.keras.initializers.GlorotUniform,
'Gaussian': lambda: tf.keras.initializers.RandomNormal(stddev=0.01),
}
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
def make_set_from_start_endpoint(start_endpoint: Text,
endpoints: Sequence[Text]):
"""Makes a subset of endpoints from the given starting position."""
if start_endpoint not in endpoints:
return set()
start_index = endpoints.index(start_endpoint)
return set(endpoints[start_index:])
def apply_depth_multiplier(d: Union[int, Sequence[Any]],
depth_multiplier: float):
"""Applies depth_multiplier recursively to ints."""
if isinstance(d, int):
return int(d * depth_multiplier)
else:
return [apply_depth_multiplier(x, depth_multiplier) for x in d]
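# Quick illustration (values are made up): depth_multiplier scales every
# integer in a nested channel spec, which is how callers thin or widen
# per-branch filter lists, and make_set_from_start_endpoint keeps the suffix
# of an endpoint list starting from a given name.
def _net_utils_helpers_example():
  assert apply_depth_multiplier([[64], [96, 128]], 0.5) == [[32], [48, 64]]
  assert make_set_from_start_endpoint('b', ['a', 'b', 'c']) == {'b', 'c'}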
class ParameterizedConvLayer(tf.keras.layers.Layer):
"""Convolution layer based on the input conv_type."""
def __init__(
self,
conv_type: Text,
kernel_size: int,
filters: int,
strides: Sequence[int],
rates: Sequence[int],
use_sync_bn: bool = False,
norm_momentum: float = 0.999,
norm_epsilon: float = 0.001,
temporal_conv_initializer: Union[
Text, initializers.Initializer] = 'glorot_uniform',
kernel_initializer: Union[Text,
initializers.Initializer] = 'truncated_normal',
kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2',
**kwargs):
super(ParameterizedConvLayer, self).__init__(**kwargs)
self._conv_type = conv_type
self._kernel_size = kernel_size
self._filters = filters
self._strides = strides
self._rates = rates
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._channel_axis = -1
else:
self._channel_axis = 1
self._temporal_conv_initializer = temporal_conv_initializer
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
def _build_conv_layer_params(self, input_shape):
"""Builds params for conv layers."""
conv_layer_params = []
if self._conv_type == '3d':
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[self._kernel_size] * 3,
strides=self._strides,
dilation_rate=self._rates,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
elif self._conv_type == '2d':
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, self._kernel_size, self._kernel_size],
strides=[1, self._strides[1], self._strides[2]],
dilation_rate=[1, self._rates[1], self._rates[2]],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
elif self._conv_type == '1+2d':
channels_in = input_shape[self._channel_axis]
conv_layer_params.append(
dict(
filters=channels_in,
kernel_size=[self._kernel_size, 1, 1],
strides=[self._strides[0], 1, 1],
dilation_rate=[self._rates[0], 1, 1],
kernel_initializer=tf_utils.clone_initializer(
self._temporal_conv_initializer),
))
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, self._kernel_size, self._kernel_size],
strides=[1, self._strides[1], self._strides[2]],
dilation_rate=[1, self._rates[1], self._rates[2]],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
elif self._conv_type == '2+1d':
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, self._kernel_size, self._kernel_size],
strides=[1, self._strides[1], self._strides[2]],
dilation_rate=[1, self._rates[1], self._rates[2]],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[self._kernel_size, 1, 1],
strides=[self._strides[0], 1, 1],
dilation_rate=[self._rates[0], 1, 1],
kernel_initializer=tf_utils.clone_initializer(
self._temporal_conv_initializer),
))
elif self._conv_type == '1+1+1d':
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, 1, self._kernel_size],
strides=[1, 1, self._strides[2]],
dilation_rate=[1, 1, self._rates[2]],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[1, self._kernel_size, 1],
strides=[1, self._strides[1], 1],
dilation_rate=[1, self._rates[1], 1],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
conv_layer_params.append(
dict(
filters=self._filters,
kernel_size=[self._kernel_size, 1, 1],
strides=[self._strides[0], 1, 1],
dilation_rate=[self._rates[0], 1, 1],
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
))
else:
raise ValueError('Unsupported conv_type: {}'.format(self._conv_type))
return conv_layer_params
def _build_norm_layer_params(self, conv_param):
"""Builds params for the norm layer after one conv layer."""
return dict(
axis=self._channel_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
scale=False,
gamma_initializer='ones')
def _build_activation_layer_params(self, conv_param):
"""Builds params for the activation layer after one conv layer."""
return {}
def _append_conv_layer(self, param):
"""Appends conv, normalization and activation layers."""
self._parameterized_conv_layers.append(
tf.keras.layers.Conv3D(
padding='same',
use_bias=False,
kernel_regularizer=self._kernel_regularizer,
**param,
))
norm_layer_params = self._build_norm_layer_params(param)
self._parameterized_conv_layers.append(self._norm(**norm_layer_params))
relu_layer_params = self._build_activation_layer_params(param)
self._parameterized_conv_layers.append(
tf.keras.layers.Activation('relu', **relu_layer_params))
def build(self, input_shape):
self._parameterized_conv_layers = []
for conv_layer_param in self._build_conv_layer_params(input_shape):
self._append_conv_layer(conv_layer_param)
super(ParameterizedConvLayer, self).build(input_shape)
def call(self, inputs):
x = inputs
for layer in self._parameterized_conv_layers:
x = layer(x)
return x
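# Shape sketch (sizes are assumptions): a '2+1d' layer factorizes the 3x3x3
# convolution into a 1x3x3 spatial convolution followed by a 3x1x1 temporal
# convolution, so spatial strides of 2 halve height/width while the temporal
# axis is preserved.
def _parameterized_conv_example():
  layer = ParameterizedConvLayer(
      conv_type='2+1d', kernel_size=3, filters=8,
      strides=[1, 2, 2], rates=[1, 1, 1])
  features = layer(tf.zeros([2, 4, 16, 16, 3]))
  assert features.shape.as_list() == [2, 4, 8, 8, 8]
  return features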
| 8,323 | 36.495495 | 79 | py |
models | models-master/official/projects/pointpillars/utils/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utility functions for pointpillars."""
import collections
from typing import Any, List, Mapping, Tuple
import numpy as np
import tensorflow as tf
CLASSES = {'vehicle': 1, 'pedestrian': 2, 'cyclist': 3}
def assert_shape(x: np.ndarray, shape: List[int]):
if tuple(x.shape) != tuple(shape):
raise ValueError('Shape of array should be {}, but {} found'.format(
shape, x.shape))
def assert_channels_last():
if tf.keras.backend.image_data_format() != 'channels_last':
raise ValueError('Only "channels_last" mode is supported')
def pad_or_trim_to_shape(x: np.ndarray, shape: List[int]) -> np.ndarray:
"""Pad and trim x to the specified shape, x should have same rank as shape.
Args:
x: An np array.
shape: A list of int indicating a array shape.
Returns:
y: An np array with padded/trimmed shape.
"""
shape = np.array(shape)
# Try to pad from end
pad_end = shape - np.minimum(x.shape, shape)
pad_begin = np.zeros_like(pad_end)
padder = np.stack([pad_begin, pad_end], axis=1)
x = np.pad(x, padder)
# Try to trim from end.
slice_end = shape
slice_begin = np.zeros_like(slice_end)
slicer = tuple(map(slice, slice_begin, slice_end))
y = x[slicer].reshape(shape)
return y
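# Worked example (shapes chosen purely for illustration): padding happens at
# the end of each axis first, then trimming, so a (2, 3) array fitted to
# (3, 2) gains a zero row and loses its last column.
def _pad_or_trim_example():
  x = np.arange(6).reshape(2, 3)
  y = pad_or_trim_to_shape(x, [3, 2])
  assert y.shape == (3, 2)
  # y == [[0, 1], [3, 4], [0, 0]]
  return y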
def clip_boxes(boxes: np.ndarray, image_height: int,
image_width: int) -> np.ndarray:
"""Clip boxes to image boundaries.
Args:
    boxes: An np array of boxes, [y0, x0, y1, x1].
image_height: An int of image height.
image_width: An int of image width.
Returns:
    clipped_boxes: An np array of boxes, [y0, x0, y1, x1].
"""
max_length = [image_height, image_width, image_height, image_width]
clipped_boxes = np.maximum(np.minimum(boxes, max_length), 0.0)
return clipped_boxes
def get_vehicle_xy(image_height: int, image_width: int,
x_range: Tuple[float, float],
y_range: Tuple[float, float]) -> Tuple[int, int]:
"""Get vehicle x/y in image coordinate.
Args:
    image_height: An int of image height.
    image_width: An int of image width.
x_range: A float tuple of (-x, +x).
    y_range: A float tuple of (-y, +y).
Returns:
vehicle_xy: An int tuple of (col, row).
"""
vehicle_col = (image_width * (-x_range[0] / (-x_range[0] + x_range[1])))
vehicle_row = (image_height * (-y_range[0] / (-y_range[0] + y_range[1])))
vehicle_xy = (int(vehicle_col), int(vehicle_row))
return vehicle_xy
def frame_to_image_coord(frame_xy: np.ndarray, vehicle_xy: Tuple[int, int],
one_over_resolution: float) -> np.ndarray:
"""Convert float frame (x, y) to int image (x, y).
Args:
frame_xy: An np array of frame xy coordinates.
vehicle_xy: An int tuple of (vehicle_x, vehicle_y) in image.
one_over_resolution: A float of one over image resolution.
Returns:
image_xy: An np array of image xy cooridnates.
"""
image_xy = np.floor(frame_xy * one_over_resolution).astype(np.int32)
image_xy[..., 0] += vehicle_xy[0]
image_xy[..., 1] = vehicle_xy[1] - 1 - image_xy[..., 1]
return image_xy
def image_to_frame_coord(image_xy: np.ndarray, vehicle_xy: Tuple[int, int],
resolution: float) -> np.ndarray:
"""Convert int image (x, y) to float frame (x, y).
Args:
image_xy: An np array of image xy cooridnates.
vehicle_xy: An int tuple of (vehicle_x, vehicle_y) in image.
resolution: A float of image resolution.
Returns:
frame_xy: An np array of frame xy coordinates.
"""
frame_xy = image_xy.astype(np.float32)
frame_xy[..., 0] = (frame_xy[..., 0] - vehicle_xy[0]) * resolution
frame_xy[..., 1] = (vehicle_xy[1] - 1 - frame_xy[..., 1]) * resolution
return frame_xy
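# Round-trip sketch (the vehicle position and resolution are assumed values):
# frame_to_image_coord floors onto the grid, so converting back recovers the
# original point only up to one cell of the given resolution.
def _coord_round_trip_example():
  vehicle_xy = (100, 100)
  resolution = 0.2
  frame_xy = np.array([[3.0, -1.4]], dtype=np.float32)
  image_xy = frame_to_image_coord(frame_xy, vehicle_xy, 1.0 / resolution)
  # x: floor(3.0 / 0.2) + 100 = 115, y: 100 - 1 - floor(-1.4 / 0.2) = 106.
  assert image_xy.tolist() == [[115, 106]]
  recovered = image_to_frame_coord(image_xy, vehicle_xy, resolution)
  assert np.all(np.abs(recovered - frame_xy) < resolution)
  return recovered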
def frame_to_image_boxes(frame_boxes: Any, vehicle_xy: Tuple[int, int],
one_over_resolution: float) -> Any:
"""Convert boxes from frame coordinate to image coordinate.
Args:
frame_boxes: A [N, 4] array or tensor, [center_x, center_y, length, width]
in frame coordinate.
vehicle_xy: An int tuple of (vehicle_x, vehicle_y) in image.
one_over_resolution: A float number, 1.0 / resolution.
Returns:
image_boxes: A [N, 4] array or tensor, [ymin, xmin, ymax, xmax] in image
coordinate.
"""
center_x = frame_boxes[..., 0]
center_y = frame_boxes[..., 1]
box_length = frame_boxes[..., 2]
box_width = frame_boxes[..., 3]
image_box_length = box_length * one_over_resolution
image_box_width = box_width * one_over_resolution
image_box_center_x = (center_x * one_over_resolution + vehicle_xy[0])
image_box_center_y = (vehicle_xy[1] - 1 - center_y * one_over_resolution)
ymin = image_box_center_y - image_box_width * 0.5
xmin = image_box_center_x - image_box_length * 0.5
ymax = image_box_center_y + image_box_width * 0.5
xmax = image_box_center_x + image_box_length * 0.5
image_boxes = np.stack([ymin, xmin, ymax, xmax], axis=-1)
return image_boxes
def image_to_frame_boxes(image_boxes: Any, vehicle_xy: Tuple[int, int],
                         resolution: float) -> Any:
"""Convert boxes from image coordinate to frame coordinate.
Args:
image_boxes: A [N, 4] array or tensor, [ymin, xmin, ymax, xmax] in image
coordinate.
    vehicle_xy: An int tuple of (vehicle_x, vehicle_y) in image.
resolution: A float number representing pillar grid resolution.
Returns:
frame_boxes: A [N, 4] array or tensor, [center_x, center_y, length, width]
in frame coordinate.
"""
ymin = image_boxes[..., 0]
xmin = image_boxes[..., 1]
ymax = image_boxes[..., 2]
xmax = image_boxes[..., 3]
image_box_length = xmax - xmin
image_box_width = ymax - ymin
image_box_center_x = xmin + image_box_length * 0.5
image_box_center_y = ymin + image_box_width * 0.5
center_x = (image_box_center_x - vehicle_xy[0]) * resolution
center_y = (vehicle_xy[1] - 1 - image_box_center_y) * resolution
box_length = image_box_length * resolution
box_width = image_box_width * resolution
frame_boxes = np.stack([center_x, center_y, box_length, box_width], axis=-1)
return frame_boxes
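# Round-trip sketch (all numbers are made up): unlike the point helpers
# above, box conversion involves no flooring, so it is exact in both
# directions.
def _box_round_trip_example():
  vehicle_xy = (100, 100)
  resolution = 0.2
  frame_boxes = np.array([[3.0, -1.4, 4.0, 2.0]], dtype=np.float32)
  image_boxes = frame_to_image_boxes(frame_boxes, vehicle_xy, 1.0 / resolution)
  assert np.allclose(image_boxes, [[101.0, 105.0, 111.0, 125.0]])
  recovered = image_to_frame_boxes(image_boxes, vehicle_xy, resolution)
  assert np.allclose(recovered, frame_boxes)
  return recovered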
def clip_heading(heading: Any) -> Any:
"""Clip heading to the range [-pi, pi]."""
heading = tf.nest.map_structure(lambda x: np.pi * tf.tanh(x), heading)
return heading
def wrap_angle_rad(angles_rad: Any,
min_val: float = -np.pi,
max_val: float = np.pi) -> Any:
"""Wrap the value of `angles_rad` to the range [min_val, max_val]."""
max_min_diff = max_val - min_val
return min_val + tf.math.floormod(angles_rad + max_val, max_min_diff)
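# Quick check (inputs are arbitrary): values outside [-pi, pi] wrap around,
# e.g. 4.0 rad maps to 4.0 - 2*pi and -4.0 rad maps to -4.0 + 2*pi.
def _wrap_angle_example():
  wrapped = wrap_angle_rad(tf.constant([4.0, -4.0]))
  expected = [4.0 - 2.0 * np.pi, -4.0 + 2.0 * np.pi]
  assert np.allclose(wrapped.numpy(), expected)
  return wrapped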
def generate_anchors(min_level: int, max_level: int, image_size: Tuple[int],
anchor_sizes: List[Tuple[float]]) -> Mapping[str, Any]:
"""Generate anchor boxes without scale to level stride.
Args:
min_level: integer number of minimum level of the output.
max_level: integer number of maximum level of the output.
image_size: a tuple (image_height, image_width).
anchor_sizes: a list of tuples, each tuple is (anchor_length, anchor_width).
Returns:
boxes_all: a {level: boxes_i} dict, each boxes_i is a [h_i, w_i, 4] tensor
for boxes at level i, each box is (ymin, xmin, ymax, xmax).
Notations:
k: length of anchor_sizes, the number of indicated anchors.
w: the image width at a specific level.
    h: the image height at a specific level.
"""
# Prepare k anchors' lengths and widths
k = len(anchor_sizes)
# (k,)
anchor_lengths = []
anchor_widths = []
for anchor_size in anchor_sizes:
anchor_lengths.append(anchor_size[0])
anchor_widths.append(anchor_size[1])
anchor_lengths = tf.convert_to_tensor(anchor_lengths, dtype=tf.float32)
anchor_widths = tf.convert_to_tensor(anchor_widths, dtype=tf.float32)
# (1, 1, k)
half_anchor_lengths = tf.reshape(0.5 * anchor_lengths, [1, 1, k])
half_anchor_widths = tf.reshape(0.5 * anchor_widths, [1, 1, k])
boxes_all = collections.OrderedDict()
for level in range(min_level, max_level + 1):
# Generate anchor boxes for this level with stride.
boxes_i = []
stride = 2 ** level
# (w,)
x = tf.range(stride / 2, image_size[1], stride, dtype=tf.float32)
# (h,)
y = tf.range(stride / 2, image_size[0], stride, dtype=tf.float32)
# (h, w)
xv, yv = tf.meshgrid(x, y)
# (h, w, 1)
xv = tf.expand_dims(xv, axis=-1)
yv = tf.expand_dims(yv, axis=-1)
# (h, w, k, 1)
y_min = tf.expand_dims(yv - half_anchor_widths, axis=-1)
y_max = tf.expand_dims(yv + half_anchor_widths, axis=-1)
x_min = tf.expand_dims(xv - half_anchor_lengths, axis=-1)
x_max = tf.expand_dims(xv + half_anchor_lengths, axis=-1)
# (h, w, k, 4)
boxes_i = tf.concat([y_min, x_min, y_max, x_max], axis=-1)
# [h, w, k * 4]
shape = boxes_i.shape.as_list()
boxes_i = tf.reshape(boxes_i, [shape[0], shape[1], shape[2] * shape[3]])
boxes_all[str(level)] = boxes_i
return boxes_all
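# Shape sketch (a single made-up anchor size): on a 32x32 image, level 1 has
# stride 2 and level 2 stride 4, so the per-level grids are 16x16 and 8x8,
# each cell carrying k * 4 = 4 box coordinates.
def _generate_anchors_example():
  boxes = generate_anchors(
      min_level=1, max_level=2, image_size=(32, 32),
      anchor_sizes=[(2.0, 2.0)])
  assert boxes['1'].shape.as_list() == [16, 16, 4]
  assert boxes['2'].shape.as_list() == [8, 8, 4]
  return boxes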
| 9,549 | 33.981685 | 80 | py |
models | models-master/official/projects/pointpillars/utils/model_exporter.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars model export utility function for serving/inference."""
import os
from typing import Any, Dict, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
from official.core import train_utils
from official.projects.pointpillars.modeling import factory
from official.projects.pointpillars.utils import utils
def export_inference_graph(
batch_size: int,
params: cfg.ExperimentConfig,
checkpoint_path: str,
export_dir: str,
export_module: Optional[export_base.ExportModule] = None,
):
"""Exports inference graph for PointPillars model.
Saved model is stored at export_dir/saved_model, checkpoint is saved
at export_dir/checkpoint, and params is saved at export_dir/params.yaml.
Args:
batch_size: An int number specifying batch size for inference.
Saved PointPillars model doesn't support dynamic batch size.
Only three batch sizes are acceptable:
train batch size per replica, evaluation batch size per replica, and 1.
params: An instance of cfg.ExperimentConfig.
checkpoint_path: Trained checkpoint path or directory.
export_dir: Export directory path.
export_module: Optional export module to be used instead of using params
to create one.
"""
logging.info('Exporting model.')
if not export_module:
export_module = PointPillarsModule(
params=params,
batch_size=batch_size)
# Disable custom_gradients to make trt-converter be able to work.
# Consider to use tf.keras.models.save_model/load_model APIs to fix
# the custom gradients saving problem.
# https://github.com/tensorflow/tensorflow/issues/40166
save_options = tf.saved_model.SaveOptions(experimental_custom_gradients=False)
export_base.export(
export_module,
function_keys=['tensors'],
export_savedmodel_dir=export_dir,
checkpoint_path=checkpoint_path,
timestamped=False,
save_options=save_options)
logging.info('Saving checkpoint.')
ckpt = tf.train.Checkpoint(model=export_module.model)
ckpt.save(os.path.join(export_dir, 'checkpoint', 'ckpt'))
logging.info('Saving experiment params.')
train_utils.serialize_config(params, export_dir)
def load_model_predict_fn(export_dir: str) -> Any:
"""Load PointPillars model from saved directory.
Args:
export_dir: Export directory path.
Returns:
    predict_fn: A function that can be run for model inference.
"""
logging.info('Loading model from %s.', export_dir)
model = tf.saved_model.load(export_dir)
predict_fn = model.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
return predict_fn
def random_input_tensors(
batch_size: int,
params: cfg.ExperimentConfig) -> Tuple[tf.Tensor, tf.Tensor]:
"""Create random input tensors for PointPillars model.
Args:
    batch_size: An int number specifying the batch size for inference.
params: An instance of cfg.ExperimentConfig.
Returns:
pillars: A tensor for input.
indices: A tensor for input.
"""
model_config = params.task.model
pillars_config = model_config.pillars
pillars = tf.random.uniform(
shape=[batch_size,
pillars_config.num_pillars,
pillars_config.num_points_per_pillar,
pillars_config.num_features_per_point],
minval=0.0,
maxval=1.0,
dtype=tf.float32,
name='pillars')
indices = tf.random.uniform(
shape=[batch_size, pillars_config.num_pillars, 2],
minval=0,
maxval=model_config.image.height,
dtype=tf.int32,
name='indices')
return pillars, indices
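# End-to-end sketch, not part of the original library: `params` and
# `checkpoint_path` are assumed to come from a finished training run, and the
# SavedModel location follows the docstrings above.
def _export_and_predict_example(params, checkpoint_path, export_dir):
  export_inference_graph(
      batch_size=1, params=params, checkpoint_path=checkpoint_path,
      export_dir=export_dir)
  # Adjust the path below if the SavedModel is written to a subdirectory of
  # export_dir in your setup.
  predict_fn = load_model_predict_fn(export_dir)
  pillars, indices = random_input_tensors(batch_size=1, params=params)
  return predict_fn(pillars=pillars, indices=indices)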
class PointPillarsModule(export_base.ExportModule):
"""PointPillars model export module."""
def __init__(self, params: cfg.ExperimentConfig, batch_size: int):
"""Initialize the module.
Args:
params: Experiment params.
batch_size: The batch size of the model input.
"""
self._params = params
self._batch_size = batch_size
self._pillars_spec, self._indices_spec = self._build_input_specs()
model = self._build_model()
super().__init__(params=params, model=model)
def _build_input_specs(
self) -> Tuple[tf.keras.layers.InputSpec, tf.keras.layers.InputSpec]:
pillars_config = self._params.task.model.pillars
pillars_spec = tf.keras.layers.InputSpec(
shape=(self._batch_size,
pillars_config.num_pillars,
pillars_config.num_points_per_pillar,
pillars_config.num_features_per_point),
dtype='float32')
indices_spec = tf.keras.layers.InputSpec(
shape=(self._batch_size,
pillars_config.num_pillars,
2),
dtype='int32')
return pillars_spec, indices_spec
def _build_model(self) -> tf.keras.Model:
logging.info('Building PointPillars model.')
input_specs = {
'pillars': self._pillars_spec, 'indices': self._indices_spec
}
model = factory.build_pointpillars(
input_specs=input_specs,
model_config=self._params.task.model,
# Train and eval batch size will be ignored for inference.
train_batch_size=1,
eval_batch_size=1)
return model
def serve(self, pillars: tf.Tensor, indices: tf.Tensor) -> Mapping[str, Any]:
"""Run model inference.
Args:
pillars: A float32 tensor.
indices: An int32 tensor.
Returns:
outputs: A dict of detected results.
"""
# Build image_shape and anchor_boxes on CPU.
with tf.device('cpu'):
model_config = self._params.task.model
image_size = [model_config.image.height,
model_config.image.width]
image_shape = tf.tile(tf.expand_dims(
image_size, axis=0), [self._batch_size, 1])
anchor_sizes = [(a.length, a.width) for a in model_config.anchors]
anchor_boxes = utils.generate_anchors(
min_level=model_config.min_level,
max_level=model_config.max_level,
image_size=image_size,
anchor_sizes=anchor_sizes)
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0),
[self._batch_size, 1, 1, 1])
# Run model.
detections = self.model.call(
pillars=pillars,
indices=indices,
image_shape=image_shape,
anchor_boxes=anchor_boxes,
training=None
)
outputs = {
'detection_boxes': detections['boxes'],
'detection_scores': detections['scores'],
'detection_classes': detections['classes'],
'num_detections': detections['num_detections']
}
# NOTE: Need to flatten attributes, because outputs for functions used as
# signatures must be a single Tensor, a sequence of Tensors, or a dictionary
# from string to Tensor.
outputs.update(detections['attributes'])
return outputs
@tf.function
def inference_from_tensors(
self, pillars: tf.Tensor, indices: tf.Tensor) -> Mapping[str, Any]:
return self.serve(pillars, indices)
def get_inference_signatures(
self, function_keys: Dict[str, str]) -> Mapping[str, Any]:
"""Gets defined function signatures.
Args:
      function_keys: A dictionary with keys as the input types to create
        signatures for and values as the corresponding signature keys in the
        returned mapping.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for input_type, name in function_keys.items():
if input_type == 'tensors':
pillars = tf.TensorSpec(
shape=self._pillars_spec.shape,
dtype=self._pillars_spec.dtype,
name='pillars'
)
indices = tf.TensorSpec(
shape=self._indices_spec.shape,
dtype=self._indices_spec.dtype,
name='indices'
)
signatures[
name] = self.inference_from_tensors.get_concrete_function(
pillars, indices)
else:
raise ValueError('Unrecognized input_type: {}'.format(input_type))
return signatures
| 8,772 | 33.269531 | 80 | py |
models | models-master/official/projects/pointpillars/modeling/backbones_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backbones."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import backbones
class BackboneTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([1, 32, 32, 3], 1, 1),
([2, 32, 64, 4], 1, 3),
)
def test_network_creation(self, input_shape, min_level, max_level):
batch_size = input_shape[0]
inputs = tf.keras.Input(shape=input_shape[1:], batch_size=batch_size)
backbone = backbones.Backbone(input_shape, min_level, max_level)
endpoints = backbone(inputs)
_, h, w, c = input_shape
for level in range(min_level, max_level + 1):
self.assertAllEqual([
batch_size,
int(h / 2**level),
int(w / 2**level),
int(c * 2**(level - 1))
], endpoints[str(level)].shape.as_list())
def test_serialization(self):
kwargs = dict(
input_specs=[1, 64, 64, 3],
min_level=2,
max_level=4,
num_convs=3,
kernel_regularizer=None,
)
net = backbones.Backbone(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = backbones.Backbone.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
_ = new_net.to_json()
if __name__ == '__main__':
tf.test.main()
| 1,982 | 30.983871 | 74 | py |
models | models-master/official/projects/pointpillars/modeling/decoders_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decoders."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import decoders
class DecoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
({'1': [1, 32, 32, 3]},
1, 1),
({'1': [1, 32, 32, 3],
'2': [1, 16, 16, 6]},
1, 2)
)
def test_network_creation(self, input_shape, min_level, max_level):
"""Test if network could be created and infer with expected shapes."""
inputs = {}
for k, v in input_shape.items():
if k == str(min_level):
batch_size, height, width, _ = v
inputs[k] = tf.keras.Input(shape=v[1:], batch_size=batch_size)
decoder = decoders.Decoder(input_shape)
endpoints = decoder(inputs)
self.assertLen(endpoints, 1)
self.assertEqual(list(endpoints.keys())[0], str(min_level))
self.assertIn(str(min_level), endpoints)
expected_channels = input_shape[str(min_level)][-1] * 2 * (
max_level - min_level + 1)
self.assertAllEqual(endpoints[str(min_level)].shape.as_list(),
[batch_size, height, width, expected_channels])
def test_serialization(self):
kwargs = dict(
input_specs={'1': [1, 64, 64, 3]},
kernel_regularizer=None,
)
net = decoders.Decoder(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = decoders.Decoder.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
_ = new_net.to_json()
if __name__ == '__main__':
tf.test.main()
| 2,224 | 32.208955 | 74 | py |
models | models-master/official/projects/pointpillars/modeling/featurizers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Featurizer layers for Pointpillars."""
from typing import Any, List, Mapping, Optional, Tuple
import numpy as np
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class Featurizer(tf.keras.layers.Layer):
"""The featurizer to convert pillars to a BEV pseudo image.
The implementation is from the network architecture of PointPillars
  (https://arxiv.org/pdf/1812.05784.pdf). It extracts features from pillar
  tensors, then scatters them back to a bird-eye-view (BEV) image using
  indices.
Notations:
B: batch size
H: height of the BEV image
W: width of the BEV image
P: number of pillars in an example
N: number of points in a pillar
D: number of features in a point
C: channels of the BEV image
"""
def __init__(
self,
image_size: Tuple[int, int],
pillars_size: Tuple[int, int, int],
train_batch_size: int,
eval_batch_size: int,
num_blocks: int,
num_channels: int,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initialize the featurizer.
Args:
image_size: A [int, int] tuple to define the [H, W] of BEV image.
pillars_size: A [int, int, int] tuple to define the [P, N, D] of pillars.
train_batch_size: An `int` training batch size per replica.
eval_batch_size: An `int` evaluation batch size per replica.
num_blocks: An `int` number of blocks for extracting features.
num_channels: An `int` number channels of the BEV image.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
block layers. Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
super(Featurizer, self).__init__(**kwargs)
self._config_dict = {
'image_size': image_size,
'pillars_size': pillars_size,
'train_batch_size': train_batch_size,
'eval_batch_size': eval_batch_size,
'num_blocks': num_blocks,
'num_channels': num_channels,
'kernel_regularizer': kernel_regularizer,
}
self._image_shape = [image_size[0], image_size[1], num_channels]
utils.assert_channels_last()
def build(self, input_specs: List[tf.TensorShape]):
"""Creates variables for the featurizer."""
self._blocks = []
for _ in range(self._config_dict['num_blocks']):
self._blocks.append(
layers.ConvBlock(
filters=self._config_dict['num_channels'],
kernel_size=1,
strides=1,
kernel_regularizer=self._config_dict['kernel_regularizer']))
# These batch_dims are [B, P, 1] tensors that could be created before
# call(). They will be used for tf.scatter_nd to convert pillars to BEV
# images. Because tf.scatter_nd requires a concrete batch size, we need to
# prepare all possibilities of batch size for train, eval and test mode.
self._train_batch_dims = self._get_batch_dims(
self._config_dict['train_batch_size'])
self._eval_batch_dims = self._get_batch_dims(
self._config_dict['eval_batch_size'])
self._test_batch_dims = self._get_batch_dims(1)
super(Featurizer, self).build(input_specs)
def _get_batch_dims(self, batch_size: int) -> tf.Tensor:
p = self._config_dict['pillars_size'][0]
batch_dims = np.indices([batch_size, p])[0]
batch_dims = tf.convert_to_tensor(batch_dims, dtype=tf.int32)
batch_dims = tf.expand_dims(batch_dims, axis=-1)
return batch_dims
def _get_batch_size_and_dims(self,
training: bool = None) -> Tuple[int, tf.Tensor]:
# We use training as a ternary indicator, None for test mode.
# Test mode will be used for saving model and model inference.
if training is None:
batch_size = 1
batch_dims = self._test_batch_dims
else:
if training:
batch_size = self._config_dict['train_batch_size']
batch_dims = self._train_batch_dims
else:
batch_size = self._config_dict['eval_batch_size']
batch_dims = self._eval_batch_dims
return batch_size, batch_dims
def call(self,
pillars: tf.Tensor,
indices: tf.Tensor,
training: bool = None) -> tf.Tensor:
"""Forward pass of the featurizer."""
# Add batch index to pillar indices.
# (B, P, 1)
batch_size, batch_dims = self._get_batch_size_and_dims(training)
# (B, P, 3)
batch_indices = tf.concat([batch_dims, indices], axis=-1)
# Extract features from pillars.
# (B, P, N, D)
x = pillars
# (B, P, N, C)
for block in self._blocks:
x = block(x)
# (B, P, C)
x = tf.reduce_max(x, axis=2, keepdims=False)
# Scatter pillars back to form a BEV image.
# (B, H, W, C)
image = tf.scatter_nd(
batch_indices,
x,
shape=[batch_size] + self._image_shape)
self._output_specs = image.get_shape()
return image
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
@property
def output_specs(self) -> tf.TensorShape:
return self._output_specs
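# Minimal sketch with made-up sizes: five pillars of three points with four
# features each are projected to 16 channels and scattered into an 8x8 BEV
# image; training=True selects the train-batch scatter indices built in
# build().
def _featurizer_example():
  featurizer = Featurizer(
      image_size=(8, 8), pillars_size=(5, 3, 4), train_batch_size=2,
      eval_batch_size=1, num_blocks=1, num_channels=16)
  pillars = tf.zeros([2, 5, 3, 4], tf.float32)
  indices = tf.zeros([2, 5, 2], tf.int32)
  image = featurizer(pillars, indices, training=True)
  assert image.shape.as_list() == [2, 8, 8, 16]
  return image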
| 5,936 | 34.550898 | 79 | py |
models | models-master/official/projects/pointpillars/modeling/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.modeling import factory
from official.projects.pointpillars.modeling import models
class PointPillarsBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(4, 4),
(1, 2),
(2, 1),
)
def test_builder(self, train_batch_size, eval_batch_size):
model_config = cfg.PointPillarsModel()
model_config.anchors = [cfg.Anchor(length=1.0, width=1.0)]
pillars_config = model_config.pillars
input_specs = {
'pillars':
tf.keras.layers.InputSpec(
shape=(None, pillars_config.num_pillars,
pillars_config.num_points_per_pillar,
pillars_config.num_features_per_point)),
'indices':
tf.keras.layers.InputSpec(
shape=(None, pillars_config.num_pillars, 2), dtype='int32'),
}
model = factory.build_pointpillars(
input_specs, model_config, train_batch_size, eval_batch_size
)
config = model.get_config()
new_model = models.PointPillarsModel.from_config(config)
_ = new_model.to_json()
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,023 | 33.305085 | 76 | py |
models | models-master/official/projects/pointpillars/modeling/models_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for PointPillars models."""
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.pointpillars.modeling import backbones
from official.projects.pointpillars.modeling import decoders
from official.projects.pointpillars.modeling import featurizers
from official.projects.pointpillars.modeling import heads
from official.projects.pointpillars.modeling import models
from official.projects.pointpillars.utils import utils
from official.vision.modeling.layers import detection_generator
class PointpillarsTest(parameterized.TestCase, tf.test.TestCase):
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_gpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
training=[True, False],
))
def test_all(self, strategy, training):
tf.keras.backend.set_image_data_format('channels_last')
num_classes = 2
h, w, c = 8, 8, 2
n, p, d = 2, 3, 4
image_size = [h, w]
pillars_size = [n, p, d]
indices_size = [n, 2]
attribute_heads = [{'name': 'heading', 'type': 'regression', 'size': 1}]
min_level = 1
max_level = 2
anchor_sizes = [(1.1, 1.1)]
num_anchors_per_location = len(anchor_sizes)
global_batch_size = 4
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
batch_size = int(global_batch_size / num_replicas)
pillars = tf.keras.Input(shape=pillars_size, batch_size=batch_size)
indices = tf.keras.Input(
shape=indices_size, batch_size=batch_size, dtype=tf.int32)
image_shape = tf.tile(tf.expand_dims([h, w], axis=0), [batch_size, 1])
max_num_detections = 4
# Test model creation.
with strategy.scope():
anchor_boxes = utils.generate_anchors(min_level,
max_level,
image_size,
anchor_sizes)
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0), [batch_size, 1, 1, 1])
featurizer = featurizers.Featurizer(
image_size=image_size,
pillars_size=pillars_size,
train_batch_size=batch_size,
eval_batch_size=batch_size,
num_blocks=3,
num_channels=c
)
image = featurizer(pillars, indices, training)
backbone = backbones.Backbone(
input_specs=featurizer.output_specs,
min_level=min_level,
max_level=max_level,
num_convs=3
)
encoded_feats = backbone(image)
decoder = decoders.Decoder(
input_specs=backbone.output_specs)
decoded_feats = decoder(encoded_feats)
head = heads.SSDHead(
num_classes=num_classes,
num_anchors_per_location=num_anchors_per_location,
num_params_per_anchor=4,
attribute_heads=attribute_heads,
min_level=min_level,
max_level=max_level
)
_ = head(decoded_feats)
generator = detection_generator.MultilevelDetectionGenerator(
max_num_detections=max_num_detections,
nms_version='v1',
use_cpu_nms=True,
soft_nms_sigma=0.1)
model = models.PointPillarsModel(
featurizer=featurizer,
backbone=backbone,
decoder=decoder,
head=head,
detection_generator=generator,
min_level=min_level,
max_level=max_level,
image_size=image_size,
anchor_sizes=anchor_sizes)
outputs = model(
pillars,
indices,
image_shape,
anchor_boxes,
training)
# Test training and evaluation.
if training:
cls_outputs = outputs['cls_outputs']
box_outputs = outputs['box_outputs']
for level in range(min_level, max_level+1):
self.assertIn(str(level), cls_outputs)
self.assertIn(str(level), box_outputs)
self.assertAllEqual([
batch_size,
h // 2**level,
w // 2**level,
num_classes * num_anchors_per_location
], cls_outputs[str(level)].shape)
self.assertAllEqual([
batch_size,
h // 2**level,
w // 2**level,
4 * num_anchors_per_location
], box_outputs[str(level)].shape)
att_outputs = outputs['attribute_outputs']
self.assertLen(att_outputs, 1)
self.assertIn('heading', att_outputs)
self.assertAllEqual([
batch_size,
h // 2**level,
w // 2**level,
1 * num_anchors_per_location
], att_outputs['heading'][str(level)].shape)
else:
self.assertIn('boxes', outputs)
self.assertIn('scores', outputs)
self.assertIn('classes', outputs)
self.assertIn('num_detections', outputs)
self.assertAllEqual([
batch_size,
], outputs['num_detections'].shape)
self.assertAllEqual([batch_size, max_num_detections, 4],
outputs['boxes'].shape)
self.assertAllEqual([batch_size, max_num_detections],
outputs['scores'].shape)
self.assertAllEqual([batch_size, max_num_detections],
outputs['classes'].shape)
self.assertIn('attributes', outputs)
self.assertAllEqual(
[batch_size, max_num_detections, 1],
outputs['attributes']['heading'].shape)
# Test serialization.
config = model.get_config()
new_model = models.PointPillarsModel.from_config(config)
_ = new_model.to_json()
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 6,623 | 34.805405 | 76 | py |
models | models-master/official/projects/pointpillars/modeling/layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Featurizer layers for Pointpillars."""
from typing import Any, Mapping, Optional
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class ConvBlock(tf.keras.layers.Layer):
"""A conv2d followed by a norm then an activation."""
def __init__(
self,
filters: int,
kernel_size: int,
strides: int,
use_transpose_conv: bool = False,
kernel_initializer: Optional[tf.keras.initializers.Initializer] = tf.keras
.initializers.VarianceScaling(),
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
use_bias: bool = False,
bias_initializer: Optional[tf.keras.initializers.Initializer] = tf.keras
.initializers.Zeros(),
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
use_sync_bn: bool = True,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
bn_trainable: bool = True,
activation: str = 'relu',
**kwargs):
"""Initialize a block with conv, bn and activation.
Args:
filters: An int number of filters of the conv layer.
kernel_size: An int number of kernel size of the conv layer.
strides: An int number of strides of the conv layer.
      use_transpose_conv: A bool for whether to use transpose conv or not.
kernel_initializer: A tf Initializer object for the conv layer.
kernel_regularizer: A tf Regularizer object for the conv layer.
use_bias: A bool for whether to use bias for the conv layer.
bias_initializer: A tf Initializer object for the conv layer bias.
bias_regularizer: A tf Regularizer object for the conv layer bias.
      use_sync_bn: A bool for whether to use synchronized batch normalization.
norm_momentum: A float of normalization momentum for the moving average.
norm_epsilon: A float added to variance to avoid dividing by zero.
bn_trainable: A bool that indicates whether batch norm layers should be
trainable. Default to True.
activation: A str name of the activation function.
**kwargs: Additional keyword arguments to be passed.
"""
super(ConvBlock, self).__init__(**kwargs)
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._use_transpose_conv = use_transpose_conv
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._use_bias = use_bias
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._bn_trainable = bn_trainable
self._activation = activation
self._activation_fn = tf_utils.get_activation(activation)
utils.assert_channels_last()
def build(self, input_shape: tf.TensorShape):
"""Creates variables for the block."""
# Config conv
if self._use_transpose_conv:
conv_op = tf.keras.layers.Conv2DTranspose
else:
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'filters': self._filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'padding': 'same',
'use_bias': self._use_bias,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
self._conv = conv_op(**conv_kwargs)
# Config norm
if self._use_sync_bn:
bn_op = tf.keras.layers.experimental.SyncBatchNormalization
else:
bn_op = tf.keras.layers.BatchNormalization
bn_kwargs = {
'axis': -1,
'momentum': self._norm_momentum,
'epsilon': self._norm_epsilon,
'trainable': self._bn_trainable,
}
self._norm = bn_op(**bn_kwargs)
def call(self, inputs: tf.Tensor) -> tf.Tensor:
"""Forward pass of the block."""
x = inputs
x = self._conv(x)
x = self._norm(x)
outputs = self._activation_fn(x)
return outputs
def get_config(self) -> Mapping[str, Any]:
config = {
'filters': self._filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'use_transpose_conv': self._use_transpose_conv,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'use_bias': self._use_bias,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'bn_trainable': self._bn_trainable,
'activation': self._activation,
}
return config
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
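# Example sketch (not part of the original file): a ConvBlock with strides=2
# halves the spatial size, while use_transpose_conv=True with strides=2
# doubles it; the channel count always becomes `filters`.
if __name__ == '__main__':
  x = tf.keras.Input(shape=[16, 16, 8], batch_size=2)
  down = ConvBlock(filters=16, kernel_size=3, strides=2)(x)
  up = ConvBlock(
      filters=8, kernel_size=3, strides=2, use_transpose_conv=True)(x)
  print(down.shape)  # (2, 8, 8, 16)
  print(up.shape)  # (2, 32, 32, 8)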
| 5,643 | 36.131579 | 80 | py |
models | models-master/official/projects/pointpillars/modeling/heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Head layers for Pointpillars."""
from typing import Any, Dict, List, Mapping, Optional, Tuple
import numpy as np
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class SSDHead(tf.keras.layers.Layer):
"""A SSD head for PointPillars detection."""
def __init__(
self,
num_classes: int,
num_anchors_per_location: int,
num_params_per_anchor: int = 4,
attribute_heads: Optional[List[Dict[str, Any]]] = None,
min_level: int = 1,
max_level: int = 3,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initialize the SSD Head.
Args:
num_classes: An `int` number of classes to predict.
num_anchors_per_location: An `int` number of anchors per location.
num_params_per_anchor: An `int` number of parameters per anchor.
attribute_heads: If not None, a list that contains a dict for each
additional attribute head. Each dict consists of 3 key-value pairs:
`name`, `type` ('regression' or 'classification'), and `size` (number
of predicted values for each instance).
      min_level: An `int` of min level for output multiscale features.
      max_level: An `int` of max level for output multiscale features.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
**kwargs: Additional keyword arguments to be passed.
Returns:
endpoints: A `dict` of {level: Tensor} pairs for the model output.
output_specs: A dict of {level: TensorShape} pairs for the model output.
"""
super(SSDHead, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'num_anchors_per_location': num_anchors_per_location,
'num_params_per_anchor': num_params_per_anchor,
'attribute_heads': attribute_heads,
'min_level': min_level,
'max_level': max_level,
'kernel_regularizer': kernel_regularizer,
}
utils.assert_channels_last()
def build(self, input_specs: Mapping[str, tf.TensorShape]):
self._decoder_output_level = int(min(input_specs.keys()))
if self._config_dict['min_level'] < self._decoder_output_level:
raise ValueError('The min_level should be >= decoder output '
'level, but {} < {}'.format(
self._config_dict['min_level'],
self._decoder_output_level))
# Multi-level convs.
    # Set num_filters to the channel count of the decoder's output level.
num_filters = input_specs[str(self._decoder_output_level)].as_list()[-1]
self._convs = {}
for level in range(self._decoder_output_level + 1,
self._config_dict['max_level'] + 1):
self._convs[str(level)] = layers.ConvBlock(
filters=num_filters,
kernel_size=3,
strides=2,
kernel_regularizer=self._config_dict['kernel_regularizer'])
# Detection convs, share weights across multi levels.
self._classifier = tf.keras.layers.Conv2D(
filters=(self._config_dict['num_classes'] *
self._config_dict['num_anchors_per_location']),
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)))
self._box_regressor = tf.keras.layers.Conv2D(
filters=(self._config_dict['num_params_per_anchor'] *
self._config_dict['num_anchors_per_location']),
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_initializer=tf.zeros_initializer())
if self._config_dict['attribute_heads']:
self._att_predictors = {}
for att_config in self._config_dict['attribute_heads']:
att_name = att_config['name']
att_type = att_config['type']
att_size = att_config['size']
if att_type != 'regression':
raise ValueError('Unsupported head type: {}'.format(att_type))
self._att_predictors[att_name] = tf.keras.layers.Conv2D(
filters=(att_size * self._config_dict['num_anchors_per_location']),
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_initializer=tf.zeros_initializer())
super(SSDHead, self).build(input_specs)
def call(
self, inputs: Mapping[str, tf.Tensor]
) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[Any, Dict[str, Any]]]:
# Build multi level features.
feats = {}
for level in range(self._decoder_output_level,
self._config_dict['max_level'] + 1):
if level == self._decoder_output_level:
x = inputs[str(level)]
else:
x = self._convs[str(level)](feats[level - 1])
feats[level] = x
# Get multi level detection.
scores = {}
boxes = {}
if self._config_dict['attribute_heads']:
attributes = {
att_config['name']: {}
for att_config in self._config_dict['attribute_heads']
}
else:
attributes = {}
for level in range(self._config_dict['min_level'],
self._config_dict['max_level'] + 1):
# The branch to predict box classes.
scores[str(level)] = self._classifier(feats[level])
# The branch to predict boxes.
boxes[str(level)] = self._box_regressor(feats[level])
# The branches to predict box attributes.
if self._config_dict['attribute_heads']:
for att_config in self._config_dict['attribute_heads']:
att_name = att_config['name']
attributes[att_name][str(level)] = self._att_predictors[att_name](
feats[level])
return scores, boxes, attributes
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.layers.Layer:
return cls(**config)
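# Example sketch (not part of the original file): the head consumes the
# decoder output at its lowest level and builds the coarser levels itself
# with strided ConvBlocks. Score channels are num_classes * anchors, box
# channels are num_params_per_anchor * anchors, and attribute channels are
# size * anchors.
if __name__ == '__main__':
  feats = {'1': tf.keras.Input(shape=[16, 16, 8], batch_size=2)}
  head = SSDHead(
      num_classes=2,
      num_anchors_per_location=3,
      attribute_heads=[{'name': 'heading', 'type': 'regression', 'size': 1}],
      min_level=1,
      max_level=3)
  scores, boxes, attributes = head(feats)
  print(scores['3'].shape)  # (2, 4, 4, 6)
  print(boxes['1'].shape)  # (2, 16, 16, 12)
  print(attributes['heading']['2'].shape)  # (2, 8, 8, 3)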
| 7,055 | 39.090909 | 79 | py |
models | models-master/official/projects/pointpillars/modeling/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to build models."""
from typing import Mapping, Optional
from absl import logging
import tensorflow as tf
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.modeling import backbones
from official.projects.pointpillars.modeling import decoders
from official.projects.pointpillars.modeling import featurizers
from official.projects.pointpillars.modeling import heads
from official.projects.pointpillars.modeling import models
from official.vision.modeling.layers import detection_generator
def build_pointpillars(
input_specs: Mapping[str, tf.keras.layers.InputSpec],
model_config: cfg.PointPillarsModel,
train_batch_size: int,
eval_batch_size: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Build the PointPillars model.
Args:
input_specs: A {name: input_spec} dict used to construct inputs.
model_config: A PointPillarsModel config.
train_batch_size: An `int` of training batch size per replica.
eval_batch_size: An `int` of evaluation batch size per replica.
l2_regularizer: A L2 regularizer.
Returns:
model: A PointPillarsModel built from the config.
"""
# Build inputs
inputs = {}
for k, v in input_specs.items():
inputs[k] = tf.keras.Input(shape=v.shape[1:], dtype=v.dtype)
# Build featurizer
image_size = (model_config.image.height, model_config.image.width)
pillars_size = input_specs['pillars'].shape[1:]
featurizer_config = model_config.featurizer
featurizer = featurizers.Featurizer(
image_size=image_size,
pillars_size=pillars_size,
num_blocks=featurizer_config.num_blocks,
num_channels=featurizer_config.num_channels,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
kernel_regularizer=l2_regularizer)
image = featurizer(inputs['pillars'], inputs['indices'], training=True)
# Build backbone
backbone_config = model_config.backbone
backbone = backbones.Backbone(
input_specs=featurizer.output_specs,
min_level=backbone_config.min_level,
max_level=backbone_config.max_level,
num_convs=backbone_config.num_convs,
kernel_regularizer=l2_regularizer)
encoded_feats = backbone(image)
# Build decoder
decoder = decoders.Decoder(
input_specs=backbone.output_specs,
kernel_regularizer=l2_regularizer)
decoded_feats = decoder(encoded_feats)
# Build detection head
head_config = model_config.head
  num_anchors_per_location = len(model_config.anchors)
head = heads.SSDHead(
num_classes=model_config.num_classes,
num_anchors_per_location=num_anchors_per_location,
num_params_per_anchor=4,
attribute_heads=[
attr.as_dict() for attr in (head_config.attribute_heads or [])
],
min_level=model_config.min_level,
max_level=model_config.max_level,
kernel_regularizer=l2_regularizer)
scores, boxes, attrs = head(decoded_feats)
generator_config = model_config.detection_generator
detection_generator_obj = detection_generator.MultilevelDetectionGenerator(
apply_nms=generator_config.apply_nms,
pre_nms_top_k=generator_config.pre_nms_top_k,
pre_nms_score_threshold=generator_config.pre_nms_score_threshold,
nms_iou_threshold=generator_config.nms_iou_threshold,
max_num_detections=generator_config.max_num_detections,
nms_version=generator_config.nms_version,
use_cpu_nms=generator_config.use_cpu_nms)
image_size = [model_config.image.height, model_config.image.width]
anchor_sizes = [(a.length, a.width) for a in model_config.anchors]
model = models.PointPillarsModel(
featurizer=featurizer,
backbone=backbone,
decoder=decoder,
head=head,
detection_generator=detection_generator_obj,
min_level=model_config.min_level,
max_level=model_config.max_level,
image_size=image_size,
anchor_sizes=anchor_sizes)
logging.info('Train/Eval batch size per replica: %d/%d', train_batch_size,
eval_batch_size)
logging.info('Model inputs: %s', inputs)
logging.info('Outputs in training:')
logging.info('Featurizer output: %s', image)
logging.info('Backbone output: %s', encoded_feats)
logging.info('Decoder output: %s', decoded_feats)
  logging.info('Detection head outputs: scores %s, boxes %s, attributes %s',
scores, boxes, attrs)
return model
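# Example sketch (not part of the original file): wiring build_pointpillars
# from a config. The pillar/index shapes are placeholders rather than values
# from a real dataloader, and the Anchor field names are assumed from their
# usage in build_pointpillars above.
if __name__ == '__main__':
  model_config = cfg.PointPillarsModel()
  model_config.anchors = [cfg.Anchor(length=1.0, width=1.0)]
  input_specs = {
      'pillars': tf.keras.layers.InputSpec(shape=(None, 100, 4, 3)),
      'indices': tf.keras.layers.InputSpec(
          shape=(None, 100, 2), dtype='int32'),
  }
  model = build_pointpillars(
      input_specs, model_config, train_batch_size=2, eval_batch_size=2)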
| 5,075 | 37.165414 | 77 | py |
models | models-master/official/projects/pointpillars/modeling/layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backbones."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
class ConvBlockTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([1, 8, 8, 3], 4, 1, False),
([1, 8, 8, 3], 4, 2, False),
([1, 8, 8, 3], 2, 1, True),
([1, 8, 8, 3], 2, 2, True),
)
def test_creation(self, input_shape, filters, strides,
use_transpose_conv):
kernel_size = 3
n, h, w, _ = input_shape
inputs = tf.keras.Input(shape=input_shape[1:], batch_size=n)
block = layers.ConvBlock(filters, kernel_size, strides, use_transpose_conv)
outputs = block(inputs)
if not use_transpose_conv:
if strides == 1:
self.assertAllEqual([n, h, w, filters], outputs.shape.as_list())
elif strides == 2:
self.assertAllEqual([n, h/2, w/2, filters], outputs.shape.as_list())
else:
if strides == 1:
self.assertAllEqual([n, h, w, filters], outputs.shape.as_list())
elif strides == 2:
self.assertAllEqual([n, h*2, w*2, filters], outputs.shape.as_list())
def test_serialization(self):
kwargs = dict(
filters=3,
kernel_size=3,
strides=1,
use_transpose_conv=False,
kernel_initializer=None,
kernel_regularizer=None,
use_bias=False,
bias_initializer=None,
bias_regularizer=None,
use_sync_bn=True,
norm_momentum=0.99,
norm_epsilon=0.001,
bn_trainable=True,
activation='relu',
)
net = layers.ConvBlock(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = layers.ConvBlock.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,489 | 31.763158 | 79 | py |
models | models-master/official/projects/pointpillars/modeling/models.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars Model."""
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import tensorflow as tf
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class PointPillarsModel(tf.keras.Model):
"""The PointPillars model class."""
def __init__(self,
featurizer: tf.keras.layers.Layer,
backbone: tf.keras.Model,
decoder: tf.keras.Model,
head: tf.keras.layers.Layer,
detection_generator: tf.keras.layers.Layer,
min_level: int,
max_level: int,
image_size: Tuple[int, int],
anchor_sizes: List[Tuple[float, float]],
**kwargs):
"""Initialize the model class.
Args:
featurizer: A `tf.keras.layers.Layer` to extract features from pillars.
backbone: A `tf.keras.Model` to downsample feature images.
decoder: A `tf.keras.Model` to upsample feature images.
head: A `tf.keras.layers.Layer` to predict targets.
detection_generator: A `tf.keras.layers.Layer` to generate detections.
min_level: An `int` minimum level of multiscale outputs.
max_level: An `int` maximum level of multiscale outputs.
image_size: A tuple (height, width) of image size.
anchor_sizes: A list of tuple (length, width) of anchor boxes.
**kwargs: Additional keyword arguments to be passed.
"""
super(PointPillarsModel, self).__init__(**kwargs)
self._featurizer = featurizer
self._backbone = backbone
self._decoder = decoder
self._head = head
self._detection_generator = detection_generator
self._min_level = min_level
self._max_level = max_level
self._image_size = image_size
self._anchor_sizes = anchor_sizes
def generate_outputs(
self,
raw_scores: Dict[str, tf.Tensor],
raw_boxes: Dict[str, tf.Tensor],
raw_attributes: Dict[str, Dict[str, tf.Tensor]],
image_shape: Optional[tf.Tensor] = None,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
generate_detections: bool = False) -> Mapping[str, Any]:
if not raw_attributes:
raise ValueError('PointPillars model needs attribute heads.')
    # Clamp heading to [-pi, pi].
if 'heading' in raw_attributes:
raw_attributes['heading'] = utils.clip_heading(raw_attributes['heading'])
outputs = {
'cls_outputs': raw_scores,
'box_outputs': raw_boxes,
'attribute_outputs': raw_attributes,
}
# Cast raw prediction to float32 for loss calculation.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
if not generate_detections:
return outputs
if image_shape is None:
raise ValueError('Image_shape should not be None for evaluation.')
if anchor_boxes is None:
# Generate anchors if needed.
anchor_boxes = utils.generate_anchors(
self._min_level,
self._max_level,
self._image_size,
self._anchor_sizes,
)
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0),
[tf.shape(image_shape)[0], 1, 1, 1])
# Generate detected boxes.
if not self._detection_generator.get_config()['apply_nms']:
raise ValueError('An NMS algorithm is required for detection generator')
detections = self._detection_generator(raw_boxes, raw_scores,
anchor_boxes, image_shape,
raw_attributes)
outputs.update({
'boxes': detections['detection_boxes'],
'scores': detections['detection_scores'],
'classes': detections['detection_classes'],
'num_detections': detections['num_detections'],
'attributes': detections['detection_attributes'],
})
return outputs
def call(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
pillars: tf.Tensor,
indices: tf.Tensor,
image_shape: Optional[tf.Tensor] = None,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
           training: Optional[bool] = None) -> Mapping[str, Any]:
"""Forward pass of the model.
Notation:
B: batch size
H_i: image height at level i
W_i: image width at level i
      A: number of anchors per location
C: number of classes to predict
M: number of detected boxes
T: attribute size
P: number of pillars in an example
N: number of points in a pillar
D: number of features in a point
Args:
pillars: A tensor with shape [B, P, N, D].
indices: A tensor with shape [B, P, 2].
image_shape: A tensor with shape [B, 2] representing size of images.
anchor_boxes: A {level: tensor} dict contains multi level anchor boxes.
- key: a `str` level.
        - value: a tensor with shape [B, H_i, W_i, 4 * A].
training: A `bool` indicating whether it's in training mode.
Returns:
      cls_outputs: A {level: tensor} dict, tensor shape is [B, H_i, W_i, C * A].
      box_outputs: A {level: tensor} dict, tensor shape is [B, H_i, W_i, 4 * A].
      attribute_outputs: A {name: {level: tensor}} dict, tensor shape is
        [B, H_i, W_i, T * A].
(Below are only for evaluation mode)
      num_detections: An `int` tensor representing number of detected boxes.
boxes: A tensor with shape [B, M, 4].
scores: A tensor with shape [B, M].
classes: A tensor with shape [B, M].
attributes: A {name: tensor} dict, tensor shape is [B, M, T].
"""
images = self.featurizer(pillars, indices, training=training)
features = self.backbone(images)
features = self.decoder(features)
raw_scores, raw_boxes, raw_attributes = self.head(features)
return self.generate_outputs(raw_scores=raw_scores,
raw_boxes=raw_boxes,
raw_attributes=raw_attributes,
image_shape=image_shape,
anchor_boxes=anchor_boxes,
generate_detections=not training)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(featurizer=self.featurizer,
backbone=self.backbone,
decoder=self.decoder,
head=self.head)
return items
@property
def featurizer(self) -> tf.keras.layers.Layer:
return self._featurizer
@property
def backbone(self) -> tf.keras.Model:
return self._backbone
@property
def decoder(self) -> tf.keras.Model:
return self._decoder
@property
def head(self) -> tf.keras.layers.Layer:
return self._head
@property
def detection_generator(self) -> tf.keras.layers.Layer:
return self._detection_generator
def get_config(self) -> Mapping[str, Any]:
config_dict = {
'featurizer': self._featurizer,
'backbone': self._backbone,
'decoder': self._decoder,
'head': self._head,
'detection_generator': self._detection_generator,
'min_level': self._min_level,
'max_level': self._max_level,
'image_size': self._image_size,
'anchor_sizes': self._anchor_sizes,
}
return config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
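# Usage sketch (not part of the original file): in training mode the model
# returns only raw multi-level head outputs for loss computation; in eval
# mode it also runs the detection generator, which requires image_shape and
# derives anchor_boxes on the fly when None is passed, e.g.
#   outputs = model(pillars, indices, training=True)
#   outputs = model(pillars, indices, image_shape, None, training=False)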
| 8,181 | 36.53211 | 91 | py |
models | models-master/official/projects/pointpillars/modeling/backbones.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbone models for Pointpillars."""
from typing import Any, Mapping, Optional
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class Backbone(tf.keras.Model):
"""The backbone to extract features from BEV pseudo image.
The implementation is from the network architecture of PointPillars
(https://arxiv.org/pdf/1812.05784.pdf). It downsamples the input image
  through convolutions and outputs features at multiple levels.
"""
def __init__(
self,
input_specs: tf.TensorShape,
min_level: int = 1,
max_level: int = 3,
num_convs: int = 4,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initialize the backbone.
    The output of the backbone is multi-level features.
1 <= min_level <= max_level,
level_feature_size = input_image_size / 2 ^ level,
e.g. input size (32, 32), feature size should be:
(32, 32) at level 0, (16, 16) at level 1, (8, 8) at level 2, ...
Args:
input_specs: A `tf.TensorShape` of the input tensor.
min_level: An `int` of min level for output multiscale features.
max_level: An `int` of max level for output multiscale features.
num_convs: An `int` number of convolution layers in a downsample group.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
**kwargs: Additional keyword arguments to be passed.
Returns:
endpoints: A `dict` of {level: Tensor} pairs for the model output.
output_specs: A dict of {level: TensorShape} pairs for the model output.
"""
utils.assert_channels_last()
self._config_dict = {
'input_specs': input_specs,
'min_level': min_level,
'max_level': max_level,
'num_convs': num_convs,
'kernel_regularizer': kernel_regularizer,
}
    # Only allow output from level 1.
if min_level < 1:
raise ValueError(
'The min_level must be >= 1, but {} found.'.format(min_level))
input_channels = input_specs[-1]
inputs = tf.keras.Input(shape=input_specs[1:])
# build the net
x = inputs
net = {}
scale = 1
for level in range(1, max_level + 1):
x = self._block_group(
inputs=x,
filters=input_channels * scale)
scale *= 2
net[level] = x
# build endpoints
endpoints = {}
for level in range(min_level, max_level + 1):
endpoints[str(level)] = net[level]
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(Backbone, self).__init__(inputs=inputs, outputs=endpoints)
def _block_group(self,
inputs: tf.Tensor,
filters: int) -> tf.Tensor:
"""A group of convolution layers to downsample inputs.
Args:
inputs: A tensor to be downsampled.
filters: An `int` number of filters of convolution.
Returns:
x: A tensor of downsampled feature.
"""
x = layers.ConvBlock(
filters=filters,
kernel_size=3,
strides=2,
kernel_regularizer=self._config_dict['kernel_regularizer'])(inputs)
for _ in range(1, self._config_dict['num_convs']):
x = layers.ConvBlock(
filters=filters,
kernel_size=3,
strides=1,
kernel_regularizer=self._config_dict['kernel_regularizer'])(x)
return x
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
return self._output_specs
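# Example sketch (not part of the original file): each level group starts
# with a strides=2 ConvBlock, so the spatial size halves and the filter
# count doubles at every level relative to the input pseudo image.
if __name__ == '__main__':
  spec = tf.TensorShape([2, 32, 32, 4])
  backbone = Backbone(input_specs=spec, min_level=1, max_level=2)
  feats = backbone(tf.keras.Input(shape=spec[1:], batch_size=2))
  print({k: v.shape for k, v in feats.items()})
  # {'1': (2, 16, 16, 4), '2': (2, 8, 8, 8)}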
| 4,423 | 32.515152 | 78 | py |
models | models-master/official/projects/pointpillars/modeling/heads_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decoders."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.pointpillars.modeling import heads
class SSDHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(2, [], 1, 1),
(3, [{'name': 'z', 'type': 'regression', 'size': 1}], 1, 3))
def test_network_creation(self, num_classes, attribute_heads, min_level,
max_level):
"""Test if network could be created and infer with expected shapes."""
# Fix the input shape, anchor size and num of conv filters.
n, h, w, c = 1, 32, 32, 4
num_anchors_per_location = 3
num_params_per_anchor = 4
inputs = {'1': tf.keras.Input(shape=[h, w, c], batch_size=n)}
head = heads.SSDHead(num_classes, num_anchors_per_location,
num_params_per_anchor, attribute_heads, min_level,
max_level)
scores, boxes, attributes = head(inputs)
for level in range(min_level, max_level+1):
self.assertIn(str(level), scores)
self.assertIn(str(level), boxes)
scale = 2**(level - min_level)
self.assertAllEqual(scores[str(level)].shape.as_list(), [
n,
int(h / scale),
int(w / scale), num_classes * num_anchors_per_location
])
self.assertAllEqual(boxes[str(level)].shape.as_list(), [
n,
int(h / scale),
int(w / scale), num_params_per_anchor * num_anchors_per_location
])
for attr_head in attribute_heads:
name = attr_head['name']
size = attr_head['size']
self.assertIn(name, attributes)
attr = attributes[name]
for level in range(min_level, max_level+1):
self.assertIn(str(level), attr)
scale = 2**(level - min_level)
self.assertAllEqual(attr[str(level)].shape.as_list(), [
n,
int(h / scale),
int(w / scale), size * num_anchors_per_location
])
def test_serialization(self):
kwargs = dict(
num_classes=2,
num_anchors_per_location=3,
num_params_per_anchor=4,
attribute_heads=[
{'name': 'z', 'type': 'regression', 'size': 1},
],
min_level=1,
max_level=3,
kernel_regularizer=None
)
net = heads.SSDHead(**kwargs)
expected_config = kwargs
self.assertEqual(net.get_config(), expected_config)
new_net = heads.SSDHead.from_config(net.get_config())
self.assertAllEqual(net.get_config(), new_net.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,185 | 34.010989 | 75 | py |
models | models-master/official/projects/pointpillars/modeling/decoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoder models for Pointpillars."""
from typing import Any, Mapping, Optional
import tensorflow as tf
from official.projects.pointpillars.modeling import layers
from official.projects.pointpillars.utils import utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class Decoder(tf.keras.Model):
"""The decoder to process feature maps learned by a backbone.
The implementation is from the network architecture of PointPillars
  (https://arxiv.org/pdf/1812.05784.pdf). It upsamples the feature images
  to the same size and combines them into the output.
"""
def __init__(
self,
input_specs: Mapping[str, tf.TensorShape],
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initialize the Decoder.
Args:
input_specs: A dict of {level: tf.TensorShape} of the input tensor.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
**kwargs: Additional keyword arguments to be passed.
Returns:
endpoints: A `dict` of {level: Tensor} pairs for the model output.
output_specs: A dict of {level: TensorShape} pairs for the model output.
"""
self._config_dict = {
'input_specs': input_specs,
'kernel_regularizer': kernel_regularizer,
}
utils.assert_channels_last()
    # Only process levels learned by a backbone.
min_level = int(min(input_specs.keys()))
max_level = int(max(input_specs.keys()))
# Build inputs
inputs = {}
# Set min_level as the output level.
output_level = min_level
for level, shape in input_specs.items():
      # Set num_filters to 2c, where c is the channel count of the
      # backbone's output level.
if int(level) == output_level:
num_filters = 2 * shape[-1]
inputs[level] = tf.keras.Input(shape=shape[1:])
# Build lateral features
lateral_feats = {}
for level in range(min_level, max_level + 1):
lateral_feats[level] = inputs[str(level)]
# Build scale-up path
feats = []
for level in range(min_level, max_level + 1):
x = layers.ConvBlock(
filters=num_filters,
kernel_size=3,
strides=int(2 ** (level - output_level)),
use_transpose_conv=True,
kernel_regularizer=kernel_regularizer)(
lateral_feats[level])
feats.append(x)
# Fuse all levels feature into the output level.
endpoints = {}
endpoints[str(output_level)] = tf.keras.layers.Concatenate(axis=-1)(feats)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(Decoder, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]) -> tf.keras.Model:
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
return self._output_specs
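# Example sketch (not part of the original file): the decoder upsamples every
# backbone level to the lowest level's resolution with transpose convs, each
# producing twice the output level's input channels, then concatenates them.
if __name__ == '__main__':
  specs = {
      '1': tf.TensorShape([2, 16, 16, 4]),
      '2': tf.TensorShape([2, 8, 8, 8]),
  }
  decoder = Decoder(input_specs=specs)
  ins = {
      k: tf.keras.Input(shape=v[1:], batch_size=2) for k, v in specs.items()
  }
  print(decoder(ins)['1'].shape)  # (2, 16, 16, 16): 2 levels x 8 channels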
| 3,593 | 32.90566 | 78 | py |