# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fast GRU layer backed by cuDNN."""
import collections
import tensorflow.compat.v2 as tf
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.layers.rnn import gru_lstm_utils
from tf_keras.layers.rnn.base_cudnn_rnn import _CuDNNRNN
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=["keras.layers.CuDNNGRU"])
class CuDNNGRU(_CuDNNRNN):
"""Fast GRU implementation backed by cuDNN.
More information about cuDNN can be found on the [NVIDIA
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
Args:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used
for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output in the
output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to
the output.
go_backwards: Boolean (default False). If True, process the input
sequence backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each
sample at index i in a batch will be used as initial state for the
sample of index i in the following batch.
"""
def __init__(
self,
units,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
**kwargs
):
self.units = units
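        # The base _CuDNNRNN class only needs a cell that exposes state_size,
        # so a lightweight namedtuple stands in for a full RNN cell object.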
cell_spec = collections.namedtuple("cell", "state_size")
self._cell = cell_spec(state_size=self.units)
super().__init__(
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs
)
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
@property
def cell(self):
return self._cell
def build(self, input_shape):
super().build(input_shape)
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_dim = int(input_shape[-1])
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name="kernel",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name="recurrent_kernel",
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
)
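        # cuDNN keeps two bias vectors per gate (one for the input projection
        # and one for the recurrent projection), hence 6 * units in total.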
self.bias = self.add_weight(
shape=(self.units * 6,),
name="bias",
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
self.built = True
def _process_batch(self, inputs, initial_state):
if not self.time_major:
inputs = tf.transpose(inputs, perm=(1, 0, 2))
input_h = initial_state[0]
input_h = tf.expand_dims(input_h, axis=0)
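        # Keras stores the GRU kernels with gate order (z, r, h) along the
        # last axis, while cuDNN expects (r, z, h) with separate input and
        # recurrent bias vectors; the slicing below performs that reordering.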
params = gru_lstm_utils.canonical_to_params(
weights=[
self.kernel[:, self.units : self.units * 2],
self.kernel[:, : self.units],
self.kernel[:, self.units * 2 :],
self.recurrent_kernel[:, self.units : self.units * 2],
self.recurrent_kernel[:, : self.units],
self.recurrent_kernel[:, self.units * 2 :],
],
biases=[
self.bias[self.units : self.units * 2],
self.bias[: self.units],
self.bias[self.units * 2 : self.units * 3],
self.bias[self.units * 4 : self.units * 5],
self.bias[self.units * 3 : self.units * 4],
self.bias[self.units * 5 :],
],
shape=self._vector_shape,
)
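        # CudnnRNNV2 requires a cell-state input even in GRU mode; a dummy 0
        # is passed because GRU has no cell state.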
args = {
"input": inputs,
"input_h": input_h,
"input_c": 0,
"params": params,
"is_training": True,
"rnn_mode": "gru",
}
outputs, h, _, _, _ = tf.raw_ops.CudnnRNNV2(**args)
if self.stateful or self.return_state:
h = h[0]
if self.return_sequences:
if self.time_major:
output = outputs
else:
output = tf.transpose(outputs, perm=(1, 0, 2))
else:
output = outputs[-1]
return output, [h]
def get_config(self):
config = {
"units": self.units,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
# ==============================================================================
# End of tf-keras/tf_keras/layers/rnn/cudnn_gru.py
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LSTM V1 layer."""
import time
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.layers.rnn import lstm
from tf_keras.layers.rnn import lstm_v1
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import np_utils
# isort: off
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.platform import tf_logging as logging
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = tf.compat.v1.GraphOptions(rewrite_options=_rewrites)
_config = tf.compat.v1.ConfigProto(graph_options=_graph_options)
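# ``implementation_selector`` lets grappler swap the generic LSTM kernel for
# the cuDNN-backed one when a GPU is available, which is what these graph
# mode tests exercise.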
@test_combinations.run_all_keras_modes(config=_config)
class LSTMGraphRewriteTest(test_combinations.TestCase):
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message=(
"Skipping as ROCm MIOpen does not support padded input yet."
),
)
@test_utils.run_v2_only
def test_lstm_feature_parity_v1_v2(self):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 20
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=rnn_state_size,
random_seed=87654321,
)
y_train = np_utils.to_categorical(y_train, rnn_state_size)
        # For the last two batch items of the test data, we zero out the last
        # timestep to simulate variable-length sequences for the masking test.
x_train[-2:, -1, :] = 0.0
y_train[-2:] = 0
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=tf.float32
)
masked_input = keras.layers.Masking()(inputs)
lstm_layer = lstm_v1.LSTM(
rnn_state_size, recurrent_activation="sigmoid"
)
output = lstm_layer(masked_input)
lstm_model = keras.models.Model(inputs, output)
weights = lstm_model.get_weights()
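        # The V1 and V2 LSTM layers share the same weight layout, so the V1
        # weights can be loaded directly into the cuDNN-backed V2 model below.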
y_1 = lstm_model.predict(x_train)
lstm_model.compile("rmsprop", "mse")
lstm_model.fit(x_train, y_train)
y_2 = lstm_model.predict(x_train)
with test_utils.device(should_use_gpu=True):
cudnn_layer = lstm.LSTM(rnn_state_size)
cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
cudnn_model.set_weights(weights)
y_3 = cudnn_model.predict(x_train)
cudnn_model.compile("rmsprop", "mse")
cudnn_model.fit(x_train, y_train)
y_4 = cudnn_model.predict(x_train)
self.assertAllClose(y_1, y_3, rtol=1e-5, atol=2e-5)
self.assertAllClose(y_2, y_4, rtol=1e-5, atol=2e-5)
@parameterized.named_parameters(
# test_name, time_major, go_backwards
("normal", False, False),
("time_major", True, False),
("go_backwards", False, True),
("both", True, True),
)
def test_time_major_and_go_backward_v1_v2(self, time_major, go_backwards):
input_shape = 10
rnn_state_size = 8
timestep = 4
batch = 100
x_train = np.random.random((batch, timestep, input_shape))
def build_model(layer_cls):
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=tf.float32
)
layer = layer_cls(
rnn_state_size,
recurrent_activation="sigmoid",
time_major=time_major,
return_sequences=True,
go_backwards=go_backwards,
)
if time_major:
converted_input = keras.layers.Lambda(
lambda t: tf.transpose(t, [1, 0, 2])
)(inputs)
outputs = layer(converted_input)
outputs = keras.layers.Lambda(
lambda t: tf.transpose(t, [1, 0, 2])
)(outputs)
else:
outputs = layer(inputs)
return keras.models.Model(inputs, outputs)
lstm_model = build_model(lstm_v1.LSTM)
y_ref = lstm_model.predict(x_train)
weights = lstm_model.get_weights()
lstm_v2_model = build_model(lstm.LSTM)
lstm_v2_model.set_weights(weights)
y = lstm_v2_model.predict(x_train)
self.assertAllClose(y, y_ref)
    def test_keras_model_with_lstm(self):
        input_shape = 10
rnn_state_size = 8
output_shape = 8
timestep = 4
batch = 100
epoch = 10
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape,
)
y_train = np_utils.to_categorical(y_train, output_shape)
layer = lstm.LSTM(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=tf.float32
)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile("rmsprop", loss="mse")
model.fit(x_train, y_train, epochs=epoch)
model.evaluate(x_train, y_train)
model.predict(x_train)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message=(
"Skipping as ROCm MIOpen does not support padded input yet."
),
)
@test_utils.run_v2_only
def test_explicit_device_with_go_backward_and_mask_v1(self):
batch_size = 8
timestep = 7
masksteps = 5
units = 4
inputs = np.random.randn(batch_size, timestep, units).astype(np.float32)
mask = np.ones((batch_size, timestep)).astype(bool)
mask[:, masksteps:] = 0
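        # Running the masked full-length input backwards should match running
        # the explicitly trimmed input, since masked timesteps do not
        # contribute to the computation.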
lstm_v1_layer = lstm_v1.LSTM(
units, return_sequences=True, go_backwards=True
)
with test_utils.device(should_use_gpu=True):
outputs_masked_v1 = lstm_v1_layer(inputs, mask=tf.constant(mask))
outputs_trimmed_v1 = lstm_v1_layer(inputs[:, :masksteps])
self.assertAllClose(
outputs_masked_v1[:, -masksteps:], outputs_trimmed_v1
)
class LSTMPerformanceTest(tf.test.Benchmark):
def _measure_performance(self, test_config, model, x_train, y_train):
batch = test_config["batch"]
epoch = test_config["epoch"]
warmup_epoch = test_config["warmup_epoch"]
        # Warm up the model; warmup epochs are excluded from the timing below.
model.fit(x_train, y_train, batch_size=batch, epochs=warmup_epoch)
start_time = time.time()
model.fit(
x_train, y_train, batch_size=batch, epochs=epoch - warmup_epoch
)
end_time = time.time()
return (end_time - start_time) / (epoch - warmup_epoch)
def _time_performance_run_cudnn_lstm(self, test_config, x_train, y_train):
        # Get the performance number for the standard cuDNN LSTM.
input_shape = test_config["input_shape"]
rnn_state_size = test_config["rnn_state_size"]
timestep = test_config["timestep"]
cudnn_lstm_layer = keras.layers.CuDNNLSTM(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=tf.float32
)
outputs = cudnn_lstm_layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile("sgd", "mse")
sec_per_epoch = self._measure_performance(
test_config, model, x_train, y_train
)
logging.info(
"Average performance for %s per epoch is: %s",
"CuDNN LSTM",
sec_per_epoch,
)
return sec_per_epoch
    def _time_performance_run_unified_lstm_gpu(
        self, test_config, x_train, y_train
    ):
        # Get the performance number for lstm_v2, with grappler swapping in
        # the cuDNN implementation.
input_shape = test_config["input_shape"]
rnn_state_size = test_config["rnn_state_size"]
timestep = test_config["timestep"]
layer = keras.layers.LSTM(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=tf.float32
)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile("sgd", "mse")
sec_per_epoch = self._measure_performance(
test_config, model, x_train, y_train
)
logging.info(
"Average performance for %s per epoch is: %s",
"LSTM V2",
sec_per_epoch,
)
return sec_per_epoch
def _time_performance_run_normal_lstm(self, test_config, x_train, y_train):
        # Get the performance number for the standard LSTM on GPU.
input_shape = test_config["input_shape"]
rnn_state_size = test_config["rnn_state_size"]
timestep = test_config["timestep"]
layer = lstm_v1.LSTM(rnn_state_size)
inputs = keras.layers.Input(
shape=[timestep, input_shape], dtype=tf.float32
)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile("sgd", "mse")
sec_per_epoch = self._measure_performance(
test_config, model, x_train, y_train
)
logging.info(
"Average performance for %s per epoch is: %s",
"Normal LSTM",
sec_per_epoch,
)
return sec_per_epoch
def _benchmark_performance_with_standard_cudnn_impl(self):
if not tf.test.is_gpu_available():
self.skipTest("performance test will only run on GPU")
mode = "eager" if tf.executing_eagerly() else "graph"
batch = 64
num_batch = 10
test_config = {
"input_shape": 128,
"rnn_state_size": 64,
"output_shape": 64,
"timestep": 50,
"batch": batch,
"epoch": 20,
# The performance for warmup epoch is ignored.
"warmup_epoch": 1,
}
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=(batch * num_batch),
test_samples=0,
input_shape=(test_config["timestep"], test_config["input_shape"]),
num_classes=test_config["output_shape"],
)
y_train = np_utils.to_categorical(y_train, test_config["output_shape"])
cudnn_sec_per_epoch = self._time_performance_run_cudnn_lstm(
test_config, x_train, y_train
)
        lstm_v2_sec_per_epoch = self._time_performance_run_unified_lstm_gpu(
test_config, x_train, y_train
)
normal_lstm_sec_per_epoch = self._time_performance_run_normal_lstm(
test_config, x_train, y_train
)
cudnn_vs_v2 = cudnn_sec_per_epoch / lstm_v2_sec_per_epoch
v2_vs_normal = normal_lstm_sec_per_epoch / lstm_v2_sec_per_epoch
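        # Both ratios compare seconds per epoch: cudnn_vs_v2 near 1.0 means
        # LSTM V2 keeps pace with cuDNN, and v2_vs_normal > 1 means V2 is
        # faster than the canonical LSTM.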
self.report_benchmark(
name="keras_cudnn_lstm_" + mode,
wall_time=cudnn_sec_per_epoch,
iters=test_config["epoch"],
extras=test_config,
)
self.report_benchmark(
name="keras_lstm_v2_" + mode,
wall_time=lstm_v2_sec_per_epoch,
iters=test_config["epoch"],
extras=test_config,
)
self.report_benchmark(
name="keras_canonical_lstm_" + mode,
wall_time=normal_lstm_sec_per_epoch,
iters=test_config["epoch"],
extras=test_config,
)
        logging.info(
            "Expect the performance of LSTM V2 to be within 80% of "
            "cuDNN LSTM; got {0:.2f}%".format(cudnn_vs_v2 * 100)
        )
        logging.info(
            "Expect the performance of LSTM V2 to be more than 5 times "
            "that of the normal LSTM; got {0:.2f}".format(v2_vs_normal)
        )
def benchmark_performance_graph(self):
with tf.compat.v1.get_default_graph().as_default():
with tf.compat.v1.Session(config=_config):
self._benchmark_performance_with_standard_cudnn_impl()
def benchmark_performance_eager(self):
with tf.__internal__.eager_context.eager_mode():
self._benchmark_performance_with_standard_cudnn_impl()
if __name__ == "__main__":
tf.test.main()
# ==============================================================================
# End of tf-keras/tf_keras/layers/rnn/lstm_v1_test.py
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.convolutional."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.legacy_tf_layers import convolutional as conv_layers
class ConvTest(tf.test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, "data_format"):
conv_layers.conv2d(images, 32, 3, data_format="invalid")
def testInvalidStrides(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.conv2d(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.conv2d(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.conv2d(images, 32, (1, 2, 3))
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.conv2d(images, 32, None)
def testCreateConv2D(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.Conv2D(32, [3, 3], activation=tf.nn.relu)
output = layer(images)
if not tf.executing_eagerly():
self.assertEqual(output.op.name, "conv2d/Relu")
self.assertListEqual(
output.get_shape().as_list(), [5, height - 2, width - 2, 32]
)
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DFloat16(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4), dtype="float16")
output = conv_layers.conv2d(images, 32, [3, 3], activation=tf.nn.relu)
self.assertListEqual(
output.get_shape().as_list(), [5, height - 2, width - 2, 32]
)
def testCreateConv2DIntegerKernelSize(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.Conv2D(32, 3)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height - 2, width - 2, 32]
)
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateConv2DChannelsFirst(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, 4, height, width))
layer = conv_layers.Conv2D(32, [3, 3], data_format="channels_first")
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, 32, height - 2, width - 2]
)
self.assertListEqual(
layer.kernel.get_shape().as_list(), [3, 3, 4, 32]
)
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testUnknownInputChannels(self):
with tf.Graph().as_default():
images = tf.compat.v1.placeholder(tf.float32, (5, 7, 9, None))
layer = conv_layers.Conv2D(32, [3, 3], activation=tf.nn.relu)
with self.assertRaisesRegex(
ValueError,
"The channel dimension of the inputs "
"should be defined. The input_shape received is",
):
_ = layer(images)
images = tf.compat.v1.placeholder(tf.float32, (5, None, 7, 9))
layer = conv_layers.Conv2D(32, [3, 3], data_format="channels_first")
with self.assertRaisesRegex(
ValueError,
"The channel dimension of the inputs "
"should be defined. The input_shape received is",
):
_ = layer(images)
def testConv2DPaddingSame(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 32), seed=1)
layer = conv_layers.Conv2D(64, images.get_shape()[1:3], padding="same")
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height, width, 64]
)
def testCreateConvWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = tf.random.uniform((5, height, width, 3), seed=1)
layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 2), padding="same")
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height / 2, width / 2, 32]
)
# Test strides integer
layer = conv_layers.Conv2D(32, [3, 3], strides=2, padding="same")
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height / 2, width / 2, 32]
)
# Test unequal strides
layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 1), padding="same")
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height / 2, width, 32]
)
def testCreateConv1D(self):
width = 7
data = tf.random.uniform((5, width, 4))
layer = conv_layers.Conv1D(32, 3, activation=tf.nn.relu)
output = layer(data)
if not tf.executing_eagerly():
self.assertEqual(output.op.name, "conv1d/Relu")
self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv1DFloat16(self):
width = 7
data = tf.random.uniform((5, width, 4), dtype="float16")
output = conv_layers.conv1d(data, 32, 3, activation=tf.nn.relu)
self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
def testCreateConv1DChannelsFirst(self):
with tf.Graph().as_default():
width = 7
data = tf.random.uniform((5, 4, width))
layer = conv_layers.Conv1D(32, 3, data_format="channels_first")
output = layer(data)
self.assertListEqual(
output.get_shape().as_list(), [5, 32, width - 2]
)
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testUnknownInputChannelsConv1D(self):
with tf.Graph().as_default():
data = tf.compat.v1.placeholder(tf.float32, (5, 4, None))
layer = conv_layers.Conv1D(32, 3, activation=tf.nn.relu)
with self.assertRaisesRegex(
ValueError,
"The channel dimension of the inputs "
"should be defined. The input_shape received is",
):
_ = layer(data)
data = tf.compat.v1.placeholder(tf.float32, (5, None, 4))
layer = conv_layers.Conv1D(32, 3, data_format="channels_first")
with self.assertRaisesRegex(
ValueError,
"The channel dimension of the inputs "
"should be defined. The input_shape received is",
):
_ = layer(data)
def testCreateConv3D(self):
depth, height, width = 6, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 4))
layer = conv_layers.Conv3D(32, [3, 3, 3], activation=tf.nn.relu)
output = layer(volumes)
if not tf.executing_eagerly():
self.assertEqual(output.op.name, "conv3d/Relu")
self.assertListEqual(
output.get_shape().as_list(),
[5, depth - 2, height - 2, width - 2, 32],
)
self.assertListEqual(
layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32]
)
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testUnknownInputChannelsConv3D(self):
with tf.Graph().as_default():
volumes = tf.compat.v1.placeholder(tf.float32, (5, 6, 7, 9, None))
layer = conv_layers.Conv3D(32, [3, 3, 3], activation=tf.nn.relu)
with self.assertRaisesRegex(
ValueError,
"The channel dimension of the inputs "
"should be defined. The input_shape received is",
):
_ = layer(volumes)
def testConv2DKernelRegularizer(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.Conv2D(32, [3, 3], kernel_regularizer=reg)
layer(images)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testConv2DBiasRegularizer(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.Conv2D(32, [3, 3], bias_regularizer=reg)
layer(images)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testConv2DNoBias(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.Conv2D(
32, [3, 3], activation=tf.nn.relu, use_bias=False
)
output = layer(images)
if not tf.executing_eagerly():
self.assertEqual(output.op.name, "conv2d/Relu")
self.assertListEqual(
output.get_shape().as_list(), [5, height - 2, width - 2, 32]
)
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertEqual(layer.bias, None)
def testDilatedConv2D(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=3)
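        # With dilation_rate=3, a 3x3 kernel spans an effective 7x7 window, so
        # valid padding on a 7x9 input leaves a 1x3 spatial output.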
output = layer(images)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 3, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
# Test tuple dilation rate
layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=(1, 3))
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height - 2, 3, 32]
)
def testFunctionalConv2DReuse(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3], name="conv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
conv_layers.conv2d(images, 32, [3, 3], name="conv1", reuse=True)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
def testFunctionalConv2DReuseFromScope(self):
with tf.Graph().as_default():
with tf.compat.v1.variable_scope("scope"):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3], name="conv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
with tf.compat.v1.variable_scope("scope", reuse=True):
conv_layers.conv2d(images, 32, [3, 3], name="conv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
def testFunctionalConv2DInitializerFromScope(self):
with tf.Graph().as_default(), self.cached_session():
with tf.compat.v1.variable_scope(
"scope", initializer=tf.compat.v1.ones_initializer()
):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3], name="conv1")
weights = tf.compat.v1.trainable_variables()
# Check the names of weights in order.
self.assertTrue("kernel" in weights[0].name)
self.assertTrue("bias" in weights[1].name)
self.evaluate(tf.compat.v1.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from
# scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((32)))
def testFunctionalConv2DNoReuse(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.conv2d(images, 32, [3, 3])
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
conv_layers.conv2d(images, 32, [3, 3])
self.assertEqual(len(tf.compat.v1.trainable_variables()), 4)
def testConstraints(self):
# Conv1D
k_constraint = lambda x: x / tf.reduce_sum(x)
b_constraint = lambda x: x / tf.reduce_max(x)
conv1d = conv_layers.Conv1D(
2, 3, kernel_constraint=k_constraint, bias_constraint=b_constraint
)
inputs = tf.random.uniform((5, 3, 5), seed=1)
conv1d(inputs)
self.assertEqual(conv1d.kernel_constraint, k_constraint)
self.assertEqual(conv1d.bias_constraint, b_constraint)
# Conv2D
k_constraint = lambda x: x / tf.reduce_sum(x)
b_constraint = lambda x: x / tf.reduce_max(x)
conv2d = conv_layers.Conv2D(
2, 3, kernel_constraint=k_constraint, bias_constraint=b_constraint
)
inputs = tf.random.uniform((5, 3, 3, 5), seed=1)
conv2d(inputs)
self.assertEqual(conv2d.kernel_constraint, k_constraint)
self.assertEqual(conv2d.bias_constraint, b_constraint)
# Conv3D
k_constraint = lambda x: x / tf.reduce_sum(x)
b_constraint = lambda x: x / tf.reduce_max(x)
conv3d = conv_layers.Conv3D(
2, 3, kernel_constraint=k_constraint, bias_constraint=b_constraint
)
inputs = tf.random.uniform((5, 3, 3, 3, 5), seed=1)
conv3d(inputs)
self.assertEqual(conv3d.kernel_constraint, k_constraint)
self.assertEqual(conv3d.bias_constraint, b_constraint)
def testConv3DChannelsFirst(self):
# Test case for GitHub issue 15655
with tf.Graph().as_default():
images = tf.compat.v1.placeholder(
dtype=tf.float32, shape=[None, 1, 32, 32, 32]
)
conv_layers.conv3d(images, 32, 9, data_format="channels_first")
class SeparableConv1DTest(tf.test.TestCase):
def testInvalidDataFormat(self):
length = 9
data = tf.random.uniform((5, length, 3), seed=1)
with self.assertRaisesRegex(ValueError, "data_format"):
conv_layers.separable_conv1d(data, 32, 3, data_format="invalid")
def testInvalidStrides(self):
length = 9
data = tf.random.uniform((5, length, 3), seed=1)
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.separable_conv1d(data, 32, 3, strides=(1, 2))
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.separable_conv1d(data, 32, 3, strides=None)
def testInvalidKernelSize(self):
length = 9
data = tf.random.uniform((5, length, 3), seed=1)
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.separable_conv1d(data, 32, (1, 2))
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.separable_conv1d(data, 32, None)
def testCreateSeparableConv1D(self):
length = 9
data = tf.random.uniform((5, length, 4))
layer = conv_layers.SeparableConv1D(32, 3, activation=tf.nn.relu)
output = layer(data)
if not tf.executing_eagerly():
self.assertEqual(output.op.name, "separable_conv1d/Relu")
self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
self.assertEqual(
layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1]
)
self.assertEqual(
layer.pointwise_kernel.get_shape().as_list(), [1, 4, 32]
)
self.assertEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv1DDepthMultiplier(self):
length = 9
data = tf.random.uniform((5, length, 4))
layer = conv_layers.SeparableConv1D(32, 3, depth_multiplier=2)
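        # depth_multiplier=2 doubles the depthwise output channels, so the
        # pointwise kernel consumes 4 * 2 = 8 input channels.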
output = layer(data)
self.assertEqual(output.get_shape().as_list(), [5, length - 2, 32])
self.assertEqual(
layer.depthwise_kernel.get_shape().as_list(), [3, 4, 2]
)
self.assertEqual(
layer.pointwise_kernel.get_shape().as_list(), [1, 8, 32]
)
self.assertEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv1DChannelsFirst(self):
with tf.Graph().as_default():
length = 9
data = tf.random.uniform((5, 4, length))
layer = conv_layers.SeparableConv1D(
32, 3, data_format="channels_first"
)
output = layer(data)
self.assertEqual(output.get_shape().as_list(), [5, 32, length - 2])
self.assertEqual(
layer.depthwise_kernel.get_shape().as_list(), [3, 4, 1]
)
self.assertEqual(
layer.pointwise_kernel.get_shape().as_list(), [1, 4, 32]
)
self.assertEqual(layer.bias.get_shape().as_list(), [32])
def testSeparableConv1DPaddingSame(self):
length = 9
data = tf.random.uniform((5, length, 32), seed=1)
layer = conv_layers.SeparableConv1D(64, length, padding="same")
output = layer(data)
self.assertEqual(output.get_shape().as_list(), [5, length, 64])
def testCreateSeparableConv1DWithStrides(self):
length = 10
data = tf.random.uniform((5, length, 3), seed=1)
layer = conv_layers.SeparableConv1D(32, 3, strides=2, padding="same")
output = layer(data)
self.assertEqual(output.get_shape().as_list(), [5, length // 2, 32])
def testCreateSeparableConv1DWithStridesChannelsFirst(self):
with tf.Graph().as_default():
data_format = "channels_first"
length = 10
data = tf.random.uniform((5, 3, length), seed=1)
layer = conv_layers.SeparableConv1D(
32, 3, strides=2, padding="same", data_format=data_format
)
output = layer(data)
self.assertEqual(output.get_shape().as_list(), [5, 32, length // 2])
def testFunctionalConv1DReuse(self):
with tf.Graph().as_default():
length = 10
data = tf.random.uniform((5, length, 3), seed=1)
conv_layers.separable_conv1d(data, 32, 3, name="sepconv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
conv_layers.separable_conv1d(
data, 32, 3, name="sepconv1", reuse=True
)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
def testFunctionalConv1DReuseFromScope(self):
with tf.Graph().as_default():
with tf.compat.v1.variable_scope("scope"):
length = 10
data = tf.random.uniform((5, length, 3), seed=1)
conv_layers.separable_conv1d(data, 32, 3, name="sepconv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
with tf.compat.v1.variable_scope("scope", reuse=True):
conv_layers.separable_conv1d(data, 32, 3, name="sepconv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
def testFunctionalConv1DNoReuse(self):
with tf.Graph().as_default():
length = 10
data = tf.random.uniform((5, length, 3), seed=1)
conv_layers.separable_conv1d(data, 32, 3)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
conv_layers.separable_conv1d(data, 32, 3)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 6)
def testSeparableConv1DDepthwiseRegularizer(self):
with tf.Graph().as_default():
length = 9
data = tf.random.uniform((5, length, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.SeparableConv1D(
32, 3, depthwise_regularizer=reg
)
layer(data)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testSeparableConv1DPointwiseRegularizer(self):
with tf.Graph().as_default():
length = 9
data = tf.random.uniform((5, length, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.SeparableConv1D(
32, 3, pointwise_regularizer=reg
)
layer(data)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testSeparableConv1DBiasRegularizer(self):
with tf.Graph().as_default():
length = 9
data = tf.random.uniform((5, length, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.SeparableConv1D(32, 3, bias_regularizer=reg)
layer(data)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testSeparableConv1DNoBias(self):
with tf.Graph().as_default():
length = 9
data = tf.random.uniform((5, length, 4))
layer = conv_layers.SeparableConv1D(
32, 3, activation=tf.nn.relu, use_bias=False
)
output = layer(data)
self.assertEqual(output.op.name, "separable_conv1d/Relu")
self.assertEqual(layer.bias, None)
def testConstraints(self):
d_constraint = lambda x: x / tf.reduce_sum(x)
p_constraint = lambda x: x / tf.reduce_sum(x)
b_constraint = lambda x: x / tf.reduce_max(x)
layer = conv_layers.SeparableConv1D(
2,
3,
depthwise_constraint=d_constraint,
pointwise_constraint=p_constraint,
bias_constraint=b_constraint,
)
inputs = tf.random.uniform((5, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.depthwise_constraint, d_constraint)
self.assertEqual(layer.pointwise_constraint, p_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class SeparableConv2DTest(tf.test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, "data_format"):
conv_layers.separable_conv2d(images, 32, 3, data_format="invalid")
def testInvalidStrides(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.separable_conv2d(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.separable_conv2d(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.separable_conv2d(images, 32, (1, 2, 3))
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.separable_conv2d(images, 32, None)
def testCreateSeparableConv2D(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(32, [3, 3], activation=tf.nn.relu)
output = layer(images)
if not tf.executing_eagerly():
self.assertEqual(output.op.name, "separable_conv2d/Relu")
self.assertListEqual(
output.get_shape().as_list(), [5, height - 2, width - 2, 32]
)
self.assertListEqual(
layer.depthwise_kernel.get_shape().as_list(), [3, 3, 4, 1]
)
self.assertListEqual(
layer.pointwise_kernel.get_shape().as_list(), [1, 1, 4, 32]
)
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv2DDepthMultiplier(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(32, [3, 3], depth_multiplier=2)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height - 2, width - 2, 32]
)
self.assertListEqual(
layer.depthwise_kernel.get_shape().as_list(), [3, 3, 4, 2]
)
self.assertListEqual(
layer.pointwise_kernel.get_shape().as_list(), [1, 1, 8, 32]
)
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv2DIntegerKernelSize(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(32, 3)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height - 2, width - 2, 32]
)
self.assertListEqual(
layer.depthwise_kernel.get_shape().as_list(), [3, 3, 4, 1]
)
self.assertListEqual(
layer.pointwise_kernel.get_shape().as_list(), [1, 1, 4, 32]
)
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateSeparableConv2DChannelsFirst(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, 4, height, width))
layer = conv_layers.SeparableConv2D(
32, [3, 3], data_format="channels_first"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, 32, height - 2, width - 2]
)
self.assertListEqual(
layer.depthwise_kernel.get_shape().as_list(), [3, 3, 4, 1]
)
self.assertListEqual(
layer.pointwise_kernel.get_shape().as_list(), [1, 1, 4, 32]
)
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testSeparableConv2DPaddingSame(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 32), seed=1)
layer = conv_layers.SeparableConv2D(
64, images.get_shape()[1:3], padding="same"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height, width, 64]
)
def testCreateSeparableConvWithStrides(self):
with tf.Graph().as_default():
height, width = 6, 8
# Test strides tuple
images = tf.random.uniform((5, height, width, 3), seed=1)
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 2), padding="same"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height / 2, width / 2, 32]
)
# Test strides integer
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=2, padding="same"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height / 2, width / 2, 32]
)
# Test unequal strides
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=(2, 1), padding="same"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height / 2, width, 32]
)
def testCreateSeparableConvWithStridesChannelsFirst(self):
with tf.Graph().as_default():
data_format = "channels_first"
height, width = 6, 8
# Test strides tuple
images = tf.random.uniform((5, 3, height, width), seed=1)
layer = conv_layers.SeparableConv2D(
32,
[3, 3],
strides=(2, 2),
padding="same",
data_format=data_format,
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, 32, height / 2, width / 2]
)
# Test strides integer
layer = conv_layers.SeparableConv2D(
32, [3, 3], strides=2, padding="same", data_format=data_format
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, 32, height / 2, width / 2]
)
# Test unequal strides
layer = conv_layers.SeparableConv2D(
32,
[3, 3],
strides=(2, 1),
padding="same",
data_format=data_format,
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, 32, height / 2, width]
)
def testFunctionalConv2DReuse(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3], name="sepconv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
conv_layers.separable_conv2d(
images, 32, [3, 3], name="sepconv1", reuse=True
)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
def testFunctionalConv2DReuseFromScope(self):
with tf.Graph().as_default():
with tf.compat.v1.variable_scope("scope"):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(
images, 32, [3, 3], name="sepconv1"
)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
with tf.compat.v1.variable_scope("scope", reuse=True):
conv_layers.separable_conv2d(
images, 32, [3, 3], name="sepconv1"
)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
def testFunctionalConv2DInitializerFromScope(self):
with tf.Graph().as_default(), self.cached_session():
with tf.compat.v1.variable_scope(
"scope", initializer=tf.compat.v1.ones_initializer()
):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(
images, 32, [3, 3], name="sepconv1"
)
weights = tf.compat.v1.trainable_variables()
# Check the names of weights in order.
self.assertTrue("depthwise_kernel" in weights[0].name)
self.assertTrue("pointwise_kernel" in weights[1].name)
self.assertTrue("bias" in weights[2].name)
self.evaluate(tf.compat.v1.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from
# scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 1)))
self.assertAllClose(weights[1], np.ones((1, 1, 3, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[2], np.zeros((32)))
def testFunctionalConv2DNoReuse(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.separable_conv2d(images, 32, [3, 3])
self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)
conv_layers.separable_conv2d(images, 32, [3, 3])
self.assertEqual(len(tf.compat.v1.trainable_variables()), 6)
def testSeparableConv2DDepthwiseRegularizer(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.SeparableConv2D(
32, [3, 3], depthwise_regularizer=reg
)
layer(images)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testSeparableConv2DPointwiseRegularizer(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.SeparableConv2D(
32, [3, 3], pointwise_regularizer=reg
)
layer(images)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testSeparableConv2DBiasRegularizer(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.SeparableConv2D(
32, [3, 3], bias_regularizer=reg
)
layer(images)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testSeparableConv2DNoBias(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.SeparableConv2D(
32, [3, 3], activation=tf.nn.relu, use_bias=False
)
output = layer(images)
self.assertEqual(output.op.name, "separable_conv2d/Relu")
self.assertListEqual(
output.get_shape().as_list(), [5, height - 2, width - 2, 32]
)
self.assertListEqual(
layer.depthwise_kernel.get_shape().as_list(), [3, 3, 4, 1]
)
self.assertListEqual(
layer.pointwise_kernel.get_shape().as_list(), [1, 1, 4, 32]
)
self.assertEqual(layer.bias, None)
def testConstraints(self):
d_constraint = lambda x: x / tf.reduce_sum(x)
p_constraint = lambda x: x / tf.reduce_sum(x)
b_constraint = lambda x: x / tf.reduce_max(x)
layer = conv_layers.SeparableConv2D(
2,
3,
depthwise_constraint=d_constraint,
pointwise_constraint=p_constraint,
bias_constraint=b_constraint,
)
inputs = tf.random.uniform((5, 3, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.depthwise_constraint, d_constraint)
self.assertEqual(layer.pointwise_constraint, p_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class Conv2DTransposeTest(tf.test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, "data_format"):
conv_layers.conv2d_transpose(images, 32, 3, data_format="invalid")
def testInvalidStrides(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.conv2d_transpose(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.conv2d_transpose(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.conv2d_transpose(images, 32, (1, 2, 3))
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.conv2d_transpose(images, 32, None)
def testCreateConv2DTranspose(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
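        # A transposed conv with a 3x3 kernel, stride 1, and valid padding
        # grows each spatial dimension by kernel_size - 1 = 2.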
layer = conv_layers.Conv2DTranspose(32, [3, 3], activation=tf.nn.relu)
output = layer(images)
if not tf.executing_eagerly():
self.assertEqual(output.op.name, "conv2d_transpose/Relu")
self.assertListEqual(
output.get_shape().as_list(), [5, height + 2, width + 2, 32]
)
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DTransposeFloat16(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4), dtype="float16")
output = conv_layers.conv2d_transpose(
images, 32, [3, 3], activation=tf.nn.relu
)
self.assertListEqual(
output.get_shape().as_list(), [5, height + 2, width + 2, 32]
)
def testCreateConv2DTransposeIntegerKernelSize(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(32, 3)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height + 2, width + 2, 32]
)
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateConv2DTransposeChannelsFirst(self):
height, width = 7, 9
images = tf.random.uniform((5, 4, height, width))
layer = conv_layers.Conv2DTranspose(
32, [3, 3], data_format="channels_first"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, 32, height + 2, width + 2]
)
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DTransposePaddingSame(self):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 32), seed=1)
layer = conv_layers.Conv2DTranspose(
64, images.get_shape()[1:3], padding="same"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height, width, 64]
)
def testCreateConv2DTransposeWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = tf.random.uniform((5, height, width, 3), seed=1)
layer = conv_layers.Conv2DTranspose(
32, [3, 3], strides=(2, 2), padding="same"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height * 2, width * 2, 32]
)
# Test strides integer
layer = conv_layers.Conv2DTranspose(
32, [3, 3], strides=2, padding="same"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height * 2, width * 2, 32]
)
# Test unequal strides
layer = conv_layers.Conv2DTranspose(
32, [3, 3], strides=(2, 1), padding="same"
)
output = layer(images)
self.assertListEqual(
output.get_shape().as_list(), [5, height * 2, width, 32]
)
def testConv2DTransposeKernelRegularizer(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.Conv2DTranspose(
32, [3, 3], kernel_regularizer=reg
)
layer(images)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testConv2DTransposeBiasRegularizer(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.Conv2DTranspose(
32, [3, 3], bias_regularizer=reg
)
layer(images)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testConv2DTransposeNoBias(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(
32, [3, 3], activation=tf.nn.relu, use_bias=False
)
output = layer(images)
self.assertEqual(output.op.name, "conv2d_transpose/Relu")
self.assertListEqual(
output.get_shape().as_list(), [5, height + 2, width + 2, 32]
)
self.assertListEqual(
layer.kernel.get_shape().as_list(), [3, 3, 32, 4]
)
self.assertEqual(layer.bias, None)
def testFunctionalConv2DTransposeReuse(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name="deconv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
conv_layers.conv2d_transpose(
images, 32, [3, 3], name="deconv1", reuse=True
)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
def testFunctionalConv2DTransposeReuseFromScope(self):
with tf.Graph().as_default():
with tf.compat.v1.variable_scope("scope"):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name="deconv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
with tf.compat.v1.variable_scope("scope", reuse=True):
conv_layers.conv2d_transpose(images, 32, [3, 3], name="deconv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
def testFunctionalConv2DTransposeInitializerFromScope(self):
with tf.Graph().as_default(), self.cached_session():
with tf.compat.v1.variable_scope(
"scope", initializer=tf.compat.v1.ones_initializer()
):
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name="deconv1")
weights = tf.compat.v1.trainable_variables()
# Check the names of weights in order.
self.assertTrue("kernel" in weights[0].name)
self.assertTrue("bias" in weights[1].name)
self.evaluate(tf.compat.v1.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from
# scope)
self.assertAllClose(weights[0], np.ones((3, 3, 32, 3)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((32)))
def testFunctionalConv2DTransposeNoReuse(self):
with tf.Graph().as_default():
height, width = 7, 9
images = tf.random.uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3])
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
conv_layers.conv2d_transpose(images, 32, [3, 3])
self.assertEqual(len(tf.compat.v1.trainable_variables()), 4)
def testConstraints(self):
k_constraint = lambda x: x / tf.reduce_sum(x)
b_constraint = lambda x: x / tf.reduce_max(x)
layer = conv_layers.Conv2DTranspose(
2, 3, kernel_constraint=k_constraint, bias_constraint=b_constraint
)
inputs = tf.random.uniform((5, 3, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.kernel_constraint, k_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class Conv3DTransposeTest(tf.test.TestCase):
def testInvalidDataFormat(self):
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegex(ValueError, "data_format"):
conv_layers.conv3d_transpose(volumes, 4, 3, data_format="invalid")
def testInvalidStrides(self):
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.conv3d_transpose(volumes, 4, 3, strides=(1, 2))
with self.assertRaisesRegex(ValueError, "strides"):
conv_layers.conv3d_transpose(volumes, 4, 3, strides=None)
def testInvalidKernelSize(self):
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.conv3d_transpose(volumes, 4, (1, 2))
with self.assertRaisesRegex(ValueError, "kernel_size"):
conv_layers.conv3d_transpose(volumes, 4, None)
def testCreateConv3DTranspose(self):
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], activation=tf.nn.relu)
output = layer(volumes)
if not tf.executing_eagerly():
self.assertEqual(output.op.name, "conv3d_transpose/Relu")
self.assertListEqual(
output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4],
)
self.assertListEqual(
layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32]
)
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testCreateConv3DTransposeIntegerKernelSize(self):
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(4, 3)
output = layer(volumes)
self.assertListEqual(
output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4],
)
self.assertListEqual(
layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32]
)
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testCreateConv3DTransposeChannelsFirst(self):
with tf.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, 32, depth, height, width))
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], data_format="channels_first"
)
output = layer(volumes)
self.assertListEqual(
output.get_shape().as_list(),
[5, 4, depth + 2, height + 2, width + 2],
)
self.assertListEqual(
layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32]
)
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testConv3DTransposePaddingSame(self):
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 64), seed=1)
layer = conv_layers.Conv3DTranspose(
32, volumes.get_shape()[1:4], padding="same"
)
output = layer(volumes)
self.assertListEqual(
output.get_shape().as_list(), [5, depth, height, width, 32]
)
def testCreateConv3DTransposeWithStrides(self):
depth, height, width = 4, 6, 8
# Test strides tuple.
volumes = tf.random.uniform((5, depth, height, width, 32), seed=1)
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], strides=(2, 2, 2), padding="same"
)
output = layer(volumes)
self.assertListEqual(
output.get_shape().as_list(),
[5, depth * 2, height * 2, width * 2, 4],
)
# Test strides integer.
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], strides=2, padding="same"
)
output = layer(volumes)
self.assertListEqual(
output.get_shape().as_list(),
[5, depth * 2, height * 2, width * 2, 4],
)
# Test unequal strides.
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], strides=(2, 1, 1), padding="same"
)
output = layer(volumes)
self.assertListEqual(
output.get_shape().as_list(), [5, depth * 2, height, width, 4]
)
def testConv3DTransposeKernelRegularizer(self):
with tf.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], kernel_regularizer=reg
)
layer(volumes)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testConv3DTransposeBiasRegularizer(self):
with tf.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32))
reg = lambda x: 0.1 * tf.reduce_sum(x)
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], bias_regularizer=reg
)
layer(volumes)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in layer.variables])
self.assertListEqual(
self.evaluate(layer.losses), self.evaluate(loss_keys)
)
def testConv3DTransposeNoBias(self):
with tf.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], activation=tf.nn.relu, use_bias=False
)
output = layer(volumes)
self.assertEqual(output.op.name, "conv3d_transpose/Relu")
self.assertListEqual(
output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4],
)
self.assertListEqual(
layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32]
)
self.assertEqual(layer.bias, None)
def testFunctionalConv3DTransposeReuse(self):
with tf.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name="deconv1")
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
conv_layers.conv3d_transpose(
volumes, 4, [3, 3, 3], name="deconv1", reuse=True
)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
def testFunctionalConv3DTransposeReuseFromScope(self):
with tf.Graph().as_default():
with tf.compat.v1.variable_scope("scope"):
depth, height, width = 5, 7, 9
volumes = tf.random.uniform(
(5, depth, height, width, 32), seed=1
)
conv_layers.conv3d_transpose(
volumes, 4, [3, 3, 3], name="deconv1"
)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
with tf.compat.v1.variable_scope("scope", reuse=True):
conv_layers.conv3d_transpose(
volumes, 4, [3, 3, 3], name="deconv1"
)
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
def testFunctionalConv3DTransposeInitializerFromScope(self):
with tf.Graph().as_default(), self.cached_session():
with tf.compat.v1.variable_scope(
"scope", initializer=tf.compat.v1.ones_initializer()
):
depth, height, width = 5, 7, 9
volumes = tf.random.uniform(
(5, depth, height, width, 32), seed=1
)
conv_layers.conv3d_transpose(
volumes, 4, [3, 3, 3], name="deconv1"
)
weights = tf.compat.v1.trainable_variables()
# Check the names of weights in order.
self.assertTrue("kernel" in weights[0].name)
self.assertTrue("bias" in weights[1].name)
self.evaluate(tf.compat.v1.global_variables_initializer())
weights = self.evaluate(weights)
# Check that the kernel weights got initialized to ones (from
# scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 4, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((4)))
def testFunctionalConv3DTransposeNoReuse(self):
with tf.Graph().as_default():
depth, height, width = 5, 7, 9
volumes = tf.random.uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
self.assertEqual(len(tf.compat.v1.trainable_variables()), 2)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
self.assertEqual(len(tf.compat.v1.trainable_variables()), 4)
def testConstraints(self):
k_constraint = lambda x: x / tf.reduce_sum(x)
b_constraint = lambda x: x / tf.reduce_max(x)
layer = conv_layers.Conv3DTranspose(
2, 3, kernel_constraint=k_constraint, bias_constraint=b_constraint
)
inputs = tf.random.uniform((5, 3, 3, 3, 5), seed=1)
layer(inputs)
self.assertEqual(layer.kernel_constraint, k_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/legacy_tf_layers/convolutional_test.py/0 | {
"file_path": "tf-keras/tf_keras/legacy_tf_layers/convolutional_test.py",
"repo_id": "tf-keras",
"token_count": 29699
} | 245 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for accuracy metrics."""
import tensorflow.compat.v2 as tf
from tf_keras import Model
from tf_keras import layers
from tf_keras import metrics
from tf_keras.testing_infra import test_combinations
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class AccuracyTest(tf.test.TestCase):
def test_accuracy(self):
acc_obj = metrics.Accuracy(name="my_acc")
# check config
self.assertEqual(acc_obj.name, "my_acc")
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state(
[[1], [2], [3], [4]], [[1], [2], [3], [4]]
)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
        self.assertEqual(result, 1)  # 4/4
# Check save and restore config
a2 = metrics.Accuracy.from_config(acc_obj.get_config())
self.assertEqual(a2.name, "my_acc")
self.assertTrue(a2.stateful)
self.assertEqual(len(a2.variables), 2)
self.assertEqual(a2.dtype, tf.float32)
# check with sample_weight
result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
def test_accuracy_ragged(self):
acc_obj = metrics.Accuracy(name="my_acc")
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = tf.ragged.constant([[1], [2], [3], [4]])
rt2 = tf.ragged.constant([[1], [2], [3], [4]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
        self.assertEqual(result, 1)  # 4/4
# check with sample_weight
rt1 = tf.ragged.constant([[2], [1]])
rt2 = tf.ragged.constant([[2], [0]])
sw_ragged = tf.ragged.constant([[0.5], [0.2]])
result_t = acc_obj(rt1, rt2, sample_weight=sw_ragged)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
def test_binary_accuracy(self):
acc_obj = metrics.BinaryAccuracy(name="my_acc")
# check config
self.assertEqual(acc_obj.name, "my_acc")
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [0]], [[1], [0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check y_pred squeeze
update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertAlmostEqual(result, 0.75, 2) # 3/4
# check y_true squeeze
result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4/6
# check with sample_weight
result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4.5/6.7
def test_binary_accuracy_ragged(self):
acc_obj = metrics.BinaryAccuracy(name="my_acc")
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = tf.ragged.constant([[1], [0]])
rt2 = tf.ragged.constant([[1], [0]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
        # The y_true squeeze is only supported for dense tensors; ragged
        # tensors of differing rank raise an error.
rt1 = tf.ragged.constant([[[1], [1]]])
rt2 = tf.ragged.constant([[1], [0]])
with self.assertRaises(ValueError):
result_t = acc_obj(rt1, rt2)
result = self.evaluate(result_t)
def test_binary_accuracy_threshold(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
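        # At threshold 0.7, the predictions binarize to [1, 0, 0, 1],
        # matching 2 of the 4 labels.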
result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
def test_binary_accuracy_threshold_ragged(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
rt1 = tf.ragged.constant([[1], [1], [0], [0]])
rt2 = tf.ragged.constant([[0.9], [0.6], [0.4], [0.8]])
result_t = acc_obj(rt1, rt2)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
def test_categorical_accuracy(self):
acc_obj = metrics.CategoricalAccuracy(name="my_acc")
# check config
self.assertEqual(acc_obj.name, "my_acc")
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state(
[[0, 0, 1], [0, 1, 0]], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]]
)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj(
[[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]],
)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_categorical_accuracy_ragged(self):
acc_obj = metrics.CategoricalAccuracy(name="my_acc")
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]])
rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]])
rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0, 0.95]])
sample_weight = tf.ragged.constant([[0.5], [0.2]])
with self.assertRaises(tf.errors.InvalidArgumentError):
result_t = acc_obj(rt1, rt2, sample_weight)
result = self.evaluate(result_t)
def test_sparse_categorical_accuracy(self):
acc_obj = metrics.SparseCategoricalAccuracy(name="my_acc")
# check config
self.assertEqual(acc_obj.name, "my_acc")
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state(
[[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]]
)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj(
[[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]]
)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_ragged(self):
acc_obj = metrics.SparseCategoricalAccuracy(name="my_acc")
# verify that correct value is returned
rt1 = tf.ragged.constant([[2], [1]])
rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
with self.assertRaises(tf.errors.InvalidArgumentError):
# sparse_categorical_accuracy is not supported for composite/ragged
# tensors.
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
def test_sparse_categorical_accuracy_mismatched_dims(self):
acc_obj = metrics.SparseCategoricalAccuracy(name="my_acc")
# check config
self.assertEqual(acc_obj.name, "my_acc")
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state(
[2, 1], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]]
)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj(
[2, 1], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]]
)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self):
with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess: # noqa: E501
acc_obj = metrics.SparseCategoricalAccuracy(name="my_acc")
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
t = tf.compat.v1.placeholder(tf.float32)
p = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
result_t = acc_obj(t, p, w)
result = sess.run(
result_t,
feed_dict=(
{
t: [2, 1],
p: [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
w: [[0.5], [0.2]],
}
),
)
            self.assertAlmostEqual(result, 0.71, 2)  # 0.5/0.7
def test_get_acc(self):
acc_fn = metrics.get("acc")
self.assertEqual(acc_fn, metrics.accuracy)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class TopKCategoricalAccuracyTest(tf.test.TestCase):
def test_config(self):
a_obj = metrics.TopKCategoricalAccuracy(name="topkca", dtype=tf.int32)
self.assertEqual(a_obj.name, "topkca")
self.assertEqual(a_obj._dtype, tf.int32)
a_obj2 = metrics.TopKCategoricalAccuracy.from_config(a_obj.get_config())
self.assertEqual(a_obj2.name, "topkca")
self.assertEqual(a_obj2._dtype, tf.int32)
def test_correctness(self):
a_obj = metrics.TopKCategoricalAccuracy()
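        # The default k is 5, so with only 3 classes every true label is
        # always in the top k.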
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([[0, 0, 1], [0, 1, 0]])
y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both the samples match
# With `k` < 5.
a_obj = metrics.TopKCategoricalAccuracy(k=1)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_true = tf.constant([[0, 0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0]])
y_pred = tf.constant(
[[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4], [0.05, 0.95, 0, 0, 0, 0, 0]]
)
a_obj = metrics.TopKCategoricalAccuracy(k=6)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
def test_weighted(self):
a_obj = metrics.TopKCategoricalAccuracy(k=2)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
sample_weight = tf.constant((1.0, 0.0, 1.0))
result = a_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class SparseTopKCategoricalAccuracyTest(tf.test.TestCase):
def test_config(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(
name="stopkca", dtype=tf.int32
)
self.assertEqual(a_obj.name, "stopkca")
self.assertEqual(a_obj._dtype, tf.int32)
a_obj2 = metrics.SparseTopKCategoricalAccuracy.from_config(
a_obj.get_config()
)
self.assertEqual(a_obj2.name, "stopkca")
self.assertEqual(a_obj2._dtype, tf.int32)
def test_correctness(self):
a_obj = metrics.SparseTopKCategoricalAccuracy()
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([2, 1])
y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both the samples match
# With `k` < 5.
a_obj = metrics.SparseTopKCategoricalAccuracy(k=1)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_pred = tf.constant(
[[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4], [0.05, 0.95, 0, 0, 0, 0, 0]]
)
a_obj = metrics.SparseTopKCategoricalAccuracy(k=6)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
def test_weighted(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(k=2)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([1, 0, 2])
y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
sample_weight = tf.constant((1.0, 0.0, 1.0))
result = a_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
def test_sparse_top_k_categorical_accuracy_mismatched_dims_dynamic(self):
if not tf.compat.v1.executing_eagerly():
            # Test will fail in v1 graph mode since the metric is not a
            # normal layer; it aggregates the output by the batch dim,
            # which fails in v1 code.
self.skipTest("v2 eager mode only")
class AccLayer(layers.Layer):
def build(self, _):
self.acc = metrics.SparseTopKCategoricalAccuracy(k=1)
def call(self, y_true, y_pred):
return self.acc(y_true, y_pred)
label = layers.Input(shape=[1])
predict = layers.Input(shape=[3])
metric_result = AccLayer()(label, predict)
model = Model([label, predict], metric_result)
result = model.predict(
[
tf.constant([[2], [1]]),
tf.constant([[0.1, 0.1, 0.8], [0.05, 0, 0.95]]),
],
steps=1,
)
self.assertAllClose(result, 0.5)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/metrics/accuracy_metrics_test.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/accuracy_metrics_test.py",
"repo_id": "tf-keras",
"token_count": 8039
} | 246 |
# Copyright 2023 The TF-Keras Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras PyMetric classes."""
import tensorflow.compat.v2 as tf
from tf_keras import metrics
from tf_keras.testing_infra import test_combinations
class KTrimmedMean(metrics.PyMetric):
"""An example PyMetric which computes the trimmed mean of `y_pred`."""
def __init__(self, k=0.1, name="k_trimmed_mean", **kwargs):
super().__init__(name=name, **kwargs)
self.k = k
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = y_true.numpy()
if sample_weight is not None:
y_true *= sample_weight.numpy()
        # Insert y_true into our values list, keeping it sorted in
        # descending order.
        index = len(self.values)
for i, element in enumerate(self.values):
if y_true > element:
index = i
break
self.values = self.values[:index] + [y_true] + self.values[index:]
def reset_state(self):
self.values = []
def result(self):
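        # self.values is sorted in descending order; trim `k` values from
        # each end and average the rest.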
k = int(self.k * len(self.values))
        return tf.reduce_mean(self.values[k : len(self.values) - k])
def get_config(self):
config = super().get_config()
config.update({"k": self.k})
return config
class Mean(metrics.PyMetric):
"""An example PyMetric which computes the mean of `y_pred`."""
def __init__(self, name="mean", **kwargs):
super().__init__(name=name, **kwargs)
def update_state(self, y_true, y_pred, sample_weight=None):
self.values.append(y_true)
def reset_state(self):
self.values = []
def result(self):
return tf.reduce_mean(tf.concat(self.values, axis=0))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class PyMetricsTest(tf.test.TestCase):
def test_config(self):
ktm_object = KTrimmedMean(name="ktm", k=0.2, dtype=tf.float16)
self.assertEqual(ktm_object.name, "ktm")
self.assertEqual(ktm_object.k, 0.2)
self.assertEqual(ktm_object.dtype, tf.float16)
# Check save and restore config
ktm_object2 = KTrimmedMean.from_config(ktm_object.get_config())
self.assertEqual(ktm_object2.name, "ktm")
        self.assertEqual(ktm_object2.k, 0.2)
self.assertEqual(ktm_object2.dtype, tf.float16)
def test_unweighted(self):
ktm_object = KTrimmedMean(k=0.2)
for y_true in [-100, -10, 1, 2, 3, 4, 5, 6, 14, 9001]:
self.evaluate(
ktm_object.update_state(
tf.constant(y_true, dtype=tf.float32),
y_pred=tf.constant(0, dtype=tf.float32),
)
)
result = ktm_object.result()
self.assertEqual(3.5, self.evaluate(result))
def test_weighted(self):
ktm_object = KTrimmedMean(k=0.2)
for y_true in [-100, -10, 1, 2, 3, 4, 5, 6, 14, 9001]:
self.evaluate(
ktm_object.update_state(
tf.constant(y_true, dtype=tf.float32),
y_pred=tf.constant(0, dtype=tf.float32),
sample_weight=tf.constant(2, dtype=tf.float32),
)
)
result = ktm_object.result()
self.assertEqual(7, self.evaluate(result))
def test_state_stored_on_cpu_host(self):
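        # PyMetric keeps its state in Python on the host CPU, even when
        # updates are computed on a GPU.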
with tf.device("/device:GPU:0"):
mean_obj = Mean()
y_true_0 = tf.constant([0, 1, 2], dtype=tf.float32)
y_true_1 = tf.constant([3, 4], dtype=tf.float32)
self.evaluate(
mean_obj.update_state(
y_true=y_true_0, y_pred=tf.constant(0, dtype=tf.float32)
)
)
self.evaluate(
mean_obj.update_state(
y_true=y_true_1, y_pred=tf.constant(0, dtype=tf.float32)
)
)
self.assertEqual(2, self.evaluate(mean_obj.result()))
if not tf.executing_eagerly():
self.assertEndsWith(y_true_0.device, "/device:GPU:0")
self.assertEndsWith(y_true_1.device, "/device:GPU:0")
self.assertEndsWith(mean_obj.values[0].device, "/device:CPU:0")
self.assertEndsWith(mean_obj.values[1].device, "/device:CPU:0")
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/metrics/py_metric_test.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/py_metric_test.py",
"repo_id": "tf-keras",
"token_count": 2295
} | 247 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests Policies."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.engine import base_layer_utils
from tf_keras.mixed_precision import device_compatibility_check
from tf_keras.mixed_precision import policy as mp_policy
from tf_keras.optimizers.legacy import gradient_descent
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# isort: off
from tensorflow.python.platform import tf_logging
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class PolicyTest(tf.test.TestCase, parameterized.TestCase):
"""Tests Policies."""
@test_utils.enable_v2_dtype_behavior
def test_dtype_attributes(self):
for dtype in "int32", "bool", "float16", "float32":
policy = mp_policy.Policy(dtype)
self.assertEqual(policy.name, dtype)
self.assertEqual(policy.compute_dtype, dtype)
self.assertEqual(policy.variable_dtype, dtype)
for dtype in "float16", "bfloat16":
policy = mp_policy.Policy("mixed_" + dtype)
self.assertEqual(policy.name, "mixed_" + dtype)
self.assertEqual(policy.compute_dtype, dtype)
self.assertEqual(policy.variable_dtype, "float32")
policy = mp_policy.Policy("_infer")
self.assertEqual(policy.compute_dtype, None)
self.assertEqual(policy.variable_dtype, None)
@test_utils.enable_v2_dtype_behavior
def test_repr(self):
# Test Policy repr
for policy in (
"float32",
"int8",
"mixed_float16",
"mixed_bfloat16",
"_infer",
):
self.assertEqual(
repr(mp_policy.Policy(policy)), f'<Policy "{policy}">'
)
@test_utils.enable_v2_dtype_behavior
def test_policy_errors(self):
# Test passing invalid strings
with self.assertRaisesRegex(
ValueError, "Cannot convert value abc to a mixed precision Policy."
):
mp_policy.Policy("abc")
# Test passing a DType
with self.assertRaisesRegex(
TypeError, "'name' must be a string, not a DType. "
):
mp_policy.Policy(tf.float16)
# Test passing a non-DType invalid type
with self.assertRaisesRegex(
TypeError, "'name' must be a string, but got: 5"
):
mp_policy.Policy(5)
# Test passing a now-removed policy ending in float32_vars
with self.assertRaisesRegex(
ValueError,
"Policies ending in '_float32_vars' have been removed "
"from TensorFlow. Please use the 'mixed_float16' or "
"'mixed_bfloat16' policy instead. Got policy name: "
"'infer_float32_vars'",
):
mp_policy.Policy("infer_float32_vars")
with self.assertRaisesRegex(
ValueError,
"Policies ending in '_float32_vars' have been removed "
"from TensorFlow. Please use the 'mixed_float16' policy "
"instead. Got policy name: 'float16_with_float32_vars'",
):
mp_policy.Policy("float16_with_float32_vars")
with self.assertRaisesRegex(
ValueError,
"Policies ending in '_float32_vars' have been removed "
"from TensorFlow. Please use the 'mixed_bfloat16' policy "
"instead. Got policy name: 'bfloat16_with_float32_vars'",
):
mp_policy.Policy("bfloat16_with_float32_vars")
with self.assertRaisesRegex(
ValueError,
"Policies ending in '_float32_vars' have been removed "
"from TensorFlow. Got policy name: "
"'int8_with_float32_vars'",
):
mp_policy.Policy("int8_with_float32_vars")
@test_utils.enable_v2_dtype_behavior
def test_global_policy(self):
if base_layer_utils.v2_dtype_behavior_enabled():
default_policy = "float32"
else:
default_policy = "_infer"
self.assertEqual(mp_policy.global_policy().name, default_policy)
try:
mp_policy.set_global_policy("mixed_float16")
self.assertEqual(mp_policy.global_policy().name, "mixed_float16")
# Policies are not associated with a graph
with tf.Graph().as_default():
self.assertEqual(
mp_policy.global_policy().name, "mixed_float16"
)
mp_policy.set_global_policy("_infer")
self.assertEqual(mp_policy.global_policy().name, "_infer")
policy = mp_policy.Policy("mixed_bfloat16")
mp_policy.set_global_policy(policy)
self.assertIs(mp_policy.global_policy(), policy)
finally:
mp_policy.set_global_policy(None)
@test_utils.enable_v2_dtype_behavior
def test_global_policy_dtype_error(self):
with self.assertRaisesRegex(
ValueError,
"set_global_policy can only be used to set the global policy to "
'floating-point policies, such as "float32" and "mixed_float16", '
"but got policy: int32",
):
mp_policy.set_global_policy("int32")
with self.assertRaisesRegex(
ValueError,
"set_global_policy can only be used to set the global policy to "
'floating-point policies, such as "float32" and "mixed_float16", '
"but got policy: complex64",
):
mp_policy.set_global_policy(mp_policy.Policy("complex64"))
@test_utils.enable_v2_dtype_behavior
def test_device_compatibility_warning(self):
if not tf.executing_eagerly():
self.skipTest("Run in eager mode only.")
device_compatibility_check._logged_compatibility_check = False
with tf.compat.v1.test.mock.patch.object(
tf_logging, "warning"
) as mock_warn:
mp_policy.Policy("mixed_float16")
if tf.config.list_physical_devices("GPU"):
mock_warn.assert_not_called()
else:
self.assertRegex(
mock_warn.call_args[0][0],
r"Mixed precision compatibility check \(mixed_float16\): "
r"WARNING.*",
)
if tf.config.list_physical_devices("GPU"):
# Assert message is only logged once
with tf.compat.v1.test.mock.patch.object(
tf_logging, "warning"
) as mock_warn:
mp_policy.Policy("mixed_float16")
mock_warn.assert_not_called()
@test_utils.enable_v2_dtype_behavior
def test_policy_scope(self):
if base_layer_utils.v2_dtype_behavior_enabled():
default_policy = "float32"
else:
default_policy = "_infer"
with mp_policy.policy_scope("mixed_float16"):
self.assertEqual(mp_policy.global_policy().name, "mixed_float16")
with mp_policy.policy_scope("_infer"):
self.assertEqual(mp_policy.global_policy().name, "_infer")
self.assertEqual(mp_policy.global_policy().name, "mixed_float16")
self.assertEqual(mp_policy.global_policy().name, default_policy)
@test_utils.enable_v2_dtype_behavior
def test_config(self):
for policy in (
mp_policy.Policy("float16"),
mp_policy.Policy("float32"),
mp_policy.Policy("int16"),
mp_policy.Policy("mixed_float16"),
mp_policy.Policy("mixed_bfloat16"),
mp_policy.Policy("_infer"),
):
config = policy.get_config()
new_policy = mp_policy.Policy.from_config(config)
# Comparing strings is the easiest way to ensure the policies are
# the same, as policy does not override the == operator.
self.assertEqual(str(policy), str(new_policy))
@test_utils.enable_v2_dtype_behavior
def test_serialization(self):
# Test policies that are equivalent to a single dtype
for policy_name in "float16", "float32", "int8", "string", "bool":
policy = mp_policy.Policy(policy_name)
config = mp_policy.serialize(policy)
self.assertEqual(config, policy_name)
new_policy = mp_policy.deserialize(config)
self.assertEqual(str(policy), str(new_policy))
# Test "_infer" policy
policy = mp_policy.Policy("_infer")
config = mp_policy.serialize(policy)
self.assertIsNone(config)
new_policy = mp_policy.deserialize(config)
self.assertEqual(str(policy), str(new_policy))
class MyPolicy(mp_policy.Policy):
pass
# Test policies that are not equivalent to a single dtype
for policy in (
mp_policy.Policy("mixed_float16"),
mp_policy.Policy("mixed_bfloat16"),
MyPolicy("float32"),
):
config = mp_policy.serialize(policy)
if tf.__internal__.tf2.enabled():
if policy.name == "float32":
self.assertEqual(
config,
{
"module": None,
"class_name": policy.__class__.__name__,
"config": {"name": policy.name},
"registered_name": "MyPolicy",
},
)
else:
self.assertEqual(
config,
{
"module": "keras.mixed_precision",
"class_name": policy.__class__.__name__,
"config": {"name": policy.name},
"registered_name": None,
},
)
else:
self.assertEqual(
config,
{
"class_name": policy.__class__.__name__,
"config": {"name": policy.name},
},
)
new_policy = mp_policy.deserialize(
config, custom_objects={"MyPolicy": MyPolicy}
)
self.assertEqual(str(policy), str(new_policy))
@test_utils.enable_v2_dtype_behavior
def test_error_if_graph_rewrite_enabled(self):
try:
tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
gradient_descent.SGD(1.0)
)
with self.assertRaisesRegex(
ValueError,
'cannot be set to "mixed_float16", .* the mixed '
"precision graph rewrite has already been enabled",
):
mp_policy.set_global_policy("mixed_float16")
with mp_policy.policy_scope("float64"):
pass # Non-mixed policies are allowed
finally:
tf.compat.v1.mixed_precision.disable_mixed_precision_graph_rewrite()
@test_utils.disable_v2_dtype_behavior
def test_v1_dtype_behavior(self):
# Setting global policies are not allowed with V1 dtype behavior
with self.assertRaisesRegex(
ValueError, "global policy can only be set in TensorFlow 2"
):
with mp_policy.policy_scope(mp_policy.Policy("_infer")):
pass
with self.assertRaisesRegex(
ValueError, "global policy can only be set in TensorFlow 2"
):
with mp_policy.policy_scope(mp_policy.Policy("float32")):
pass
with self.assertRaisesRegex(
ValueError, "global policy can only be set in TensorFlow 2"
):
with mp_policy.policy_scope(mp_policy.Policy("mixed_float16")):
pass
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/mixed_precision/policy_test.py/0 | {
"file_path": "tf-keras/tf_keras/mixed_precision/policy_test.py",
"repo_id": "tf-keras",
"token_count": 6032
} | 248 |
"""Tests for sharpness_aware_minimization."""
import os
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.models import sharpness_aware_minimization
from tf_keras.optimizers import adam
from tf_keras.testing_infra import test_utils
ds_combinations = tf.__internal__.distribute.combinations
STRATEGIES = [
ds_combinations.one_device_strategy,
ds_combinations.mirrored_strategy_with_two_gpus,
ds_combinations.tpu_strategy,
ds_combinations.parameter_server_strategy_3worker_2ps_1gpu,
ds_combinations.multi_worker_mirrored_2x1_cpu,
ds_combinations.multi_worker_mirrored_2x2_gpu,
ds_combinations.central_storage_strategy_with_two_gpus,
]
@test_utils.run_v2_only
class SharpnessAwareMinimizationTest(tf.test.TestCase, parameterized.TestCase):
def test_sam_model_call(self):
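        # Without a training step there is no weight perturbation, so the
        # SAM wrapper's forward pass must match the wrapped model's.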
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
]
)
sam_model = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
data = tf.random.uniform([2, 2])
self.assertAllClose(model(data), sam_model(data))
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(strategy=STRATEGIES)
)
def test_sam_model_fit(self, strategy):
with strategy.scope():
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
keras.layers.Dense(1),
]
)
sam_model = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
data = tf.random.uniform([2, 2])
label = data[:, 0] > 0.5
sam_model.compile(
optimizer=adam.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
sam_model.fit(data, label, steps_per_epoch=1)
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(strategy=STRATEGIES)
)
def test_sam_model_fit_with_sub_batch(self, strategy):
with strategy.scope():
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
keras.layers.Dense(1),
]
)
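            # num_batch_splits=4 splits each batch into sub-batches when
            # computing the SAM gradient perturbation.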
sam_model = sharpness_aware_minimization.SharpnessAwareMinimization(
model, num_batch_splits=4
)
data = tf.random.uniform([48, 2])
label = data[:, 0] > 0.5
sam_model.compile(
optimizer=adam.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
sam_model.fit(data, label, steps_per_epoch=1)
def test_save_sam(self):
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
keras.layers.Dense(1),
]
)
sam_model = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
data = tf.random.uniform([1, 2, 2])
label = data[:, 0] > 0.5
sam_model.compile(
optimizer=adam.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
sam_model.fit(data, label)
with self.subTest("savedmodel"):
path = os.path.join(self.get_temp_dir(), "model")
sam_model.save(path)
loaded_sam_model = keras.models.load_model(path)
loaded_sam_model.load_weights(path)
self.assertAllClose(sam_model(data), loaded_sam_model(data))
with self.subTest("keras_v3"):
path = os.path.join(self.get_temp_dir(), "model.keras")
sam_model.save(path)
loaded_sam_model = keras.models.load_model(path)
loaded_sam_model.load_weights(path)
self.assertAllClose(sam_model(data), loaded_sam_model(data))
def test_checkpoint_sam(self):
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
keras.layers.Dense(1),
]
)
sam_model_1 = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
sam_model_2 = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
data = tf.random.uniform([1, 2, 2])
label = data[:, 0] > 0.5
sam_model_1.compile(
optimizer=adam.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
sam_model_1.fit(data, label)
checkpoint = tf.train.Checkpoint(sam_model_1)
checkpoint2 = tf.train.Checkpoint(sam_model_2)
temp_dir = self.get_temp_dir()
save_path = checkpoint.save(temp_dir)
checkpoint2.restore(save_path)
self.assertAllClose(sam_model_1(data), sam_model_2(data))
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main()
| tf-keras/tf_keras/models/sharpness_aware_minimization_test.py/0 | {
"file_path": "tf-keras/tf_keras/models/sharpness_aware_minimization_test.py",
"repo_id": "tf-keras",
"token_count": 2691
} | 249 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
import copy
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.optimizers.legacy import adagrad
from tf_keras.optimizers.schedules import learning_rate_schedule
from tf_keras.testing_infra import test_combinations
_DATA_TYPES = [tf.half, tf.float32, tf.float64, tf.complex64, tf.complex128]
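# Reference implementation of the dense Adagrad update:
#   accum_t = accum + g_t ** 2
#   param_t = param - lr * g_t / (sqrt(accum_t) + epsilon)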
def adagrad_update_numpy(param, accum, g_t, lr=0.001, epsilon=1e-7):
accum_t = accum + g_t * g_t
param_t = param - lr * g_t / (np.sqrt(accum_t) + epsilon)
return param_t, accum_t
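# Reference implementation of the sparse Adagrad update. Gradients for
# repeated indices are accumulated first; each occurrence then applies its
# own parameter update using the final accumulator.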
def sparse_adagrad_update_numpy(
param, accum, gindexs, gvalues, lr=0.001, epsilon=1e-7
):
accum_t = copy.deepcopy(accum)
param_t = copy.deepcopy(param)
    # The first loop accumulates gradients for repeated indices, if any.
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
accum_t[gindex] = accum_t[gindex] + gvalue * gvalue
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
param_t[gindex] = param_t[gindex] - lr * gvalue / (
np.sqrt(accum_t[gindex]) + epsilon
)
return param_t, accum_t
class AdagradOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def doTestBasic(self, use_callable_params=False):
for dtype in _DATA_TYPES:
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = lambda: 3.0
if not use_callable_params:
learning_rate = learning_rate()
ada_opt = adagrad.Adagrad(learning_rate)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not tf.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not tf.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, accum0_np = adagrad_update_numpy(
var0_np, accum0_np, grads0_np, 3.0
)
var1_np, accum1_np = adagrad_update_numpy(
var1_np, accum1_np, grads1_np, 3.0
)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testBasic(self):
self.doTestBasic()
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testBasicCallableParams(self):
self.doTestBasic(use_callable_params=True)
def testBasicWithLearningRateDecay(self):
for dtype in _DATA_TYPES:
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 3.0
decay = 0.5
ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not tf.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for t in range(3):
if not tf.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr_np = learning_rate / (1 + decay * t)
var0_np, accum0_np = adagrad_update_numpy(
var0_np, accum0_np, grads0_np, lr_np
)
var1_np, accum1_np = adagrad_update_numpy(
var1_np, accum1_np, grads1_np, lr_np
)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLargeEpsilon(self):
var0_np = np.array([1.0, 2.0])
var1_np = np.array([3.0, 4.0])
grads0_np = np.array([0.1, 0.1])
grads1_np = np.array([0.01, 0.01])
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.0)
accum0_np = np.array([0.1, 0.1])
accum1_np = np.array([0.1, 0.1])
if not tf.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not tf.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, accum0_np = adagrad_update_numpy(
var0_np, accum0_np, grads0_np, 3.0, 1.0
)
var1_np, accum1_np = adagrad_update_numpy(
var1_np, accum1_np, grads1_np, 3.0, 1.0
)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLearningRateInverseTimeDecay(self):
for dtype in _DATA_TYPES:
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 3.0
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay
)
ada_opt = adagrad.Adagrad(lr_schedule)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not tf.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for t in range(3):
if not tf.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr_np = learning_rate / (1 + decay * t)
var0_np, accum0_np = adagrad_update_numpy(
var0_np, accum0_np, grads0_np, lr_np
)
var1_np, accum1_np = adagrad_update_numpy(
var1_np, accum1_np, grads1_np, lr_np
)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testMinimizeSparseResourceVariable(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0 = tf.Variable([[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = tf.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = tf.matmul(
tf.compat.v1.nn.embedding_lookup([var0], [0]), x
)
return pred * pred
sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0])
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType(
[[1.0, 2.0], [3.0, 4.0]], self.evaluate(var0)
)
                # Run 1 step of adagrad
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1], [3, 4]], self.evaluate(var0), atol=0.01
)
def testTensorLearningRate(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = tf.constant(3.0)
ada_opt = adagrad.Adagrad(learning_rate)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
# Run 3 steps of adagrad
for _ in range(3):
self.evaluate(ada_update)
var0_np, accum0_np = adagrad_update_numpy(
var0_np, accum0_np, grads0_np, learning_rate
)
var1_np, accum1_np = adagrad_update_numpy(
var1_np, accum1_np, grads1_np, learning_rate
)
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testSparseBasic(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array(
[0.01, 0, 0.01], dtype=dtype.as_numpy_dtype
)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np[grads0_np_indices]),
tf.constant(grads0_np_indices),
tf.constant([3]),
)
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = tf.IndexedSlices(
tf.constant(grads1_np[grads1_np_indices]),
tf.constant(grads1_np_indices),
tf.constant([3]),
)
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
accum0_np = np.array(
[0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype
)
accum1_np = np.array(
[0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype
)
                # Run 3 steps of adagrad
for _ in range(3):
self.evaluate(ada_update)
var0_np, accum0_np = sparse_adagrad_update_numpy(
var0_np,
accum0_np,
grads0_np_indices,
grads0_np[grads0_np_indices],
learning_rate,
)
var1_np, accum1_np = sparse_adagrad_update_numpy(
var1_np,
accum1_np,
grads1_np_indices,
grads1_np[grads1_np_indices],
learning_rate,
)
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testSparseSingleVarDim(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0_np = np.array([1.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np[grads0_np_indices]),
tf.constant(grads0_np_indices),
tf.constant([3]),
)
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.0)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0], self.evaluate(var0))
accum0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
                # Run 3 steps of adagrad
for _ in range(3):
self.evaluate(ada_update)
var0_np, accum0_np = sparse_adagrad_update_numpy(
var0_np,
accum0_np,
grads0_np_indices,
grads0_np[grads0_np_indices],
learning_rate,
epsilon=1.0,
)
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
def testSparseRepeatedIndices(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
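        # Two updates of 0.1 at the same index must match a single
        # aggregated update of 0.2.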
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = tf.Variable(var_np, dtype=dtype)
aggregated_update_var = tf.Variable(var_np, dtype=dtype)
grad_repeated_index = tf.IndexedSlices(
tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
tf.constant([1, 1]),
tf.constant([2, 1]),
)
grad_aggregated = tf.IndexedSlices(
tf.constant([0.2], shape=[1, 1], dtype=dtype),
tf.constant([1]),
tf.constant([2, 1]),
)
repeated_update = adagrad.Adagrad(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)]
)
aggregated_update = adagrad.Adagrad(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)]
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var),
)
for _ in range(3):
self.evaluate(repeated_update)
self.evaluate(aggregated_update)
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var),
)
def testSparseRepeatedIndicesByEmbeddingLookUp(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var_repeated = tf.Variable([1.0, 2.0], dtype=dtype)
loss_repeated = lambda: tf.reduce_sum(
tf.compat.v1.nn.embedding_lookup(var_repeated, [0, 0])
)
var_aggregated = tf.Variable([1.0, 2.0], dtype=dtype)
loss_aggregated = lambda: 2 * tf.reduce_sum(
tf.compat.v1.nn.embedding_lookup(var_aggregated, [0])
)
update_op_repeated = adagrad.Adagrad(2.0).minimize(
loss_repeated, var_list=[var_repeated]
)
update_op_aggregated = adagrad.Adagrad(2.0).minimize(
loss_aggregated, var_list=[var_aggregated]
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated), self.evaluate(var_aggregated)
)
for _ in range(3):
self.evaluate(update_op_repeated)
self.evaluate(update_op_aggregated)
self.assertAllCloseAccordingToType(
self.evaluate(var_repeated),
self.evaluate(var_aggregated),
)
def testSparseStability(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
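        # Checks that float16 sparse updates with tiny gradients stay
        # numerically stable.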
with tf.Graph().as_default():
for dtype in [tf.half]:
shape = [1, 6]
var0_np = np.array(
[
[
0.00872496,
-0.106952,
0.110467,
0.226505,
-0.0147257,
-0.0105945,
]
],
dtype=dtype.as_numpy_dtype,
)
var0 = tf.Variable(var0_np)
grads0_np = np.array(
[
[
-5.91278e-05,
5.31673e-05,
-2.5779e-06,
4.29153e-05,
-8.4877e-05,
-9.48906e-05,
]
],
dtype=dtype.as_numpy_dtype,
)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np), tf.constant([0]), tf.constant(shape)
)
ada_opt = adagrad.Adagrad(1.0)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
slot0 = ada_opt.get_slot(var0, "accumulator")
init = tf.compat.v1.global_variables_initializer()
for _ in range(100):
self.evaluate(init)
self.evaluate(ada_update)
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]),
self.evaluate(slot0),
)
self.assertAllCloseAccordingToType(
np.array(
[
[
0.00891194,
-0.10712013,
0.11047515,
0.22636929,
-0.0144573,
-0.01029443,
]
]
),
self.evaluate(var0),
)
def testSharing(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.shape, var0.shape)
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.shape, var1.shape)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Mix the first and the second adagrad for 3 steps.
self.evaluate(ada_update1)
self.evaluate(ada_update2)
self.evaluate(ada_update1)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
for _ in range(3):
var0_np, accum0_np = adagrad_update_numpy(
var0_np, accum0_np, grads0_np, learning_rate
)
var1_np, accum1_np = adagrad_update_numpy(
var1_np, accum1_np, grads1_np, learning_rate
)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testConstructAdagradWithLR(self):
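        # The legacy `lr` alias is still accepted and overrides
        # `learning_rate` when both are passed.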
opt = adagrad.Adagrad(lr=1.0)
opt_2 = adagrad.Adagrad(learning_rate=0.1, lr=1.0)
opt_3 = adagrad.Adagrad(learning_rate=0.1)
self.assertIsInstance(opt.lr, tf.Variable)
self.assertIsInstance(opt_2.lr, tf.Variable)
self.assertIsInstance(opt_3.lr, tf.Variable)
self.evaluate(tf.compat.v1.global_variables_initializer())
        self.assertAllClose(self.evaluate(opt.lr), 1.0)
        self.assertAllClose(self.evaluate(opt_2.lr), 1.0)
        self.assertAllClose(self.evaluate(opt_3.lr), 0.1)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/optimizers/legacy/adagrad_test.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/adagrad_test.py",
"repo_id": "tf-keras",
"token_count": 15609
} | 250 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for learning rate decay."""
import math
import tensorflow.compat.v2 as tf
from tf_keras.testing_infra import test_combinations
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class LRDecayTest(test_combinations.TestCase):
def testContinuous(self):
self.evaluate(tf.compat.v1.global_variables_initializer())
step = 5
decayed_lr = tf.compat.v1.train.exponential_decay(0.05, step, 10, 0.96)
expected = 0.05 * 0.96 ** (5.0 / 10.0)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testStaircase(self):
if tf.executing_eagerly():
step = tf.Variable(0)
self.evaluate(tf.compat.v1.global_variables_initializer())
decayed_lr = tf.compat.v1.train.exponential_decay(
0.1, step, 3, 0.96, staircase=True
)
# No change to learning rate due to staircase
expected = 0.1
self.evaluate(step.assign(1))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
expected = 0.1
self.evaluate(step.assign(2))
        self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
# Decayed learning rate
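        # staircase=True floors step / decay_steps, so the exponent at
        # step 100 with decay_steps=3 is 100 // 3 = 33.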
expected = 0.1 * 0.96 ** (100 // 3)
self.evaluate(step.assign(100))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testVariables(self):
step = tf.Variable(1)
decayed_lr = tf.compat.v1.train.exponential_decay(
0.1, step, 3, 0.96, staircase=True
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# No change to learning rate
assign_1 = step.assign(1)
if not tf.executing_eagerly():
self.evaluate(assign_1.op)
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
assign_2 = step.assign(2)
if not tf.executing_eagerly():
self.evaluate(assign_2.op)
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
# Decayed learning rate
assign_100 = step.assign(100)
if not tf.executing_eagerly():
self.evaluate(assign_100.op)
expected = 0.1 * 0.96 ** (100 // 3)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testPiecewiseConstant(self):
x = tf.Variable(-999)
decayed_lr = tf.compat.v1.train.piecewise_constant(
x, [100, 110, 120], [1.0, 0.1, 0.01, 0.001]
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6)
self.evaluate(x.assign(100))
self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6)
self.evaluate(x.assign(105))
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
self.evaluate(x.assign(110))
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
self.evaluate(x.assign(120))
self.assertAllClose(self.evaluate(decayed_lr), 0.01, 1e-6)
self.evaluate(x.assign(999))
self.assertAllClose(self.evaluate(decayed_lr), 0.001, 1e-6)
def testPiecewiseConstantEdgeCases(self):
x_int = tf.Variable(0, dtype=tf.int32)
boundaries, values = [-1.0, 1.0], [1, 2, 3]
with self.assertRaises(ValueError):
decayed_lr = tf.compat.v1.train.piecewise_constant(
x_int, boundaries, values
)
if tf.executing_eagerly():
decayed_lr()
x = tf.Variable(0.0)
boundaries, values = [-1.0, 1.0], [1.0, 2, 3]
with self.assertRaises(ValueError):
decayed_lr = tf.compat.v1.train.piecewise_constant(
x, boundaries, values
)
if tf.executing_eagerly():
decayed_lr()
# Test that ref types are valid.
if not tf.executing_eagerly():
x = tf.compat.v1.Variable(0.0, use_resource=False)
x_ref = x.op.outputs[0] # float32_ref tensor should be accepted
boundaries, values = [1.0, 2.0], [1, 2, 3]
tf.compat.v1.train.piecewise_constant(x_ref, boundaries, values)
# Test casting boundaries from int32 to int64.
x_int64 = tf.Variable(0, dtype=tf.int64)
boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
decayed_lr = tf.compat.v1.train.piecewise_constant(
x_int64, boundaries, values
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6)
self.evaluate(x_int64.assign(1))
self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6)
self.evaluate(x_int64.assign(2))
self.assertAllClose(self.evaluate(decayed_lr), 0.5, 1e-6)
self.evaluate(x_int64.assign(3))
self.assertAllClose(self.evaluate(decayed_lr), 0.6, 1e-6)
self.evaluate(x_int64.assign(4))
self.assertAllClose(self.evaluate(decayed_lr), 0.7, 1e-6)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class LinearDecayTest(test_combinations.TestCase):
def testHalfWay(self):
step = 5
lr = 0.05
end_lr = 0.0
decayed_lr = tf.compat.v1.train.polynomial_decay(lr, step, 10, end_lr)
expected = lr * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testEnd(self):
step = 10
lr = 0.05
end_lr = 0.001
decayed_lr = tf.compat.v1.train.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
end_lr = 0.001
decayed_lr = tf.compat.v1.train.polynomial_decay(lr, step, 10, end_lr)
expected = (lr + end_lr) * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testBeyondEnd(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = tf.compat.v1.train.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = tf.compat.v1.train.polynomial_decay(
lr, step, 10, end_lr, cycle=True
)
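        # With cycle=True, decay_steps is stretched to
        # ceil(15 / 10) * 10 = 20, leaving a remaining fraction of
        # 1 - 15 / 20 = 0.25.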
expected = (lr - end_lr) * 0.25 + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class SqrtDecayTest(test_combinations.TestCase):
def testHalfWay(self):
step = 5
lr = 0.05
end_lr = 0.0
power = 0.5
decayed_lr = tf.compat.v1.train.polynomial_decay(
lr, step, 10, end_lr, power=power
)
expected = lr * 0.5**power
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testEnd(self):
step = 10
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = tf.compat.v1.train.polynomial_decay(
lr, step, 10, end_lr, power=power
)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = tf.compat.v1.train.polynomial_decay(
lr, step, 10, end_lr, power=power
)
expected = (lr - end_lr) * 0.5**power + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testBeyondEnd(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = tf.compat.v1.train.polynomial_decay(
lr, step, 10, end_lr, power=power
)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = tf.compat.v1.train.polynomial_decay(
lr, step, 10, end_lr, power=power, cycle=True
)
expected = (lr - end_lr) * 0.25**power + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class PolynomialDecayTest(test_combinations.TestCase):
def testBeginWithCycle(self):
lr = 0.001
decay_steps = 10
step = 0
decayed_lr = tf.compat.v1.train.polynomial_decay(
lr, step, decay_steps, cycle=True
)
expected = lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class ExponentialDecayTest(test_combinations.TestCase):
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = tf.Variable(0)
decayed_lr = tf.compat.v1.train.natural_exp_decay(
initial_lr, step, k, decay_rate
)
self.evaluate(tf.compat.v1.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr * math.exp(-i / k * decay_rate)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = tf.Variable(0)
decayed_lr = tf.compat.v1.train.natural_exp_decay(
initial_lr, step, k, decay_rate, staircase=True
)
self.evaluate(tf.compat.v1.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr * math.exp(-decay_rate * (i // k))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class InverseDecayTest(test_combinations.TestCase):
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = tf.Variable(0)
decayed_lr = tf.compat.v1.train.inverse_time_decay(
initial_lr, step, k, decay_rate
)
self.evaluate(tf.compat.v1.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr / (1 + i / k * decay_rate)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = tf.Variable(0)
decayed_lr = tf.compat.v1.train.inverse_time_decay(
initial_lr, step, k, decay_rate, staircase=True
)
self.evaluate(tf.compat.v1.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr / (1 + decay_rate * (i // k))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CosineDecayTest(test_combinations.TestCase):
def np_cosine_decay(self, step, decay_steps, alpha=0.0):
step = min(step, decay_steps)
completed_fraction = step / decay_steps
decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = tf.compat.v1.train.cosine_decay(
initial_lr, step, num_training_steps
)
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = tf.compat.v1.train.cosine_decay(
initial_lr, step, num_training_steps, alpha
)
expected = self.np_cosine_decay(step, num_training_steps, alpha)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CosineDecayRestartsTest(test_combinations.TestCase):
def np_cosine_decay_restarts(
self, step, decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0
):
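        # Skip past completed restart periods: each period is t_mul times
        # longer than the previous one, and the restart amplitude is scaled
        # by m_mul.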
fac = 1.0
while step >= decay_steps:
step -= decay_steps
decay_steps *= t_mul
fac *= m_mul
completed_fraction = step / decay_steps
decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = tf.compat.v1.train.cosine_decay_restarts(
initial_lr, step, num_training_steps
)
expected = self.np_cosine_decay_restarts(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = tf.compat.v1.train.cosine_decay_restarts(
initial_lr, step, num_training_steps, alpha=alpha
)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, alpha=alpha
)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testMMul(self):
num_training_steps = 1000
initial_lr = 1.0
m_mul = 0.9
for step in range(0, 1500, 250):
decayed_lr = tf.compat.v1.train.cosine_decay_restarts(
initial_lr, step, num_training_steps, m_mul=m_mul
)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, m_mul=m_mul
)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testTMul(self):
num_training_steps = 1000
initial_lr = 1.0
t_mul = 1.0
for step in range(0, 1500, 250):
decayed_lr = tf.compat.v1.train.cosine_decay_restarts(
initial_lr, step, num_training_steps, t_mul=t_mul
)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, t_mul=t_mul
)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class LinearCosineDecayTest(test_combinations.TestCase):
def np_linear_cosine_decay(
self, step, decay_steps, alpha=0.0, beta=0.001, num_periods=0.5
):
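        # A linear ramp from 1 to 0, modulated by a cosine completing
        # `num_periods` full oscillations over `decay_steps`; `alpha` lifts
        # the linear term and `beta` adds a constant floor.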
step = min(step, decay_steps)
linear_decayed = float(decay_steps - step) / decay_steps
fraction = 2.0 * num_periods * step / float(decay_steps)
cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
return (alpha + linear_decayed) * cosine_decayed + beta
def testDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = tf.compat.v1.train.linear_cosine_decay(
initial_lr, step, num_training_steps
)
expected = self.np_linear_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testNonDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = tf.compat.v1.train.linear_cosine_decay(
initial_lr,
step,
num_training_steps,
alpha=0.1,
beta=1e-4,
num_periods=5,
)
expected = self.np_linear_cosine_decay(
step, num_training_steps, alpha=0.1, beta=1e-4, num_periods=5
)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class NoisyLinearCosineDecayTest(test_combinations.TestCase):
def testDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
# No numerical check because of noise
decayed_lr = tf.compat.v1.train.noisy_linear_cosine_decay(
initial_lr, step, num_training_steps
)
# Cannot be deterministically tested
self.evaluate(decayed_lr)
def testNonDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
# No numerical check because of noise
decayed_lr = tf.compat.v1.train.noisy_linear_cosine_decay(
initial_lr,
step,
num_training_steps,
initial_variance=0.5,
variance_decay=0.1,
alpha=0.1,
beta=1e-4,
num_periods=5,
)
# Cannot be deterministically tested
self.evaluate(decayed_lr)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/optimizers/legacy_learning_rate_decay_test.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy_learning_rate_decay_test.py",
"repo_id": "tf-keras",
"token_count": 8989
} | 251 |
# Placeholder: load unaliased py_library
# Description:
# Contains the TF-Keras Premade Models (internal TensorFlow version).
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = [
"//tf_keras:friends",
],
licenses = ["notice"],
)
py_library(
name = "premade_models",
srcs = [
"__init__.py",
"linear.py",
"wide_deep.py",
],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras:backend_config",
"//tf_keras:regularizers",
],
)
tf_py_test(
name = "linear_test",
size = "medium",
srcs = ["linear_test.py"],
python_version = "PY3",
shard_count = 2,
deps = [
":premade_models",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "wide_deep_test",
size = "medium",
srcs = ["wide_deep_test.py"],
python_version = "PY3",
shard_count = 2,
srcs_version = "PY3",
deps = [
":premade_models",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
| tf-keras/tf_keras/premade_models/BUILD/0 | {
"file_path": "tf-keras/tf_keras/premade_models/BUILD",
"repo_id": "tf-keras",
"token_count": 671
} | 252 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Protobuf containing the metadata for each TF-Keras object saved in a
// SavedModel.
syntax = "proto3";
package third_party.py.tf_keras.protobuf;
import "tf_keras/protobuf/versions.proto";
message SavedMetadata {
// Nodes represent trackable objects in the SavedModel. The data for every
// TF-Keras object is stored.
repeated SavedObject nodes = 1;
}
// Metadata of an individual TF-Keras object.
message SavedObject {
reserved 1; // For previous VersionDef info.
// Index of the node in the SavedModel SavedObjectGraph.
int32 node_id = 2;
// String path from root (e.g. "root.child_layer")
string node_path = 3;
// Identifier to determine loading function.
// Currently supported identifiers:
// _tf_keras_layer, _tf_keras_input_layer, _tf_keras_rnn_layer,
// _tf_keras_metric, _tf_keras_network, _tf_keras_model,
// _tf_keras_sequential
string identifier = 4;
// Metadata containing a JSON-serialized object with the non-TensorFlow
// attributes for this TF-Keras object.
string metadata = 5;
// Version defined by the code serializing this TF-Keras object.
third_party.py.tf_keras.protobuf.VersionDef version = 6;
}
| tf-keras/tf_keras/protobuf/saved_metadata.proto/0 | {
"file_path": "tf-keras/tf_keras/protobuf/saved_metadata.proto",
"repo_id": "tf-keras",
"token_count": 544
} | 253 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for saving and loading TF-Keras models and layers from SavedModel.
These should ensure that all layer properties are correctly assigned after
loading from the SavedModel.
Tests that focus on the model structure should go in revive_test.py
"""
import os
import shutil
import sys
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
import tf_keras as keras
from tf_keras import regularizers
from tf_keras.feature_column.dense_features import DenseFeatures
from tf_keras.protobuf import saved_metadata_pb2
from tf_keras.protobuf import versions_pb2
from tf_keras.saving import object_registration
from tf_keras.saving.legacy.saved_model import json_utils
from tf_keras.saving.legacy.saved_model import load as keras_load
from tf_keras.saving.legacy.saved_model import save_impl as keras_save
from tf_keras.saving.legacy.saved_model import utils as saved_model_utils
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import control_flow_util
from tf_keras.utils import tf_contextlib
from tf_keras.utils import tf_inspect
class LayerWithLearningPhase(keras.engine.base_layer.Layer):
def build(self, input_shape):
self.input_spec = keras.layers.InputSpec(
shape=[None] * len(input_shape)
)
self.built = True
def call(self, x, training=None):
if training is None:
training = keras.backend.learning_phase()
output = control_flow_util.smart_cond(
training, lambda: x * 0, lambda: tf.identity(x)
)
if not tf.executing_eagerly():
output._uses_learning_phase = True
return output
def compute_output_shape(self, input_shape):
return input_shape
@property
def _use_input_spec_as_call_signature(self):
return True
class LayerWithLoss(keras.layers.Layer):
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs))
return inputs * 2
class LayerWithUpdate(keras.layers.Layer):
def build(self, _):
self.v = self.add_weight(
"v",
shape=[],
initializer=keras.initializers.zeros,
trainable=False,
dtype=tf.float32,
)
def call(self, inputs, training=True):
if training:
self.add_update(self.v.assign_add(1.0))
return inputs * 2.0
@object_registration.register_keras_serializable("Testing")
class GlobalLayerThatShouldFailIfNotAdded(keras.layers.Layer):
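    # Registered via the decorator above, so this layer can be revived
    # without an explicit custom object scope.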
_must_restore_from_config = True
@test_combinations.run_all_keras_modes
class TestSavedModelFormatAllModes(test_combinations.TestCase):
def _save_model_dir(self, dirname="saved_model"):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
return os.path.join(temp_dir, dirname)
def _get_model(self):
model = test_utils.get_small_mlp(1, 4, input_dim=3)
model.layers[-1].activity_regularizer = regularizers.get("l2")
model.activity_regularizer = regularizers.get("l2")
model.compile(loss="mse", optimizer="rmsprop")
def callable_loss():
return tf.reduce_sum(model.weights[0])
model.add_loss(callable_loss)
return model
def _train_model(self, model, use_dataset=False):
x = np.random.random((1, 3))
y = np.random.random((1, 4))
if not tf.__internal__.tf2.enabled():
# The layer autocast behavior only runs when autocast is enabled, so
# in V1, the numpy inputs still need to be cast to float32.
x = x.astype(np.float32)
y = y.astype(np.float32)
if use_dataset:
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(1)
model.fit(dataset)
else:
model.train_on_batch(x, y)
def _save_and_load(self, model):
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
return loaded
def _test_evaluation(self, model, loaded):
# Assert that original and loaded models have the same results when
# called.
self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
self.assertAllClose(
self.evaluate(model.weights), self.evaluate(loaded.weights)
)
input_arr = tf.constant(np.random.random((1, 3)).astype(np.float32))
self.assertAllClose(
self.evaluate(model(input_arr)), self.evaluate(loaded(input_arr))
)
# Validate losses. The order of conditional losses may change between
# the model and loaded model, so sort the losses first.
if tf.executing_eagerly():
self.assertAllClose(
sorted(self.evaluate(model.losses)),
sorted(self.evaluate(loaded.losses)),
)
@test_combinations.run_with_all_model_types
def test_model_save_and_load(self):
model = self._get_model()
self._train_model(model, use_dataset=False)
loaded = self._save_and_load(model)
self._test_evaluation(model, loaded)
@test_combinations.run_with_all_model_types
def test_model_save_and_load_dataset(self):
model = self._get_model()
self._train_model(model, use_dataset=True)
loaded = self._save_and_load(model)
self._test_evaluation(model, loaded)
def test_trainable_weights(self):
"""Tests that trainable status of individual weights is preserved."""
layer = keras.layers.Dense(4, name="custom_layer")
layer.build([None, 3])
layer.add_weight(
"extra_weight",
shape=[],
initializer=tf.compat.v1.constant_initializer(11),
trainable=True,
)
layer.add_weight(
"extra_weight_2",
shape=[],
initializer=tf.compat.v1.constant_initializer(12),
trainable=False,
)
model = keras.Sequential(
[
keras.Input(
[
3,
]
),
layer,
]
)
saved_model_dir = self._save_model_dir()
self.evaluate(tf.compat.v1.variables_initializer(layer.variables))
model.save(saved_model_dir, save_format="tf")
loaded_model = keras_load.load(saved_model_dir)
self.evaluate(
tf.compat.v1.variables_initializer(loaded_model.variables)
)
loaded = loaded_model.layers[-1]
equal_attrs = ["name", "_expects_training_arg", "trainable"]
for attr in equal_attrs:
self.assertEqual(getattr(layer, attr), getattr(loaded, attr))
all_close = ["weights", "trainable_weights", "non_trainable_weights"]
for attr in all_close:
self.assertAllClose(
self.evaluate(getattr(layer, attr)),
self.evaluate(getattr(loaded, attr)),
)
@test_combinations.run_with_all_model_types
def test_trainable_layers(self):
"""Tests that trainable status of individual layers is preserved."""
        model = self._get_model()
# Set the last layer to *not* be trainable.
model.layers[-1].trainable = False
self._train_model(model, use_dataset=True)
loaded = self._save_and_load(model)
self._test_evaluation(model, loaded)
self.assertFalse(model.layers[-1].trainable)
self.assertFalse(loaded.layers[-1].trainable)
def test_trainable_custom_model_false(self):
"""Tests that overall False trainable status of Model is preserved."""
# Set all layers to *not* be trainable.
model = test_utils.SmallSubclassMLP(1, 4, trainable=False)
model.compile(loss="mse", optimizer="rmsprop")
self._train_model(model, use_dataset=False)
loaded = self._save_and_load(model)
self._test_evaluation(model, loaded)
self.assertEmpty(model.trainable_variables)
self.assertEmpty(loaded.trainable_variables)
def test_maintains_losses(self):
"""Tests that the layer losses do not change before and after export."""
model = keras.models.Sequential([LayerWithLoss()])
model.compile(loss="mse", optimizer="rmsprop")
input_arr = np.random.random((1, 3))
target_arr = np.random.random((1, 3))
# Test that symbolic losses are maintained (train_on_batch saves
# symbolic losses.)
model.train_on_batch(input_arr, target_arr)
previous_losses = model.losses[:]
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
with previous_losses[0].graph.as_default():
            # If we try to compare symbolic Tensors in eager mode,
            # assertAllEqual will return False even if they are the same
            # Tensor.
self.assertEqual(previous_losses, model.losses)
if tf.executing_eagerly():
# Test that eager losses are maintained.
model(input_arr) # Calls model eagerly, creating eager losses.
previous_losses = model.losses[:]
model.save(saved_model_dir, save_format="tf")
self.assertAllEqual(previous_losses, model.losses)
def test_layer_with_learning_phase(self):
layer = LayerWithLearningPhase()
layer.build([None, None])
saved_model_dir = self._save_model_dir()
model = test_utils.get_model_from_layers(
[layer], input_shape=[None], model_type="functional"
)
model.save(saved_model_dir, save_format="tf")
loaded_model = keras_load.load(saved_model_dir)
loaded = loaded_model.layers[-1]
input_arr = tf.ones((4, 3))
# Run the layer, and use the keras backend learning phase
keras.backend.set_learning_phase(0)
self.assertAllEqual(input_arr, loaded(input_arr))
keras.backend.set_learning_phase(1)
self.assertAllEqual(tf.zeros((4, 3)), loaded(input_arr))
# Run the layer while explicitly setting the training argument
self.assertAllEqual(
input_arr, loaded(input_arr, training=tf.constant(False))
)
self.assertAllEqual(
tf.zeros((4, 3)), loaded(input_arr, training=tf.constant(True))
)
@test_combinations.run_with_all_model_types
def test_standard_loader(self):
model = test_utils.get_small_mlp(1, 4, input_dim=3)
model.activity_regularizer = regularizers.get("l2")
def eager_loss():
return tf.reduce_sum(model.weights[0])
model.add_loss(eager_loss)
# Call predict to ensure that all layers are built and inputs are set.
model.predict(np.random.random((1, 3)).astype(np.float32))
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = tf.saved_model.load(saved_model_dir)
self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
all_close = [
"variables",
"trainable_variables",
"non_trainable_variables",
]
for attr in all_close:
self.assertAllClose(
self.evaluate(getattr(model, attr)),
self.evaluate(getattr(loaded.keras_api, attr)),
)
self.assertLen(loaded.regularization_losses, 1)
expected_layers = len(model.layers)
self.assertEqual(expected_layers, len(loaded.keras_api.layers))
input_arr = tf.ones((4, 3))
self.assertAllClose(
self.evaluate(model(input_arr)),
self.evaluate(loaded(input_arr, training=False)),
)
@test_combinations.run_with_all_model_types
def test_compiled_model(self):
# TODO(b/134519980): Issue with model.fit if the model call function
# uses a tf.function (Graph mode only).
if not tf.executing_eagerly():
return
input_arr = np.random.random((1, 3))
target_arr = np.random.random((1, 4))
model = test_utils.get_small_mlp(1, 4, input_dim=3)
expected_predict = model.predict(input_arr)
# Compile and save model.
model.compile("rmsprop", "mse")
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
actual_predict = loaded.predict(input_arr)
self.assertAllClose(expected_predict, actual_predict)
loss_before = loaded.evaluate(input_arr, target_arr)
loaded.fit(input_arr, target_arr)
loss_after = loaded.evaluate(input_arr, target_arr)
self.assertLess(loss_after, loss_before)
predict = loaded.predict(input_arr)
ckpt_path = os.path.join(self.get_temp_dir(), "weights")
loaded.save_weights(ckpt_path)
# Ensure that the checkpoint is compatible with the original model.
model.load_weights(ckpt_path)
self.assertAllClose(predict, model.predict(input_arr))
def test_metadata_input_spec(self):
class LayerWithNestedSpec(keras.layers.Layer):
def __init__(self):
super().__init__()
self.input_spec = {
"a": keras.layers.InputSpec(max_ndim=3, axes={-1: 2}),
"b": keras.layers.InputSpec(
shape=(None, 2, 3), dtype="int32"
),
}
@property
def _use_input_spec_as_call_signature(self):
return True
layer = LayerWithNestedSpec()
saved_model_dir = self._save_model_dir()
model = test_utils.get_model_from_layers([layer], model_type="subclass")
model(
{
"a": tf.constant([[2, 4]]),
"b": tf.ones([1, 2, 3], dtype=tf.int32),
}
)
model.save(saved_model_dir, save_format="tf")
loaded_model = keras_load.load(saved_model_dir)
loaded = loaded_model.layers[-1]
self.assertEqual(3, loaded.input_spec["a"].max_ndim)
self.assertEqual({-1: 2}, loaded.input_spec["a"].axes)
self.assertAllEqual([None, 2, 3], loaded.input_spec["b"].shape)
self.assertEqual("int32", loaded.input_spec["b"].dtype)
def test_must_restore_from_config_fails_if_layer_is_not_in_scope(self):
class LayerThatShouldFailIfNotAdded(keras.layers.Layer):
_must_restore_from_config = True
layer = LayerThatShouldFailIfNotAdded()
saved_model_dir = self._save_model_dir()
model = test_utils.get_model_from_layers(
[layer], input_shape=[3], model_type="functional"
)
model.save(saved_model_dir, save_format="tf")
with self.assertRaisesRegex(
ValueError, "Unknown layer: 'LayerThatShouldFailIfNotAdded'"
):
_ = keras_load.load(saved_model_dir)
def test_must_restore_from_config_custom_object_scope(self):
class LayerThatShouldFailIfNotAdded(keras.layers.Layer):
_must_restore_from_config = True
layer = LayerThatShouldFailIfNotAdded()
model = test_utils.get_model_from_layers(
[layer], input_shape=[3], model_type="functional"
)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
with object_registration.CustomObjectScope(
{"LayerThatShouldFailIfNotAdded": LayerThatShouldFailIfNotAdded}
):
_ = keras_load.load(saved_model_dir)
def test_must_restore_from_config_registration(self):
layer = GlobalLayerThatShouldFailIfNotAdded()
saved_model_dir = self._save_model_dir()
model = test_utils.get_model_from_layers(
[layer], input_shape=[3], model_type="functional"
)
model.save(saved_model_dir, save_format="tf")
_ = keras_load.load(saved_model_dir)
def test_multi_input_model(self):
input_1 = keras.layers.Input(shape=(3,))
input_2 = keras.layers.Input(shape=(5,))
model = keras.Model([input_1, input_2], [input_1, input_2])
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
input_arr_1 = np.random.random((1, 3)).astype("float32")
input_arr_2 = np.random.random((1, 5)).astype("float32")
outputs = loaded([input_arr_1, input_arr_2])
self.assertAllEqual(input_arr_1, outputs[0])
self.assertAllEqual(input_arr_2, outputs[1])
def test_revived_sequential(self):
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
5, input_shape=(3,), kernel_regularizer=regularizers.get("l2")
)
)
model.add(
keras.layers.Dense(2, kernel_regularizer=regularizers.get("l2"))
)
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
self.assertLen(loaded.layers, 2)
self.assertLen(loaded.losses, 2)
loaded.pop()
self.assertLen(loaded.layers, 1)
self.assertLen(loaded.losses, 1)
loaded.add(
keras.layers.Dense(2, kernel_regularizer=regularizers.get("l2"))
)
self.assertLen(loaded.layers, 2)
self.assertLen(loaded.losses, 2)
def testBatchNormUpdates(self):
model = keras.models.Sequential(
keras.layers.BatchNormalization(input_shape=(1,))
)
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
saved_model_dir = self._save_model_dir()
with self.captureWritesToStream(sys.stderr) as captured_logs:
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
# Assert that saving does not log deprecation warnings
# (even if it needs to set learning phase for compat reasons)
if tf.executing_eagerly():
self.assertNotIn("deprecated", captured_logs.contents())
input_arr = tf.constant([[11], [12], [13]], dtype=tf.float32)
input_arr2 = tf.constant([[14], [15], [16]], dtype=tf.float32)
self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0])
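        # With the default momentum of 0.99, one training batch with mean 12
        # moves the moving mean from 0 to 0.01 * 12 = 0.12; inference-mode
        # calls must leave it untouched.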
self.evaluate(loaded(input_arr, training=True))
if not tf.executing_eagerly():
self.evaluate(loaded.get_updates_for(input_arr))
self.assertAllClose(
self.evaluate(loaded.layers[-1].moving_mean), [0.12]
)
self.evaluate(loaded(input_arr2, training=False))
if not tf.executing_eagerly():
self.evaluate(loaded.get_updates_for(input_arr2))
self.assertAllClose(
self.evaluate(loaded.layers[-1].moving_mean), [0.12]
)
def testDisablingBatchNormTrainableBeforeSaving(self):
# We disable trainable on the batchnorm layers before saving
model = keras.models.Sequential(
keras.layers.BatchNormalization(input_shape=(1,))
)
model.trainable = False
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
input_arr = tf.constant([[11], [12], [13]], dtype=tf.float32)
input_arr2 = tf.constant([[14], [15], [16]], dtype=tf.float32)
self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0])
# Trainable should still be disabled after loading
self.evaluate(loaded(input_arr, training=True))
if not tf.executing_eagerly():
self.evaluate(loaded.get_updates_for(input_arr))
self.assertAllClose(self.evaluate(loaded.layers[-1].moving_mean), [0.0])
# Re-enabling trainable on the loaded model should cause the batchnorm
# layer to start training again.
# Note: this only works in v2.
if tf.executing_eagerly():
loaded.trainable = True
self.evaluate(loaded(input_arr, training=True))
self.assertAllClose(
self.evaluate(loaded.layers[-1].moving_mean), [0.12]
)
self.evaluate(loaded(input_arr2, training=False))
self.assertAllClose(
self.evaluate(loaded.layers[-1].moving_mean), [0.12]
)
def testSaveWithSignatures(self):
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
5, input_shape=(3,), kernel_regularizer=regularizers.get("l2")
)
)
model.add(keras.layers.Dropout(0.5))
model.add(
keras.layers.Dense(4, kernel_regularizer=regularizers.get("l2"))
)
input_arr = np.random.random((2, 3))
target_arr = np.random.random((2, 4))
model.compile(loss="mse", optimizer="rmsprop")
model.train_on_batch(input_arr, target_arr)
@tf.function(input_signature=[tf.TensorSpec((None, 3))])
def predict(inputs):
return {"predictions": model(inputs)}
feature_configs = {
"inputs": tf.io.FixedLenFeature(shape=[2, 3], dtype=tf.float32)
}
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def parse_and_predict(examples):
features = tf.compat.v1.parse_single_example(
examples[0], feature_configs
)
return {
"predictions": model(features["inputs"]),
"layer_1_outputs": model.layers[0](features["inputs"]),
}
saved_model_dir = self._save_model_dir()
model.save(
saved_model_dir,
save_format="tf",
signatures={
"predict": predict,
"parse_and_predict": parse_and_predict,
},
)
loaded = keras_load.load(saved_model_dir)
self.assertAllClose(
model.predict(input_arr),
loaded.signatures["predict"](
tf.convert_to_tensor(input_arr.astype("float32"))
)["predictions"],
)
feature = {
"inputs": feature_pb2.Feature(
float_list=feature_pb2.FloatList(
value=input_arr.astype("float32").flatten()
)
)
}
example = example_pb2.Example(
features=feature_pb2.Features(feature=feature)
)
outputs = loaded.signatures["parse_and_predict"](
tf.convert_to_tensor([example.SerializeToString()])
)
self.assertAllClose(model.predict(input_arr), outputs["predictions"])
self.assertAllClose(
model.layers[0](input_arr), outputs["layer_1_outputs"]
)
def testTrainingDefaults(self):
def assert_training_default(fn, default_value):
arg_spec = tf_inspect.getfullargspec(fn)
fn_defaults = arg_spec.defaults or []
defaults = dict()
# The call arg defaults are an n-tuple of the last n elements of the
# args list. (n = # of elements that have a default argument)
for i in range(-1 * len(fn_defaults), 0):
defaults[arg_spec.args[i]] = fn_defaults[i]
# The default training arg will be any (non-None) default specified
# in the method signature, or None if no value is specified.
defaults.update(arg_spec.kwonlydefaults or {})
self.assertEqual(defaults["training"], default_value)
class LayerWithTrainingRequiredArg(keras.engine.base_layer.Layer):
def call(self, inputs, training):
return control_flow_util.smart_cond(
training, lambda: inputs * 0, lambda: tf.identity(inputs)
)
class LayerWithTrainingDefaultTrue(keras.engine.base_layer.Layer):
def call(self, inputs, training=True):
return control_flow_util.smart_cond(
training, lambda: inputs * 0, lambda: tf.identity(inputs)
)
class Model(keras.models.Model):
def __init__(self):
super().__init__()
self.layer_with_training_default_none = LayerWithLearningPhase()
self.layer_with_training_default_true = (
LayerWithTrainingDefaultTrue()
)
self.layer_with_required_training_arg = (
LayerWithTrainingRequiredArg()
)
def call(self, inputs):
x = self.layer_with_training_default_none(inputs)
x += self.layer_with_training_default_true(inputs)
x += self.layer_with_required_training_arg(inputs, False)
return x
model = Model()
# Build and set model inputs
model.predict(np.ones([1, 3]).astype("float32"))
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
load = tf.saved_model.load(saved_model_dir)
# Ensure that the TF-Keras loader is able to load and build the model.
_ = keras_load.load(saved_model_dir)
assert_training_default(load.__call__, False)
assert_training_default(
load.layer_with_training_default_none.__call__, False
)
assert_training_default(
load.layer_with_training_default_true.__call__, True
)
# Assert that there are no defaults for layer with required training arg
arg_spec = tf_inspect.getfullargspec(
load.layer_with_required_training_arg.__call__
)
self.assertFalse(arg_spec.defaults) # defaults is None or empty
def testTraceModelWithKwarg(self):
class Model(keras.models.Model):
def call(self, inputs, keyword=None):
return tf.identity(inputs)
model = Model()
prediction = model.predict(np.ones([1, 3]).astype("float32"))
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
with object_registration.custom_object_scope({"Model": Model}):
loaded = keras_load.load(saved_model_dir)
self.assertAllClose(
prediction, loaded.predict(np.ones([1, 3]).astype("float32"))
)
loaded_without_scope = keras_load.load(saved_model_dir)
if tf.__internal__.tf2.enabled():
with self.assertRaises(NotImplementedError):
loaded_without_scope.predict(np.ones([1, 3]).astype("float32"))
def testFeatureColumns(self):
# TODO(b/120099662): Error with table initialization with TF-Keras
# models in graph mode.
if tf.executing_eagerly():
numeric = tf.feature_column.numeric_column("a")
bucketized = tf.feature_column.bucketized_column(
numeric, boundaries=[5, 10, 15]
)
cat_vocab = (
tf.feature_column.categorical_column_with_vocabulary_list(
"b", ["1", "2", "3"]
)
)
one_hot = tf.feature_column.indicator_column(cat_vocab)
embedding = tf.feature_column.embedding_column(
cat_vocab, dimension=8
)
feature_layer = DenseFeatures([bucketized, one_hot, embedding])
model = keras.models.Sequential(feature_layer)
features = {"a": np.array([13, 15]), "b": np.array(["1", "2"])}
predictions = model.predict(features)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
loaded_predictions = loaded.predict(features)
self.assertAllClose(predictions, loaded_predictions)
def testSaveTensorKwarg(self):
class LayerWithTensorKwarg(keras.layers.Layer):
def call(self, inputs, tensor=None):
if tensor is not None:
return inputs * tf.cast(tensor, tf.float32)
else:
return inputs
t = self.evaluate(tf.sequence_mask(1))
        inputs = keras.layers.Input(shape=(3,))
model = keras.models.Model(inputs, LayerWithTensorKwarg()(inputs, t))
input_arr = np.random.random((1, 3))
predictions = model.predict(input_arr)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
loaded_predictions = loaded.predict(input_arr)
self.assertAllClose(predictions, loaded_predictions)
def testModelWithTfFunctionCall(self):
class Subclass(keras.models.Model):
@tf.function
def call(self, inputs, training=False):
return inputs * tf.cast(training, tf.float32)
model = Subclass()
model.predict(tf.ones((1, 2)), steps=1)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
self.assertAllEqual(
[[1, 5]],
self.evaluate(loaded(tf.constant([[1, 5.0]]), training=True)),
)
self.assertAllEqual(
[[0, 0]],
self.evaluate(loaded(tf.constant([[1, 5.0]]), training=False)),
)
def testReviveFunctionalModel(self):
class CustomAdd(keras.layers.Add):
def build(self, input_shape):
self.w = self.add_weight("w", shape=[])
super().build(input_shape)
def call(self, inputs):
outputs = super().call(inputs)
return outputs * self.w
input1 = keras.layers.Input(shape=(None, 3), name="input_1")
input2 = keras.layers.Input(shape=(None, 3), name="input_2")
d = keras.layers.Dense(4, name="dense_with_two_inbound_nodes")
output1 = d(input1)
output2 = d(input2)
# Use a custom layer in this model to ensure that layers aren't being
# recreated directly from the config.
outputs = CustomAdd(name="custom")([output1, output2])
model = keras.models.Model([input1, input2], outputs, name="save_model")
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
self.assertEqual("save_model", loaded.name)
self.assertLen(
loaded.get_layer("dense_with_two_inbound_nodes")._inbound_nodes, 2
)
self.assertEqual("CustomAdd", type(loaded.get_layer("custom")).__name__)
self.assertLen(loaded.get_layer("custom").weights, 1)
def _testAddUpdate(self, scope):
with scope:
layer_with_update = LayerWithUpdate()
model = test_utils.get_model_from_layers(
[layer_with_update], input_shape=(3,)
)
x = np.ones((10, 3))
if test_utils.get_model_type() == "subclass":
model.predict(x, batch_size=10)
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
loaded_layer = loaded.layers[-1]
self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
self.assertEqual(self.evaluate(loaded_layer.v), 0.0)
loaded.compile("sgd", "mse")
loaded.fit(x, x, batch_size=10)
self.assertEqual(self.evaluate(loaded_layer.v), 1.0)
@test_combinations.run_with_all_model_types
def testSaveLayerWithUpdates(self):
@tf_contextlib.contextmanager
def nullcontextmanager():
yield
self._testAddUpdate(nullcontextmanager())
@test_combinations.run_with_all_model_types
def testSaveInStrategyScope(self):
self._testAddUpdate(tf.distribute.MirroredStrategy().scope())
def testSaveTimeDistributedLayer(self):
model = keras.Sequential(
[
keras.layers.TimeDistributed(
keras.layers.Dense(
1, kernel_regularizer=regularizers.get("l2")
),
input_shape=(None, 1),
)
]
)
predictions = model.predict_on_batch(tf.ones((3, 2, 1)))
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
self.assertAllClose(
loaded.predict_on_batch(tf.ones((3, 2, 1))), predictions
)
@parameterized.named_parameters(
[("with_unrolling", True), ("no_unrolling", False)]
)
def testSaveStatefulRNN(self, unroll):
batch = 12
timesteps = 10
input_dim = 8
input_arr = np.ones((batch, timesteps, input_dim)).astype("float32")
cells = [keras.layers.LSTMCell(32), keras.layers.LSTMCell(64)]
if unroll:
x = keras.Input(batch_shape=(batch, timesteps, input_dim))
else:
x = keras.Input(batch_shape=(batch, None, input_dim))
layer = keras.layers.RNN(cells, stateful=True, unroll=unroll)
y = layer(x)
model = keras.Model(x, y)
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
model.train_on_batch(
np.zeros((batch, timesteps, input_dim)).astype("float32"),
np.zeros((batch, 64)).astype("float32"),
)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
loaded_layer = loaded.layers[1]
if not tf.executing_eagerly():
keras.backend.get_session() # force variable initialization
self.assertAllClose(layer.states, loaded_layer.states)
self.assertAllClose(model(input_arr), loaded(input_arr))
def testSaveBidirectionalLSTM(self):
# Make sure that the input spec of an unrolled RNN is not used when
# wrapped in a Bidirectional layer.
# https://github.com/keras-team/tf-keras/issues/15454
input_layer = keras.Input(
batch_input_shape=(1, 15, 128), name="input", dtype=tf.float32
)
lstm_layer = keras.layers.Bidirectional(
keras.layers.LSTM(
units=64,
name="lstm",
dropout=0.2,
trainable=False,
unroll=True,
)
)
output_layer = lstm_layer(input_layer)
model = keras.Model(input_layer, output_layer)
saved_model_dir = self._save_model_dir()
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
input_arr = np.random.random((1, 15, 128)).astype("float32")
self.assertAllClose(model(input_arr), loaded(input_arr))
@parameterized.named_parameters([("stateful", True), ("stateless", False)])
def testSaveConvLSTM2D(self, stateful):
data_format = "channels_first"
batch, timesteps, channels, rows, cols = 12, 10, 8, 4, 4
input_arr = np.ones((batch, timesteps, channels, rows, cols)).astype(
"float32"
)
layer = keras.layers.ConvLSTM2D(
filters=16,
kernel_size=(1, 1),
data_format=data_format,
stateful=stateful,
)
x = keras.Input(batch_shape=(batch, timesteps, channels, rows, cols))
y = layer(x)
model = keras.Model(x, y)
predict_1 = model(input_arr)
self.evaluate([v.initializer for v in model.variables])
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
del model
loaded = keras_load.load(saved_model_dir)
self.evaluate([v.initializer for v in loaded.variables])
if stateful:
loaded.reset_states()
predict_2 = loaded(input_arr)
self.assertAllClose(predict_1, predict_2)
def testSaveWithRaggedInputs(self):
class EmbeddingMerger(keras.layers.Layer):
def __init__(self, list_features, **kwargs):
super().__init__(**kwargs)
self._supports_ragged_inputs = True
self.embeddings = {
feature: keras.layers.Embedding(10, 3)
for feature in list_features
}
self.mean = keras.layers.Lambda(
tf.reduce_mean, arguments=dict(axis=1)
)
def call(self, inputs):
tensors = [self.embeddings[col](inputs[col]) for col in inputs]
tensors = [self.mean(inp) for inp in tensors]
return keras.layers.Add()(tensors)
list_features = ["feature_1", "feature_2"]
feature_1 = tf.ragged.constant([[0.0], [1, 3]])
feature_2 = tf.ragged.constant([[1.0, 2], [4]])
f = {"feature_1": feature_1, "feature_2": feature_2}
f_inputs = {
"feature_1": keras.Input(
shape=(None,), name="feature_1", ragged=True
),
"feature_2": keras.Input(
shape=(None,), name="feature_2", ragged=True
),
}
out = EmbeddingMerger(list_features)(f_inputs)
model = keras.Model(f_inputs, out)
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
self.evaluate(tf.compat.v1.variables_initializer(loaded.variables))
self.assertAllClose(model.predict(f), loaded.predict(f))
def testSaveMultipleInputs(self):
class CustomLayer(keras.layers.Layer):
def call(self, *input_list):
self.add_loss(input_list[-2] * 2)
return sum(
input_list[:-1]
) # The test's last input is a non-tensor arg
class CustomModel(keras.Model):
def build(self, _):
self.layer = CustomLayer()
def call(self, *inputs):
inputs = list(inputs)
inputs.append(
object()
) # Test that the layer handles non-tensor inputs
return self.layer(*inputs)
model = CustomModel()
inp = [
tf.constant(i, shape=[1, 1], dtype=tf.float32) for i in range(1, 5)
]
expected = model(*inp)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
actual = loaded(*inp)
self.assertAllEqual(self.evaluate(expected), self.evaluate(actual))
def testSaveMultipleInputsWithTraining(self):
class CustomModel(keras.Model):
def call(self, input_1, training, input_2):
if training:
return input_1
else:
return input_2
inp1 = tf.constant(1.0, shape=[1])
inp2 = tf.constant(2.0, shape=[1])
model = CustomModel()
self.assertEqual(self.evaluate(model(inp1, True, inp2)), 1.0)
self.assertEqual(self.evaluate(model(inp1, False, inp2)), 2.0)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
self.assertEqual(self.evaluate(loaded(inp1, True, inp2)), 1.0)
self.assertEqual(self.evaluate(loaded(inp1, False, inp2)), 2.0)
def test_wrapped_layer_training(self):
class Custom(keras.models.Model):
def __init__(self):
super().__init__()
self.layer = LayerWithLearningPhase()
def call(self, inputs):
return self.layer(inputs)
model = Custom()
x = tf.constant(1.0, shape=[1, 1])
expected_default = model(x)
expected_training_true = model(x, training=True)
expected_training_false = model(x, training=False)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
def assert_loaded_model(loaded):
actual_default = loaded(x)
actual_training_true = loaded(x, training=True)
actual_training_false = loaded(x, training=False)
self.assertAllClose(
[
expected_default,
expected_training_true,
expected_training_false,
],
[actual_default, actual_training_true, actual_training_false],
)
assert_loaded_model(keras_load.load(saved_model_dir))
assert_loaded_model(tf.saved_model.load(saved_model_dir))
@parameterized.named_parameters([("true", True), ("false", False)])
def test_save_layer_autocast(self, autocast):
class CustomLayer(keras.layers.Layer):
def __init__(self):
super().__init__(autocast=autocast)
class CustomModel(keras.Model):
def __init__(self):
super().__init__(autocast=autocast)
def call(self, inputs):
return inputs
x = tf.constant([3], dtype=tf.float64)
x_in = keras.Input((1,))
output = CustomLayer()(x_in)
output = CustomModel()(output)
model = keras.Model(inputs=x_in, outputs=output)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
self.assertEqual(autocast, loaded.layers[-1]._autocast)
self.assertEqual(autocast, loaded.layers[-2]._autocast)
self.assertEqual(self.evaluate(model(x)), self.evaluate(loaded(x)))
class TestSavedModelFormat(tf.test.TestCase):
def _save_model_dir(self, dirname="saved_model"):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
return os.path.join(temp_dir, dirname)
def test_load_with_custom_model_and_layer(self):
class CustomLayer(keras.layers.Layer):
def __call__(self, inputs):
return inputs
class Model(keras.models.Model):
def __init__(self):
super().__init__()
self.layer = CustomLayer() # noqa: F821
@tf.function(input_signature=[tf.TensorSpec([None, 1])])
def call(self, inputs):
return self.layer(inputs)
model = Model()
inp = tf.constant([[1.0]])
model(inp)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
# Even if the `CustomLayer` is not provided in `custom_object_scope`,
# `Model` still has that reference.
with object_registration.custom_object_scope({"Model": Model}):
loaded = keras_load.load(saved_model_dir)
self.assertAllEqual([[1.0]], self.evaluate(loaded(inp)))
self.assertAllEqual([[1.0]], self.evaluate(loaded.layer(inp)))
self.assertIsInstance(loaded.layer, CustomLayer)
# If `CustomLayer` is provided in `custom_object_scope`, it should of
# course use that custom class.
with object_registration.custom_object_scope(
{"Model": Model, "CustomLayer": CustomLayer}
):
loaded = keras_load.load(saved_model_dir)
self.assertAllEqual([[1.0]], self.evaluate(loaded(inp)))
self.assertAllEqual([[1.0]], self.evaluate(loaded.layer(inp)))
self.assertIsInstance(loaded.layer, CustomLayer)
def test_save_without_tracing(self):
class DoNotTrace(keras.layers.Layer):
def __init__(self):
super().__init__()
self.input_spec = keras.layers.InputSpec(shape=[None])
self.built = True
def call(self, inputs):
raise ValueError("I said do not trace")
def get_config(self):
return {}
@property
def _use_input_spec_as_call_signature(self):
return True
root = keras.models.Sequential()
root.add(keras.layers.Input(shape=(3,)))
root.attached_layer = DoNotTrace()
saved_model_dir = self._save_model_dir()
# With the default settings, the call function is traced.
with self.assertRaisesRegex(ValueError, "do not trace"):
root.save(saved_model_dir, save_format="tf")
        # When saving the config only, the layer call function should not be
        # traced.
root.save(saved_model_dir, save_format="tf", save_traces=False)
loaded = tf.saved_model.load(saved_model_dir)
self.assertTrue(hasattr(loaded, "attached_layer"))
# This should raise an error when loaded without the custom object
loaded = keras_load.load(saved_model_dir)
with self.assertRaisesRegex(ValueError, "Cannot call custom layer"):
loaded.attached_layer(tf.constant([1.0]))
# Try loading with the custom objects
with object_registration.CustomObjectScope({"DoNotTrace": DoNotTrace}):
loaded = keras_load.load(saved_model_dir)
with self.assertRaisesRegex(ValueError, "I said do not trace"):
loaded.attached_layer(tf.constant([1.0]))
def test_load_non_keras_saved_model(self):
model = test_utils.get_small_functional_mlp(1, 4, input_dim=3)
saved_model_dir = self._save_model_dir()
tf.saved_model.save(model, saved_model_dir)
with self.assertRaisesRegex(
ValueError, "Unable to create a TF-Keras model"
):
keras_load.load(saved_model_dir)
def test_random_generator_custom_layer(self):
class CustomDropout(keras.layers.Layer):
def __init__(self, dropout_rate=0.1, **kwargs):
super().__init__(**kwargs)
self.dropout_rate = dropout_rate
self.dropout = keras.layers.Dropout(
dropout_rate, rng_type="stateful"
)
def call(self, inputs, training=False):
return self.dropout(inputs, training=training)
root = keras.models.Sequential(
[keras.layers.Input(shape=(3,)), CustomDropout()]
)
saved_model_dir = self._save_model_dir()
root.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
output = loaded(tf.random.uniform([1, 3]), training=True)
self.assertAllEqual([1, 3], output.shape)
def test_random_generator_with_tracing(self):
        # This test ensures that we trace the `training=True` function first;
        # otherwise `tf.function` will raise an error about creating variables
        # in a non-first call.
class LayerWithDropout(keras.layers.Layer):
def __init__(self, dropout_rate):
super().__init__()
self.dropout_rate = dropout_rate
self.dropout_layer = keras.layers.Dropout(self.dropout_rate)
def call(self, inputs, training=None):
if not training:
return inputs
else:
return self.dropout_layer(inputs, training=training)
root = keras.models.Sequential(
[keras.layers.Input(shape=(3,)), LayerWithDropout(0.1)]
)
saved_model_dir = self._save_model_dir()
root.save(saved_model_dir, save_format="tf")
loaded = keras_load.load(saved_model_dir)
output = loaded(tf.random.uniform([1, 3]), training=True)
self.assertAllEqual([1, 3], output.shape)
class TestLayerCallTracing(tf.test.TestCase, parameterized.TestCase):
def test_functions_have_same_trace(self):
class Layer(keras.engine.base_layer.Layer):
def call(self, inputs):
return inputs
def call2(self, inputs):
return inputs * 2
layer = Layer()
call_collection = keras_save.LayerCallCollection(layer)
fn = call_collection.add_function(layer.call, "call", True)
fn2 = call_collection.add_function(layer.call2, "call2", True)
with keras_save.tracing_scope():
fn(np.ones((2, 3)))
fn(np.ones((4, 5)))
self.assertLen(
fn.wrapped_call._list_all_concrete_functions_for_serialization(), 2
)
self.assertLen(
fn2.wrapped_call._list_all_concrete_functions_for_serialization(), 2
)
# Check that the shapes are correct
self.assertEqual(
{(2, 3), (4, 5)},
set(
tuple(c.structured_input_signature[0][0].shape.as_list())
for c in fn2.wrapped_call._list_all_concrete_functions_for_serialization() # noqa: E501
),
)
def test_training_arg_replacement(self):
def assert_num_traces(layer_cls, training_keyword):
layer = layer_cls()
call_collection = keras_save.LayerCallCollection(layer)
fn = call_collection.add_function(layer.call, "call", True)
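            # Each traced call below is expected to produce two concrete
            # functions (a training=True and a training=False variant), so
            # the asserted counts grow by 2 per distinct input shape.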
with keras_save.tracing_scope():
fn(np.ones((2, 3)), training=True)
self.assertLen(
fn.wrapped_call._list_all_concrete_functions_for_serialization(), # noqa: E501
2,
)
with keras_save.tracing_scope():
fn(np.ones((2, 4)), training=False)
self.assertLen(
fn.wrapped_call._list_all_concrete_functions_for_serialization(), # noqa: E501
4,
)
if training_keyword:
with keras_save.tracing_scope():
fn(np.ones((2, 5)), True)
self.assertLen(
fn.wrapped_call._list_all_concrete_functions_for_serialization(), # noqa: E501
6,
)
with keras_save.tracing_scope():
fn(np.ones((2, 6)))
self.assertLen(
fn.wrapped_call._list_all_concrete_functions_for_serialization(), # noqa: E501
8,
)
class LayerWithTrainingKeyword(keras.engine.base_layer.Layer):
def call(self, inputs, training=False):
return inputs * training
assert_num_traces(LayerWithTrainingKeyword, training_keyword=True)
class LayerWithKwargs(keras.engine.base_layer.Layer):
def call(self, inputs, **kwargs):
return inputs * kwargs["training"]
assert_num_traces(LayerWithKwargs, training_keyword=False)
class LayerWithChildLayer(keras.engine.base_layer.Layer):
def __init__(self):
self.child = LayerWithKwargs()
super().__init__()
def call(self, inputs):
return self.child(inputs)
assert_num_traces(LayerWithChildLayer, training_keyword=False)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_maintains_losses(self):
layer = LayerWithLoss()
layer(np.ones((2, 3)))
previous_losses = layer.losses[:]
call_collection = keras_save.LayerCallCollection(layer)
fn = call_collection.add_function(layer.call, "call", True)
fn(np.ones((2, 3)))
self.assertAllEqual(
self.evaluate(previous_losses), self.evaluate(layer.losses)
)
@object_registration.register_keras_serializable("Testing")
class CustomMeanMetric(keras.metrics.Mean):
def update_state(self, *args):
# Sometimes built-in metrics return an op in update_state. Custom
# metrics don't support returning ops, so wrap the update_state method
# while returning nothing.
super().update_state(*args)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MetricTest(tf.test.TestCase, parameterized.TestCase):
def _save_model_dir(self, dirname="saved_model"):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
return os.path.join(temp_dir, dirname)
def generate_inputs(self, num_tensor_args, shape=(1, 5)):
return [
np.random.uniform(0, 1, shape).astype("float32")
for _ in range(num_tensor_args)
]
def _test_metric_save_and_load(
self,
metric,
save_dir,
num_tensor_args,
shape=(1, 5),
test_sample_weight=True,
):
with self.cached_session():
model = test_utils.get_model_from_layers(
[keras.layers.Layer()], input_shape=[3], model_type="functional"
)
model.saved_metric = metric
model.save(save_dir, save_format="tf")
loaded_model = keras_load.load(save_dir)
loaded = loaded_model.saved_metric
self.evaluate([v.initializer for v in loaded.variables])
self.assertEqual(metric.name, loaded.name)
self.assertEqual(metric.dtype, loaded.dtype)
inputs = self.generate_inputs(num_tensor_args, shape)
actual = self.evaluate(metric(*inputs))
self.assertAllClose(actual, loaded(*inputs))
self.assertAllClose(metric.variables, loaded.variables)
# Test with separate calls to update state and result.
inputs = self.generate_inputs(num_tensor_args, shape)
self.evaluate(metric.update_state(*inputs))
self.evaluate(loaded.update_state(*inputs))
actual = self.evaluate(metric.result())
self.assertAllClose(actual, loaded.result())
if test_sample_weight:
# Test with sample weights input.
inputs = self.generate_inputs(num_tensor_args, shape)
sample_weight = self.generate_inputs(1, [])[0]
inputs.append(sample_weight)
actual = self.evaluate(metric(*inputs))
self.assertAllClose(actual, loaded(*inputs))
return loaded
@parameterized.named_parameters(
[
("mean", keras.metrics.Mean, 1, (1, 5)),
("false_positives", keras.metrics.FalsePositives, 2, (1, 5)),
(
"precision_at_top_k",
keras.metrics.Precision,
2,
(2, 3, 4),
{"top_k": 2, "class_id": 1},
),
(
"precision_at_recall",
keras.metrics.PrecisionAtRecall,
2,
(1, 5),
{"recall": 0.8},
),
("auc", keras.metrics.AUC, 2, (1, 5), {"multi_label": True}),
("cosine_similarity", keras.metrics.CosineSimilarity, 2, (2, 3, 1)),
]
)
def test_metric(self, metric_cls, num_tensor_args, shape, init_kwargs=None):
init_kwargs = init_kwargs or {}
metric = metric_cls(**init_kwargs)
metric(*self.generate_inputs(num_tensor_args, shape))
self.evaluate([v.initializer for v in metric.variables])
loaded = self._test_metric_save_and_load(
metric, self._save_model_dir(), num_tensor_args, shape
)
self.assertEqual(type(loaded), type(metric))
@parameterized.named_parameters(
[
("mean", keras.metrics.Mean, 1, False),
("auc", keras.metrics.AUC, 2, False),
("mean_tensor", keras.metrics.MeanTensor, 1, True),
]
)
def test_custom_metric(self, base_cls, num_tensor_args, requires_build):
class CustomMetric(base_cls):
def update_state(self, *args):
# Sometimes built-in metrics return an op in update_state.
# Custom metrics don't support returning ops, so wrap the
# update_state method while returning nothing.
super().update_state(*args)
with self.cached_session():
metric = CustomMetric()
save_dir = self._save_model_dir("first_save")
if requires_build:
metric(*self.generate_inputs(num_tensor_args))
self.evaluate([v.initializer for v in metric.variables])
with self.assertRaisesRegex(
ValueError, "Unable to restore custom object"
):
self._test_metric_save_and_load(
metric, save_dir, num_tensor_args
)
with object_registration.CustomObjectScope(
{"CustomMetric": CustomMetric}
):
loaded = self._test_metric_save_and_load(
metric, save_dir, num_tensor_args, test_sample_weight=False
)
self._test_metric_save_and_load(
loaded,
self._save_model_dir("second_save"),
num_tensor_args,
test_sample_weight=False,
)
def test_registered_custom_metric(self):
with self.cached_session():
metric = CustomMeanMetric()
save_dir = self._save_model_dir("first_save")
self.evaluate([v.initializer for v in metric.variables])
loaded = self._test_metric_save_and_load(
metric, save_dir, num_tensor_args=1, test_sample_weight=False
)
self._test_metric_save_and_load(
loaded,
self._save_model_dir("second_save"),
num_tensor_args=1,
test_sample_weight=False,
)
def test_custom_metric_wrapped_call(self):
class NegativeMean(keras.metrics.Mean):
@tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])
def update_state(self, value):
super().update_state(-value)
metric = NegativeMean()
self.evaluate([v.initializer for v in metric.variables])
with object_registration.CustomObjectScope(
{"NegativeMean": NegativeMean}
):
self._test_metric_save_and_load(
metric, self._save_model_dir(), 1, test_sample_weight=False
)
@test_combinations.run_with_all_model_types
def test_custom_metric_model(self):
# TODO(b/134519980): Issue with `model.fit` if the model call function
# uses a `tf.function` in graph mode.
if not tf.executing_eagerly():
return
x = np.random.random((1, 3))
y = np.random.random((1, 4))
class CustomMetric(keras.metrics.MeanSquaredError):
pass
def zero_metric(y_true, y_pred):
del y_true, y_pred
return 0
model = test_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
loss="mse", optimizer="SGD", metrics=[CustomMetric(), zero_metric]
)
model.fit(x, y)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir, save_format="tf")
with self.assertRaisesRegex(ValueError, "custom_objects"):
keras_load.load(saved_model_dir)
with object_registration.CustomObjectScope(
{"CustomMetric": CustomMetric, "zero_metric": zero_metric}
):
loaded = keras_load.load(saved_model_dir)
self.evaluate([v.initializer for v in loaded.variables])
loaded.fit(x, y)
class TestUpdateMetadata(tf.test.TestCase):
def testAddFullSaveSpec(self):
save_spec = tf.TensorSpec([3, 5], dtype=tf.int32)
node_metadata = json_utils.Encoder().encode({"save_spec": save_spec})
metadata = saved_metadata_pb2.SavedMetadata()
metadata.nodes.add(
version=versions_pb2.VersionDef(
producer=1, min_consumer=1, bad_consumers=[]
),
identifier="_tf_keras_model",
metadata=node_metadata,
)
new_metadata = keras_load._update_to_current_version(metadata)
node_metadata = json_utils.decode(new_metadata.nodes[0].metadata)
expected_full_spec = ([tf.TensorSpec(shape=(3, 5), dtype=tf.int32)], {})
self.assertAllEqual(
expected_full_spec, node_metadata.get("full_save_spec")
)
if __name__ == "__main__":
with saved_model_utils.keras_option_scope(
save_traces=False, in_tf_saved_model_scope=True
):
tf.test.main()
| tf-keras/tf_keras/saving/legacy/saved_model/saved_model_test.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/saved_model_test.py",
"repo_id": "tf-keras",
"token_count": 29646
} | 254 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run doctests for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import tensorflow.compat.v2 as tf
from absl import flags
from absl.testing import absltest
from tf_keras.testing_infra import keras_doctest_lib
tf.compat.v1.enable_v2_behavior()
# We put doctest after absltest so that it picks up the unittest monkeypatch.
# Otherwise doctest tests aren't runnable at all.
import doctest # noqa: E402
FLAGS = flags.FLAGS
flags.DEFINE_string("module", None, "A specific module to run doctest on.")
flags.DEFINE_boolean(
"list", None, "List all the modules in the core package imported."
)
flags.DEFINE_string("file", None, "A specific file to run doctest on.")
flags.mark_flags_as_mutual_exclusive(["module", "file"])
flags.mark_flags_as_mutual_exclusive(["list", "file"])
PACKAGE = "keras."
def find_modules():
"""Finds all the modules in the core package imported.
Returns:
      A list containing all the imported modules under the `keras.` package.
"""
tf_modules = []
for name, module in sys.modules.items():
if name.startswith(PACKAGE):
tf_modules.append(module)
return tf_modules
def filter_on_submodules(all_modules, submodule):
"""Filters all the modules based on the module flag.
    The module flag has to be relative to the core package imported.
    For example, if `submodule=layers`, this function will return all the
    modules under `keras.layers`.
Args:
all_modules: All the modules in the core package.
submodule: Submodule to filter from all the modules.
Returns:
All the modules in the submodule.
"""
filtered_modules = [
mod for mod in all_modules if PACKAGE + submodule in mod.__name__
]
return filtered_modules
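# A small illustration (module names are hypothetical): with
# PACKAGE = "keras." and submodule = "layers", a module named
# "keras.layers.core" is kept, while "keras.backend" is filtered out because
# "keras.layers" is not a substring of its name.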
def get_module_and_inject_docstring(file_path):
"""Replaces the docstring of the module with the changed file's content.
Args:
file_path: Path to the file
Returns:
A list containing the module changed by the file.
"""
file_path = os.path.abspath(file_path)
mod_index = file_path.find(PACKAGE.replace(".", os.sep))
file_mod_name, _ = os.path.splitext(file_path[mod_index:])
file_module = sys.modules[file_mod_name.replace(os.sep, ".")]
with open(file_path, "r") as f:
content = f.read()
file_module.__doc__ = content
return [file_module]
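# A hedged sketch (the path is illustrative): for ".../keras/activations.py",
# the derived module name is "keras.activations"; its __doc__ is replaced
# with the file's current text so that doctest runs against the edited
# source.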
class TfTestCase(tf.test.TestCase):
def set_up(self, _):
self.setUp()
def tear_down(self, _):
self.tearDown()
def load_tests(unused_loader, tests, unused_ignore):
"""Loads all the tests in the docstrings and runs them."""
tf_modules = find_modules()
if FLAGS.module:
tf_modules = filter_on_submodules(tf_modules, FLAGS.module)
if FLAGS.list:
print("**************************************************")
for mod in tf_modules:
print(mod.__name__)
print("**************************************************")
return tests
if FLAGS.file:
tf_modules = get_module_and_inject_docstring(FLAGS.file)
for module in tf_modules:
testcase = TfTestCase()
tests.addTests(
doctest.DocTestSuite(
module,
test_finder=doctest.DocTestFinder(exclude_empty=False),
extraglobs={"tf": tf, "np": np, "os": os},
setUp=testcase.set_up,
tearDown=testcase.tear_down,
checker=keras_doctest_lib.KerasDoctestOutputChecker(),
optionflags=(
doctest.ELLIPSIS
| doctest.NORMALIZE_WHITESPACE
| doctest.IGNORE_EXCEPTION_DETAIL
| doctest.DONT_ACCEPT_BLANKLINE
),
)
)
return tests
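# Typical invocations (flag values are illustrative):
#   python keras_doctest.py --module=layers   # doctests for keras.layers.*
#   python keras_doctest.py --file=path/to/file.py
#   python keras_doctest.py --list            # print the candidate modules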
if __name__ == "__main__":
absltest.main()
| tf-keras/tf_keras/tests/keras_doctest.py/0 | {
"file_path": "tf-keras/tf_keras/tests/keras_doctest.py",
"repo_id": "tf-keras",
"token_count": 1771
} | 255 |
"""Keras common starlark macros."""
# Placeholder: load aliased py_test
# Macro to run Keras py_tests against pip installation.
def py_test(deps = [], data = [], kernels = [], **kwargs):
native.py_test(
deps = select({
"//conditions:default": deps,
"//tf_keras:no_keras_py_deps": [],
}),
data = data + kernels,
**kwargs
)
# This is a trimmed-down version of tf_py_test, since a lot of internal
# features are simply not available in the OSS build and also not applicable
# to Keras. So far xla, grpc and tfrt are ignored.
def tf_py_test(
name,
srcs,
size = "medium",
data = [],
deps = [],
main = None,
args = [],
tags = [],
shard_count = 1,
additional_visibility = [],
kernels = [],
flaky = 0,
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False,
tfrt_enabled = False,
tfrt_enabled_internal = False,
**kwargs):
kwargs.setdefault("python_version", "PY3")
kwargs.setdefault("srcs_version", "PY3")
py_test(
name = name,
size = size,
srcs = srcs,
args = args,
data = data,
flaky = flaky,
kernels = kernels,
main = main,
shard_count = shard_count,
tags = tags,
deps = deps,
**kwargs
)
# This is a trimmed-down version of cuda_py_test, since a lot of internal
# features are simply not available in the OSS build and also not applicable
# to Keras. So far xla, grpc and tfrt are ignored.
def cuda_py_test(
name,
srcs,
size = "medium",
data = [],
main = None,
args = [],
shard_count = 1,
kernels = [],
tags = [],
flaky = 0,
xla_enable_strict_auto_jit = False,
xla_enabled = False,
grpc_enabled = False,
xla_tags = [], # additional tags for xla_gpu tests
**kwargs):
if main == None:
main = name + ".py"
for config in ["cpu", "gpu"]:
test_name = name
test_tags = tags
if config == "gpu":
test_tags = test_tags + ["requires-gpu-nvidia", "gpu"]
if xla_enable_strict_auto_jit:
tf_py_test(
name = test_name + "_xla_" + config,
size = size,
srcs = srcs,
args = args,
data = data,
flaky = flaky,
grpc_enabled = grpc_enabled,
kernels = kernels,
main = main,
shard_count = shard_count,
tags = test_tags + xla_tags + ["xla", "manual"],
xla_enabled = xla_enabled,
xla_enable_strict_auto_jit = True,
**kwargs
)
if config == "gpu":
test_name += "_gpu"
tf_py_test(
name = test_name,
size = size,
srcs = srcs,
args = args,
data = data,
flaky = flaky,
grpc_enabled = grpc_enabled,
kernels = kernels,
main = main,
shard_count = shard_count,
tags = test_tags,
xla_enabled = xla_enabled,
xla_enable_strict_auto_jit = False,
**kwargs
)
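# A hypothetical usage sketch (target name and deps are illustrative): the
# macro above expands into two tests, ":my_test" (cpu) and ":my_test_gpu"
# (tagged "requires-gpu-nvidia"):
#
#   cuda_py_test(
#       name = "my_test",
#       srcs = ["my_test.py"],
#       deps = ["//tf_keras"],
#   )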
def tpu_py_test(**kwargs):
# Skip the tpu test for Keras oss.
pass
# This is a trimmed-down version of distribute_py_test, since a lot of
# internal features are simply not available in the OSS build and also not
# applicable to Keras. In particular, the TPU test branches are removed.
def distribute_py_test(
name,
srcs = [],
size = "medium",
deps = [],
tags = [],
data = [],
main = None,
args = [],
tpu_args = [],
tpu_tags = None,
shard_count = 1,
full_precision = False,
xla_enable_strict_auto_jit = True,
disable_mlir_bridge = True,
disable_tpu_use_tfrt = None,
**kwargs):
# Default to PY3 since multi worker tests require PY3.
kwargs.setdefault("python_version", "PY3")
main = main if main else "%s.py" % name
cuda_py_test(
name = name,
srcs = srcs,
data = data,
main = main,
size = size,
deps = deps,
shard_count = shard_count,
tags = tags,
args = args,
**kwargs
)
# We are never indexing generated code in the OSS build, but still
# return a select() for consistency.
def if_indexing_source_code(
if_true, # @unused
if_false):
"""Return a select() on whether or not we are building for source code indexing."""
return select({
"//conditions:default": if_false,
})
| tf-keras/tf_keras/tf_keras.bzl/0 | {
"file_path": "tf-keras/tf_keras/tf_keras.bzl",
"repo_id": "tf-keras",
"token_count": 2433
} | 256 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conv_utils."""
import itertools
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.utils import conv_utils
def _get_const_output_shape(input_shape, dim):
return tuple([min(d, dim) for d in input_shape])
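# For example: _get_const_output_shape((5, 2, 3), dim=1) == (1, 1, 1), and
# _get_const_output_shape((0, 3), dim=2) == (0, 2).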
input_shapes = [
(0,),
(0, 0),
(1,),
(2,),
(3,),
(1, 0),
(0, 3),
(1, 1),
(1, 2),
(3, 1),
(2, 2),
(3, 3),
(1, 0, 1),
(5, 2, 3),
(3, 5, 6, 7, 0),
(3, 2, 2, 4, 4),
(1, 2, 3, 4, 7, 2),
]
class TestBasicConvUtilsTest(tf.test.TestCase):
def test_convert_data_format(self):
self.assertEqual(
"NCDHW", conv_utils.convert_data_format("channels_first", 5)
)
self.assertEqual(
"NCHW", conv_utils.convert_data_format("channels_first", 4)
)
self.assertEqual(
"NCW", conv_utils.convert_data_format("channels_first", 3)
)
self.assertEqual(
"NHWC", conv_utils.convert_data_format("channels_last", 4)
)
self.assertEqual(
"NWC", conv_utils.convert_data_format("channels_last", 3)
)
self.assertEqual(
"NDHWC", conv_utils.convert_data_format("channels_last", 5)
)
with self.assertRaises(ValueError):
conv_utils.convert_data_format("invalid", 2)
def test_normalize_tuple(self):
self.assertEqual(
(2, 2, 2),
conv_utils.normalize_tuple(2, n=3, name="strides", allow_zero=True),
)
self.assertEqual(
(2, 1, 2),
conv_utils.normalize_tuple(
(2, 1, 2), n=3, name="strides", allow_zero=True
),
)
self.assertEqual(
(
1,
2,
3,
),
conv_utils.normalize_tuple((1, 2, 3), n=3, name="pool_size"),
)
self.assertEqual(
(3, 3, 3), conv_utils.normalize_tuple(3, n=3, name="pool_size")
)
with self.assertRaisesRegex(
ValueError,
r"including \{-1\} that does not satisfy the requirement `> 0`",
):
conv_utils.normalize_tuple((3, -1, 3), n=3, name="negative_size")
with self.assertRaisesRegex(
ValueError,
r"The `strides` argument .* a tuple of 3 integers.* \(2, 1\)$",
):
conv_utils.normalize_tuple(
(2, 1), n=3, name="strides", allow_zero=True
)
with self.assertRaisesRegex(
ValueError,
r"The `kernel_size` argument .* tuple of 3 integers.* None$",
):
conv_utils.normalize_tuple(None, n=3, name="kernel_size")
with self.assertRaisesRegex(
ValueError, r"including \{-4\} that does not .* `>= 0`"
):
conv_utils.normalize_tuple(-4, n=3, name="strides", allow_zero=True)
with self.assertRaisesRegex(
ValueError, r"including \{0\} that does not .* `> 0`"
):
conv_utils.normalize_tuple((0, 1, 2), n=3, name="pool_size")
def test_normalize_data_format(self):
self.assertEqual(
"channels_last", conv_utils.normalize_data_format("Channels_Last")
)
self.assertEqual(
"channels_first", conv_utils.normalize_data_format("CHANNELS_FIRST")
)
with self.assertRaises(ValueError):
conv_utils.normalize_data_format("invalid")
def test_normalize_padding(self):
self.assertEqual("same", conv_utils.normalize_padding("SAME"))
self.assertEqual("valid", conv_utils.normalize_padding("VALID"))
with self.assertRaises(ValueError):
conv_utils.normalize_padding("invalid")
def test_conv_output_length(self):
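        # conv_output_length computes dilated_k = (k - 1) * dilation + 1,
        # then length = input ("same"), input - dilated_k + 1 ("valid"), or
        # input + dilated_k - 1 ("full"), ceil-divided by the stride.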
self.assertEqual(4, conv_utils.conv_output_length(4, 2, "same", 1, 1))
self.assertEqual(2, conv_utils.conv_output_length(4, 2, "same", 2, 1))
self.assertEqual(3, conv_utils.conv_output_length(4, 2, "valid", 1, 1))
self.assertEqual(2, conv_utils.conv_output_length(4, 2, "valid", 2, 1))
self.assertEqual(5, conv_utils.conv_output_length(4, 2, "full", 1, 1))
self.assertEqual(3, conv_utils.conv_output_length(4, 2, "full", 2, 1))
self.assertEqual(2, conv_utils.conv_output_length(5, 2, "valid", 2, 2))
def test_conv_input_length(self):
self.assertEqual(3, conv_utils.conv_input_length(4, 2, "same", 1))
self.assertEqual(2, conv_utils.conv_input_length(2, 2, "same", 2))
self.assertEqual(4, conv_utils.conv_input_length(3, 2, "valid", 1))
self.assertEqual(4, conv_utils.conv_input_length(2, 2, "valid", 2))
self.assertEqual(3, conv_utils.conv_input_length(4, 2, "full", 1))
self.assertEqual(4, conv_utils.conv_input_length(3, 2, "full", 2))
def test_deconv_output_length(self):
self.assertEqual(
4, conv_utils.deconv_output_length(4, 2, "same", stride=1)
)
self.assertEqual(
8, conv_utils.deconv_output_length(4, 2, "same", stride=2)
)
self.assertEqual(
5, conv_utils.deconv_output_length(4, 2, "valid", stride=1)
)
self.assertEqual(
8, conv_utils.deconv_output_length(4, 2, "valid", stride=2)
)
self.assertEqual(
3, conv_utils.deconv_output_length(4, 2, "full", stride=1)
)
self.assertEqual(
6, conv_utils.deconv_output_length(4, 2, "full", stride=2)
)
self.assertEqual(
5,
conv_utils.deconv_output_length(
4, 2, "same", output_padding=2, stride=1
),
)
self.assertEqual(
7,
conv_utils.deconv_output_length(
4, 2, "same", output_padding=1, stride=2
),
)
self.assertEqual(
7,
conv_utils.deconv_output_length(
4, 2, "valid", output_padding=2, stride=1
),
)
self.assertEqual(
9,
conv_utils.deconv_output_length(
4, 2, "valid", output_padding=1, stride=2
),
)
self.assertEqual(
5,
conv_utils.deconv_output_length(
4, 2, "full", output_padding=2, stride=1
),
)
self.assertEqual(
7,
conv_utils.deconv_output_length(
4, 2, "full", output_padding=1, stride=2
),
)
self.assertEqual(
5,
conv_utils.deconv_output_length(
4, 2, "same", output_padding=1, stride=1, dilation=2
),
)
self.assertEqual(
12,
conv_utils.deconv_output_length(
4, 2, "valid", output_padding=2, stride=2, dilation=3
),
)
self.assertEqual(
6,
conv_utils.deconv_output_length(
4, 2, "full", output_padding=2, stride=2, dilation=3
),
)
@parameterized.parameters(input_shapes)
class TestConvUtils(tf.test.TestCase, parameterized.TestCase):
def test_conv_kernel_mask_fc(self, *input_shape):
padding = "valid"
kernel_shape = input_shape
ndims = len(input_shape)
strides = (1,) * ndims
output_shape = _get_const_output_shape(input_shape, dim=1)
mask = np.ones(input_shape + output_shape, bool)
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape, kernel_shape, strides, padding
),
)
def test_conv_kernel_mask_diag(self, *input_shape):
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = (1,) * ndims
for padding in ["valid", "same"]:
mask = np.identity(int(np.prod(input_shape)), bool)
mask = np.reshape(mask, input_shape * 2)
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape, kernel_shape, strides, padding
),
)
def test_conv_kernel_mask_full_stride(self, *input_shape):
padding = "valid"
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = tuple([max(d, 1) for d in input_shape])
output_shape = _get_const_output_shape(input_shape, dim=1)
mask = np.zeros(input_shape + output_shape, bool)
if all(d > 0 for d in mask.shape):
mask[(0,) * len(output_shape)] = True
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape, kernel_shape, strides, padding
),
)
def test_conv_kernel_mask_almost_full_stride(self, *input_shape):
padding = "valid"
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = tuple([max(d - 1, 1) for d in input_shape])
output_shape = _get_const_output_shape(input_shape, dim=2)
mask = np.zeros(input_shape + output_shape, bool)
if all(d > 0 for d in mask.shape):
for in_position in itertools.product(
*[[0, d - 1] for d in input_shape]
):
out_position = tuple([min(p, 1) for p in in_position])
mask[in_position + out_position] = True
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape, kernel_shape, strides, padding
),
)
def test_conv_kernel_mask_rect_kernel(self, *input_shape):
padding = "valid"
ndims = len(input_shape)
strides = (1,) * ndims
for d in range(ndims):
kernel_shape = [1] * ndims
kernel_shape[d] = input_shape[d]
output_shape = list(input_shape)
output_shape[d] = min(1, input_shape[d])
mask = np.identity(int(np.prod(input_shape)), bool)
mask = np.reshape(mask, input_shape * 2)
for p in itertools.product(
*[range(input_shape[dim]) for dim in range(ndims)]
):
p = list(p)
p[d] = slice(None)
mask[tuple(p * 2)] = True
mask = np.take(mask, range(0, min(1, input_shape[d])), ndims + d)
self.assertAllEqual(
mask,
conv_utils.conv_kernel_mask(
input_shape, kernel_shape, strides, padding
),
)
def test_conv_kernel_mask_wrong_padding(self, *input_shape):
ndims = len(input_shape)
kernel_shape = (1,) * ndims
strides = (1,) * ndims
conv_utils.conv_kernel_mask(input_shape, kernel_shape, strides, "valid")
conv_utils.conv_kernel_mask(input_shape, kernel_shape, strides, "same")
self.assertRaises(
NotImplementedError,
conv_utils.conv_kernel_mask,
input_shape,
kernel_shape,
strides,
"full",
)
def test_conv_kernel_mask_wrong_dims(self, *input_shape):
kernel_shape = 1
strides = 1
conv_utils.conv_kernel_mask(input_shape, kernel_shape, strides, "valid")
ndims = len(input_shape)
kernel_shape = (2,) * (ndims + 1)
self.assertRaises(
ValueError,
conv_utils.conv_kernel_mask,
input_shape,
kernel_shape,
strides,
"same",
)
strides = (1,) * ndims
self.assertRaises(
ValueError,
conv_utils.conv_kernel_mask,
input_shape,
kernel_shape,
strides,
"valid",
)
kernel_shape = (1,) * ndims
strides = (2,) * (ndims - 1)
self.assertRaises(
ValueError,
conv_utils.conv_kernel_mask,
input_shape,
kernel_shape,
strides,
"valid",
)
strides = (2,) * ndims
conv_utils.conv_kernel_mask(input_shape, kernel_shape, strides, "valid")
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/conv_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/conv_utils_test.py",
"repo_id": "tf-keras",
"token_count": 6662
} | 257 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for io_utils."""
import builtins
import sys
from pathlib import Path
import tensorflow.compat.v2 as tf
from tf_keras.testing_infra import test_combinations
from tf_keras.utils import io_utils
class TestIOUtils(test_combinations.TestCase):
def test_ask_to_proceed_with_overwrite(self):
with tf.compat.v1.test.mock.patch.object(builtins, "input") as mock_log:
mock_log.return_value = "y"
self.assertTrue(
io_utils.ask_to_proceed_with_overwrite("/tmp/not_exists")
)
mock_log.return_value = "n"
self.assertFalse(
io_utils.ask_to_proceed_with_overwrite("/tmp/not_exists")
)
mock_log.side_effect = ["m", "y"]
self.assertTrue(
io_utils.ask_to_proceed_with_overwrite("/tmp/not_exists")
)
mock_log.side_effect = ["m", "n"]
self.assertFalse(
io_utils.ask_to_proceed_with_overwrite("/tmp/not_exists")
)
def test_path_to_string(self):
class PathLikeDummy:
def __fspath__(self):
return "dummypath"
dummy = object()
# conversion of PathLike
self.assertEqual(io_utils.path_to_string(Path("path")), "path")
self.assertEqual(io_utils.path_to_string(PathLikeDummy()), "dummypath")
# pass-through, works for all versions of python
self.assertEqual(io_utils.path_to_string("path"), "path")
self.assertIs(io_utils.path_to_string(dummy), dummy)
def test_print_msg(self):
enabled = io_utils.is_interactive_logging_enabled()
io_utils.disable_interactive_logging()
self.assertFalse(io_utils.is_interactive_logging_enabled())
with self.assertLogs(level="INFO") as logged:
io_utils.print_msg("Testing Message")
self.assertIn("Testing Message", logged.output[0])
io_utils.enable_interactive_logging()
self.assertTrue(io_utils.is_interactive_logging_enabled())
with self.captureWritesToStream(sys.stdout) as printed:
io_utils.print_msg("Testing Message")
self.assertEqual("Testing Message\n", printed.contents())
if enabled:
io_utils.enable_interactive_logging()
else:
io_utils.disable_interactive_logging()
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/io_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/io_utils_test.py",
"repo_id": "tf-keras",
"token_count": 1283
} | 258 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python module for evaluation loop."""
import re
import tensorflow as tf
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tf_keras.callbacks import ModelCheckpoint
from tf_keras.optimizers import optimizer
from tensorflow.python.util.tf_export import keras_export
_PRINT_EVAL_STEP_EVERY_SEC = 60.0
_ITERATIONS_UNINITIALIZED = -1
_CHECKPOINT_TIMEOUT_SEC = 30
def list_checkpoint_attributes(ckpt_dir_or_file):
"""Lists all the attributes in a checkpoint.
    Checkpoint keys are paths in a checkpoint graph, and the attribute is the
    first element in the path. E.g. with a checkpoint key
    "optimizer/iter/.ATTRIBUTES/VARIABLE_VALUE", "optimizer" is the attribute.
    The attribute is also used to save/restore a variable in a checkpoint,
    e.g. tf.train.Checkpoint(optimizer=optimizer, model=model).
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
Set of attributes in a checkpoint.
"""
reader = tf.train.load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
return {name.split("/")[0] for name in variable_map.keys()}
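# A minimal usage sketch (the checkpoint path below is hypothetical):
#
#   ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)
#   path = ckpt.save("/tmp/ckpt/ckpt")  # e.g. returns "/tmp/ckpt/ckpt-1"
#   list_checkpoint_attributes(path)
#   # -> a set such as {"model", "optimizer", "save_counter"}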
@keras_export("keras.utils.SidecarEvaluator", v1=[])
class SidecarEvaluator:
"""A class designed for a dedicated evaluator task.
`SidecarEvaluator` is expected to be run in a process on a separate machine
from the training cluster. It is meant for the purpose of a dedicated
evaluator, evaluating the metric results of a training cluster which has one
or more workers performing the training, and saving checkpoints.
The `SidecarEvaluator` API is compatible with both Custom Training Loop
(CTL), and TF-Keras `Model.fit` to be used in the training cluster. Using
the model (with compiled metrics) provided at `__init__`, `SidecarEvaluator`
repeatedly performs evaluation "epochs" when it finds a checkpoint that has
not yet been used. Depending on the `steps` argument, an eval epoch is
evaluation over all eval data, or up to certain number of steps (batches).
See examples below for how the training program should save the checkpoints
in order to be recognized by `SidecarEvaluator`.
Since under the hood, `SidecarEvaluator` uses `model.evaluate` for
evaluation, it also supports arbitrary TF-Keras callbacks. That is, if one
or more callbacks are provided, their `on_test_batch_begin` and
`on_test_batch_end` methods are called at the start and end of a batch, and
their `on_test_begin` and `on_test_end` are called at the start and end of
an evaluation epoch. Note that `SidecarEvaluator` may skip some checkpoints
because it always picks up the latest checkpoint available, and during an
evaluation epoch, multiple checkpoints can be produced from the training
side.
Example:
```python
model = tf.keras.models.Sequential(...)
model.compile(metrics=tf.keras.metrics.SparseCategoricalAccuracy(
name="eval_metrics"))
data = tf.data.Dataset.from_tensor_slices(...)
    tf.keras.utils.SidecarEvaluator(
model=model,
data=data,
# dir for training-saved checkpoint
checkpoint_dir='/tmp/checkpoint_dir',
steps=None, # Eval until dataset is exhausted
max_evaluations=None, # The evaluation needs to be stopped manually
callbacks=[tf.keras.callbacks.TensorBoard(log_dir='/tmp/log_dir')]
).start()
```
`SidecarEvaluator.start` writes a series of summary files which can be
visualized by tensorboard (which provides a webpage link):
```bash
$ tensorboard --logdir=/tmp/log_dir/validation
...
TensorBoard 2.4.0a0 at http://host:port (Press CTRL+C to quit)
```
If the training cluster uses a CTL, the `checkpoint_dir` should contain
checkpoints that track both `model` and `optimizer`, to fulfill
`SidecarEvaluator`'s expectation. This can be done by a
`tf.train.Checkpoint` and a `tf.train.CheckpointManager`:
```python
# Same `checkpoint_dir` supplied to `SidecarEvaluator`.
checkpoint_dir = ...
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir=..., max_to_keep=...)
checkpoint_manager.save()
```
If the training cluster uses TF-Keras `Model.fit` API, a
`tf.keras.callbacks.ModelCheckpoint` should be used, with
`save_weights_only=True`, and the `filepath` should have 'ckpt-{epoch}'
appended:
```python
# Same `checkpoint_dir` supplied to `SidecarEvaluator`.
checkpoint_dir = ...
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(checkpoint_dir, 'ckpt-{epoch}'),
save_weights_only=True)
model.fit(dataset, epochs, callbacks=[model_checkpoint])
```
"""
def __init__(
self,
model,
data,
checkpoint_dir,
steps=None,
max_evaluations=None,
callbacks=None,
):
"""Initializes an `SidecarEvaluator` object.
Args:
model: Model to use for evaluation. The model object used here should
be a `tf.keras.Model`, and should be the same as the one that is
used in training, where `tf.keras.Model`s are checkpointed. The
model should have one or more metrics compiled before using
`SidecarEvaluator`.
data: The input data for evaluation. `SidecarEvaluator` supports all
data types that TF-Keras `model.evaluate` supports as the input data
`x`, such as a `tf.data.Dataset`.
checkpoint_dir: Directory where checkpoint files are saved.
steps: Number of steps to perform evaluation for, when evaluating a
single checkpoint file. If `None`, evaluation continues until the
dataset is exhausted. For repeated evaluation dataset, user must
specify `steps` to avoid infinite evaluation loop.
max_evaluations: Maximum number of the checkpoint file to be
evaluated, for `SidecarEvaluator` to know when to stop. The
evaluator will stop after it evaluates a checkpoint filepath ending
with '<ckpt_name>-<max_evaluations>'. If using
`tf.train.CheckpointManager.save` for saving checkpoints, the kth
saved checkpoint has the filepath suffix '<ckpt_name>-<k>' (k=1 for
the first saved), and if checkpoints are saved every epoch after
training, the filepath saved at the kth epoch would end with
            '<ckpt_name>-<k>'. Thus, if training runs for n epochs, and the
evaluator should end after the training finishes, use n for this
parameter. Note that this is not necessarily equal to the number of
total evaluations, since some checkpoints may be skipped if
evaluation is slower than checkpoint creation. If `None`,
`SidecarEvaluator` will evaluate indefinitely, and the user must
terminate evaluator program themselves.
callbacks: List of `keras.callbacks.Callback` instances to apply
during evaluation. See
[callbacks](/api_docs/python/tf/tf_keras/callbacks).
"""
self.model = model
self.data = data
self.checkpoint_dir = checkpoint_dir
self._iterations = tf.Variable(
name="iterations",
initial_value=_ITERATIONS_UNINITIALIZED,
dtype=tf.int64,
)
self.max_evaluations = max_evaluations
self.steps = steps
self.callbacks = callbacks or []
def _timeout_fn(self):
logging.info(
"No checkpoints appear to be found after "
f"{_CHECKPOINT_TIMEOUT_SEC} seconds. "
"Please check if you are properly using a "
"`tf.train.Checkpoint/CheckpointManager` or "
"`tf.keras.callbacks.ModelCheckpoint(save_weights_only=True)` to "
"save checkpoints by the training. See "
"`tf.keras.SidecarEvaluator` doc for recommended flows "
"of saving checkpoints."
)
return False
def start(self):
"""Starts the evaluation loop."""
if self.model.optimizer and isinstance(
self.model.optimizer, optimizer.Optimizer
):
checkpoint = tf.train.Checkpoint(
model=self.model, optimizer=self.model.optimizer
)
else:
optimizer_checkpoint = tf.train.Checkpoint(iter=self._iterations)
checkpoint = tf.train.Checkpoint(
model=self.model, optimizer=optimizer_checkpoint
)
for latest_checkpoint in tf.train.checkpoints_iterator(
self.checkpoint_dir,
timeout=_CHECKPOINT_TIMEOUT_SEC,
timeout_fn=self._timeout_fn,
):
try:
# `expect_partial` because the checkpoint can have other
# `Trackable`s such as `optimizer`.
checkpoint.restore(latest_checkpoint).expect_partial()
checkpoint_attributes = list_checkpoint_attributes(
latest_checkpoint
)
# The checkpoint should contain model and optimizer for
# SidecarEvaluator to work. But the model weights saved by
# ModelCheckpoint callback does not contain model as an
# attribute. To make SidecarEvaluator compatibly work in this
# case, use model.load_weights to load the model's weights,
# while self._iterations is still restored by checkpoint
# variable.
if "model" not in checkpoint_attributes:
self.model.load_weights(latest_checkpoint)
# The model checkpoint might not include optimizer in cases,
# e.g. using a custom training loop. Directly assign the
# iterations property to be used in callbacks.
if self.model.optimizer and not isinstance(
self.model.optimizer,
optimizer.Optimizer,
):
# experimental optimizer automatically restores the
# iteration value.
self.model.optimizer.iterations.assign(self._iterations)
except (tf.errors.OpError,) as e:
if isinstance(e, tf.errors.UnavailableError):
# With distribute training, worker preemption can result in
# `UnavailableError`. Raise this to be handled outside the
# evaluation loop.
raise e
                # A couple of errors can happen here with the coordinator
                # racing to write the checkpoint:
# 1) OpError: open failed for <file path>: No such file or
# directory
# 2) NotFoundError (subclass of OpError): Unsuccessful
# TensorSliceReader constructor.
# TODO(rchao): Remove this except block once b/150954027 is
# resolved.
logging.info(
"SidecarEvaluator encountered an error when loading the "
f"checkpoint at {latest_checkpoint}. Retrying. "
f"Error: {e.__class__.__name__}: {e}"
)
continue
if (
self._iterations.numpy() == _ITERATIONS_UNINITIALIZED
and not isinstance(
self.model.optimizer,
optimizer.Optimizer,
)
):
raise RuntimeError(
"Variable `iterations` cannot be loaded from the "
f"checkpoint file at {self.checkpoint_dir}. "
"Please ensure `iterations` is "
"included in the checkpoint saved during training."
)
logging.info(
"Evaluation starts: Model weights loaded from latest "
f"checkpoint file {latest_checkpoint}"
)
self.model.evaluate(
self.data, steps=self.steps, callbacks=self.callbacks, verbose=2
)
return_metrics = {}
for metric in self.model.metrics:
result = metric.result()
if isinstance(result, dict):
return_metrics.update(result)
else:
return_metrics[metric.name] = result
logging.info(
"End of evaluation. Metrics: %s",
" ".join(
[
f"{name}={value.numpy()}"
for name, value in return_metrics.items()
]
),
)
if self.max_evaluations and (
self.max_evaluations <= int(latest_checkpoint.split("-")[-1])
):
# Exit the loop because we have evaluated the final checkpoint
# file.
logging.info(
"Last checkpoint evaluated. SidecarEvaluator stops."
)
return
@keras_export("keras.experimental.SidecarEvaluator", v1=[])
@deprecation.deprecated_endpoints("keras.experimental.SidecarEvaluator")
class SidecarEvaluatorExperimental(SidecarEvaluator):
"""Deprecated. Please use `tf.keras.utils.SidecarEvaluator` instead.
Caution: `tf.keras.experimental.SidecarEvaluator` endpoint is
deprecated and will be removed in a future release. Please use
`tf.keras.utils.SidecarEvaluator`.
"""
def __init__(self, *args, **kwargs):
logging.warning(
"`tf.keras.experimental.SidecarEvaluator` endpoint is "
"deprecated and will be removed in a future release. Please use "
"`tf.keras.utils.SidecarEvaluator`."
)
super().__init__(*args, **kwargs)
@keras_export("keras.callbacks.SidecarEvaluatorModelExport")
class SidecarEvaluatorModelExport(ModelCheckpoint):
"""Callback to save the best TF-Keras model.
It expands the functionality of the existing ModelCheckpoint callback to
enable exporting the best models after evaluation with validation dataset.
When using the `SidecarEvaluatorModelExport` callback in conjunction with
    `keras.utils.SidecarEvaluator`, users should provide the `export_filepath`,
    which is the path for this callback to export the model or save weights
    to, and `checkpoint_filepath`, which is where the checkpoint is available
    to extract the epoch number from. The callback will then export the model
    that the evaluator deems the best (among the checkpoints saved by the
    training counterpart) to the specified `export_filepath`. This callback is
    intended to be used by SidecarEvaluator only.
Example:
```python
model.compile(loss=..., optimizer=...,
metrics=['accuracy'])
sidecar_evaluator = keras.utils.SidecarEvaluator(
model=model,
data=dataset,
checkpoint_dir=checkpoint_dir,
max_evaluations=1,
callbacks=[
SidecarEvaluatorModelExport(
export_filepath=os.path.join(checkpoint_dir,
'best_model_eval',
'best-model-{epoch:04d}'),
checkpoint_filepath=os.path.join(checkpoint_dir,
'ckpt-{epoch:04d}'),
save_freq="eval",
save_weights_only=True,
monitor="loss",
mode="min",
verbose=1,
),
],
)
sidecar_evaluator.start()
    # Model weights are saved if the evaluator deems it's the best seen so far.
    ```
Args:
export_filepath: Path where best models should be saved by this
`SidecarEvaluatorModelExport` callback. Epoch formatting options, such
as `os.path.join(best_model_dir, 'best-model-{epoch:04d}')`, can be
used to allow saved model to preserve epoch information in the file
name. SidecarEvaluatorModelExport will use the "training epoch" at
which the checkpoint was saved by training to fill the epoch
placeholder in the path.
checkpoint_filepath: Path where checkpoints were saved by training. This
should be the same as what is provided to `filepath` argument of
`ModelCheckpoint` on the training side, such as
`os.path.join(checkpoint_dir, 'ckpt-{epoch:04d}')`.
"""
def __init__(self, export_filepath, checkpoint_filepath, **kwargs):
super().__init__(
filepath=export_filepath,
save_best_only=True,
**kwargs,
)
self._checkpoint_filepath = checkpoint_filepath
def on_test_begin(self, logs=None):
"""Updates export_index to the latest checkpoint."""
most_recent_filepath = (
self._get_most_recently_modified_file_matching_pattern(
self._checkpoint_filepath
)
)
if most_recent_filepath is not None:
self.export_index = (
int(
re.match(r".*ckpt-(?P<ckpt>\d+)", most_recent_filepath)[
"ckpt"
]
)
- 1
)
else:
self.export_index = 0
def on_test_end(self, logs):
"""Saves best model at the end of an evaluation epoch."""
self.epochs_since_last_save += 1
self._save_model(epoch=self.export_index, batch=None, logs=logs)
| tf-keras/tf_keras/utils/sidecar_evaluator.py/0 | {
"file_path": "tf-keras/tf_keras/utils/sidecar_evaluator.py",
"repo_id": "tf-keras",
"token_count": 7794
} | 259 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for TF-Keras classes with v1 and v2 versions."""
import tensorflow.compat.v2 as tf
from tf_keras.utils.generic_utils import LazyLoader
# TODO(b/134426265): Switch back to single-quotes once the issue
# with copybara is fixed.
training = LazyLoader("training", globals(), "tf_keras.engine.training")
training_v1 = LazyLoader(
"training_v1", globals(), "tf_keras.engine.training_v1"
)
base_layer = LazyLoader("base_layer", globals(), "tf_keras.engine.base_layer")
base_layer_v1 = LazyLoader(
"base_layer_v1", globals(), "tf_keras.engine.base_layer_v1"
)
callbacks = LazyLoader("callbacks", globals(), "tf_keras.callbacks")
callbacks_v1 = LazyLoader("callbacks_v1", globals(), "tf_keras.callbacks_v1")
class ModelVersionSelector:
"""Chooses between TF-Keras v1 and v2 Model class."""
def __new__(cls, *args, **kwargs):
use_v2 = should_use_v2()
cls = swap_class(cls, training.Model, training_v1.Model, use_v2)
return super(ModelVersionSelector, cls).__new__(cls)
class LayerVersionSelector:
"""Chooses between TF-Keras v1 and v2 Layer class."""
def __new__(cls, *args, **kwargs):
use_v2 = should_use_v2()
cls = swap_class(cls, base_layer.Layer, base_layer_v1.Layer, use_v2)
return super(LayerVersionSelector, cls).__new__(cls)
class TensorBoardVersionSelector:
"""Chooses between TF-Keras v1 and v2 TensorBoard callback class."""
def __new__(cls, *args, **kwargs):
use_v2 = should_use_v2()
start_cls = cls
cls = swap_class(
start_cls, callbacks.TensorBoard, callbacks_v1.TensorBoard, use_v2
)
if (
start_cls == callbacks_v1.TensorBoard
and cls == callbacks.TensorBoard
):
# Since the v2 class is not a subclass of the v1 class, __init__ has
# to be called manually.
return cls(*args, **kwargs)
return super(TensorBoardVersionSelector, cls).__new__(cls)
def should_use_v2():
"""Determine if v1 or v2 version should be used."""
if tf.executing_eagerly():
return True
elif tf.compat.v1.executing_eagerly_outside_functions():
# Check for a v1 `wrap_function` FuncGraph.
# Code inside a `wrap_function` is treated like v1 code.
graph = tf.compat.v1.get_default_graph()
if getattr(graph, "name", False) and graph.name.startswith(
"wrapped_function"
):
return False
return True
else:
return False
def swap_class(cls, v2_cls, v1_cls, use_v2):
"""Swaps in v2_cls or v1_cls depending on graph mode."""
if cls == object:
return cls
if cls in (v2_cls, v1_cls):
return v2_cls if use_v2 else v1_cls
# Recursively search superclasses to swap in the right TF-Keras class.
new_bases = []
for base in cls.__bases__:
if (
use_v2
and issubclass(base, v1_cls)
            # `v1_cls` often extends `v2_cls`, so it may still call
            # `swap_class` even if it doesn't need to. That being said, it is
            # safest not to over-optimize this logic for the sake of
            # correctness, especially if we swap v1 & v2 classes that don't
            # extend each other, or when the inheritance order is different.
or (not use_v2 and issubclass(base, v2_cls))
):
new_base = swap_class(base, v2_cls, v1_cls, use_v2)
else:
new_base = base
new_bases.append(new_base)
cls.__bases__ = tuple(new_bases)
return cls
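# A minimal sketch of the base swap (the class name is hypothetical):
#
#   class MyModel(training_v1.Model):
#       ...
#
#   swap_class(MyModel, training.Model, training_v1.Model, use_v2=True)
#
# MyModel is neither the v1 nor the v2 class itself, so its __bases__ are
# rewritten in place to derive from the v2 `training.Model` instead.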
def disallow_legacy_graph(cls_name, method_name):
if not tf.compat.v1.executing_eagerly_outside_functions():
error_msg = (
f"Calling `{cls_name}.{method_name}` in graph mode is not "
f"supported when the `{cls_name}` instance was constructed with "
f"eager mode enabled. Please construct your `{cls_name}` instance "
f"in graph mode or call `{cls_name}.{method_name}` with "
"eager mode enabled."
)
raise ValueError(error_msg)
def is_v1_layer_or_model(obj):
return isinstance(obj, (base_layer_v1.Layer, training_v1.Model))
| tf-keras/tf_keras/utils/version_utils.py/0 | {
"file_path": "tf-keras/tf_keras/utils/version_utils.py",
"repo_id": "tf-keras",
"token_count": 2049
} | 260 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
from autokeras import test_utils
from autokeras.analysers import input_analysers
def test_structured_data_input_less_col_name_error():
with pytest.raises(ValueError) as info:
analyser = input_analysers.StructuredDataAnalyser(
column_names=list(range(8))
)
dataset = tf.data.Dataset.from_tensor_slices(
np.random.rand(20, 10)
).batch(32)
for x in dataset:
analyser.update(x)
analyser.finalize()
assert "Expect column_names to have length" in str(info.value)
def test_structured_data_infer_col_types():
analyser = input_analysers.StructuredDataAnalyser(
column_names=test_utils.COLUMN_NAMES,
column_types=None,
)
x = pd.read_csv(test_utils.TRAIN_CSV_PATH)
x.pop("survived")
dataset = tf.data.Dataset.from_tensor_slices(x.values.astype(str)).batch(32)
for data in dataset:
analyser.update(data)
analyser.finalize()
assert analyser.column_types == test_utils.COLUMN_TYPES
def test_dont_infer_specified_column_types():
column_types = copy.copy(test_utils.COLUMN_TYPES)
column_types.pop("sex")
column_types["age"] = "categorical"
analyser = input_analysers.StructuredDataAnalyser(
column_names=test_utils.COLUMN_NAMES,
column_types=column_types,
)
x = pd.read_csv(test_utils.TRAIN_CSV_PATH)
x.pop("survived")
dataset = tf.data.Dataset.from_tensor_slices(x.values.astype(str)).batch(32)
for data in dataset:
analyser.update(data)
analyser.finalize()
assert analyser.column_types["age"] == "categorical"
def test_structured_data_input_with_illegal_dim():
analyser = input_analysers.StructuredDataAnalyser(
column_names=test_utils.COLUMN_NAMES,
column_types=None,
)
dataset = tf.data.Dataset.from_tensor_slices(
np.random.rand(100, 32, 32)
).batch(32)
with pytest.raises(ValueError) as info:
for data in dataset:
analyser.update(data)
analyser.finalize()
assert "Expect the data to StructuredDataInput to have shape" in str(
info.value
)
def test_image_input_analyser_shape_is_list_of_int():
analyser = input_analysers.ImageAnalyser()
dataset = tf.data.Dataset.from_tensor_slices(
np.random.rand(100, 32, 32, 3)
).batch(32)
for data in dataset:
analyser.update(data)
analyser.finalize()
assert isinstance(analyser.shape, list)
assert all(map(lambda x: isinstance(x, int), analyser.shape))
def test_image_input_with_three_dim():
analyser = input_analysers.ImageAnalyser()
dataset = tf.data.Dataset.from_tensor_slices(
np.random.rand(100, 32, 32)
).batch(32)
for data in dataset:
analyser.update(data)
analyser.finalize()
assert len(analyser.shape) == 3
def test_image_input_with_illegal_dim():
analyser = input_analysers.ImageAnalyser()
dataset = tf.data.Dataset.from_tensor_slices(np.random.rand(100, 32)).batch(
32
)
with pytest.raises(ValueError) as info:
for data in dataset:
analyser.update(data)
analyser.finalize()
assert "Expect the data to ImageInput to have shape" in str(info.value)
def test_text_input_with_illegal_dim():
analyser = input_analysers.TextAnalyser()
dataset = tf.data.Dataset.from_tensor_slices(np.random.rand(100, 32)).batch(
32
)
with pytest.raises(ValueError) as info:
for data in dataset:
analyser.update(data)
analyser.finalize()
assert "Expect the data to TextInput to have shape" in str(info.value)
def test_text_analyzer_with_one_dim_doesnt_crash():
analyser = input_analysers.TextAnalyser()
dataset = tf.data.Dataset.from_tensor_slices(["a b c", "b b c"]).batch(32)
for data in dataset:
analyser.update(data)
analyser.finalize()
def test_text_illegal_type_error():
analyser = input_analysers.TextAnalyser()
dataset = tf.data.Dataset.from_tensor_slices(np.random.rand(100, 1)).batch(
32
)
with pytest.raises(TypeError) as info:
for data in dataset:
analyser.update(data)
analyser.finalize()
assert "Expect the data to TextInput to be strings" in str(info.value)
def test_time_series_input_with_illegal_dim():
analyser = input_analysers.TimeseriesAnalyser(
column_names=test_utils.COLUMN_NAMES,
column_types=None,
)
dataset = tf.data.Dataset.from_tensor_slices(
np.random.rand(100, 32, 32)
).batch(32)
with pytest.raises(ValueError) as info:
for data in dataset:
analyser.update(data)
analyser.finalize()
assert "Expect the data to TimeseriesInput to have shape" in str(info.value)
| autokeras/autokeras/analysers/input_analysers_test.py/0 | {
"file_path": "autokeras/autokeras/analysers/input_analysers_test.py",
"repo_id": "autokeras",
"token_count": 2269
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import os
import keras_tuner
from tensorflow import keras
from tensorflow import nest
from tensorflow.keras import callbacks as tf_callbacks
from tensorflow.keras.layers.experimental import preprocessing
from autokeras import pipeline as pipeline_module
from autokeras.utils import data_utils
from autokeras.utils import utils
class AutoTuner(keras_tuner.engine.tuner.Tuner):
"""A Tuner class based on KerasTuner for AutoKeras.
    Different from KerasTuner's Tuner class, AutoTuner not only tunes the
    HyperModel, which can be directly built into a Keras model, but also the
    preprocessors. Therefore, a HyperGraph stores the overall search space
containing both the Preprocessors and Hypermodel. For every trial, the
HyperGraph builds the PreprocessGraph and KerasGraph with the provided
HyperParameters.
The AutoTuner uses EarlyStopping to accelerate the search, and afterwards
fully trains the best model with the specified number of epochs on both the
training and validation data. The fully trained model is the best model to
be used by AutoModel.
# Arguments
oracle: keras_tuner Oracle.
hypermodel: keras_tuner HyperModel.
**kwargs: The args supported by KerasTuner.
"""
def __init__(self, oracle, hypermodel, **kwargs):
# Initialize before super() for reload to work.
self._finished = False
super().__init__(oracle, hypermodel, **kwargs)
# Save or load the HyperModel.
self.hypermodel.save(os.path.join(self.project_dir, "graph"))
self.hyper_pipeline = None
def _populate_initial_space(self):
# Override the function to prevent building the model during
# initialization.
return
def get_best_model(self):
with keras_tuner.engine.tuner.maybe_distribute(
self.distribution_strategy
):
model = keras.models.load_model(self.best_model_path)
return model
def get_best_pipeline(self):
return pipeline_module.load_pipeline(self.best_pipeline_path)
def _pipeline_path(self, trial_id):
return os.path.join(self.get_trial_dir(trial_id), "pipeline")
def _prepare_model_build(self, hp, **kwargs):
"""Prepare for building the Keras model.
It builds the Pipeline from HyperPipeline, transforms the dataset to set
the input shapes and output shapes of the HyperModel.
"""
dataset = kwargs["x"]
pipeline = self.hyper_pipeline.build(hp, dataset)
pipeline.fit(dataset)
dataset = pipeline.transform(dataset)
self.hypermodel.set_io_shapes(data_utils.dataset_shape(dataset))
if "validation_data" in kwargs:
validation_data = pipeline.transform(kwargs["validation_data"])
else:
validation_data = None
return pipeline, dataset, validation_data
def _build_and_fit_model(self, trial, *args, **kwargs):
model = self._try_build(trial.hyperparameters)
(
pipeline,
kwargs["x"],
kwargs["validation_data"],
) = self._prepare_model_build(trial.hyperparameters, **kwargs)
pipeline.save(self._pipeline_path(trial.trial_id))
self.adapt(model, kwargs["x"])
_, history = utils.fit_with_adaptive_batch_size(
model, self.hypermodel.batch_size, **kwargs
)
return history
@staticmethod
def adapt(model, dataset):
"""Adapt the preprocessing layers in the model."""
# Currently, only support using the original dataset to adapt all the
# preprocessing layers before the first non-preprocessing layer.
# TODO: Use PreprocessingStage for preprocessing layers adapt.
# TODO: Use Keras Tuner for preprocessing layers adapt.
x = dataset.map(lambda x, y: x)
def get_output_layers(tensor):
output_layers = []
tensor = nest.flatten(tensor)[0]
for layer in model.layers:
if isinstance(layer, keras.layers.InputLayer):
continue
input_node = nest.flatten(layer.input)[0]
if input_node is tensor:
if isinstance(layer, preprocessing.PreprocessingLayer):
output_layers.append(layer)
return output_layers
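# Breadth-first traversal: adapt each preprocessing layer on the dataset
# produced by its predecessors, then map the dataset through it and feed
# the result to its successor preprocessing layers.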
dq = collections.deque()
for index, input_node in enumerate(nest.flatten(model.input)):
in_x = x.map(lambda *args: nest.flatten(args)[index])
for layer in get_output_layers(input_node):
dq.append((layer, in_x))
while len(dq):
layer, in_x = dq.popleft()
layer.adapt(in_x)
out_x = in_x.map(layer)
for next_layer in get_output_layers(layer.output):
dq.append((next_layer, out_x))
return model
def search(
self,
epochs=None,
callbacks=None,
validation_split=0,
verbose=1,
**fit_kwargs
):
"""Search for the best HyperParameters.
If there is no early-stopping callback in the callbacks, one is injected
to accelerate the search process. At the end of the search, the best model
is fully trained with the specified number of epochs.
# Arguments
callbacks: A list of callback functions. Defaults to None.
validation_split: Float.
"""
if self._finished:
return
if callbacks is None:
callbacks = []
self.hypermodel.set_fit_args(validation_split, epochs=epochs)
# Insert early-stopping for adaptive number of epochs.
epochs_provided = True
if epochs is None:
epochs_provided = False
epochs = 1000
if not utils.contain_instance(
callbacks, tf_callbacks.EarlyStopping
):
callbacks.append(
tf_callbacks.EarlyStopping(patience=10, min_delta=1e-4)
)
# Insert early-stopping for acceleration.
early_stopping_inserted = False
new_callbacks = self._deepcopy_callbacks(callbacks)
if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):
early_stopping_inserted = True
new_callbacks.append(
tf_callbacks.EarlyStopping(patience=10, min_delta=1e-4)
)
# Populate initial search space.
hp = self.oracle.get_space()
self._prepare_model_build(hp, **fit_kwargs)
self._try_build(hp)
self.oracle.update_space(hp)
super().search(
epochs=epochs,
callbacks=new_callbacks,
verbose=verbose,
**fit_kwargs
)
# Fully train the best model using the validation data and enough epochs.
if validation_split > 0 or early_stopping_inserted:
copied_fit_kwargs = copy.copy(fit_kwargs)
# Remove early-stopping: either there is no validation data left, or the
# callback was only inserted for the search.
copied_fit_kwargs["callbacks"] = self._remove_early_stopping(
callbacks
)
# Decide the number of epochs.
copied_fit_kwargs["epochs"] = epochs
if not epochs_provided:
copied_fit_kwargs["epochs"] = self._get_best_trial_epochs()
# Concatenate training and validation data.
if validation_split > 0:
copied_fit_kwargs["x"] = copied_fit_kwargs["x"].concatenate(
fit_kwargs["validation_data"]
)
copied_fit_kwargs.pop("validation_data")
self.hypermodel.set_fit_args(0, epochs=copied_fit_kwargs["epochs"])
copied_fit_kwargs["verbose"] = verbose
pipeline, model, history = self.final_fit(**copied_fit_kwargs)
else:
# TODO: Add return history functionality in Keras Tuner
model = self.get_best_models()[0]
history = None
pipeline = pipeline_module.load_pipeline(
self._pipeline_path(self.oracle.get_best_trials(1)[0].trial_id)
)
model.save(self.best_model_path)
pipeline.save(self.best_pipeline_path)
self._finished = True
return history
def get_state(self):
state = super().get_state()
state.update({"finished": self._finished})
return state
def set_state(self, state):
super().set_state(state)
self._finished = state.get("finished")
@staticmethod
def _remove_early_stopping(callbacks):
return [
copy.deepcopy(callback)
for callback in callbacks
if not isinstance(callback, tf_callbacks.EarlyStopping)
]
def _get_best_trial_epochs(self):
best_trial = self.oracle.get_best_trials(1)[0]
# Steps count from 0, so epochs = step + 1.
return self.oracle.get_trial(best_trial.trial_id).best_step + 1
def _build_best_model(self):
best_trial = self.oracle.get_best_trials(1)[0]
best_hp = best_trial.hyperparameters
return self._try_build(best_hp)
def final_fit(self, **kwargs):
best_trial = self.oracle.get_best_trials(1)[0]
best_hp = best_trial.hyperparameters
(
pipeline,
kwargs["x"],
kwargs["validation_data"],
) = self._prepare_model_build(best_hp, **kwargs)
model = self._build_best_model()
self.adapt(model, kwargs["x"])
model, history = utils.fit_with_adaptive_batch_size(
model, self.hypermodel.batch_size, **kwargs
)
return pipeline, model, history
@property
def best_model_path(self):
return os.path.join(self.project_dir, "best_model")
@property
def best_pipeline_path(self):
return os.path.join(self.project_dir, "best_pipeline")
@property
def objective(self):
return self.oracle.objective
@property
def max_trials(self):
return self.oracle.max_trials
| autokeras/autokeras/engine/tuner.py/0 | {
"file_path": "autokeras/autokeras/engine/tuner.py",
"repo_id": "autokeras",
"token_count": 4699
} | 1 |
Usage: python benchmark/run.py structured_data_classification report.csv
| autokeras/benchmark/README.md/0 | {
"file_path": "autokeras/benchmark/README.md",
"repo_id": "autokeras",
"token_count": 18
} | 2 |
# AutoKeras Documentation
The source for AutoKeras documentation is in this directory.
Our documentation uses extended Markdown, as implemented by [MkDocs](http://mkdocs.org).
## Building the documentation
- Install dependencies: `pip install -r docs/requirements.txt`
- `pip install -e .` to make sure that Python will import your modified version of AutoKeras.
- From the root directory, `cd` into the `docs/` folder and run:
- `python autogen.py`
- `mkdocs serve` # Starts a local webserver: [localhost:8000](http://localhost:8000)
- `mkdocs build` # Builds a static site in `site/` directory
## Generate contributors list
- Prerequisites:
- Install Pillow: `pip install Pillow`
- Generate:
- Run: `sh shell/contributors.sh`
- The generated file is: `docs/templates/img/contributors.svg` | autokeras/docs/README.md/0 | {
"file_path": "autokeras/docs/README.md",
"repo_id": "autokeras",
"token_count": 260
} | 3 |
import os
import pathlib
def copy_examples(examples_dir, destination_dir):
"""Copy the examples directory in the documentation.
Prettify files by extracting the docstrings written in Markdown.
"""
pathlib.Path(destination_dir).mkdir(exist_ok=True)
for file in os.listdir(examples_dir):
if not file.endswith(".py"):
continue
module_path = os.path.join(examples_dir, file)
docstring, starting_line = get_module_docstring(module_path)
destination_file = os.path.join(destination_dir, file[:-2] + "md")
with open(destination_file, "w+", encoding="utf-8") as f_out, open(
examples_dir / file, "r+", encoding="utf-8"
) as f_in:
f_out.write(docstring + "\n\n")
# skip docstring
for _ in range(starting_line):
next(f_in)
f_out.write("```python\n")
# next line might be empty.
line = next(f_in)
if line != "\n":
f_out.write(line)
# copy the rest of the file.
for line in f_in:
f_out.write(line)
f_out.write("```")
def get_module_docstring(filepath):
"""Extract the module docstring.
Also finds the line at which the docstring ends.
"""
co = compile(open(filepath, encoding="utf-8").read(), filepath, "exec")
if co.co_consts and isinstance(co.co_consts[0], str):
docstring = co.co_consts[0]
else:
print("Could not get the docstring from " + filepath)
docstring = ""
return docstring, co.co_firstlineno
| autokeras/docs/keras_autodoc/examples.py/0 | {
"file_path": "autokeras/docs/keras_autodoc/examples.py",
"repo_id": "autokeras",
"token_count": 738
} | 4 |
# AutoKeras Redesign
## Motivation
Redesign how the graph tunes the preprocessor, model, and post-processor
altogether. This is mainly to adapt to the latest best practices of Keras,
which no longer include the preprocessing layers in the main model.
## The problem
The following problems should all be resolved by the new design.
Some of these scenarios are supported by AutoKeras today, but some are not.
* Hyperparameters exist in the preprocessors as well.
* Example: Data augmentation contains hyperparameters.
* The preprocessors need to know whether they are running on training,
validation, or testing data.
* Example: Data augmentation is only applied during training.
* The hyperparameter value for model selection also decides the preprocessors.
* Example: If BERT is chosen, the preprocessor should use `BertTokenizer`.
* The preprocessor needs to access the target (y) data.
* Example: Feature selection based on y.
* Example: Object detection preprocessing the images.
* All the applied preprocessors need to be tracked in order to export them.
* The post-processor needs information from the preprocessors.
* Example: The post-processor that decodes a probability vector back to a
classification label needs information from the y encoder.
## The current design
It stacks the preprocessors based on the input type.
It does not select the preprocessors based on the hyperparameter value used
for model selection.
## Infeasible best solution
The ideal solution would be to separate the search space representation (the
`HyperModel`s) from the actual implementation (the `tf.data` operations and
`tf.keras.Model`). We would just use the `HyperModel` to get all the
hyperparameter values and construct an abstraction of the actual
implementation, which may also be called an intermediate representation (IR),
and then build the IR into the actual implementation. It sounds like it would
be easy to separate the `tf.data` operations and the `tf.keras.Model`.
However, this is not possible to implement without adding a significant
burden to every `autokeras.Block` subclass implementation. This is because
KerasTuner creates hyperparameters as it builds the `HyperModel`, so it is
not possible to get all the hyperparameter values without running the code
that actually performs the `tf.data` operations and builds the
`tf.keras.Model`.
## The new design
### Mixing the preprocessors and model
No longer create the preprocessors based on the input nodes.
Make the preprocessors part of the graph.
Each block can be built into a mix of preprocessors and parts of the model.
The input to `Block.build()` can be either a `KerasTensor` or a
`tf.data.Dataset`. Whenever we need to switch a `Dataset` into a
`KerasTensor`, we register the `keras.Input` in the graph, so that we can use
it to build the `keras.Model` later. This is similar to the Keras saving
mechanism, where custom objects are registered in
[`_GLOBAL_CUSTOM_OBJECTS`](https://github.com/keras-team/keras/blob/v2.11.0/keras/saving/object_registration.py#L23).
How to register?
Through `Block._build_wrapper()`. If a dataset is passed to a block that is
supposed to be part of the Keras model, the wrapper should do the switch from
dataset to Keras input.
In `Block._build_wrapper()`, it should modify a constant value in another
module to record the input node.
Note: we require that, wherever the switch from a dataset to a Keras input
happens, it relies on `Block._build_wrapper()` to do the job.
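To make the mechanism concrete, here is a minimal sketch of such a wrapper,
assuming a hypothetical module-level `INPUTS` registry (the names and the
shape inference are illustrative assumptions, not the final API):
```python
import tensorflow as tf
from tensorflow import keras

INPUTS = []  # hypothetical registry, analogous in spirit to _GLOBAL_CUSTOM_OBJECTS


class DenseBlock:
    def build(self, inputs):
        return keras.layers.Dense(32)(inputs)

    def _build_wrapper(self, inputs):
        # A Dataset arriving at a model block marks the boundary between
        # the preprocessors and the Keras model.
        if isinstance(inputs, tf.data.Dataset):
            # Assumes the dataset yields single (batched) tensors.
            keras_input = keras.Input(shape=inputs.element_spec.shape[1:])
            INPUTS.append(keras_input)  # registered for keras.Model building
            inputs = keras_input
        return self.build(inputs)
```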
### Export the preprocessor
We keep the current design of building a `HyperPreprocessor` into a
`Preprocessor`. Whenever a preprocessing step is applied to the `Dataset`, we
record the input and output datasets and the preprocessor, so that we can
track and reconstruct the preprocessing computation graph and export the
preprocessors. (We do not rebuild the preprocessors from the
`HyperPreprocessor`s, because fitting preprocessors takes time; we can
directly reuse the already built preprocessors.) The current saving mechanism
of the preprocessors can also be kept.
How to track?
Similar to registering the `input`s above: in
`HyperPreprocessor._build_wrapper()`, it registers the input and output
datasets and the built preprocessor that processed them.
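As a rough sketch (the registry structure below is an assumption), tracking
could record one edge per preprocessor application, from which the
preprocessing graph can be reconstructed and exported without re-fitting:
```python
PREPROCESSOR_EDGES = []  # hypothetical record of applied preprocessors


class HyperPreprocessor:
    def build(self, hp, dataset):
        raise NotImplementedError

    def _build_wrapper(self, hp, dataset):
        preprocessor = self.build(hp, dataset)
        output = preprocessor.transform(dataset)
        # Track (input, preprocessor, output) so the already fitted
        # preprocessors can be exported later.
        PREPROCESSOR_EDGES.append((dataset, preprocessor, output))
        return output
```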
### About handling the target values
`y` can either be treated separately or also be part of the graph. Treating
it separately would be much easier. The preprocessing and postprocessing of
`y` need to share info with each other, but they don't share info with the
rest of the graph. Some preprocessors need `y`.
The analyzer-to-heads information flow can be kept untouched. The analyzer
analyzes the data and passes the configs into the heads, for example, whether
to use sigmoid or softmax for classification.
However, the heads may be decoupled from the preprocessing and postprocessing
of `y`. The information needed for pre- and post-processing comes from the
analysers; there is no need to route it through the heads. Routing it through
the heads would make implementing custom heads harder.
Although we get the analysers from the heads, no information is actually
passed from the heads to the analysers. This design exists only because the
users specify the `y`s using the heads.
### Reduce unnecessary dataset conversions
Have stricter type and shape checks, which reduce the overhead of the
preprocessors reshaping and type-converting the dataset. First, analyze the
dataset; if it doesn't meet the requirements, raise a clear error.
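For illustration, a stricter up-front check might look like the following
(the function name is an assumption; the message mirrors the errors asserted
in the analyser tests):
```python
import tensorflow as tf


def check_image_dataset(dataset: tf.data.Dataset):
    """Raise a clear error instead of silently reshaping or converting."""
    shape = dataset.element_spec.shape  # assumes single-tensor elements
    if len(shape) not in (3, 4):  # (batch, h, w) or (batch, h, w, channels)
        raise ValueError(
            "Expect the data to ImageInput to have shape (batch_size, "
            "height, width, channels) or (batch_size, height, width), "
            f"but got input shape {shape}."
        )
```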
### Accommodations in KerasTuner
No accommodations seem to be needed. We should continue to override
`Tuner._build_and_fit_model()`.
## TODOs
* Remove prototype directory.
* Remove prototype from coverage exclusion in setup.cfg. | autokeras/docs/redesign.md/0 | {
"file_path": "autokeras/docs/redesign.md",
"repo_id": "autokeras",
"token_count": 1414
} | 5 |
# AutoKeras 1.0 Tutorial
## Supported Tasks
AutoKeras supports several tasks with an extremely simple interface.
You can click the links below to see the detailed tutorial for each task.
**Supported Tasks**:
[Image Classification](/tutorial/image_classification)
[Image Regression](/tutorial/image_regression)
[Text Classification](/tutorial/text_classification)
[Text Regression](/tutorial/text_regression)
[Structured Data Classification](/tutorial/structured_data_classification)
[Structured Data Regression](/tutorial/structured_data_regression)
**Coming Soon**: Time Series Forecasting, Object Detection, Image Segmentation.
## Multi-Task and Multi-Modal Data
If you are dealing with multi-task or multi-modal dataset, you can refer to this
[tutorial](/tutorial/multi) for details.
## Customized Model
Follow this [tutorial](/tutorial/customized), to use AutoKeras building blocks to quickly construct your own model.
With these blocks, you only need to specify the high-level architecture of your model.
AutoKeras would search for the best detailed configuration for you.
Moreover, you can override the base classes to create your own block.
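For instance, a minimal customized image classifier could look like this
(the block choices and `max_trials` value are illustrative):
```python
import autokeras as ak

input_node = ak.ImageInput()
output_node = ak.Normalization()(input_node)
output_node = ak.ConvBlock()(output_node)
output_node = ak.ClassificationHead()(output_node)
auto_model = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=3)
# auto_model.fit(x_train, y_train) then searches the detailed configuration.
```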
The following are the links to the documentation of the predefined input nodes and blocks in AutoKeras.
**Nodes**:
[ImageInput](/node/#imageinput-class)
[Input](/node/#input-class)
[StructuredDataInput](/node/#structureddatainput-class)
[TextInput](/node/#textinput-class)
**Blocks**:
[ImageAugmentation](/block/#imageaugmentation-class)
[Normalization](/block/#normalization-class)
[TextToIntSequence](/block/#texttointsequence-class)
[TextToNgramVector](/block/#texttongramvector-class)
[CategoricalToNumerical](/block/#categoricaltonumerical-class)
[ConvBlock](/block/#convblock-class)
[DenseBlock](/block/#denseblock-class)
[Embedding](/block/#embedding-class)
[Merge](/block/#merge-class)
[ResNetBlock](/block/#resnetblock-class)
[RNNBlock](/block/#rnnblock-class)
[SpatialReduction](/block/#spatialreduction-class)
[TemporalReduction](/block/#temporalreduction-class)
[XceptionBlock](/block/#xceptionblock-class)
[ImageBlock](/block/#imageblock-class)
[StructuredDataBlock](/block/#structureddatablock-class)
[TextBlock](/block/#textblock-class)
[ClassificationHead](/block/#classificationhead-class)
[RegressionHead](/block/#regressionhead-class)
## Export Model
You can follow this [tutorial](/tutorial/export) to export the best model.
| autokeras/docs/templates/tutorial/overview.md/0 | {
"file_path": "autokeras/docs/templates/tutorial/overview.md",
"repo_id": "autokeras",
"token_count": 748
} | 6 |
# node shell/generate_json.js
gh api -H "Accept: application/vnd.github+json" /repos/keras-team/autokeras/contributors --paginate > response.json
sed "s/\]\[/,/g" response.json > contributors.json
rm response.json
mkdir avatars
python shell/contributors.py avatars > docs/templates/img/contributors.svg
rm contributors.json
rm -rf avatars
| autokeras/shell/contributors.sh/0 | {
"file_path": "autokeras/shell/contributors.sh",
"repo_id": "autokeras",
"token_count": 118
} | 7 |
# keras-cv Two Stage Two-Dimensional Object Detection API
| Status | Proposed |
:-------------- |:---------------------------------------------------- |
| **Author(s)** | Zhenyu Tan ([email protected])|
| **Contributor(s)** | Francois Chollet ([email protected])|
| **Updated** | 2022-08-04 |
## Objective
We aim at providing the core primitive components for training and serving two-stage two-dimensional object
detection models, specifically Faster RCNN.
Pretrained models will also be provided, similar to keras-applications.
## Key Benefits
Two-stage object detection models are a state-of-the-art technique powering
many computer vision tasks. They provide more accurate detection than
single-stage models (such as SSD), at the cost of lower inference speed.
With this proposal, Keras users will be able to build end-to-end models with a simple API.
## Design overview
This proposal includes the specific core components for building faster rcnn models. It does not, however, include:
1. Model backbone, such as ResNet, or functions to generate feature maps
2. Detection heads, such as Feature Pyramid
3. metrics utilities such as COCO Evaluator, or visualization utils.
4. Primitive components from the [single-stage detector](https://github.com/keras-team/governance/blob/master/rfcs/20200928-keras-cv-single-stage-2d-object-detection.md); we will re-use those components in this design.
Data augmentation with ground truth box processing is currently being developed in KerasCV.
In this document, region of interest (roi) is used interchangeably with region proposal, or simply proposal.
#### Training
Case where a user wants to train from scratch:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
import keras_cv
# Considering a COCO dataset
coco_dataset = tfds.load('coco/2017')
train_ds, eval_ds = coco_dataset['train'], coco_dataset['validation']
def preprocess(features):
image, gt_boxes, gt_labels = features['image'], features['objects']['bbox'], features['objects']['label']
# preprocess image, gt_boxes, gt_labels, such as flip, resize, and padding, and reserve 0 for background label.
# but a batch of images (typically 2 per GPU) should have the same size.
return image, gt_boxes, gt_labels
anchor_generator = keras_cv.ops.AnchorGenerator(anchor_sizes, scales, aspect_ratios, strides)
similarity_calculator = keras_cv.layers.IOUSimilarity()
# positive anchor with IOU > 0.7, negative anchor with IOU <= 0.3
rpn_box_matcher = keras_cv.ops.BoxMatcher([0.7, 0.3])
# positive ROI with IOU > 0.5, negative ROI with IOU <= 0.5
rcnn_box_matcher = keras_cv.ops.BoxMatcher(0.5)
target_gather = keras_cv.ops.TargetGather()
box_coder = keras_cv.ops.BoxCoder(offset='sigmoid')
rpn_sampler = keras_cv.layers.ProposalSampler(positive_fraction=0.5, batch_size=256)
rcnn_sampler = keras_cv.layers.ProposalSampler(positive_fraction=0.25, batch_size=128)
rpn_labeler = keras_cv.ops.AnchorLabeler(rpn_sampler, rpn_box_matcher, similarity_calculator, box_coder)
rcnn_labeler = keras_cv.ops.AnchorLabeler(rcnn_sampler, rcnn_box_matcher, similarity_calculator, box_coder)
roi_filter = keras_cv.layers.ROIFilter(pre_nms_top_k=2000, nms_iou_threshold=0.7, test_pre_nms_top_k=1000)
roi_pooler = keras_cv.layers.ROIPooler(output_size=[7, 7])
# Build RPN and ROI Heads, use Keras backbone
backbone = tf.keras.applications.ResNet50()
def encode_rpn_label(image, gt_boxes, gt_labels):
anchor_boxes = anchor_generator(image.shape[:2])
cls_targets, box_targets, cls_weights, box_weights = rpn_labeler(anchor_boxes, gt_boxes, gt_labels)
return image, (gt_labels, gt_boxes, cls_targets, box_targets), (cls_weights, box_weights)
class FasterRCNN(tf.keras.Model):
# includes backbone and feature pyramid head.
def __init__(self, rpn_head, roi_head, roi_filter, roi_pooler, backbone='resnet50_fpn'):
# self.backbone = Model Backbone that returns dict of feature map, or Feature Pyramid Network that wraps it
# self.rpn_head = Region Proposal Network that provides objectness scores and bbox offset against anchor boxes
# self.roi_filter = A filter layer that shrinks from a dense predictions to topk sparse predictions based on scores
# self.roi_head = RCNN detection network that provides softmaxed classification score and bbox offset against rois
# self.rpn_cls_loss_fn = a Binary CrossEntropy Keras loss
# self.rpn_reg_loss_fn = a Regression Keras loss, e.g., Huber loss
# self.rcnn_cls_loss_fn = a Binary CrossEntropy Keras loss
# self.rcnn_reg_loss_fn = a Regression Keras loss, e.g., Huber loss
def call(self, image, training=None):
# returns a single or multi level feature maps
feature_map = self.backbone(image, training)
# from the region proposal network, returns the predicted objectness scores
# and class-agnostic offsets relative to anchor boxes
rpn_cls_pred, rpn_bbox_pred = self.rpn_head(feature_map)
# apply offset to anchors and recover proposal in (x1, y1, x2, y2) format
rpn_rois = box_coder.decode_offset(anchors, rpn_bbox_pred)
# select top-k proposals according to objectness scores
rois, cls_pred = self.roi_filter(rpn_rois, rpn_cls_pred)
# pooling feature map with variable sized rois to fixed size feature map
feature_map = self.roi_pooler(feature_map, rois)
# get class independent scores and bounding boxes offsets relative to proposals
rcnn_cls_pred, rcnn_bbox_pred = self.roi_head(feature_map)
if not training:
rcnn_cls_pred, rcnn_bbox_pred = self.nms_detection_decoder(rois, rcnn_cls_pred, rcnn_bbox_pred, image_shape)
return rcnn_cls_pred, rcnn_bbox_pred
return {"rpn_cls_pred": rpn_cls_pred, "rpn_bbox_pred": rpn_bbox_pred, "rois": rois,
"rcnn_cls_pred": rcnn_cls_pred, "rcnn_bbox_pred": rcnn_bbox_pred}
def train_step(self, data):
image, (gt_labels, gt_boxes, rpn_cls_targets, rpn_box_targets), (rpn_cls_weights, rpn_box_weights) = data
# Using approximate joint training instead of alternating training
with tf.GradientTape() as tape:
outputs = self(image, training=True)
# Compute RPN losses using targets from input pipeline, this will normalize by N_cls and N_reg as well
rpn_cls_loss = self.rpn_cls_loss_fn(rpn_cls_targets, outputs["rpn_cls_pred"], rpn_cls_weights)
rpn_box_loss = self.rpn_reg_loss_fn(rpn_box_targets, outputs["rpn_bbox_pred"], rpn_box_weights)
# Compute RCNN losses which only picks k-th bbox prediction where k is the predicted class
rois = outputs["rois"]
rcnn_cls_true, rcnn_box_true, rcnn_cls_weights, rcnn_box_weights = rcnn_labeler(rois, gt_boxes, gt_labels)
rcnn_cls_loss = self.rcnn_cls_loss_fn(rcnn_cls_true, outputs["rcnn_cls_pred"], rcnn_cls_weights)
rcnn_box_loss = self.rcnn_reg_loss_fn(rcnn_box_true, outputs["rcnn_bbox_pred"], rcnn_box_weights)
total_loss = rpn_cls_loss + rpn_box_loss + rcnn_cls_loss + rcnn_box_loss
self.optimizer.minimize(total_loss, self.trainable_variables, tape=tape)
return self.compute_metrics(...)
transformed_train_ds = train_ds.map(preprocess).map(encode_rpn_label).batch(128).shuffle(1024)
transformed_eval_ds = eval_ds.map(preprocess).map(encode_rpn_label).batch(128)
strategy = tf.distribute.TPUStrategy(...)
with strategy.scope():
optimizer = tf.keras.optimizers.SGD(lr_scheduler)
model = FasterRCNN(rpn_head, roi_head, roi_filter, roi_pooler, backbone=backbone)
model.compile(optimizer=optimizer,
loss={'classification': keras_cv.losses.Focal(), 'regression': tf.keras.losses.Huber()},
metrics=[])
model.fit(transformed_train_ds, epochs=100, validation_data=transformed_eval_ds)
model.save(file_path)
```
#### Serving
The case where a user wants to serve the trained model on a single image is identical to the single-stage object detector.
## Detailed Design
For the rest of the design, we denote `B` as batch size, `N` as the number of ground truth boxes, and `M` as the number
of anchor boxes.
We propose 3 layers and 1 op in this RFC.
#### Layers -- ProposalSampler
Given a dense set of anchor boxes or proposals, we propose the ProposalSampler
layer to select positive and negative proposals according to the required
batch size and positive:negative ratio.
```python
class ProposalSampler(tf.keras.layers.Layer):
"""Class to select positive and negative proposals."""
def __init__(self, positive_fraction, batch_size, positive_indicator=1, negative_indicator=-1):
"""Initializes ProposalSampler layer.
Args:
positive_fraction: A float number between [0, 1], 0.5 means positive:negative ratio is 1:1
batch_size: the number of samples to generate
positive_indicator: for the inputs to the layer, value for positive proposal, default to 1
negative_indicator: for the inputs to the layer, value for negative proposal, default to -1
"""
def call(self, matched_indicators):
"""Get a balanced positive and negative samples.
Args:
matched_indicators: A int Tensor [N], or [B, N] represent positive or negative values
Returns:
Int tensors with shape [sample_size] or [B, sample_size] representing the selected indices for proposals.
"""
```
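Assuming the layer is exposed as `keras_cv.layers.ProposalSampler` as proposed
above, a hypothetical usage sketch (the indicator values are illustrative):
```python
import tensorflow as tf
import keras_cv  # assumes the proposed API lands under keras_cv.layers

sampler = keras_cv.layers.ProposalSampler(positive_fraction=0.5, batch_size=4)
# +1 marks a positive proposal, -1 a negative one, 0 is ignored.
match_indicators = tf.constant([1, -1, 1, 0, -1, -1])
selected_indices = sampler(match_indicators)  # int tensor of shape [4]
```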
#### Layers -- ROIPooler
We propose the ROIPooler layer to crop feature maps from proposals.
```python
class ROIPooler(tf.keras.layers.Layer):
"""Class to compute extract feature maps from region proposals by quantization."""
def __init__(self, output_size=[7, 7]):
"""Initializes ROIPooler layer.
Args:
output_size: A tuple representing the output height and width.
"""
def call(self, feature_maps, rois):
"""Compute pairwise IOU similarity between ground truth boxes and anchors.
Args:
groundtruth_boxes: A float Tensor [H, W, C] or [B, H, W, C] or dict of multiple levels
rois: A float or int Tensor [M], or [B, M] represent coordinates within [H, W].
Returns:
A float tensor with shape [output_size] or [B, output_size] representing cropped feature maps.
"""
```
#### Layers -- ROIFilter
We propose the ROIFilter layer to select the top-k proposals based on some score.
```python
class ROIFilter(tf.keras.layers.Layer):
"""Class to select top-k proposals based on some score."""
def __init__(self,
pre_nms_top_k: int = 2000,
pre_nms_score_threshold: float = 0.0,
pre_nms_min_size_threshold: float = 0.0,
nms_iou_threshold: float = 0.7,
num_proposals: int = 1000,
test_pre_nms_top_k: int = 1000,
test_pre_nms_score_threshold: float = 0.0,
test_pre_nms_min_size_threshold: float = 0.0,
test_nms_iou_threshold: float = 0.7,
test_num_proposals: int = 1000,
use_batched_nms: bool = False,):
"""Initializes ROIFilter layer.
Args:
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
pre_nms_min_size_threshold: A `float` of the threshold of each side of the
box (w.r.t. the scaled image). Proposals whose sides are below this
threshold are thrown away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
num_proposals: An `int` of the final number of proposals to generate.
test_pre_nms_top_k: An `int` of the number of top scores proposals to be
kept before applying NMS in testing.
test_pre_nms_score_threshold: A `float` of the score threshold to apply
before applying NMS in testing. Proposals whose scores are below this
threshold are thrown away.
test_pre_nms_min_size_threshold: A `float` of the threshold of each side
of the box (w.r.t. the scaled image) in testing. Proposals whose sides
are below this threshold are thrown away.
test_nms_iou_threshold: A `float` in [0, 1] of the NMS IoU threshold in
testing.
test_num_proposals: An `int` of the final number of proposals to generate
in testing.
use_batched_nms: A `bool` of whether or not use
`tf.image.combined_non_max_suppression`.
"""
def call(self,
rois: Mapping[str, tf.Tensor],
roi_scores: Mapping[str, tf.Tensor],
image_shape: tf.Tensor):
"""Selects the top-k proposals by score and applies NMS.
Args:
rois: A float Tensor [N, 4], or [B, N, 4], representing region proposals.
roi_scores: A float Tensor [N], or [B, N], representing scores for each region.
image_shape: An int tensor [2] or [B, 2] representing the image size.
Returns:
roi: A `tf.Tensor` of shape [B, num_proposals, 4], the proposed
ROIs in the scaled image coordinate.
roi_scores: A `tf.Tensor` of shape [B, num_proposals], scores of the
proposed ROIs.
"""
```
#### Ops -- AnchorLabeler
```python
class AnchorLabeler:
"""Labelers that matches ground truth with anchors and proposals."""
def __init__(self,
proposal_sampler,
proposal_matcher,
similarity_calculator,
box_coder):
""".
Args:
proposal_sampler: a ProposalSampler
proposal_matcher: A BoxMatcher
similarity_calculator: Such as IOU layer
box_coder: a BoxCoder that transforms between different formats
"""
def __call__(self, proposals, gt_boxes, gt_labels):
"""
Args:
proposals: a float [N, 4] Tensor represent different proposals.
gt_boxes: a float [M, 4] Tensor represent ground truth boxes.
gt_labels: a int [M] Tensor represent ground truth labels.
Returns:
cls_targets: a int [K] Tensor represent mapped proposal labels from ground truth labels.
box_targets: a float [K, 4] Tensor represent mapped proposal boxes from ground truth boxes.
cls_weights: a float [K] Tensor represent weights for each cls_targets
box_weights: a float [K] or [K, 4] Tensor represent weights for each box_targets
"""
```
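For clarity, a rough sketch of how `AnchorLabeler.__call__` could compose the
components above (the exact signatures of the matcher and target gatherer are
assumptions, not normative):
```python
def label_proposals(proposals, gt_boxes, gt_labels, similarity_calculator,
                    proposal_matcher, proposal_sampler, box_coder,
                    target_gather):
    # Pairwise similarity (e.g. IOU) between ground truth boxes and proposals.
    similarity = similarity_calculator(gt_boxes, proposals)
    # Match each proposal to a ground truth index, with +1/-1/0 indicators.
    matched_indices, indicators = proposal_matcher(similarity)
    # Sample a balanced set of positives and negatives.
    selected = proposal_sampler(indicators)
    # Gather classification and box targets for the selected proposals.
    cls_targets = target_gather(gt_labels, matched_indices, selected)
    matched_boxes = target_gather(gt_boxes, matched_indices, selected)
    # Regression targets are offsets of matched boxes w.r.t. the proposals.
    box_targets = box_coder.encode(matched_boxes, proposals)
    return cls_targets, box_targets
```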
## Questions and Discussion Topics
* Should we provide a meta architecture for FasterRCNN?
* Should we provide some default out-of-the-box RPN and ROI heads?
| governance/rfcs/20220804-keras-cv-two-stage-2d-object-detection.md/0 | {
"file_path": "governance/rfcs/20220804-keras-cv-two-stage-2d-object-detection.md",
"repo_id": "governance",
"token_count": 5327
} | 8 |
<!--
If your issue is an implementation question, please ask your question on [StackOverflow](http://stackoverflow.com/questions/tagged/keras) or [join the Keras Slack channel](https://keras-slack-autojoin.herokuapp.com/) and ask there instead of filing a GitHub issue.
The following is a list of frequently asked questions.
- `AttributeError: 'NoneType' object has no attribute 'image_data_format'`
- It is recommended to import with `from keras.applications import model_name` ***not*** `from keras_applications import model_name` because the keras-applications is not a standalone library.
- Or, you can use the keras-applications directly with [a work-around](https://github.com/keras-team/keras-applications/issues/54#issuecomment-530954413).
- `ImportError: cannot import name 'ResNeXt50'`
- The latest releases may not include the latest models.
- If you want to use the bleeding edge version, you can try `pip install -U git+https://github.com/keras-team/keras git+https://github.com/keras-team/keras-applications`.
- Lack of training configuration
- The keras-applications library is designed for inference only, so it does not provide training details such as data augmentation (e.g., rotating, shifting), optimization hyperparameters (e.g., lr, decay), or the release number of ImageNet used for training.
- For such information, you can check the original repositories shown in the table in README.
-->
### Summary
### Environment
- Python version:
- Keras version:
- Keras-applications version:
- Keras backend with version:
### Logs or source codes for reproduction
| keras-applications/ISSUE_TEMPLATE.md/0 | {
"file_path": "keras-applications/ISSUE_TEMPLATE.md",
"repo_id": "keras-applications",
"token_count": 439
} | 9 |
"""ResNet, ResNetV2, and ResNeXt models for Keras.
# Reference papers
- [Deep Residual Learning for Image Recognition]
(https://arxiv.org/abs/1512.03385) (CVPR 2016 Best Paper Award)
- [Identity Mappings in Deep Residual Networks]
(https://arxiv.org/abs/1603.05027) (ECCV 2016)
- [Aggregated Residual Transformations for Deep Neural Networks]
(https://arxiv.org/abs/1611.05431) (CVPR 2017)
# Reference implementations
- [TensorNets]
(https://github.com/taehoonlee/tensornets/blob/master/tensornets/resnets.py)
- [Caffe ResNet]
(https://github.com/KaimingHe/deep-residual-networks/tree/master/prototxt)
- [Torch ResNetV2]
(https://github.com/facebook/fb.resnet.torch/blob/master/models/preresnet.lua)
- [Torch ResNeXt]
(https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from . import get_submodules_from_kwargs
from .imagenet_utils import _obtain_input_shape
backend = None
layers = None
models = None
keras_utils = None
BASE_WEIGHTS_PATH = (
'https://github.com/keras-team/keras-applications/'
'releases/download/resnet/')
WEIGHTS_HASHES = {
'resnet50': ('2cb95161c43110f7111970584f804107',
'4d473c1dd8becc155b73f8504c6f6626'),
'resnet101': ('f1aeb4b969a6efcfb50fad2f0c20cfc5',
'88cf7a10940856eca736dc7b7e228a21'),
'resnet152': ('100835be76be38e30d865e96f2aaae62',
'ee4c566cf9a93f14d82f913c2dc6dd0c'),
'resnet50v2': ('3ef43a0b657b3be2300d5770ece849e0',
'fac2f116257151a9d068a22e544a4917'),
'resnet101v2': ('6343647c601c52e1368623803854d971',
'c0ed64b8031c3730f411d2eb4eea35b5'),
'resnet152v2': ('a49b44d1979771252814e80f8ec446f9',
'ed17cf2e0169df9d443503ef94b23b33'),
'resnext50': ('67a5b30d522ed92f75a1f16eef299d1a',
'62527c363bdd9ec598bed41947b379fc'),
'resnext101': ('34fb605428fcc7aa4d62f44404c11509',
'0f678c91647380debd923963594981b3')
}
def block1(x, filters, kernel_size=3, stride=1,
conv_shortcut=True, name=None):
"""A residual block.
# Arguments
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
conv_shortcut: default True, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
# Returns
Output tensor for the residual block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
if conv_shortcut is True:
shortcut = layers.Conv2D(4 * filters, 1, strides=stride,
name=name + '_0_conv')(x)
shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_0_bn')(shortcut)
else:
shortcut = x
x = layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_1_bn')(x)
x = layers.Activation('relu', name=name + '_1_relu')(x)
x = layers.Conv2D(filters, kernel_size, padding='SAME',
name=name + '_2_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_2_bn')(x)
x = layers.Activation('relu', name=name + '_2_relu')(x)
x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_3_bn')(x)
x = layers.Add(name=name + '_add')([shortcut, x])
x = layers.Activation('relu', name=name + '_out')(x)
return x
def stack1(x, filters, blocks, stride1=2, name=None):
"""A set of stacked residual blocks.
# Arguments
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
name: string, stack label.
# Returns
Output tensor for the stacked blocks.
"""
x = block1(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
return x
def block2(x, filters, kernel_size=3, stride=1,
conv_shortcut=False, name=None):
"""A residual block.
# Arguments
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
conv_shortcut: default False, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
# Returns
Output tensor for the residual block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
preact = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_preact_bn')(x)
preact = layers.Activation('relu', name=name + '_preact_relu')(preact)
if conv_shortcut is True:
shortcut = layers.Conv2D(4 * filters, 1, strides=stride,
name=name + '_0_conv')(preact)
else:
shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x
x = layers.Conv2D(filters, 1, strides=1, use_bias=False,
name=name + '_1_conv')(preact)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_1_bn')(x)
x = layers.Activation('relu', name=name + '_1_relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = layers.Conv2D(filters, kernel_size, strides=stride,
use_bias=False, name=name + '_2_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_2_bn')(x)
x = layers.Activation('relu', name=name + '_2_relu')(x)
x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
x = layers.Add(name=name + '_out')([shortcut, x])
return x
def stack2(x, filters, blocks, stride1=2, name=None):
"""A set of stacked residual blocks.
# Arguments
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
name: string, stack label.
# Returns
Output tensor for the stacked blocks.
"""
x = block2(x, filters, conv_shortcut=True, name=name + '_block1')
for i in range(2, blocks):
x = block2(x, filters, name=name + '_block' + str(i))
x = block2(x, filters, stride=stride1, name=name + '_block' + str(blocks))
return x
def block3(x, filters, kernel_size=3, stride=1, groups=32,
conv_shortcut=True, name=None):
"""A residual block.
# Arguments
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
groups: default 32, group size for grouped convolution.
conv_shortcut: default True, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
# Returns
Output tensor for the residual block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
if conv_shortcut is True:
shortcut = layers.Conv2D((64 // groups) * filters, 1, strides=stride,
use_bias=False, name=name + '_0_conv')(x)
shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_0_bn')(shortcut)
else:
shortcut = x
x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_1_bn')(x)
x = layers.Activation('relu', name=name + '_1_relu')(x)
c = filters // groups
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = layers.DepthwiseConv2D(kernel_size, strides=stride, depth_multiplier=c,
use_bias=False, name=name + '_2_conv')(x)
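# Emulate a grouped convolution: the depthwise conv above produces c outputs
# per input channel, and the fixed (non-trainable) 1x1 kernel below sums the
# outputs that belong to each group, one group per output channel.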
kernel = np.zeros((1, 1, filters * c, filters), dtype=np.float32)
for i in range(filters):
start = (i // c) * c * c + i % c
end = start + c * c
kernel[:, :, start:end:c, i] = 1.
x = layers.Conv2D(filters, 1, use_bias=False, trainable=False,
kernel_initializer={'class_name': 'Constant',
'config': {'value': kernel}},
name=name + '_2_gconv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_2_bn')(x)
x = layers.Activation('relu', name=name + '_2_relu')(x)
x = layers.Conv2D((64 // groups) * filters, 1,
use_bias=False, name=name + '_3_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name=name + '_3_bn')(x)
x = layers.Add(name=name + '_add')([shortcut, x])
x = layers.Activation('relu', name=name + '_out')(x)
return x
def stack3(x, filters, blocks, stride1=2, groups=32, name=None):
"""A set of stacked residual blocks.
# Arguments
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
groups: default 32, group size for grouped convolution.
name: string, stack label.
# Returns
Output tensor for the stacked blocks.
"""
x = block3(x, filters, stride=stride1, groups=groups, name=name + '_block1')
for i in range(2, blocks + 1):
x = block3(x, filters, groups=groups, conv_shortcut=False,
name=name + '_block' + str(i))
return x
def ResNet(stack_fn,
preact,
use_bias,
model_name='resnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
stack_fn: a function that returns output tensor for the
stacked residual blocks.
preact: whether to use pre-activation or not
(True for ResNetV2, False for ResNet and ResNeXt).
use_bias: whether to use biases for convolutional layers or not
(True for ResNet and ResNetV2, False for ResNeXt).
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)
if preact is False:
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name='conv1_bn')(x)
x = layers.Activation('relu', name='conv1_relu')(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = stack_fn(x)
if preact is True:
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
name='post_bn')(x)
x = layers.Activation('relu', name='post_relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = layers.Dense(classes, activation='softmax', name='probs')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name=model_name)
# Load weights.
if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):
if include_top:
file_name = model_name + '_weights_tf_dim_ordering_tf_kernels.h5'
file_hash = WEIGHTS_HASHES[model_name][0]
else:
file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = keras_utils.get_file(file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
by_name = 'resnext' in model_name
model.load_weights(weights_path, by_name=by_name)
elif weights is not None:
model.load_weights(weights)
return model
def ResNet50(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
def stack_fn(x):
x = stack1(x, 64, 3, stride1=1, name='conv2')
x = stack1(x, 128, 4, name='conv3')
x = stack1(x, 256, 6, name='conv4')
x = stack1(x, 512, 3, name='conv5')
return x
return ResNet(stack_fn, False, True, 'resnet50',
include_top, weights,
input_tensor, input_shape,
pooling, classes,
**kwargs)
def ResNet101(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
def stack_fn(x):
x = stack1(x, 64, 3, stride1=1, name='conv2')
x = stack1(x, 128, 4, name='conv3')
x = stack1(x, 256, 23, name='conv4')
x = stack1(x, 512, 3, name='conv5')
return x
return ResNet(stack_fn, False, True, 'resnet101',
include_top, weights,
input_tensor, input_shape,
pooling, classes,
**kwargs)
def ResNet152(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
def stack_fn(x):
x = stack1(x, 64, 3, stride1=1, name='conv2')
x = stack1(x, 128, 8, name='conv3')
x = stack1(x, 256, 36, name='conv4')
x = stack1(x, 512, 3, name='conv5')
return x
return ResNet(stack_fn, False, True, 'resnet152',
include_top, weights,
input_tensor, input_shape,
pooling, classes,
**kwargs)
def ResNet50V2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
def stack_fn(x):
x = stack2(x, 64, 3, name='conv2')
x = stack2(x, 128, 4, name='conv3')
x = stack2(x, 256, 6, name='conv4')
x = stack2(x, 512, 3, stride1=1, name='conv5')
return x
return ResNet(stack_fn, True, True, 'resnet50v2',
include_top, weights,
input_tensor, input_shape,
pooling, classes,
**kwargs)
def ResNet101V2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
def stack_fn(x):
x = stack2(x, 64, 3, name='conv2')
x = stack2(x, 128, 4, name='conv3')
x = stack2(x, 256, 23, name='conv4')
x = stack2(x, 512, 3, stride1=1, name='conv5')
return x
return ResNet(stack_fn, True, True, 'resnet101v2',
include_top, weights,
input_tensor, input_shape,
pooling, classes,
**kwargs)
def ResNet152V2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
def stack_fn(x):
x = stack2(x, 64, 3, name='conv2')
x = stack2(x, 128, 8, name='conv3')
x = stack2(x, 256, 36, name='conv4')
x = stack2(x, 512, 3, stride1=1, name='conv5')
return x
return ResNet(stack_fn, True, True, 'resnet152v2',
include_top, weights,
input_tensor, input_shape,
pooling, classes,
**kwargs)
def ResNeXt50(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
def stack_fn(x):
x = stack3(x, 128, 3, stride1=1, name='conv2')
x = stack3(x, 256, 4, name='conv3')
x = stack3(x, 512, 6, name='conv4')
x = stack3(x, 1024, 3, name='conv5')
return x
return ResNet(stack_fn, False, False, 'resnext50',
include_top, weights,
input_tensor, input_shape,
pooling, classes,
**kwargs)
def ResNeXt101(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
def stack_fn(x):
x = stack3(x, 128, 3, stride1=1, name='conv2')
x = stack3(x, 256, 4, name='conv3')
x = stack3(x, 512, 23, name='conv4')
x = stack3(x, 1024, 3, name='conv5')
return x
return ResNet(stack_fn, False, False, 'resnext101',
include_top, weights,
input_tensor, input_shape,
pooling, classes,
**kwargs)
setattr(ResNet50, '__doc__', ResNet.__doc__)
setattr(ResNet101, '__doc__', ResNet.__doc__)
setattr(ResNet152, '__doc__', ResNet.__doc__)
setattr(ResNet50V2, '__doc__', ResNet.__doc__)
setattr(ResNet101V2, '__doc__', ResNet.__doc__)
setattr(ResNet152V2, '__doc__', ResNet.__doc__)
setattr(ResNeXt50, '__doc__', ResNet.__doc__)
setattr(ResNeXt101, '__doc__', ResNet.__doc__)
| keras-applications/keras_applications/resnet_common.py/0 | {
"file_path": "keras-applications/keras_applications/resnet_common.py",
"repo_id": "keras-applications",
"token_count": 11093
} | 10 |
site_name: "Keras-contrib Documentation"
generate:
- layers/core.md:
- keras_contrib.layers.CosineDense
- layers/convolutional.md:
- keras_contrib.layers.CosineConv2D
- keras_contrib.layers.SubPixelUpscaling
- layers/normalization.md:
- keras_contrib.layers.InstanceNormalization
- keras_contrib.layers.GroupNormalization
- layers/advanced-activations.md:
- keras_contrib.layers.SineReLU
- keras_contrib.layers.SReLU
- keras_contrib.layers.Swish
- keras_contrib.layers.PELU
- layers/crf.md:
- keras_contrib.layers.CRF
- losses.md:
- keras_contrib.losses:
- keras_contrib.losses.DSSIMObjective
- keras_contrib.losses.jaccard_distance
- keras_contrib.losses.crf_loss
- keras_contrib.losses.crf_nll
- optimizers.md:
- keras_contrib.optimizers:
- keras_contrib.optimizers.FTML
- keras_contrib.optimizers.Padam
- keras_contrib.optimizers.Yogi
- keras_contrib.optimizers.LARS
- callbacks.md:
- keras_contrib.callbacks:
- keras_contrib.callbacks.TensorBoardGrouped
- keras_contrib.callbacks.CyclicLR
- keras_contrib.callbacks.SnapshotCallbackBuilder
- keras_contrib.callbacks.SnapshotModelCheckpoint
- keras_contrib.callbacks.DeadReluDetector
pages:
- Home: index.md << ../README.md
- layers:
- Core layers: layers/core.md
- Convolutional layers: layers/convolutional.md
- Normalization layers: layers/normalization.md
- Advanced activations layers: layers/advanced-activations.md
- CRF layers: layers/crf.md
- Losses: losses.md
- Optimizers: optimizers.md
- Callbacks: callbacks.md
headers: html
| keras-contrib/contrib_docs/pydocmd.yml/0 | {
"file_path": "keras-contrib/contrib_docs/pydocmd.yml",
"repo_id": "keras-contrib",
"token_count": 720
} | 11 |
import os
import sys
list_conversions = [('import keras.', 'import tensorflow.keras.'),
('import keras ', 'from tensorflow import keras '),
('import keras\n', 'from tensorflow import keras\n'),
('from keras.', 'from tensorflow.keras.'),
('from keras ', 'from tensorflow.keras ')]
def replace_imports_in_text(string, revert):
if revert:
list_imports_to_change = [x[::-1] for x in list_conversions]
else:
list_imports_to_change = list_conversions
text_updated = string
for old_str, new_str in list_imports_to_change:
text_updated = text_updated.replace(old_str, new_str)
return text_updated
def replace_imports_in_file(file_path, revert):
if not file_path.endswith('.py'):
return False
if os.path.abspath(file_path) == os.path.abspath(__file__):
return False
with open(file_path, 'r') as f:
text = f.read()
text_updated = replace_imports_in_text(text, revert)
with open(file_path, 'w+') as f:
f.write(text_updated)
return text_updated != text
def convert_codebase(revert):
nb_of_files_changed = 0
keras_dir = os.path.dirname(os.path.abspath(__file__))
for root, dirs, files in os.walk(keras_dir):
for name in files:
if replace_imports_in_file(os.path.join(root, name), revert):
nb_of_files_changed += 1
print('Changed imports in ' + str(nb_of_files_changed) + ' files.')
print('Those files were found in the directory ' + keras_dir)
def convert_to_tf_keras():
"""Convert the codebase to tf.keras"""
convert_codebase(False)
def convert_to_keras_team_keras():
"""Convert the codebase from tf.keras to keras-team/keras"""
convert_codebase(True)
def test_replace_imports():
python_code = """
import keras
from keras import backend as K
import os
import keras_contrib
import keras_contrib.layers as lay
import keras.layers
from keras.layers import Dense
if K.backend() == 'tensorflow':
import tensorflow as tf
function = tf.max
"""
expected_code = """
from tensorflow import keras
from tensorflow.keras import backend as K
import os
import keras_contrib
import keras_contrib.layers as lay
import tensorflow.keras.layers
from tensorflow.keras.layers import Dense
if K.backend() == 'tensorflow':
import tensorflow as tf
function = tf.max
"""
code_with_replacement = replace_imports_in_text(python_code, False)
assert expected_code == code_with_replacement
assert python_code == replace_imports_in_text(code_with_replacement, True)
if __name__ == '__main__':
if '--revert' in sys.argv:
convert_to_keras_team_keras()
else:
convert_to_tf_keras()
| keras-contrib/convert_to_tf_keras.py/0 | {
"file_path": "keras-contrib/convert_to_tf_keras.py",
"repo_id": "keras-contrib",
"token_count": 1208
} | 12 |
"""Collection of NASNet models
The reference paper:
- [Learning Transferable Architectures for Scalable Image Recognition]
(https://arxiv.org/abs/1707.07012)
The reference implementation:
1. TF Slim
- https://github.com/tensorflow/models/blob/master/research/slim/nets/
nasnet/nasnet.py
2. TensorNets
- https://github.com/taehoonlee/tensornets/blob/master/tensornets/nasnets.py
3. Weights
- https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import warnings
from keras.models import Model
from keras.layers import Input
from keras.layers import Activation
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import Conv2D
from keras.layers import SeparableConv2D
from keras.layers import ZeroPadding2D
from keras.layers import Cropping2D
from keras.layers import concatenate
from keras.layers import add
from keras.regularizers import l2
from keras.utils.data_utils import get_file
from keras.engine.topology import get_source_inputs
from keras_applications.imagenet_utils import _obtain_input_shape
from keras import backend as K
_BN_DECAY = 0.9997
_BN_EPSILON = 1e-3
NASNET_MOBILE_WEIGHT_PATH = (
"https://github.com/titu1994/Keras-NASNet/"
"releases/download/v1.0/NASNet-mobile.h5")
NASNET_MOBILE_WEIGHT_PATH_NO_TOP = (
"https://github.com/titu1994/Keras-NASNet/"
"releases/download/v1.0/NASNet-mobile-no-top.h5")
NASNET_MOBILE_WEIGHT_PATH_WITH_AUXILIARY = (
"https://github.com/titu1994/Keras-NASNet/"
"releases/download/v1.0/NASNet-auxiliary-mobile.h5")
NASNET_MOBILE_WEIGHT_PATH_WITH_AUXILIARY_NO_TOP = (
"https://github.com/titu1994/Keras-NASNet/"
"releases/download/v1.0/NASNet-auxiliary-mobile-no-top.h5")
NASNET_LARGE_WEIGHT_PATH = (
"https://github.com/titu1994/Keras-NASNet/releases/download/v1.1/NASNet-large.h5")
NASNET_LARGE_WEIGHT_PATH_NO_TOP = (
"https://github.com/titu1994/Keras-NASNet/"
"releases/download/v1.1/NASNet-large-no-top.h5")
NASNET_LARGE_WEIGHT_PATH_WITH_AUXILIARY = (
"https://github.com/titu1994/Keras-NASNet/"
"releases/download/v1.1/NASNet-auxiliary-large.h5")
NASNET_LARGE_WEIGHT_PATH_WITH_AUXILIARY_NO_TOP = (
"https://github.com/titu1994/Keras-NASNet/"
"releases/download/v1.1/NASNet-auxiliary-large-no-top.h5")
def NASNet(input_shape=None,
penultimate_filters=4032,
nb_blocks=6,
stem_filters=96,
initial_reduction=True,
skip_reduction_layer_input=True,
use_auxiliary_branch=False,
filters_multiplier=2,
dropout=0.5,
weight_decay=5e-5,
include_top=True,
weights=None,
input_tensor=None,
pooling=None,
classes=1000,
default_size=None,
activation='softmax'):
"""Instantiates a NASNet architecture.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
            has to be `(331, 331, 3)` for NASNetLarge or
            `(224, 224, 3)` for NASNetMobile).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
penultimate_filters: number of filters in the penultimate layer.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
nb_blocks: number of repeated blocks of the NASNet model.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
stem_filters: number of filters in the initial stem block
        initial_reduction: Whether to perform an initial reduction step at
            the beginning of the network. Set to `True` for ImageNet models
            and to `False` for CIFAR models.
skip_reduction_layer_input: Determines whether to skip the reduction layers
when calculating the previous layer to connect to.
use_auxiliary_branch: Whether to use the auxiliary branch during
training or evaluation.
filters_multiplier: controls the width of the network.
- If `filters_multiplier` < 1.0, proportionally decreases the number
of filters in each layer.
- If `filters_multiplier` > 1.0, proportionally increases the number
of filters in each layer.
- If `filters_multiplier` = 1, default number of filters from the paper
are used at each layer.
dropout: dropout rate
weight_decay: l2 regularization weight
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
default_size: specifies the default image size of the model
activation: Type of activation at the top layer.
Can be one of 'softmax' or 'sigmoid'.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if K.backend() != 'tensorflow':
        raise RuntimeError('Only the TensorFlow backend is currently supported, '
'as other backends do not support '
'separable convolution.')
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as ImageNet with `include_top` '
'as true, `classes` should be 1000')
if default_size is None:
default_size = 331
# Determine proper input shape and default size.
input_shape = _obtain_input_shape(input_shape,
default_size=default_size,
min_size=32,
data_format=K.image_data_format(),
require_flatten=include_top or weights)
if K.image_data_format() != 'channels_last':
warnings.warn('The NASNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
assert penultimate_filters % 24 == 0, "`penultimate_filters` needs to be " \
"divisible by 24."
channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
filters = penultimate_filters // 24
if initial_reduction:
x = Conv2D(stem_filters, (3, 3), strides=(2, 2), padding='valid',
use_bias=False, name='stem_conv1', kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay))(img_input)
else:
x = Conv2D(stem_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False,
name='stem_conv1', kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay))(img_input)
x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
name='stem_bn1')(x)
p = None
if initial_reduction: # imagenet / mobile mode
x, p = _reduction_A(x, p, filters // (filters_multiplier ** 2), weight_decay,
id='stem_1')
x, p = _reduction_A(x, p, filters // filters_multiplier, weight_decay,
id='stem_2')
for i in range(nb_blocks):
x, p = _normal_A(x, p, filters, weight_decay, id='%d' % i)
x, p0 = _reduction_A(x, p, filters * filters_multiplier, weight_decay,
id='reduce_%d' % nb_blocks)
p = p0 if not skip_reduction_layer_input else p
for i in range(nb_blocks):
x, p = _normal_A(x, p, filters * filters_multiplier, weight_decay,
id='%d' % (nb_blocks + i + 1))
auxiliary_x = None
    if not initial_reduction:  # CIFAR mode
if use_auxiliary_branch:
auxiliary_x = _add_auxiliary_head(x, classes, weight_decay, pooling,
include_top, activation)
x, p0 = _reduction_A(x, p, filters * filters_multiplier ** 2, weight_decay,
id='reduce_%d' % (2 * nb_blocks))
    if initial_reduction:  # imagenet / mobile mode
if use_auxiliary_branch:
auxiliary_x = _add_auxiliary_head(x, classes, weight_decay, pooling,
include_top, activation)
p = p0 if not skip_reduction_layer_input else p
for i in range(nb_blocks):
x, p = _normal_A(x, p, filters * filters_multiplier ** 2, weight_decay,
id='%d' % (2 * nb_blocks + i + 1))
x = Activation('relu')(x)
if include_top:
x = GlobalAveragePooling2D()(x)
x = Dropout(dropout)(x)
x = Dense(classes, activation=activation,
kernel_regularizer=l2(weight_decay), name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
if use_auxiliary_branch:
model = Model(inputs, [x, auxiliary_x], name='NASNet_with_auxiliary')
else:
model = Model(inputs, x, name='NASNet')
# load weights
if weights == 'imagenet':
if default_size == 224: # mobile version
if include_top:
if use_auxiliary_branch:
                weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXILIARY
model_name = 'nasnet_mobile_with_aux.h5'
else:
weight_path = NASNET_MOBILE_WEIGHT_PATH
model_name = 'nasnet_mobile.h5'
else:
if use_auxiliary_branch:
                weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXILIARY_NO_TOP
model_name = 'nasnet_mobile_with_aux_no_top.h5'
else:
weight_path = NASNET_MOBILE_WEIGHT_PATH_NO_TOP
model_name = 'nasnet_mobile_no_top.h5'
weights_file = get_file(model_name, weight_path, cache_subdir='models')
model.load_weights(weights_file, by_name=True)
elif default_size == 331: # large version
if include_top:
if use_auxiliary_branch:
                weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_AUXILIARY
model_name = 'nasnet_large_with_aux.h5'
else:
weight_path = NASNET_LARGE_WEIGHT_PATH
model_name = 'nasnet_large.h5'
else:
if use_auxiliary_branch:
                weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_AUXILIARY_NO_TOP
model_name = 'nasnet_large_with_aux_no_top.h5'
else:
weight_path = NASNET_LARGE_WEIGHT_PATH_NO_TOP
model_name = 'nasnet_large_no_top.h5'
weights_file = get_file(model_name, weight_path, cache_subdir='models')
model.load_weights(weights_file, by_name=True)
else:
raise ValueError('ImageNet weights can only be loaded on NASNetLarge '
'or NASNetMobile')
if old_data_format:
K.set_image_data_format(old_data_format)
return model
def NASNetLarge(input_shape=(331, 331, 3),
dropout=0.5,
weight_decay=5e-5,
use_auxiliary_branch=False,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
activation='softmax'):
"""Instantiates a NASNet architecture in ImageNet mode.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
# Arguments
input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(331, 331, 3)` for NASNetLarge).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(331, 331, 3)` would be one valid value.
use_auxiliary_branch: Whether to use the auxiliary branch during
training or evaluation.
dropout: dropout rate
weight_decay: l2 regularization weight
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
activation: Type of activation at the top layer.
Can be one of 'softmax' or 'sigmoid'.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
global _BN_DECAY, _BN_EPSILON
_BN_DECAY = 0.9997
_BN_EPSILON = 1e-3
return NASNet(input_shape,
penultimate_filters=4032,
nb_blocks=6,
stem_filters=96,
initial_reduction=True,
skip_reduction_layer_input=True,
use_auxiliary_branch=use_auxiliary_branch,
filters_multiplier=2,
dropout=dropout,
weight_decay=weight_decay,
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
pooling=pooling,
classes=classes,
default_size=331,
activation=activation)
def NASNetMobile(input_shape=(224, 224, 3),
dropout=0.5,
weight_decay=4e-5,
use_auxiliary_branch=False,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
activation='softmax'):
"""Instantiates a NASNet architecture in Mobile ImageNet mode.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` for NASNetMobile).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
use_auxiliary_branch: Whether to use the auxiliary branch during
training or evaluation.
dropout: dropout rate
weight_decay: l2 regularization weight
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
activation: Type of activation at the top layer.
Can be one of 'softmax' or 'sigmoid'.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
global _BN_DECAY, _BN_EPSILON
_BN_DECAY = 0.9997
_BN_EPSILON = 1e-3
return NASNet(input_shape,
penultimate_filters=1056,
nb_blocks=4,
stem_filters=32,
initial_reduction=True,
skip_reduction_layer_input=False,
use_auxiliary_branch=use_auxiliary_branch,
filters_multiplier=2,
dropout=dropout,
weight_decay=weight_decay,
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
pooling=pooling,
classes=classes,
                  default_size=224,
                  activation=activation)
def NASNetCIFAR(input_shape=(32, 32, 3),
dropout=0.0,
weight_decay=5e-4,
use_auxiliary_branch=False,
include_top=True,
weights=None,
input_tensor=None,
pooling=None,
classes=10,
activation='softmax'):
"""Instantiates a NASNet architecture in CIFAR mode.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
            has to be `(32, 32, 3)` for NASNetCIFAR).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(32, 32, 3)` would be one valid value.
use_auxiliary_branch: Whether to use the auxiliary branch during
training or evaluation.
dropout: dropout rate
weight_decay: l2 regularization weight
include_top: whether to include the fully-connected
layer at the top of the network.
        weights: `None` (random initialization); no pre-trained
            ImageNet weights are available for the CIFAR variant.
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
activation: Type of activation at the top layer.
Can be one of 'softmax' or 'sigmoid'.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
global _BN_DECAY, _BN_EPSILON
_BN_DECAY = 0.9
_BN_EPSILON = 1e-5
return NASNet(input_shape,
penultimate_filters=768,
nb_blocks=6,
stem_filters=32,
initial_reduction=False,
skip_reduction_layer_input=False,
use_auxiliary_branch=use_auxiliary_branch,
filters_multiplier=2,
dropout=dropout,
weight_decay=weight_decay,
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
pooling=pooling,
classes=classes,
default_size=224,
activation=activation)
def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1),
weight_decay=5e-5, id=None):
'''Adds 2 blocks of [relu-separable conv-batchnorm]
# Arguments:
ip: input tensor
filters: number of output filters per layer
kernel_size: kernel size of separable convolutions
strides: strided convolution for downsampling
weight_decay: l2 regularization weight
id: string id
# Returns:
a Keras tensor
'''
channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
with K.name_scope('separable_conv_block_%s' % id):
x = Activation('relu')(ip)
x = SeparableConv2D(filters, kernel_size, strides=strides,
name='separable_conv_1_%s' % id, padding='same',
use_bias=False, kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
epsilon=_BN_EPSILON,
name="separable_conv_1_bn_%s" % id)(x)
x = Activation('relu')(x)
x = SeparableConv2D(filters, kernel_size, name='separable_conv_2_%s' % id,
padding='same', use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
epsilon=_BN_EPSILON,
name="separable_conv_2_bn_%s" % id)(x)
return x
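# Shape sketch (channels_last, illustrative): both separable convolutions use
# padding='same', so only `strides` changes the spatial size. E.g. a
# (None, 32, 32, 64) tensor passed through
# _separable_conv_block(x, filters=44, strides=(2, 2), id='demo')
# comes out as (None, 16, 16, 44).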
def _adjust_block(p, ip, filters, weight_decay=5e-5, id=None):
'''
    Adjusts the input `p` to match the shape of the input `ip`,
    or handles situations where the number of output filters
    needs to be changed
# Arguments:
p: input tensor which needs to be modified
ip: input tensor whose shape needs to be matched
filters: number of output filters to be matched
weight_decay: l2 regularization weight
id: string id
# Returns:
an adjusted Keras tensor
'''
channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
img_dim = 2 if K.image_data_format() == 'channels_first' else -2
with K.name_scope('adjust_block'):
if p is None:
p = ip
elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
with K.name_scope('adjust_reduction_block_%s' % id):
p = Activation('relu', name='adjust_relu_1_%s' % id)(p)
p1 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid',
name='adjust_avg_pool_1_%s' % id)(p)
p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False,
kernel_regularizer=l2(weight_decay),
name='adjust_conv_1_%s' % id,
kernel_initializer='he_normal')(p1)
p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid',
name='adjust_avg_pool_2_%s' % id)(p2)
p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False,
kernel_regularizer=l2(weight_decay),
name='adjust_conv_2_%s' % id,
kernel_initializer='he_normal')(p2)
p = concatenate([p1, p2], axis=channel_dim)
p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
epsilon=_BN_EPSILON,
name='adjust_bn_%s' % id)(p)
elif p._keras_shape[channel_dim] != filters:
with K.name_scope('adjust_projection_block_%s' % id):
p = Activation('relu')(p)
p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same',
name='adjust_conv_projection_%s' % id, use_bias=False,
kernel_regularizer=l2(weight_decay),
kernel_initializer='he_normal')(p)
p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
epsilon=_BN_EPSILON,
name='adjust_bn_%s' % id)(p)
return p
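# Note on the reduction branch above: when `p` has twice the spatial size of
# `ip`, it is halved with two parallel stride-2 average pools, where the second
# path is shifted by one pixel via ZeroPadding2D + Cropping2D so the two 1x1
# convolutions sample complementary pixel grids (the "factorized reduction"
# trick from the TF Slim reference implementation).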
def _normal_A(ip, p, filters, weight_decay=5e-5, id=None):
'''Adds a Normal cell for NASNet-A (Fig. 4 in the paper)
# Arguments:
ip: input tensor `x`
p: input tensor `p`
filters: number of output filters
weight_decay: l2 regularization weight
id: string id
# Returns:
a Keras tensor
'''
channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
with K.name_scope('normal_A_block_%s' % id):
p = _adjust_block(p, ip, filters, weight_decay, id)
h = Activation('relu')(ip)
h = Conv2D(filters, (1, 1), strides=(1, 1), padding='same',
name='normal_conv_1_%s' % id, use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay))(h)
h = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
epsilon=_BN_EPSILON, name='normal_bn_1_%s' % id)(h)
with K.name_scope('block_1'):
x1_1 = _separable_conv_block(h, filters, kernel_size=(5, 5),
weight_decay=weight_decay,
id='normal_left1_%s' % id)
x1_2 = _separable_conv_block(p, filters, weight_decay=weight_decay,
id='normal_right1_%s' % id)
x1 = add([x1_1, x1_2], name='normal_add_1_%s' % id)
with K.name_scope('block_2'):
x2_1 = _separable_conv_block(p, filters, (5, 5), weight_decay=weight_decay,
id='normal_left2_%s' % id)
x2_2 = _separable_conv_block(p, filters, (3, 3), weight_decay=weight_decay,
id='normal_right2_%s' % id)
x2 = add([x2_1, x2_2], name='normal_add_2_%s' % id)
with K.name_scope('block_3'):
x3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same',
name='normal_left3_%s' % id)(h)
x3 = add([x3, p], name='normal_add_3_%s' % id)
with K.name_scope('block_4'):
x4_1 = AveragePooling2D((3, 3), strides=(1, 1), padding='same',
name='normal_left4_%s' % id)(p)
x4_2 = AveragePooling2D((3, 3), strides=(1, 1), padding='same',
name='normal_right4_%s' % id)(p)
x4 = add([x4_1, x4_2], name='normal_add_4_%s' % id)
with K.name_scope('block_5'):
x5 = _separable_conv_block(h, filters, weight_decay=weight_decay,
id='normal_left5_%s' % id)
x5 = add([x5, h], name='normal_add_5_%s' % id)
x = concatenate([p, x1, x2, x3, x4, x5], axis=channel_dim,
name='normal_concat_%s' % id)
return x, ip
def _reduction_A(ip, p, filters, weight_decay=5e-5, id=None):
'''Adds a Reduction cell for NASNet-A (Fig. 4 in the paper)
# Arguments:
ip: input tensor `x`
p: input tensor `p`
filters: number of output filters
weight_decay: l2 regularization weight
id: string id
# Returns:
a Keras tensor
'''
""""""
channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
with K.name_scope('reduction_A_block_%s' % id):
p = _adjust_block(p, ip, filters, weight_decay, id)
h = Activation('relu')(ip)
h = Conv2D(filters, (1, 1), strides=(1, 1), padding='same',
name='reduction_conv_1_%s' % id, use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay))(h)
h = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
epsilon=_BN_EPSILON,
name='reduction_bn_1_%s' % id)(h)
with K.name_scope('block_1'):
x1_1 = _separable_conv_block(h, filters, (5, 5), strides=(2, 2),
weight_decay=weight_decay,
id='reduction_left1_%s' % id)
x1_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2),
weight_decay=weight_decay,
id='reduction_1_%s' % id)
x1 = add([x1_1, x1_2], name='reduction_add_1_%s' % id)
with K.name_scope('block_2'):
x2_1 = MaxPooling2D((3, 3), strides=(2, 2), padding='same',
name='reduction_left2_%s' % id)(h)
x2_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2),
weight_decay=weight_decay,
id='reduction_right2_%s' % id)
x2 = add([x2_1, x2_2], name='reduction_add_2_%s' % id)
with K.name_scope('block_3'):
x3_1 = AveragePooling2D((3, 3), strides=(2, 2), padding='same',
name='reduction_left3_%s' % id)(h)
x3_2 = _separable_conv_block(p, filters, (5, 5), strides=(2, 2),
weight_decay=weight_decay,
id='reduction_right3_%s' % id)
x3 = add([x3_1, x3_2], name='reduction_add3_%s' % id)
with K.name_scope('block_4'):
x4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same',
name='reduction_left4_%s' % id)(x1)
x4 = add([x2, x4])
with K.name_scope('block_5'):
x5_1 = _separable_conv_block(x1, filters, (3, 3),
weight_decay=weight_decay,
id='reduction_left4_%s' % id)
x5_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='same',
name='reduction_right5_%s' % id)(h)
x5 = add([x5_1, x5_2], name='reduction_add4_%s' % id)
x = concatenate([x2, x3, x4, x5], axis=channel_dim,
name='reduction_concat_%s' % id)
return x, ip
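# Channel-count sketch: a reduction cell concatenates four branches of
# `filters` channels each (output: 4 * filters, spatial dims halved), while a
# normal cell concatenates six. Since the last group of normal cells runs at
# filters * filters_multiplier ** 2 = 4 * filters (for the default multiplier
# of 2), the final concatenation has 6 * 4 * filters = 24 * filters channels,
# which is why `penultimate_filters` must be divisible by 24.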
def _add_auxiliary_head(x, classes, weight_decay, pooling, include_top, activation):
'''Adds an auxiliary head for training the model
From section A.7 "Training of ImageNet models" of the paper, all NASNet models are
trained using an auxiliary classifier around 2/3 of the depth of the network, with
a loss weight of 0.4
# Arguments
x: input tensor
classes: number of output classes
weight_decay: l2 regularization weight
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
include_top: whether to include the fully-connected
layer at the top of the network.
activation: Type of activation at the top layer.
Can be one of 'softmax' or 'sigmoid'.
# Returns
a keras Tensor
'''
img_height = 1 if K.image_data_format() == 'channels_last' else 2
img_width = 2 if K.image_data_format() == 'channels_last' else 3
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
with K.name_scope('auxiliary_branch'):
auxiliary_x = Activation('relu')(x)
auxiliary_x = AveragePooling2D((5, 5), strides=(3, 3), padding='valid',
name='aux_pool')(auxiliary_x)
auxiliary_x = Conv2D(128, (1, 1), padding='same', use_bias=False,
name='aux_conv_projection', kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay))(auxiliary_x)
auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY,
epsilon=_BN_EPSILON,
name='aux_bn_projection')(auxiliary_x)
auxiliary_x = Activation('relu')(auxiliary_x)
auxiliary_x = Conv2D(768, (auxiliary_x._keras_shape[img_height],
auxiliary_x._keras_shape[img_width]),
padding='valid', use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay),
name='aux_conv_reduction')(auxiliary_x)
auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY,
epsilon=_BN_EPSILON,
name='aux_bn_reduction')(auxiliary_x)
auxiliary_x = Activation('relu')(auxiliary_x)
if include_top:
auxiliary_x = Flatten()(auxiliary_x)
auxiliary_x = Dense(classes, activation=activation,
kernel_regularizer=l2(weight_decay),
name='aux_predictions')(auxiliary_x)
else:
if pooling == 'avg':
auxiliary_x = GlobalAveragePooling2D()(auxiliary_x)
elif pooling == 'max':
auxiliary_x = GlobalMaxPooling2D()(auxiliary_x)
return auxiliary_x
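if __name__ == '__main__':
    # Usage sketch (illustrative, not part of the original module): build the
    # small CIFAR variant with random weights and print its summary. Assumes a
    # TensorFlow backend with image_data_format='channels_last'.
    demo_model = NASNetCIFAR(input_shape=(32, 32, 3), weights=None, classes=10)
    demo_model.summary()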
| keras-contrib/keras_contrib/applications/nasnet.py/0 | {
"file_path": "keras-contrib/keras_contrib/applications/nasnet.py",
"repo_id": "keras-contrib",
"token_count": 19407
} | 13 |
#!/usr/bin/env python
# coding=utf-8
"""
This is a script for downloading and converting the Microsoft COCO dataset
from mscoco.org. It can be run as an independent executable to download
the dataset, or imported by scripts used for larger experiments.
"""
from __future__ import division, print_function, unicode_literals
import os
import errno
import zipfile
import json
from sacred import Experiment
import numpy as np
from PIL import Image
from keras.utils import get_file
from keras.utils.generic_utils import Progbar
from pycocotools.coco import COCO
def palette():
max_cid = max(ids()) + 1
return [(cid, cid, cid) for cid in range(max_cid)]
def cids_to_ids_map():
return {cid: idx for idx, cid in enumerate(ids())}
def ids():
return [0,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73,
74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def id_to_palette_map():
return {idx: color for idx, color in enumerate(palette())}
# return {0: (0, 0, 0), idx: (idx, idx, idx)
# for idx, _ in enumerate(categories())}
def cid_to_palette_map():
return {ids()[idx]: color for idx, color in enumerate(palette())}
def palette_to_id_map():
return {color: ids()[idx] for idx, color in enumerate(palette())}
# return {(0, 0, 0): 0, (idx, idx, idx): idx
# for idx, _ in enumerate(categories())}
def class_weight(image_segmentation_stats_file=None,
weighting_algorithm='total_pixels_p_complement'):
# weights = defaultdict(lambda: 1.5)
if image_segmentation_stats_file is None:
weights = {i: 1.5 for i in ids()}
weights[0] = 0.5
return weights
else:
with open(image_segmentation_stats_file, 'r') as fjson:
            stats = json.load(fjson)
return stats[weighting_algorithm]
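# Default-weight sketch: with no stats file, background (class 0) is
# down-weighted and every other class id gets the same boost, e.g.
# >>> w = class_weight()
# >>> w[0], w[1]
# (0.5, 1.5)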
def mask_to_palette_map(cid):
mapper = id_to_palette_map()
return {0: mapper[0], 255: mapper[cid]}
def categories(): # 80 classes
return ['background', # class zero
'person', 'bicycle', 'car', 'motorcycle',
'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite',
'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle',
'wine glass', 'cup', 'fork', 'knife',
'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
'toaster', 'sink', 'refrigerator', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
def id_to_category(category_id):
return {cid: categories()[idx] for idx, cid in enumerate(ids())}[category_id]
def category_to_cid_map():
return {category: ids()[idx] for idx, category in enumerate(categories())}
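# Mapping sanity examples (illustrative):
# >>> id_to_category(1)
# 'person'
# >>> category_to_cid_map()['person']
# 1
# >>> len(ids()) == len(categories())  # background + 80 object classes
# True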
def mkdir_p(path):
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
# ============== Ingredient 2: dataset =======================
data_coco = Experiment("dataset")
@data_coco.config
def coco_config():
# TODO(ahundt) add md5 sums for each file
verbose = 1
coco_api = 'https://github.com/pdollar/coco/'
dataset_root = os.path.join(os.path.expanduser('~'), 'datasets')
dataset_path = os.path.join(dataset_root, 'coco')
urls = [
'coco2014/train2014.zip',
'coco2014/val2014.zip',
'coco2014/test2014.zip',
'coco2015/test2015.zip',
'annotations-1-0-3/instances_train-val2014.zip',
'annotations-1-0-3/person_keypoints_trainval2014.zip',
'annotations-1-0-4/image_info_test2014.zip',
'annotations-1-0-4/image_info_test2015.zip',
'annotations-1-0-3/captions_train-val2014.zip'
]
base_url = 'http://msvocds.blob.core.windows.net/'
urls = [base_url + x for x in urls]
data_prefixes = [
'train2014',
'val2014',
'test2014',
'test2015',
]
image_filenames = [prefix + '.zip' for prefix in data_prefixes]
annotation_filenames = [
'instances_train-val2014.zip', # training AND validation info
'image_info_test2014.zip', # basic info like download links + category
'image_info_test2015.zip', # basic info like download links + category
'person_keypoints_trainval2014.zip', # elbows, head, wrist etc
'captions_train-val2014.zip', # descriptions of images
]
md5s = [
'0da8c0bd3d6becc4dcb32757491aca88', # train2014.zip
'a3d79f5ed8d289b7a7554ce06a5782b3', # val2014.zip
'04127eef689ceac55e3a572c2c92f264', # test2014.zip
'65562e58af7d695cc47356951578c041', # test2015.zip
'59582776b8dd745d649cd249ada5acf7', # instances_train-val2014.zip
'926b9df843c698817ee62e0e049e3753', # person_keypoints_trainval2014.zip
'f3366b66dc90d8ae0764806c95e43c86', # image_info_test2014.zip
'8a5ad1a903b7896df7f8b34833b61757', # image_info_test2015.zip
'5750999c8c964077e3c81581170be65b' # captions_train-val2014.zip
]
filenames = image_filenames + annotation_filenames
seg_mask_path = os.path.join(dataset_path, 'seg_mask')
annotation_json = [
'annotations/instances_train2014.json',
'annotations/instances_val2014.json'
]
annotation_paths = [os.path.join(dataset_path, postfix)
for postfix in annotation_json]
    # only the first two data prefixes contain segmentation masks
    seg_mask_image_paths = [os.path.join(dataset_path, prefix)
                            for prefix in data_prefixes[:2]]
    seg_mask_output_paths = [os.path.join(seg_mask_path, prefix)
                             for prefix in data_prefixes[:2]]
    seg_mask_extensions = ['.npy' for prefix in data_prefixes[:2]]
image_dirs = [os.path.join(dataset_path, prefix) for prefix in data_prefixes]
image_extensions = ['.jpg' for prefix in data_prefixes]
voc_imageset_txt_paths = [os.path.join(dataset_path,
'annotations', prefix + '.txt')
for prefix in data_prefixes]
@data_coco.capture
def coco_files(dataset_path, filenames, dataset_root, urls, md5s, annotation_paths):
print(dataset_path)
print(dataset_root)
print(urls)
print(filenames)
print(md5s)
print(annotation_paths)
return [os.path.join(dataset_path, file) for file in filenames]
@data_coco.command
def print_coco_files(dataset_path, filenames, dataset_root,
urls, md5s, annotation_paths):
coco_files(dataset_path, filenames, dataset_root, urls, md5s, annotation_paths)
@data_coco.command
def coco_download(dataset_path, filenames, dataset_root,
urls, md5s, annotation_paths):
zip_paths = coco_files(dataset_path, filenames, dataset_root,
urls, md5s, annotation_paths)
for url, filename, md5 in zip(urls, filenames, md5s):
path = get_file(filename, url, md5_hash=md5,
extract=True, cache_subdir=dataset_path)
# TODO(ahundt) check if it is already extracted, don't re-extract. see
# https://github.com/fchollet/keras/issues/5861
zip_file = zipfile.ZipFile(path, 'r')
zip_file.extractall(path=dataset_path)
zip_file.close()
@data_coco.command
def coco_json_to_segmentation(seg_mask_output_paths,
annotation_paths, seg_mask_image_paths, verbose):
for (seg_mask_path, annFile, image_path) in zip(
seg_mask_output_paths, annotation_paths, seg_mask_image_paths):
print('Loading COCO Annotations File: ', annFile)
print('Segmentation Mask Output Folder: ', seg_mask_path)
print('Source Image Folder: ', image_path)
        print('\n'
              'WARNING: Each pixel can have multiple classes! That means '
              'class data overlaps. Also, single objects can be outlined '
              'multiple times because they were labeled by different people! '
              'In other words, even a single object may be segmented twice. '
              'This means the .png files are missing entire objects.\n\n'
              'Use of categorical one-hot encoded .npy files is recommended, '
              'but .npy files also have limitations, because the .npy files '
              'only have one label per pixel for each class, '
              'and currently take the union of multiple human class labels. '
              'Improving how your data is handled will improve your results, '
              'so remember to consider that limitation. There is still '
              'an opportunity to improve how this training data is handled & '
              'integrated with your training scripts and utilities...')
coco = COCO(annFile)
print('Converting Annotations to Segmentation Masks...')
mkdir_p(seg_mask_path)
total_imgs = len(coco.imgToAnns.keys())
progbar = Progbar(total_imgs + len(coco.getImgIds()), verbose=verbose)
# 'annotations' was previously 'instances' in an old version
        ann_img_ids = list(coco.imgToAnns.keys())  # indexable under Python 3
        for img_num in range(total_imgs):
            # Both [0]'s are used to extract the element from a list
            img = coco.loadImgs(
                coco.imgToAnns[ann_img_ids[img_num]][0]['image_id'])[0]
h = img['height']
w = img['width']
name = img['file_name']
root_name = name[:-4]
filename = os.path.join(seg_mask_path, root_name + ".png")
file_exists = os.path.exists(filename)
if file_exists:
progbar.update(img_num, [('file_fraction_already_exists', 1)])
continue
else:
progbar.update(img_num, [('file_fraction_already_exists', 0)])
print(filename)
            MASK = np.zeros((h, w), dtype=np.uint8)
            for ann in coco.imgToAnns[ann_img_ids[img_num]]:
mask = coco.annToMask(ann)
idxs = np.where(mask > 0)
MASK[idxs] = ann['category_id']
im = Image.fromarray(MASK)
im.save(filename)
print('\nConverting Annotations to one hot encoded'
'categorical .npy Segmentation Masks...')
img_ids = coco.getImgIds()
use_original_dims = True # not target_shape
for idx, img_id in enumerate(img_ids):
img = coco.loadImgs(img_id)[0]
name = img['file_name']
root_name = name[:-4]
filename = os.path.join(seg_mask_path, root_name + ".npy")
file_exists = os.path.exists(filename)
if file_exists:
progbar.add(1, [('file_fraction_already_exists', 1)])
continue
else:
progbar.add(1, [('file_fraction_already_exists', 0)])
if use_original_dims:
target_shape = (img['height'], img['width'], max(ids()) + 1)
ann_ids = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
anns = coco.loadAnns(ann_ids)
mask_one_hot = np.zeros(target_shape, dtype=np.uint8)
mask_one_hot[:, :, 0] = 1 # every pixel begins as background
# mask_one_hot = cv2.resize(mask_one_hot,
# target_shape[:2],
# interpolation=cv2.INTER_NEAREST)
for ann in anns:
mask_partial = coco.annToMask(ann)
# mask_partial = cv2.resize(mask_partial,
# (target_shape[1], target_shape[0]),
# interpolation=cv2.INTER_NEAREST)
# # width and height match
# assert mask_one_hot.shape[:2] == mask_partial.shape[:2]
# print('another shape:',
# mask_one_hot[mask_partial > 0].shape)
mask_one_hot[mask_partial > 0, ann['category_id']] = 1
mask_one_hot[mask_partial > 0, 0] = 0
np.save(filename, mask_one_hot)
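# Illustrative follow-up (path hypothetical): a saved one-hot .npy mask can be
# collapsed back to a single-channel class-id image with argmax, e.g.
# >>> mask_one_hot = np.load('seg_mask/train2014/<image>.npy')
# >>> class_ids = mask_one_hot.argmax(axis=-1)  # (height, width) category ids
# The companion .png masks already store raw COCO category ids per pixel.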
@data_coco.command
def coco_to_pascal_voc_imageset_txt(voc_imageset_txt_paths, image_dirs,
image_extensions):
# os.environ["CUDA_VISIBLE_DEVICES"] = '1'
# Get some image/annotation pairs for example
for imgset_path, img_dir, t_ext in zip(
voc_imageset_txt_paths, image_dirs, image_extensions):
        with open(imgset_path, 'w') as txtfile:
            for file in os.listdir(img_dir):
                if file.endswith(t_ext):
                    txtfile.write(
                        os.path.splitext(os.path.basename(file))[0] + '\n')
@data_coco.command
def coco_image_segmentation_stats(seg_mask_output_paths, annotation_paths,
seg_mask_image_paths, verbose):
for (seg_mask_path, annFile, image_path) in zip(
seg_mask_output_paths, annotation_paths, seg_mask_image_paths):
print('Loading COCO Annotations File: ', annFile)
print('Segmentation Mask Output Folder: ', seg_mask_path)
print('Source Image Folder: ', image_path)
stats_json = os.path.join(seg_mask_path,
'image_segmentation_class_stats.json')
print('Image stats will be saved to:', stats_json)
cat_csv = os.path.join(seg_mask_path,
'class_counts_over_sum_category_counts.csv')
print('Category weights will be saved to:', cat_csv)
coco = COCO(annFile)
print('Annotation file info:')
coco.info()
print('category ids, not including 0 for background:')
print(coco.getCatIds())
# display COCO categories and supercategories
cats = coco.loadCats(coco.getCatIds())
nms = [cat['name'] for cat in cats]
print('categories: \n\n', ' '.join(nms))
nms = set([cat['supercategory'] for cat in cats])
print('supercategories: \n', ' '.join(nms))
img_ids = coco.getImgIds()
use_original_dims = True # not target_shape
max_ids = max(ids()) + 1 # add background category
# 0 indicates no category (not even background) for counting bins
max_bin_count = max_ids + 1
bin_count = np.zeros(max_bin_count)
total_pixels = 0
print('Calculating image segmentation stats...')
progbar = Progbar(len(img_ids), verbose=verbose)
        for idx, img_id in enumerate(img_ids):
            img = coco.loadImgs(img_id)[0]
            progbar.update(idx + 1)
ann_ids = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
anns = coco.loadAnns(ann_ids)
target_shape = (img['height'], img['width'], max_ids)
# print('\ntarget_shape:', target_shape)
mask_one_hot = np.zeros(target_shape, dtype=np.uint8)
# Note to only count background pixels once, we define a temporary
# null class of 0, and shift all class category ids up by 1
mask_one_hot[:, :, 0] = 1 # every pixel begins as background
for ann in anns:
mask_partial = coco.annToMask(ann)
above_zero = mask_partial > 0
mask_one_hot[above_zero, ann['category_id']] = ann['category_id'] + 1
mask_one_hot[above_zero, 0] = 0
# print( mask_one_hot)
# print('initial bin_count shape:', np.shape(bin_count))
# flat_mask_one_hot = mask_one_hot.flatten()
bincount_result = np.bincount(mask_one_hot.flatten())
# print('bincount_result TYPE:', type(bincount_result))
# np.array(np.ndarray.flatten(np.bincount(np.ndarray.
# flatten(np.array(mask_one_hot)).astype(int))).resize(max_bin_count))
# print('bincount_result:', bincount_result)
# print('bincount_result_shape', np.shape(bincount_result))
length = int(np.shape(bincount_result)[0])
zeros_to_add = max_bin_count - length
z = np.zeros(zeros_to_add)
# print('zeros_to_add TYPE:', type(zeros_to_add))
# this is a workaround because for some strange reason the
# output type of bincount couldn't interact with other numpy arrays
bincount_result_long = bincount_result.tolist() + z.tolist()
# bincount_result = bincount_result.resize(max_bin_count)
# print('bincount_result2:', bincount_result_long)
# print('bincount_result2_shape',bincount_result_long)
bin_count = bin_count + np.array(bincount_result_long)
total_pixels += (img['height'] * img['width'])
print('Final Tally:')
# shift categories back down by 1
bin_count = bin_count[1:]
category_ids = range(bin_count.size)
sum_category_counts = np.sum(bin_count)
# sum will be =1 as a pixel can be in multiple categories
category_counts_over_sum_category_counts = \
np.true_divide(bin_count.astype(np.float64), sum_category_counts)
np.savetxt(cat_csv, category_counts_over_sum_category_counts)
# sum will be >1 as a pixel can be in multiple categories
category_counts_over_total_pixels = \
np.true_divide(bin_count.astype(np.float64), total_pixels)
        # less common categories have more weight, sum = 1
        category_counts_p_complement = \
            [float(1 - x) if x > 0.0 else 0.0
             for x in category_counts_over_sum_category_counts]
        # less common categories have more weight, sum > 1
        total_pixels_p_complement = \
            [float(1 - x) if x > 0.0 else 0.0
             for x in category_counts_over_total_pixels]
print(bin_count)
        # cast numpy values to plain Python types so json.dump below succeeds
        stat_dict = {
            'total_pixels': int(total_pixels),
            'category_counts': dict(zip(category_ids, bin_count.tolist())),
            'sum_category_counts': float(sum_category_counts),
            'category_counts_over_sum_category_counts':
                dict(zip(category_ids,
                         category_counts_over_sum_category_counts.tolist())),
            'category_counts_over_total_pixels':
                dict(zip(category_ids,
                         category_counts_over_total_pixels.tolist())),
            'category_counts_p_complement':
                dict(zip(category_ids, category_counts_p_complement)),
            'total_pixels_p_complement':
                dict(zip(category_ids, total_pixels_p_complement)),
            'ids': ids(),
            'categories': categories()
        }
print(stat_dict)
with open(stats_json, 'w') as fjson:
json.dump(stat_dict, fjson, ensure_ascii=False)
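# Illustrative follow-up: the stats file written above can drive class_weight()
# at the top of this module (path hypothetical, mirroring stats_json):
# >>> weights = class_weight(
# ...     image_segmentation_stats_file='seg_mask/train2014/'
# ...                                   'image_segmentation_class_stats.json',
# ...     weighting_algorithm='total_pixels_p_complement')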
@data_coco.command
def coco_setup(dataset_root, dataset_path, data_prefixes,
               filenames, urls, md5s, annotation_paths,
               image_dirs, seg_mask_output_paths, verbose,
               image_extensions, seg_mask_image_paths,
               voc_imageset_txt_paths):
    # download the dataset
    coco_download(dataset_path, filenames, dataset_root,
                  urls, md5s, annotation_paths)
    # convert the relevant files to a more useful format
    coco_json_to_segmentation(seg_mask_output_paths, annotation_paths,
                              seg_mask_image_paths, verbose)
    coco_to_pascal_voc_imageset_txt(voc_imageset_txt_paths, image_dirs,
                                    image_extensions)
@data_coco.automain
def main(dataset_root, dataset_path, data_prefixes,
         filenames, urls, md5s, annotation_paths,
         image_dirs, seg_mask_output_paths, verbose,
         image_extensions, seg_mask_image_paths,
         voc_imageset_txt_paths):
    coco_setup(dataset_root, dataset_path, data_prefixes,
               filenames, urls, md5s, annotation_paths,
               image_dirs, seg_mask_output_paths, verbose,
               image_extensions, seg_mask_image_paths,
               voc_imageset_txt_paths)
| keras-contrib/keras_contrib/datasets/coco.py/0 | {
"file_path": "keras-contrib/keras_contrib/datasets/coco.py",
"repo_id": "keras-contrib",
"token_count": 9870
} | 14 |
from __future__ import absolute_import
from __future__ import division
import warnings
from keras import backend as K
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.layers import Layer
from keras.layers import InputSpec
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_marginal_accuracy
from keras_contrib.metrics import crf_viterbi_accuracy
from keras_contrib.utils.test_utils import to_tuple
class CRF(Layer):
"""An implementation of linear chain conditional random field (CRF).
An linear chain CRF is defined to maximize the following likelihood function:
$$ L(W, U, b; y_1, ..., y_n) := \frac{1}{Z}
\sum_{y_1, ..., y_n} \exp(-a_1' y_1 - a_n' y_n
- \sum_{k=1^n}((f(x_k' W + b) y_k) + y_1' U y_2)), $$
where:
$Z$: normalization constant
$x_k, y_k$: inputs and outputs
This implementation has two modes for optimization:
    1. (`join mode`) optimized by maximizing the joint likelihood,
    which is statistically optimal.
Note that in this case, CRF must be the output/last layer.
2. (`marginal mode`) return marginal probabilities on each time
step and optimized via composition
likelihood (product of marginal likelihood), i.e.,
using `categorical_crossentropy` loss.
Note that in this case, CRF can be either the last layer or an
intermediate layer (though not explored).
    For prediction (test phase), one can choose either Viterbi
best path (class indices) or marginal
probabilities if probabilities are needed.
However, if one chooses *join mode* for training,
Viterbi output is typically better than marginal output,
    but the marginal output still performs
    reasonably well, while if *marginal mode* is used for training,
marginal output usually performs
much better. The default behavior and `metrics.crf_accuracy`
is set according to this observation.
In addition, this implementation supports masking and accepts either
    one-hot or sparse targets.
    If you open an issue or a pull request about CRF, please
add 'cc @lzfelix' to notify Luiz Felix.
# Examples
```python
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
model = Sequential()
    model.add(Embedding(3001, 300, mask_zero=True))
    # use learn_mode = 'join', test_mode = 'viterbi',
    # sparse_target = True (label indices output)
crf = CRF(10, sparse_target=True)
model.add(crf)
# crf_accuracy is default to Viterbi acc if using join-mode (default).
# One can add crf.marginal_acc if interested, but may slow down learning
model.compile('adam', loss=crf_loss, metrics=[crf_viterbi_accuracy])
# y must be label indices (with shape 1 at dim 3) here,
# since `sparse_target=True`
model.fit(x, y)
# prediction give onehot representation of Viterbi best path
y_hat = model.predict(x_test)
```
The following snippet shows how to load a persisted
model that uses the CRF layer:
```python
from keras.models import load_model
    from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
custom_objects={'CRF': CRF,
'crf_loss': crf_loss,
'crf_viterbi_accuracy': crf_viterbi_accuracy}
loaded_model = load_model('<path_to_model>',
custom_objects=custom_objects)
```
# Arguments
units: Positive integer, dimensionality of the output space.
learn_mode: Either 'join' or 'marginal'.
            The former trains the model by maximizing the joint likelihood
            while the latter maximizes the product of marginal likelihood
            over all time steps.
One should use `losses.crf_nll` for 'join' mode
and `losses.categorical_crossentropy` or
`losses.sparse_categorical_crossentropy` for
`marginal` mode. For convenience, simply
use `losses.crf_loss`, which will decide the proper loss as described.
test_mode: Either 'viterbi' or 'marginal'.
The former is recommended and as default when `learn_mode = 'join'` and
gives one-hot representation of the best path at test (prediction) time,
while the latter is recommended and chosen as default
when `learn_mode = 'marginal'`,
which produces marginal probabilities for each time step.
For evaluating metrics, one should
use `metrics.crf_viterbi_accuracy` for 'viterbi' mode and
'metrics.crf_marginal_accuracy' for 'marginal' mode, or
simply use `metrics.crf_accuracy` for
both which automatically decides it as described.
One can also use both for evaluation at training.
sparse_target: Boolean (default False) indicating
if provided labels are one-hot or
indices (with shape 1 at dim 3).
use_boundary: Boolean (default True) indicating if trainable
start-end chain energies
should be added to model.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
chain_initializer: Initializer for the `chain_kernel` weights matrix,
used for the CRF chain energy.
(see [initializers](../initializers.md)).
boundary_initializer: Initializer for the `left_boundary`,
'right_boundary' weights vectors,
used for the start/left and end/right boundary energy.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
chain_regularizer: Regularizer function applied to
the `chain_kernel` weights matrix
(see [regularizer](../regularizers.md)).
boundary_regularizer: Regularizer function applied to
the 'left_boundary', 'right_boundary' weight vectors
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
chain_constraint: Constraint function applied to
the `chain_kernel` weights matrix
(see [constraints](../constraints.md)).
boundary_constraint: Constraint function applied to
the `left_boundary`, `right_boundary` weights vectors
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
input_dim: dimensionality of the input (integer).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
unroll: Boolean (default False). If True, the network will be
unrolled, else a symbolic loop will be used.
Unrolling can speed-up a RNN, although it tends
to be more memory-intensive.
Unrolling is only suitable for short sequences.
# Input shape
3D tensor with shape `(nb_samples, timesteps, input_dim)`.
# Output shape
3D tensor with shape `(nb_samples, timesteps, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
"""
def __init__(self, units,
learn_mode='join',
test_mode=None,
sparse_target=False,
use_boundary=True,
use_bias=True,
activation='linear',
kernel_initializer='glorot_uniform',
chain_initializer='orthogonal',
bias_initializer='zeros',
boundary_initializer='zeros',
kernel_regularizer=None,
chain_regularizer=None,
boundary_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
chain_constraint=None,
boundary_constraint=None,
bias_constraint=None,
input_dim=None,
unroll=False,
**kwargs):
super(CRF, self).__init__(**kwargs)
self.supports_masking = True
self.units = units
self.learn_mode = learn_mode
assert self.learn_mode in ['join', 'marginal']
self.test_mode = test_mode
if self.test_mode is None:
self.test_mode = 'viterbi' if self.learn_mode == 'join' else 'marginal'
else:
assert self.test_mode in ['viterbi', 'marginal']
self.sparse_target = sparse_target
self.use_boundary = use_boundary
self.use_bias = use_bias
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.chain_initializer = initializers.get(chain_initializer)
self.boundary_initializer = initializers.get(boundary_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.chain_regularizer = regularizers.get(chain_regularizer)
self.boundary_regularizer = regularizers.get(boundary_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.chain_constraint = constraints.get(chain_constraint)
self.boundary_constraint = constraints.get(boundary_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.unroll = unroll
def build(self, input_shape):
input_shape = to_tuple(input_shape)
self.input_spec = [InputSpec(shape=input_shape)]
self.input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(self.input_dim, self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.chain_kernel = self.add_weight(shape=(self.units, self.units),
name='chain_kernel',
initializer=self.chain_initializer,
regularizer=self.chain_regularizer,
constraint=self.chain_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = 0
if self.use_boundary:
self.left_boundary = self.add_weight(shape=(self.units,),
name='left_boundary',
initializer=self.boundary_initializer,
regularizer=self.boundary_regularizer,
constraint=self.boundary_constraint)
self.right_boundary = self.add_weight(shape=(self.units,),
name='right_boundary',
initializer=self.boundary_initializer,
regularizer=self.boundary_regularizer,
constraint=self.boundary_constraint)
self.built = True
def call(self, X, mask=None):
if mask is not None:
assert K.ndim(mask) == 2, 'Input mask to CRF must have dim 2 if not None'
if self.test_mode == 'viterbi':
test_output = self.viterbi_decoding(X, mask)
else:
test_output = self.get_marginal_prob(X, mask)
self.uses_learning_phase = True
if self.learn_mode == 'join':
train_output = K.zeros_like(K.dot(X, self.kernel))
out = K.in_train_phase(train_output, test_output)
else:
if self.test_mode == 'viterbi':
train_output = self.get_marginal_prob(X, mask)
out = K.in_train_phase(train_output, test_output)
else:
out = test_output
return out
def compute_output_shape(self, input_shape):
return input_shape[:2] + (self.units,)
def compute_mask(self, input, mask=None):
if mask is not None and self.learn_mode == 'join':
return K.any(mask, axis=1)
return mask
def get_config(self):
config = {
'units': self.units,
'learn_mode': self.learn_mode,
'test_mode': self.test_mode,
'use_boundary': self.use_boundary,
'use_bias': self.use_bias,
'sparse_target': self.sparse_target,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'chain_initializer': initializers.serialize(self.chain_initializer),
'boundary_initializer': initializers.serialize(
self.boundary_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'activation': activations.serialize(self.activation),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'chain_regularizer': regularizers.serialize(self.chain_regularizer),
'boundary_regularizer': regularizers.serialize(
self.boundary_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'chain_constraint': constraints.serialize(self.chain_constraint),
'boundary_constraint': constraints.serialize(self.boundary_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'input_dim': self.input_dim,
'unroll': self.unroll}
base_config = super(CRF, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@property
def loss_function(self):
warnings.warn('CRF.loss_function is deprecated '
'and it might be removed in the future. Please '
'use losses.crf_loss instead.')
return crf_loss
@property
def accuracy(self):
warnings.warn('CRF.accuracy is deprecated and it '
'might be removed in the future. Please '
'use metrics.crf_accuracy')
if self.test_mode == 'viterbi':
return crf_viterbi_accuracy
else:
return crf_marginal_accuracy
@property
def viterbi_acc(self):
warnings.warn('CRF.viterbi_acc is deprecated and it might '
'be removed in the future. Please '
'use metrics.viterbi_acc instead.')
return crf_viterbi_accuracy
@property
def marginal_acc(self):
        warnings.warn('CRF.marginal_acc is deprecated and it '
'might be removed in the future. Please '
'use metrics.marginal_acc instead.')
return crf_marginal_accuracy
@staticmethod
def softmaxNd(x, axis=-1):
m = K.max(x, axis=axis, keepdims=True)
exp_x = K.exp(x - m)
prob_x = exp_x / K.sum(exp_x, axis=axis, keepdims=True)
return prob_x
@staticmethod
def shift_left(x, offset=1):
assert offset > 0
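        # e.g. shift_left([[1, 2, 3]], 1) -> [[2, 3, 0]] (zero-pad on the right)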
return K.concatenate([x[:, offset:], K.zeros_like(x[:, :offset])], axis=1)
@staticmethod
def shift_right(x, offset=1):
assert offset > 0
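        # e.g. shift_right([[1, 2, 3]], 1) -> [[0, 1, 2]] (zero-pad on the left)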
return K.concatenate([K.zeros_like(x[:, :offset]), x[:, :-offset]], axis=1)
def add_boundary_energy(self, energy, mask, start, end):
start = K.expand_dims(K.expand_dims(start, 0), 0)
end = K.expand_dims(K.expand_dims(end, 0), 0)
if mask is None:
energy = K.concatenate([energy[:, :1, :] + start, energy[:, 1:, :]],
axis=1)
energy = K.concatenate([energy[:, :-1, :], energy[:, -1:, :] + end],
axis=1)
else:
mask = K.expand_dims(K.cast(mask, K.floatx()))
start_mask = K.cast(K.greater(mask, self.shift_right(mask)), K.floatx())
end_mask = K.cast(K.greater(self.shift_left(mask), mask), K.floatx())
energy = energy + start_mask * start
energy = energy + end_mask * end
return energy
def get_log_normalization_constant(self, input_energy, mask, **kwargs):
"""Compute logarithm of the normalization constant Z, where
Z = sum exp(-E) -> logZ = log sum exp(-E) =: -nlogZ
"""
# should have logZ[:, i] == logZ[:, j] for any i, j
logZ = self.recursion(input_energy, mask, return_sequences=False, **kwargs)
return logZ[:, 0]
def get_energy(self, y_true, input_energy, mask):
"""Energy = a1' y1 + u1' y1 + y1' U y2 + u2' y2 + y2' U y3 + u3' y3 + an' y3
"""
input_energy = K.sum(input_energy * y_true, 2) # (B, T)
# (B, T-1)
chain_energy = K.sum(K.dot(y_true[:, :-1, :],
self.chain_kernel) * y_true[:, 1:, :], 2)
if mask is not None:
mask = K.cast(mask, K.floatx())
# (B, T-1), mask[:,:-1]*mask[:,1:] makes it work with any padding
chain_mask = mask[:, :-1] * mask[:, 1:]
input_energy = input_energy * mask
chain_energy = chain_energy * chain_mask
total_energy = K.sum(input_energy, -1) + K.sum(chain_energy, -1) # (B, )
return total_energy
def get_negative_log_likelihood(self, y_true, X, mask):
"""Compute the loss, i.e., negative log likelihood (normalize by number of time steps)
likelihood = 1/Z * exp(-E) -> neg_log_like = - log(1/Z * exp(-E)) = logZ + E
"""
input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
if self.use_boundary:
input_energy = self.add_boundary_energy(input_energy, mask,
self.left_boundary,
self.right_boundary)
energy = self.get_energy(y_true, input_energy, mask)
logZ = self.get_log_normalization_constant(input_energy, mask,
input_length=K.int_shape(X)[1])
nloglik = logZ + energy
if mask is not None:
nloglik = nloglik / K.sum(K.cast(mask, K.floatx()), 1)
else:
nloglik = nloglik / K.cast(K.shape(X)[1], K.floatx())
return nloglik
def step(self, input_energy_t, states, return_logZ=True):
        # Note: in the following, `prev_target_val` has shape = (B, F)
# where B = batch_size, F = output feature dim
# Note: `i` is of float32, due to the behavior of `K.rnn`
prev_target_val, i, chain_energy = states[:3]
t = K.cast(i[0, 0], dtype='int32')
if len(states) > 3:
if K.backend() == 'theano':
m = states[3][:, t:(t + 2)]
else:
m = K.slice(states[3], [0, t], [-1, 2])
input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
# (1, F, F)*(B, 1, 1) -> (B, F, F)
chain_energy = chain_energy * K.expand_dims(
K.expand_dims(m[:, 0] * m[:, 1]))
if return_logZ:
# shapes: (1, B, F) + (B, F, 1) -> (B, F, F)
energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)
new_target_val = K.logsumexp(-energy, 1) # shapes: (B, F)
return new_target_val, [new_target_val, i + 1]
else:
energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
min_energy = K.min(energy, 1)
            # cast for tf-version `K.rnn`
argmin_table = K.cast(K.argmin(energy, 1), K.floatx())
return argmin_table, [min_energy, i + 1]
def recursion(self, input_energy, mask=None, go_backwards=False,
return_sequences=True, return_logZ=True, input_length=None):
"""Forward (alpha) or backward (beta) recursion
        If `return_logZ = True`, compute logZ, the normalization constant:
        \[ Z = \sum_{y1, y2, y3} \exp(-E)  # E: energy
             = \sum_{y1, y2, y3} \exp(-(u1' y1 + y1' W y2 + u2' y2 + y2' W y3 + u3' y3))
             = \sum_{y2, y3} \exp(-(u2' y2 + y2' W y3 + u3' y3))
               \sum_{y1} \exp(-(u1' y1 + y1' W y2)) \]
        Denote:
        \[ S(y2) := \sum_{y1} \exp(-(u1' y1 + y1' W y2)), \]
        \[ Z = \sum_{y2, y3} \exp(\log S(y2) - (u2' y2 + y2' W y3 + u3' y3)) \]
        \[ \log S(y2) = \mathrm{logsumexp}(-(u1' y1 + y1' W y2)) \]
        Note that:
            yi's are one-hot vectors
            u1, u3: boundary energies that have been merged into the input energy
        If `return_logZ = False`, compute the Viterbi best-path lookup table.
"""
chain_energy = self.chain_kernel
# shape=(1, F, F): F=num of output features. 1st F is for t-1, 2nd F for t
chain_energy = K.expand_dims(chain_energy, 0)
# shape=(B, F), dtype=float32
prev_target_val = K.zeros_like(input_energy[:, 0, :])
if go_backwards:
input_energy = K.reverse(input_energy, 1)
if mask is not None:
mask = K.reverse(mask, 1)
initial_states = [prev_target_val, K.zeros_like(prev_target_val[:, :1])]
constants = [chain_energy]
if mask is not None:
mask2 = K.cast(K.concatenate([mask, K.zeros_like(mask[:, :1])], axis=1),
K.floatx())
constants.append(mask2)
def _step(input_energy_i, states):
return self.step(input_energy_i, states, return_logZ)
target_val_last, target_val_seq, _ = K.rnn(_step, input_energy,
initial_states,
constants=constants,
input_length=input_length,
unroll=self.unroll)
if return_sequences:
if go_backwards:
target_val_seq = K.reverse(target_val_seq, 1)
return target_val_seq
else:
return target_val_last
def forward_recursion(self, input_energy, **kwargs):
return self.recursion(input_energy, **kwargs)
def backward_recursion(self, input_energy, **kwargs):
return self.recursion(input_energy, go_backwards=True, **kwargs)
def get_marginal_prob(self, X, mask=None):
input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
if self.use_boundary:
input_energy = self.add_boundary_energy(input_energy, mask,
self.left_boundary,
self.right_boundary)
input_length = K.int_shape(X)[1]
alpha = self.forward_recursion(input_energy, mask=mask,
input_length=input_length)
beta = self.backward_recursion(input_energy, mask=mask,
input_length=input_length)
if mask is not None:
input_energy = input_energy * K.expand_dims(K.cast(mask, K.floatx()))
margin = -(self.shift_right(alpha) + input_energy + self.shift_left(beta))
return self.softmaxNd(margin)
def viterbi_decoding(self, X, mask=None):
input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
if self.use_boundary:
input_energy = self.add_boundary_energy(
input_energy, mask, self.left_boundary, self.right_boundary)
argmin_tables = self.recursion(input_energy, mask, return_logZ=False)
argmin_tables = K.cast(argmin_tables, 'int32')
# backward to find best path, `initial_best_idx` can be any,
# as all elements in the last argmin_table are the same
argmin_tables = K.reverse(argmin_tables, 1)
# matrix instead of vector is required by tf `K.rnn`
initial_best_idx = [K.expand_dims(argmin_tables[:, 0, 0])]
if K.backend() == 'theano':
from theano import tensor as T
initial_best_idx = [T.unbroadcast(initial_best_idx[0], 1)]
def gather_each_row(params, indices):
n = K.shape(indices)[0]
if K.backend() == 'theano':
from theano import tensor as T
return params[T.arange(n), indices]
elif K.backend() == 'tensorflow':
import tensorflow as tf
indices = K.transpose(K.stack([tf.range(n), indices]))
return tf.gather_nd(params, indices)
else:
raise NotImplementedError
def find_path(argmin_table, best_idx):
next_best_idx = gather_each_row(argmin_table, best_idx[0][:, 0])
next_best_idx = K.expand_dims(next_best_idx)
if K.backend() == 'theano':
from theano import tensor as T
next_best_idx = T.unbroadcast(next_best_idx, 1)
return next_best_idx, [next_best_idx]
_, best_paths, _ = K.rnn(find_path, argmin_tables, initial_best_idx,
input_length=K.int_shape(X)[1], unroll=self.unroll)
best_paths = K.reverse(best_paths, 1)
best_paths = K.squeeze(best_paths, 2)
return K.one_hot(best_paths, self.units)
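# A minimal usage sketch (for reference; it uses the `crf_loss` and
# `crf_viterbi_accuracy` helpers referenced in the deprecation warnings above):
#
#     from keras.models import Sequential
#     from keras.layers import Embedding
#     from keras_contrib.layers import CRF
#     from keras_contrib.losses import crf_loss
#     from keras_contrib.metrics import crf_viterbi_accuracy
#
#     model = Sequential()
#     model.add(Embedding(input_dim=vocab_size, output_dim=64, mask_zero=True))
#     model.add(CRF(num_tags, learn_mode='join', test_mode='viterbi'))
#     model.compile('adam', loss=crf_loss, metrics=[crf_viterbi_accuracy])
#
# Here `vocab_size` and `num_tags` are placeholders for your own data.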
| keras-contrib/keras_contrib/layers/crf.py/0 | {
"file_path": "keras-contrib/keras_contrib/layers/crf.py",
"repo_id": "keras-contrib",
"token_count": 13038
} | 15 |
from __future__ import absolute_import
| keras-contrib/keras_contrib/regularizers/__init__.py/0 | {
"file_path": "keras-contrib/keras_contrib/regularizers/__init__.py",
"repo_id": "keras-contrib",
"token_count": 10
} | 16 |
import pytest
from numpy.testing import assert_allclose
import numpy as np
from keras import backend as K
from keras.backend import theano_backend as KTH
from keras.backend import tensorflow_backend as KTF
import keras_contrib.backend.theano_backend as KCTH
import keras_contrib.backend.tensorflow_backend as KCTF
import keras_contrib.backend.numpy_backend as KCNP
from keras_contrib import backend as KC
def check_dtype(var, dtype):
if K._BACKEND == 'theano':
assert var.dtype == dtype
else:
assert var.dtype.name == '%s_ref' % dtype
def check_single_tensor_operation(function_name, input_shape, **kwargs):
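    # Run `function_name` on the same random input with both the Theano and
    # TensorFlow versions of the contrib backend and check the results match.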
val = np.random.random(input_shape) - 0.5
xth = KTH.variable(val)
xtf = KTF.variable(val)
zth = KTH.eval(getattr(KCTH, function_name)(xth, **kwargs))
ztf = KTF.eval(getattr(KCTF, function_name)(xtf, **kwargs))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def check_two_tensor_operation(function_name, x_input_shape,
y_input_shape, **kwargs):
xval = np.random.random(x_input_shape) - 0.5
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
yval = np.random.random(y_input_shape) - 0.5
yth = KTH.variable(yval)
ytf = KTF.variable(yval)
zth = KTH.eval(getattr(KCTH, function_name)(xth, yth, **kwargs))
ztf = KTF.eval(getattr(KCTF, function_name)(xtf, ytf, **kwargs))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def check_composed_tensor_operations(first_function_name, first_function_args,
second_function_name, second_function_args,
input_shape):
''' Creates a random tensor t0 with shape input_shape and compute
t1 = first_function_name(t0, **first_function_args)
t2 = second_function_name(t1, **second_function_args)
with both Theano and TensorFlow backends and ensures the answers match.
'''
val = np.random.random(input_shape) - 0.5
xth = KTH.variable(val)
xtf = KTF.variable(val)
yth = getattr(KCTH, first_function_name)(xth, **first_function_args)
ytf = getattr(KCTF, first_function_name)(xtf, **first_function_args)
zth = KTH.eval(getattr(KCTH, second_function_name)(yth, **second_function_args))
ztf = KTF.eval(getattr(KCTF, second_function_name)(ytf, **second_function_args))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
class TestBackend(object):
@pytest.mark.skipif(K.backend() != 'tensorflow',
reason='No need to run the tests twice.')
@pytest.mark.parametrize('input_shape', [(1, 3, 40, 40), (1, 3, 10, 10)])
@pytest.mark.parametrize('kernel_shape', [2, 5])
def test_extract(self, input_shape, kernel_shape):
xval = np.random.random(input_shape)
kernel = [kernel_shape, kernel_shape]
strides = [kernel_shape, kernel_shape]
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
ztf = KTF.eval(KCTF.extract_image_patches(xtf, kernel, strides,
data_format='channels_first',
padding='valid'))
zth = KTH.eval(KCTH.extract_image_patches(xth, kernel, strides,
data_format='channels_first',
padding='valid'))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-02)
@pytest.mark.skipif(K.backend() != 'tensorflow',
reason='No need to run the tests twice.')
@pytest.mark.parametrize('input_shape', [(1, 40, 40, 3), (1, 10, 10, 3)])
@pytest.mark.parametrize('kernel_shape', [2, 5])
def test_extract2(self, input_shape, kernel_shape):
xval = np.random.random(input_shape)
kernel = [kernel_shape, kernel_shape]
strides = [kernel_shape, kernel_shape]
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
ztf = KTF.eval(KCTF.extract_image_patches(xtf, kernel, strides,
data_format='channels_last',
padding='same'))
zth = KTH.eval(KCTH.extract_image_patches(xth, kernel, strides,
data_format='channels_last',
padding='same'))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-02)
@pytest.mark.skipif(K.backend() != 'tensorflow',
reason='No need to run the tests twice.')
@pytest.mark.parametrize('batch_size', [1, 2, 3])
@pytest.mark.parametrize('scale', [2, 3])
@pytest.mark.parametrize('channels', [1, 2, 3])
@pytest.mark.parametrize('rows', [1, 2, 3])
@pytest.mark.parametrize('cols', [1, 2, 3])
def test_depth_to_space(self, batch_size, scale, channels, rows, cols):
if K.image_data_format() == 'channels_first':
arr = np.arange(batch_size * channels * scale * scale * rows * cols)\
.reshape((batch_size, channels * scale * scale, rows, cols))
elif K.image_data_format() == 'channels_last':
arr = np.arange(batch_size * rows * cols * scale * scale * channels) \
.reshape((batch_size, rows, cols, channels * scale * scale))
arr_tf = KTF.variable(arr)
arr_th = KTH.variable(arr)
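        # Build the expected result by hand: depth_to_space is a "pixel
        # shuffle" that rearranges blocks of scale*scale channels into
        # scale x scale spatial neighborhoods.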
if K.image_data_format() == 'channels_first':
expected = arr.reshape((batch_size, scale, scale, channels, rows, cols))\
.transpose((0, 3, 4, 1, 5, 2))\
.reshape((batch_size, channels, rows * scale, cols * scale))
elif K.image_data_format() == 'channels_last':
expected = arr.reshape((batch_size, rows, cols, scale, scale, channels))\
.transpose((0, 1, 3, 2, 4, 5))\
.reshape((batch_size, rows * scale, cols * scale, channels))
tf_ans = KTF.eval(KCTF.depth_to_space(arr_tf, scale))
th_ans = KTH.eval(KCTH.depth_to_space(arr_th, scale))
assert tf_ans.shape == expected.shape
assert th_ans.shape == expected.shape
assert_allclose(expected, tf_ans, atol=1e-05)
assert_allclose(expected, th_ans, atol=1e-05)
@pytest.mark.parametrize('keep_dims', [True, False])
def test_moments(self, keep_dims):
input_shape = (10, 10, 10, 10)
x_0 = np.zeros(input_shape)
x_1 = np.ones(input_shape)
x_random = np.random.random(input_shape)
th_axes = [0, 2, 3]
tf_axes = [0, 1, 2]
for ip in [x_0, x_1, x_random]:
for axes in [th_axes, tf_axes]:
K_mean, K_var = KC.moments(K.variable(ip), axes, keep_dims=keep_dims)
np_mean, np_var = KCNP.moments(ip, axes, keep_dims=keep_dims)
K_mean_val = K.eval(K_mean)
K_var_val = K.eval(K_var)
# absolute tolerance needed when working with zeros
assert_allclose(K_mean_val, np_mean, rtol=1e-4, atol=1e-10)
assert_allclose(K_var_val, np_var, rtol=1e-4, atol=1e-10)
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/backend/backend_test.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/backend/backend_test.py",
"repo_id": "keras-contrib",
"token_count": 3638
} | 17 |
import pytest
import numpy as np
from keras import regularizers
from keras import constraints
from keras.models import Sequential
from keras import backend as K
from keras_contrib.layers import core
from keras_contrib.utils.test_utils import layer_test
from numpy.testing import assert_allclose
@pytest.mark.parametrize('input_shape', [(3, 2),
(3, 4, 2),
(None, None, 2),
(3, 4, 5, 2)])
def test_cosinedense(input_shape):
layer_test(core.CosineDense,
kwargs={'units': 3},
input_shape=input_shape)
def test_cosinedense_reg_constraint():
layer_test(core.CosineDense,
kwargs={'units': 3,
'kernel_regularizer': regularizers.l2(0.01),
'bias_regularizer': regularizers.l1(0.01),
'activity_regularizer': regularizers.l2(0.01),
'kernel_constraint': constraints.MaxNorm(1),
'bias_constraint': constraints.MaxNorm(1)},
input_shape=(3, 2))
def test_cosinedense_correctness():
X = np.random.randn(1, 20)
model = Sequential()
model.add(core.CosineDense(1, use_bias=True, input_shape=(20,)))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
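    # With the kernel set to X.T, the input is collinear with the kernel, so
    # the cosine response should be exactly 1 (and -1 in the second model
    # below, where the kernel is a negative multiple of the input).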
W[0] = X.T
W[1] = np.asarray([1.])
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, np.ones((1, 1), dtype=K.floatx()), atol=1e-5)
X = np.random.randn(1, 20)
model = Sequential()
model.add(core.CosineDense(1, use_bias=False, input_shape=(20,)))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = -2 * X.T
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, -np.ones((1, 1), dtype=K.floatx()), atol=1e-5)
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/layers/test_core.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/layers/test_core.py",
"repo_id": "keras-contrib",
"token_count": 979
} | 18 |
# Benchmark the layer performance
This directory contains benchmarks to compare the performance of
`keras_core.layers.XXX` and `tf.keras.layers.XXX`. We compare the performance of
both the forward pass and train step (forward & backward pass).
To run the benchmark, use the command below and change the flags according to
your target:
```shell
python3 -m benchmarks.layer_benchmark.conv_benchmark \
--benchmark_name=benchmark_conv2D \
--num_samples=2048 \
--batch_size=256 \
--jit_compile=True
``` | keras-core/benchmarks/layer_benchmark/README.md/0 | {
"file_path": "keras-core/benchmarks/layer_benchmark/README.md",
"repo_id": "keras-core",
"token_count": 159
} | 19 |
"""Image classification benchmark.
This script runs an image classification benchmark on the "dogs vs cats" dataset.
It supports the following 3 models:
- EfficientNetV2B0
- Xception
- ResNet50V2
To run the benchmark, make sure you are in model_benchmark/ directory, and run
the command below:
python3 -m model_benchmark.image_classification_benchmark \
--model="EfficientNetV2B0" \
--epochs=2 \
--batch_size=32 \
--mixed_precision_policy="mixed_float16"
"""
import time
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import app
from absl import flags
from absl import logging
from model_benchmark.benchmark_utils import BenchmarkMetricsCallback
import keras_core as keras
flags.DEFINE_string("model", "EfficientNetV2B0", "The model to benchmark.")
flags.DEFINE_integer("epochs", 1, "The number of epochs.")
flags.DEFINE_integer("batch_size", 4, "Batch Size.")
flags.DEFINE_string(
"mixed_precision_policy",
"mixed_float16",
"The global precision policy to use, e.g., 'mixed_float16' or 'float32'.",
)
FLAGS = flags.FLAGS
BATCH_SIZE = 32
IMAGE_SIZE = (224, 224)
CHANNELS = 3
MODEL_MAP = {
"EfficientNetV2B0": keras.applications.EfficientNetV2B0,
"Xception": keras.applications.Xception,
"ResNet50V2": keras.applications.ResNet50V2,
}
def load_data():
# Load cats vs dogs dataset, and split into train and validation sets.
train_dataset, val_dataset = tfds.load(
"cats_vs_dogs", split=["train[:90%]", "train[90%:]"], as_supervised=True
)
resizing = keras.layers.Resizing(
IMAGE_SIZE[0], IMAGE_SIZE[1], crop_to_aspect_ratio=True
)
def preprocess_inputs(image, label):
image = tf.cast(image, "float32")
return resizing(image), label
train_dataset = (
train_dataset.map(
preprocess_inputs, num_parallel_calls=tf.data.AUTOTUNE
)
.batch(FLAGS.batch_size)
.prefetch(tf.data.AUTOTUNE)
)
val_dataset = (
val_dataset.map(preprocess_inputs, num_parallel_calls=tf.data.AUTOTUNE)
.batch(FLAGS.batch_size)
.cache()
.prefetch(tf.data.AUTOTUNE)
)
return train_dataset, val_dataset
def load_model():
model_class = MODEL_MAP[FLAGS.model]
    # Load the selected model and add a classification head.
model = model_class(include_top=False, weights="imagenet")
classifier = keras.models.Sequential(
[
keras.Input([IMAGE_SIZE[0], IMAGE_SIZE[1], CHANNELS]),
model,
keras.layers.GlobalAveragePooling2D(),
keras.layers.Dense(2),
]
)
return classifier
def main(_):
keras.mixed_precision.set_dtype_policy(FLAGS.mixed_precision_policy)
logging.info(
"Benchmarking configs...\n"
"=========================\n"
f"MODEL: {FLAGS.model}\n"
f"TASK: image classification/dogs-vs-cats \n"
f"BATCH_SIZE: {FLAGS.batch_size}\n"
f"EPOCHS: {FLAGS.epochs}\n"
"=========================\n"
)
# Load datasets.
train_ds, validation_ds = load_data()
# Load the model.
classifier = load_model()
lr = keras.optimizers.schedules.PolynomialDecay(
5e-4,
decay_steps=train_ds.cardinality() * FLAGS.epochs,
end_learning_rate=0.0,
)
optimizer = keras.optimizers.Adam(lr)
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
benchmark_metrics_callback = BenchmarkMetricsCallback(
start_batch=1,
stop_batch=train_ds.cardinality().numpy() - 1,
)
classifier.compile(
optimizer=optimizer,
loss=loss,
metrics=["sparse_categorical_accuracy"],
)
# Start training.
logging.info("Starting Training...")
st = time.time()
history = classifier.fit(
train_ds,
validation_data=validation_ds,
epochs=FLAGS.epochs,
callbacks=[benchmark_metrics_callback],
)
wall_time = time.time() - st
validation_accuracy = history.history["val_sparse_categorical_accuracy"][-1]
examples_per_second = (
np.mean(np.array(benchmark_metrics_callback.state["throughput"]))
* FLAGS.batch_size
)
logging.info("Training Finished!")
logging.info(f"Wall Time: {wall_time:.4f} seconds.")
logging.info(f"Validation Accuracy: {validation_accuracy:.4f}")
logging.info(f"examples_per_second: {examples_per_second:.4f}")
if __name__ == "__main__":
app.run(main)
| keras-core/benchmarks/model_benchmark/image_classification_benchmark.py/0 | {
"file_path": "keras-core/benchmarks/model_benchmark/image_classification_benchmark.py",
"repo_id": "keras-core",
"token_count": 1935
} | 20 |
# flake8: noqa
import os
# Set backend env to torch
os.environ["KERAS_BACKEND"] = "torch"
import torch
import torch.nn as nn
import torch.optim as optim
from keras_core import layers
import keras_core
import numpy as np
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
learning_rate = 0.01
batch_size = 128
num_epochs = 1
def get_data():
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras_core.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Create a TensorDataset
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
return dataset
def get_model():
# Create the Keras model
model = keras_core.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
return model
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.model = keras_core.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
def forward(self, x):
return self.model(x)
def train(model, train_loader, num_epochs, optimizer, loss_fn):
for epoch in range(num_epochs):
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
# Forward pass
outputs = model(inputs)
loss = loss_fn(outputs, targets)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
# Print loss statistics
if (batch_idx + 1) % 10 == 0:
print(
f"Epoch [{epoch+1}/{num_epochs}], "
f"Batch [{batch_idx+1}/{len(train_loader)}], "
f"Loss: {running_loss / 10}"
)
running_loss = 0.0
def setup(current_gpu_index, num_gpu):
# Device setup
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "56492"
device = torch.device("cuda:{}".format(current_gpu_index))
dist.init_process_group(
backend="nccl",
init_method="env://",
world_size=num_gpu,
rank=current_gpu_index,
)
torch.cuda.set_device(device)
def prepare(dataset, current_gpu_index, num_gpu, batch_size):
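    # Shard the dataset across processes so that each rank trains on a
    # distinct subset of the samples.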
sampler = DistributedSampler(
dataset,
num_replicas=num_gpu,
rank=current_gpu_index,
shuffle=False,
)
# Create a DataLoader
train_loader = DataLoader(
dataset,
sampler=sampler,
batch_size=batch_size,
shuffle=False,
)
return train_loader
def cleanup():
# Cleanup
dist.destroy_process_group()
def main(current_gpu_index, num_gpu):
# setup the process groups
setup(current_gpu_index, num_gpu)
#################################################################
######## Writing a torch training loop for a Keras model ########
#################################################################
dataset = get_data()
model = get_model()
# prepare the dataloader
dataloader = prepare(dataset, current_gpu_index, num_gpu, batch_size)
# Instantiate the torch optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
# Put model on device
model = model.to(current_gpu_index)
ddp_model = DDP(
model, device_ids=[current_gpu_index], output_device=current_gpu_index
)
train(ddp_model, dataloader, num_epochs, optimizer, loss_fn)
################################################################
######## Using a Keras model or layer in a torch Module ########
################################################################
torch_module = MyModel().to(current_gpu_index)
ddp_torch_module = DDP(
torch_module,
device_ids=[current_gpu_index],
output_device=current_gpu_index,
)
# Instantiate the torch optimizer
optimizer = optim.Adam(torch_module.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
train(ddp_torch_module, dataloader, num_epochs, optimizer, loss_fn)
cleanup()
if __name__ == "__main__":
# GPU parameters
num_gpu = torch.cuda.device_count()
print(f"Running on {num_gpu} GPUs")
torch.multiprocessing.spawn(
main,
args=(num_gpu,),
nprocs=num_gpu,
join=True,
)
| keras-core/examples/demo_torch_multi_gpu.py/0 | {
"file_path": "keras-core/examples/demo_torch_multi_gpu.py",
"repo_id": "keras-core",
"token_count": 2639
} | 21 |
"""
Title: DCGAN to generate face images
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/04/29
Last modified: 2021/01/01
Description: A simple DCGAN trained using `fit()` by overriding `train_step` on CelebA images.
Accelerator: GPU
"""
"""
## Setup
"""
import tensorflow as tf
import keras_core as keras
from keras_core import layers
import matplotlib.pyplot as plt
import os
import gdown
from zipfile import ZipFile
"""
## Prepare CelebA data
We'll use face images from the CelebA dataset, resized to 64x64.
"""
os.makedirs("celeba_gan")
url = "https://drive.google.com/uc?id=1O7m1010EJjLE5QxLZiM9Fpjs7Oj6e684"
output = "celeba_gan/data.zip"
gdown.download(url, output, quiet=True)
with ZipFile("celeba_gan/data.zip", "r") as zipobj:
zipobj.extractall("celeba_gan")
"""
Create a dataset from our folder, and rescale the images to the [0-1] range:
"""
dataset = keras.utils.image_dataset_from_directory(
"celeba_gan", label_mode=None, image_size=(64, 64), batch_size=32
)
dataset = dataset.map(lambda x: x / 255.0)
"""
Let's display a sample image:
"""
for x in dataset:
plt.axis("off")
plt.imshow((x.numpy() * 255).astype("int32")[0])
break
"""
## Create the discriminator
It maps a 64x64 image to a binary classification score.
"""
discriminator = keras.Sequential(
[
keras.Input(shape=(64, 64, 3)),
layers.Conv2D(64, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Flatten(),
layers.Dropout(0.2),
layers.Dense(1, activation="sigmoid"),
],
name="discriminator",
)
discriminator.summary()
"""
## Create the generator
It mirrors the discriminator, replacing `Conv2D` layers with `Conv2DTranspose` layers.
"""
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
layers.Dense(8 * 8 * 128),
layers.Reshape((8, 8, 128)),
layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(256, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(512, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(3, kernel_size=5, padding="same", activation="sigmoid"),
],
name="generator",
)
generator.summary()
"""
## Override `train_step`
"""
class GAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super().__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
def compile(self, d_optimizer, g_optimizer, loss_fn):
super().compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
self.d_loss_metric = keras.metrics.Mean(name="d_loss")
self.g_loss_metric = keras.metrics.Mean(name="g_loss")
@property
def metrics(self):
return [self.d_loss_metric, self.g_loss_metric]
def train_step(self, real_images):
# Sample random points in the latent space
batch_size = tf.shape(real_images)[0]
random_latent_vectors = tf.random.normal(
shape=(batch_size, self.latent_dim)
)
# Decode them to fake images
generated_images = self.generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(tf.shape(labels))
# Train the discriminator
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(
shape=(batch_size, self.latent_dim)
)
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = self.discriminator(
self.generator(random_latent_vectors)
)
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(
zip(grads, self.generator.trainable_weights)
)
# Update metrics
self.d_loss_metric.update_state(d_loss)
self.g_loss_metric.update_state(g_loss)
return {
"d_loss": self.d_loss_metric.result(),
"g_loss": self.g_loss_metric.result(),
}
"""
## Create a callback that periodically saves generated images
"""
class GANMonitor(keras.callbacks.Callback):
def __init__(self, num_img=3, latent_dim=128):
self.num_img = num_img
self.latent_dim = latent_dim
def on_epoch_end(self, epoch, logs=None):
random_latent_vectors = tf.random.normal(
shape=(self.num_img, self.latent_dim)
)
generated_images = self.model.generator(random_latent_vectors)
generated_images *= 255
generated_images.numpy()
for i in range(self.num_img):
img = keras.utils.array_to_img(generated_images[i])
img.save("generated_img_%03d_%d.png" % (epoch, i))
"""
## Train the end-to-end model
"""
epochs = 1 # In practice, use ~100 epochs
gan = GAN(
discriminator=discriminator, generator=generator, latent_dim=latent_dim
)
gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
loss_fn=keras.losses.BinaryCrossentropy(),
)
gan.fit(
dataset,
epochs=epochs,
callbacks=[GANMonitor(num_img=10, latent_dim=latent_dim)],
)
"""
Some of the last generated images around epoch 30
(results keep improving after that):

"""
| keras-core/examples/keras_io/tensorflow/generative/dcgan_overriding_train_step.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/generative/dcgan_overriding_train_step.py",
"repo_id": "keras-core",
"token_count": 2914
} | 22 |
"""
Title: Image classification from scratch
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/04/27
Last modified: 2022/11/10
Description: Training an image classifier from scratch on the Kaggle Cats vs Dogs dataset.
Accelerator: GPU
"""
"""
## Introduction
This example shows how to do image classification from scratch, starting from JPEG
image files on disk, without leveraging pre-trained weights or a pre-made Keras
Application model. We demonstrate the workflow on the Kaggle Cats vs Dogs binary
classification dataset.
We use the `image_dataset_from_directory` utility to generate the datasets, and
we use Keras image preprocessing layers for image standardization and data augmentation.
"""
"""
## Setup
"""
import tensorflow as tf
import keras_core as keras
from keras_core import layers
import os
from pathlib import Path
import matplotlib.pyplot as plt
"""
## Load the data: the Cats vs Dogs dataset
### Raw data download
First, let's download the 786M ZIP archive of the raw data:
"""
fpath = keras.utils.get_file(
origin="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip"
)
dirpath = Path(fpath).parent.absolute()
os.system(f"unzip -q {fpath} -d {dirpath}")
"""
Now we have a `PetImages` folder which contain two subfolders, `Cat` and `Dog`. Each
subfolder contains image files for each category.
"""
os.system(f"ls {dirpath}/PetImages/")
"""
### Filter out corrupted images
When working with lots of real-world image data, corrupted images are a common
occurrence. Let's filter out badly-encoded images that do not feature the string "JFIF"
in their header.
"""
num_skipped = 0
for folder_name in ("Cat", "Dog"):
folder_path = os.path.join(dirpath, "PetImages", folder_name)
for fname in os.listdir(folder_path):
fpath = os.path.join(folder_path, fname)
try:
fobj = open(fpath, "rb")
is_jfif = tf.compat.as_bytes("JFIF") in fobj.peek(10)
finally:
fobj.close()
if not is_jfif:
num_skipped += 1
# Delete corrupted image
os.remove(fpath)
print(f"Deleted {num_skipped} images")
"""
## Generate a `Dataset`
"""
image_size = (180, 180)
batch_size = 128
train_ds, val_ds = keras.utils.image_dataset_from_directory(
os.path.join(dirpath, "PetImages"),
validation_split=0.2,
subset="both",
seed=1337,
image_size=image_size,
batch_size=batch_size,
)
"""
## Visualize the data
Here are the first 9 images in the training dataset. As you can see, label 1 is "dog"
and label 0 is "cat".
"""
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(int(labels[i]))
plt.axis("off")
"""
## Using image data augmentation
When you don't have a large image dataset, it's a good practice to artificially
introduce sample diversity by applying random yet realistic transformations to the
training images, such as random horizontal flipping or small random rotations. This
helps expose the model to different aspects of the training data while slowing down
overfitting.
"""
data_augmentation = keras.Sequential(
[
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
]
)
"""
Let's visualize what the augmented samples look like, by applying `data_augmentation`
repeatedly to the first image in the dataset:
"""
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
for i in range(9):
augmented_images = data_augmentation(images)
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")
"""
## Standardizing the data
Our images are already in a standard size (180x180), as they are being yielded as
contiguous `float32` batches by our dataset. However, their RGB channel values are in
the `[0, 255]` range. This is not ideal for a neural network;
in general you should seek to make your input values small. Here, we will
standardize values to be in the `[0, 1]` range by using a `Rescaling` layer at the
start of our model.
"""
"""
## Two options to preprocess the data
There are two ways you could be using the `data_augmentation` preprocessor:
**Option 1: Make it part of the model**, like this:
```python
inputs = keras.Input(shape=input_shape)
x = data_augmentation(inputs)
x = layers.Rescaling(1./255)(x)
... # Rest of the model
```
With this option, your data augmentation will happen *on device*, synchronously
with the rest of the model execution, meaning that it will benefit from GPU
acceleration.
Note that data augmentation is inactive at test time, so the input samples will only be
augmented during `fit()`, not when calling `evaluate()` or `predict()`.
If you're training on GPU, this may be a good option.
**Option 2: apply it to the dataset**, so as to obtain a dataset that yields batches of
augmented images, like this:
```python
augmented_train_ds = train_ds.map(
lambda x, y: (data_augmentation(x, training=True), y))
```
With this option, your data augmentation will happen **on CPU**, asynchronously, and will
be buffered before going into the model.
If you're training on CPU, this is the better option, since it makes data augmentation
asynchronous and non-blocking.
In our case, we'll go with the second option. If you're not sure
which one to pick, this second option (asynchronous preprocessing) is always a solid choice.
"""
"""
## Configure the dataset for performance
Let's apply data augmentation to our training dataset,
and let's make sure to use buffered prefetching so we can yield data from disk without
having I/O becoming blocking:
"""
print("Make datasets")
# Apply `data_augmentation` to the training images.
train_ds = train_ds.map(
lambda img, label: (data_augmentation(img), label),
num_parallel_calls=tf.data.AUTOTUNE,
)
# Prefetching samples in GPU memory helps maximize GPU utilization.
train_ds = train_ds.prefetch(tf.data.AUTOTUNE)
val_ds = val_ds.prefetch(tf.data.AUTOTUNE)
"""
## Build a model
We'll build a small version of the Xception network. We haven't particularly tried to
optimize the architecture; if you want to do a systematic search for the best model
configuration, consider using
[KerasTuner](https://github.com/keras-team/keras-tuner).
Note that:
- We start the model with the `data_augmentation` preprocessor, followed by a
`Rescaling` layer.
- We include a `Dropout` layer before the final classification layer.
"""
def make_model(input_shape, num_classes):
inputs = keras.Input(shape=input_shape)
# Entry block
x = layers.Rescaling(1.0 / 255)(inputs)
x = layers.Conv2D(128, 3, strides=2, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
previous_block_activation = x # Set aside residual
for size in [256, 512, 728]:
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
# Project residual
residual = layers.Conv2D(size, 1, strides=2, padding="same")(
previous_block_activation
)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
x = layers.SeparableConv2D(1024, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.GlobalAveragePooling2D()(x)
if num_classes == 2:
activation = "sigmoid"
units = 1
else:
activation = "softmax"
units = num_classes
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(units, activation=activation)(x)
return keras.Model(inputs, outputs)
model = make_model(input_shape=image_size + (3,), num_classes=2)
keras.utils.plot_model(model, show_shapes=True)
"""
## Train the model
"""
epochs = 25
callbacks = [
keras.callbacks.ModelCheckpoint("save_at_{epoch}.keras"),
]
model.compile(
optimizer=keras.optimizers.Adam(1e-3),
loss="binary_crossentropy",
metrics=["accuracy"],
)
model.fit(
train_ds,
epochs=epochs,
callbacks=callbacks,
validation_data=val_ds,
)
"""
We get to >90% validation accuracy after training for 25 epochs on the full dataset
(in practice, you can train for 50+ epochs before validation performance starts degrading).
"""
"""
## Run inference on new data
Note that data augmentation and dropout are inactive at inference time.
"""
img = keras.utils.load_img(
f"{dirpath}/PetImages/Cat/6779.jpg", target_size=image_size
)
img_array = keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create batch axis
predictions = model.predict(img_array)
score = float(predictions[0])
print(f"This image is {100 * (1 - score):.2f}% cat and {100 * score:.2f}% dog.")
| keras-core/examples/keras_io/tensorflow/vision/image_classification_from_scratch.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/vision/image_classification_from_scratch.py",
"repo_id": "keras-core",
"token_count": 3248
} | 23 |
"""
Title: Timeseries classification from scratch
Author: [hfawaz](https://github.com/hfawaz/)
Date created: 2020/07/21
Last modified: 2021/07/16
Description: Training a timeseries classifier from scratch on the FordA dataset from the UCR/UEA archive.
Accelerator: GPU
"""
"""
## Introduction
This example shows how to do timeseries classification from scratch, starting from raw
CSV timeseries files on disk. We demonstrate the workflow on the FordA dataset from the
[UCR/UEA archive](https://www.cs.ucr.edu/%7Eeamonn/time_series_data_2018/).
"""
"""
## Setup
"""
import keras_core as keras
import numpy as np
import matplotlib.pyplot as plt
"""
## Load the data: the FordA dataset
### Dataset description
The dataset we are using here is called FordA.
The data comes from the UCR archive.
The dataset contains 3601 training instances and another 1320 testing instances.
Each timeseries corresponds to a measurement of engine noise captured by a motor sensor.
For this task, the goal is to automatically detect the presence of a specific issue with
the engine. The problem is a balanced binary classification task. The full description of
this dataset can be found [here](http://www.j-wichard.de/publications/FordPaper.pdf).
### Read the TSV data
We will use the `FordA_TRAIN` file for training and the
`FordA_TEST` file for testing. The simplicity of this dataset
allows us to demonstrate effectively how to use ConvNets for timeseries classification.
In this file, the first column corresponds to the label.
"""
def readucr(filename):
data = np.loadtxt(filename, delimiter="\t")
y = data[:, 0]
x = data[:, 1:]
return x, y.astype(int)
root_url = "https://raw.githubusercontent.com/hfawaz/cd-diagram/master/FordA/"
x_train, y_train = readucr(root_url + "FordA_TRAIN.tsv")
x_test, y_test = readucr(root_url + "FordA_TEST.tsv")
"""
## Visualize the data
Here we visualize one timeseries example for each class in the dataset.
"""
classes = np.unique(np.concatenate((y_train, y_test), axis=0))
plt.figure()
for c in classes:
c_x_train = x_train[y_train == c]
plt.plot(c_x_train[0], label="class " + str(c))
plt.legend(loc="best")
plt.show()
plt.close()
"""
## Standardize the data
Our timeseries already have a single length (500). However, their values are
usually in various ranges. This is not ideal for a neural network;
in general we should seek to make the input values normalized.
For this specific dataset, the data is already z-normalized: each timeseries sample
has a mean equal to zero and a standard deviation equal to one. This type of
normalization is very common for timeseries classification problems, see
[Bagnall et al. (2016)](https://link.springer.com/article/10.1007/s10618-016-0483-9).
Note that the timeseries data used here are univariate, meaning we only have one channel
per timeseries example.
We will therefore transform the timeseries into a multivariate one with one channel
using a simple reshaping via numpy.
This will allow us to construct a model that is easily applicable to multivariate time
series.
"""
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
"""
Finally, in order to use `sparse_categorical_crossentropy`, we will have to count
the number of classes beforehand.
"""
num_classes = len(np.unique(y_train))
"""
Now we shuffle the training set because we will be using the `validation_split` option
later when training.
"""
idx = np.random.permutation(len(x_train))
x_train = x_train[idx]
y_train = y_train[idx]
"""
Standardize the labels to positive integers.
The expected labels will then be 0 and 1.
"""
y_train[y_train == -1] = 0
y_test[y_test == -1] = 0
"""
## Build a model
We build a Fully Convolutional Neural Network originally proposed in
[this paper](https://arxiv.org/abs/1611.06455).
The implementation is based on the TF 2 version provided
[here](https://github.com/hfawaz/dl-4-tsc/).
The following hyperparameters (kernel_size, filters, the usage of BatchNorm) were found
via random search using [KerasTuner](https://github.com/keras-team/keras-tuner).
"""
def make_model(input_shape):
input_layer = keras.layers.Input(input_shape)
conv1 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(
input_layer
)
conv1 = keras.layers.BatchNormalization()(conv1)
conv1 = keras.layers.ReLU()(conv1)
conv2 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(
conv1
)
conv2 = keras.layers.BatchNormalization()(conv2)
conv2 = keras.layers.ReLU()(conv2)
conv3 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(
conv2
)
conv3 = keras.layers.BatchNormalization()(conv3)
conv3 = keras.layers.ReLU()(conv3)
gap = keras.layers.GlobalAveragePooling1D()(conv3)
output_layer = keras.layers.Dense(num_classes, activation="softmax")(gap)
return keras.models.Model(inputs=input_layer, outputs=output_layer)
model = make_model(input_shape=x_train.shape[1:])
keras.utils.plot_model(model, show_shapes=True)
"""
## Train the model
"""
epochs = 500
batch_size = 32
callbacks = [
keras.callbacks.ModelCheckpoint(
"best_model.keras", save_best_only=True, monitor="val_loss"
),
keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=20, min_lr=0.0001
),
keras.callbacks.EarlyStopping(monitor="val_loss", patience=50, verbose=1),
]
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
history = model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
validation_split=0.2,
verbose=1,
)
"""
## Evaluate model on test data
"""
model = keras.models.load_model("best_model.keras")
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Test accuracy", test_acc)
print("Test loss", test_loss)
"""
## Plot the model's training and validation loss
"""
metric = "sparse_categorical_accuracy"
plt.figure()
plt.plot(history.history[metric])
plt.plot(history.history["val_" + metric])
plt.title("model " + metric)
plt.ylabel(metric, fontsize="large")
plt.xlabel("epoch", fontsize="large")
plt.legend(["train", "val"], loc="best")
plt.show()
plt.close()
"""
We can see how the training accuracy reaches almost 0.95 after 100 epochs.
However, by observing the validation accuracy we can see how the network still needs
training until it reaches almost 0.97 for both the validation and the training accuracy
after 200 epochs. Beyond the 200th epoch, if we continue on training, the validation
accuracy will start decreasing while the training accuracy will continue on increasing:
the model starts overfitting.
"""
| keras-core/examples/keras_io/timeseries/timeseries_classification_from_scratch.py/0 | {
"file_path": "keras-core/examples/keras_io/timeseries/timeseries_classification_from_scratch.py",
"repo_id": "keras-core",
"token_count": 2314
} | 24 |
"""
Title: Object detection with Vision Transformers
Author: [Karan V. Dave](https://www.linkedin.com/in/karan-dave-811413164/)
Converted to Keras Core by: [Gabriel Rasskin](https://github.com/grasskin), [Soumik Rakshit](http://github.com/soumik12345)
Date created: 2022/03/27
Last modified: 2022/03/27
Description: A simple Keras implementation of object detection using Vision Transformers.
Accelerator: GPU
"""
"""
## Introduction
The article
[Vision Transformer (ViT)](https://arxiv.org/abs/2010.11929)
architecture by Alexey Dosovitskiy et al.
demonstrates that a pure transformer applied directly to sequences of image
patches can perform well on object detection tasks.
In this Keras example, we implement an object detection ViT
and we train it on the
[Caltech 101 dataset](http://www.vision.caltech.edu/datasets/)
to detect an airplane in the given image.
This example uses Keras Core, which provides the `AdamW` optimizer.
Keras Core can be installed via the following command:
```
pip install -U git+https://github.com/keras-team/keras-core
```
"""
"""
## Imports and setup
"""
import os
os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"]
import numpy as np
import keras_core as keras
from keras_core import layers
from keras_core import ops
import matplotlib.pyplot as plt
import cv2
import scipy.io
import shutil
"""
## Prepare dataset
We use the [Caltech 101 Dataset](https://data.caltech.edu/records/mzrjq-6wc02).
"""
# Path to images and annotations
path_images = "/101_ObjectCategories/airplanes/"
path_annot = "/Annotations/Airplanes_Side_2/"
path_to_downloaded_file = keras.utils.get_file(
fname="caltech_101_zipped",
origin="https://data.caltech.edu/records/mzrjq-6wc02/files/caltech-101.zip",
extract=True,
archive_format="zip", # downloaded file format
cache_dir="/", # cache and extract in current directory
)
# Extracting tar files found inside main zip file
shutil.unpack_archive("/datasets/caltech-101/101_ObjectCategories.tar.gz", "/")
shutil.unpack_archive("/datasets/caltech-101/Annotations.tar", "/")
# list of paths to images and annotations
image_paths = [
f for f in os.listdir(path_images) if os.path.isfile(os.path.join(path_images, f))
]
annot_paths = [
f for f in os.listdir(path_annot) if os.path.isfile(os.path.join(path_annot, f))
]
image_paths.sort()
annot_paths.sort()
image_size = 224 # resize input images to this size
images, targets = [], []
# loop over the annotations and images, preprocess them and store in lists
for i in range(0, len(annot_paths)):
# Access bounding box coordinates
annot = scipy.io.loadmat(path_annot + annot_paths[i])["box_coord"][0]
top_left_x, top_left_y = annot[2], annot[0]
bottom_right_x, bottom_right_y = annot[3], annot[1]
image = keras.utils.load_img(
path_images + image_paths[i],
)
(w, h) = image.size[:2]
# resize train set images
if i < int(len(annot_paths) * 0.8):
# resize image if it is for training dataset
image = image.resize((image_size, image_size))
# convert image to array and append to list
images.append(keras.utils.img_to_array(image))
# apply relative scaling to bounding boxes as per given image and append to list
targets.append(
(
float(top_left_x) / w,
float(top_left_y) / h,
float(bottom_right_x) / w,
float(bottom_right_y) / h,
)
)
# Convert the list to numpy array, split to train and test dataset
(x_train), (y_train) = (
np.asarray(images[: int(len(images) * 0.8)]),
np.asarray(targets[: int(len(targets) * 0.8)]),
)
(x_test), (y_test) = (
np.asarray(images[int(len(images) * 0.8) :]),
np.asarray(targets[int(len(targets) * 0.8) :]),
)
"""
## Implement multilayer-perceptron (MLP)
We use the code from the Keras example
[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/)
as a reference.
"""
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=keras.activations.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
"""
## Implement the patch creation layer
"""
class Patches(layers.Layer):
def __init__(self, patch_size):
super().__init__()
self.patch_size = patch_size
def call(self, images):
input_shape = ops.shape(images)
batch_size = input_shape[0]
height = input_shape[1]
width = input_shape[2]
channels = input_shape[3]
num_patches_h = height // self.patch_size
num_patches_w = width // self.patch_size
patches = keras.ops.image.extract_patches(images, size=self.patch_size)
patches = ops.reshape(
patches,
(
batch_size,
num_patches_h * num_patches_w,
self.patch_size * self.patch_size * channels,
),
)
return patches
def get_config(self):
config = super().get_config()
config.update({"patch_size": self.patch_size})
return config
"""
## Display patches for an input image
"""
patch_size = 32 # Size of the patches to be extracted from the input images
plt.figure(figsize=(4, 4))
plt.imshow(x_train[0].astype("uint8"))
plt.axis("off")
patches = Patches(patch_size)(np.expand_dims(x_train[0], axis=0))
print(f"Image size: {image_size} X {image_size}")
print(f"Patch size: {patch_size} X {patch_size}")
print(f"{patches.shape[1]} patches per image \n{patches.shape[-1]} elements per patch")
n = int(np.sqrt(patches.shape[1]))
plt.figure(figsize=(4, 4))
for i, patch in enumerate(patches[0]):
ax = plt.subplot(n, n, i + 1)
patch_img = ops.reshape(patch, (patch_size, patch_size, 3))
plt.imshow(ops.convert_to_numpy(patch_img).astype("uint8"))
plt.axis("off")
"""
## Implement the patch encoding layer
The `PatchEncoder` layer linearly transforms a patch by projecting it into a
vector of size `projection_dim`. It also adds a learnable position
embedding to the projected vector.
"""
class PatchEncoder(layers.Layer):
def __init__(self, num_patches, projection_dim):
super().__init__()
self.num_patches = num_patches
self.projection = layers.Dense(units=projection_dim)
self.position_embedding = layers.Embedding(
input_dim=num_patches, output_dim=projection_dim
)
# Override function to avoid error while saving model
def get_config(self):
config = super().get_config().copy()
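        # Note: the values below are read from module-level globals defined
        # later in this script, so `get_config` only works once they exist.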
config.update(
{
"input_shape": input_shape,
"patch_size": patch_size,
"num_patches": num_patches,
"projection_dim": projection_dim,
"num_heads": num_heads,
"transformer_units": transformer_units,
"transformer_layers": transformer_layers,
"mlp_head_units": mlp_head_units,
}
)
return config
def call(self, patch):
positions = ops.expand_dims(
ops.arange(start=0, stop=self.num_patches, step=1), axis=0
)
projected_patches = self.projection(patch)
encoded = projected_patches + self.position_embedding(positions)
return encoded
"""
## Build the ViT model
The ViT model has multiple Transformer blocks.
The `MultiHeadAttention` layer is used for self-attention,
applied to the sequence of image patches. The encoded patches (skip connection)
and self-attention layer outputs are normalized and fed into a
multilayer perceptron (MLP).
The model outputs four dimensions representing
the bounding box coordinates of an object.
"""
def create_vit_object_detector(
input_shape,
patch_size,
num_patches,
projection_dim,
num_heads,
transformer_units,
transformer_layers,
mlp_head_units,
):
inputs = keras.Input(shape=input_shape)
# Create patches
patches = Patches(patch_size)(inputs)
# Encode patches
encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
# Create multiple layers of the Transformer block.
for _ in range(transformer_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=projection_dim, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
# MLP
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
# Skip connection 2.
encoded_patches = layers.Add()([x3, x2])
# Create a [batch_size, projection_dim] tensor.
representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = layers.Flatten()(representation)
representation = layers.Dropout(0.3)(representation)
# Add MLP.
features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.3)
bounding_box = layers.Dense(4)(
features
) # Final four neurons that output bounding box
# return Keras model.
return keras.Model(inputs=inputs, outputs=bounding_box)
"""
## Run the experiment
"""
def run_experiment(model, learning_rate, weight_decay, batch_size, num_epochs):
optimizer = keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
)
# Compile model.
model.compile(optimizer=optimizer, loss=keras.losses.MeanSquaredError())
checkpoint_filepath = "vit_object_detector.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[
checkpoint_callback,
keras.callbacks.EarlyStopping(monitor="val_loss", patience=10),
],
)
return history
input_shape = (image_size, image_size, 3) # input image shape
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 32
num_epochs = 15
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
# Size of the transformer layers
transformer_units = [
projection_dim * 2,
projection_dim,
]
transformer_layers = 4
mlp_head_units = [2048, 1024, 512, 64, 32] # Size of the dense layers
vit_object_detector = create_vit_object_detector(
input_shape,
patch_size,
num_patches,
projection_dim,
num_heads,
transformer_units,
transformer_layers,
mlp_head_units,
)
# Train model
history = run_experiment(
vit_object_detector, learning_rate, weight_decay, batch_size, num_epochs
)
def plot_history(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_history("loss")
"""
## Evaluate the model
"""
import matplotlib.patches as patches
# Save the model in the current path
vit_object_detector.save("vit_object_detector.keras")
# To calculate IoU (intersection over union, given two bounding boxes)
def bounding_box_intersection_over_union(box_predicted, box_truth):
# get (x, y) coordinates of intersection of bounding boxes
top_x_intersect = max(box_predicted[0], box_truth[0])
top_y_intersect = max(box_predicted[1], box_truth[1])
bottom_x_intersect = min(box_predicted[2], box_truth[2])
bottom_y_intersect = min(box_predicted[3], box_truth[3])
# calculate area of the intersection bb (bounding box)
intersection_area = max(0, bottom_x_intersect - top_x_intersect + 1) * max(
0, bottom_y_intersect - top_y_intersect + 1
)
# calculate area of the prediction bb and ground-truth bb
box_predicted_area = (box_predicted[2] - box_predicted[0] + 1) * (
box_predicted[3] - box_predicted[1] + 1
)
box_truth_area = (box_truth[2] - box_truth[0] + 1) * (
box_truth[3] - box_truth[1] + 1
)
    # calculate intersection over union by taking the intersection
    # area and dividing it by the sum of the predicted bb and
    # ground-truth bb areas, minus the intersection area
    # return IoU
return intersection_area / float(
box_predicted_area + box_truth_area - intersection_area
)
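"""
As a quick sanity check (not part of the original example), identical boxes
should give an IoU of 1.0 and disjoint boxes an IoU of 0.0:
"""
assert bounding_box_intersection_over_union([0, 0, 9, 9], [0, 0, 9, 9]) == 1.0
assert bounding_box_intersection_over_union([0, 0, 4, 4], [50, 50, 60, 60]) == 0.0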
i, mean_iou = 0, 0
# Compare results for 10 images in the test set
for input_image in x_test[:10]:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 15))
im = input_image
# Display the image
ax1.imshow(im.astype("uint8"))
ax2.imshow(im.astype("uint8"))
input_image = cv2.resize(
input_image, (image_size, image_size), interpolation=cv2.INTER_AREA
)
input_image = np.expand_dims(input_image, axis=0)
preds = vit_object_detector.predict(input_image)[0]
    (h, w) = im.shape[0:2]
top_left_x, top_left_y = int(preds[0] * w), int(preds[1] * h)
bottom_right_x, bottom_right_y = int(preds[2] * w), int(preds[3] * h)
box_predicted = [top_left_x, top_left_y, bottom_right_x, bottom_right_y]
# Create the bounding box
rect = patches.Rectangle(
(top_left_x, top_left_y),
bottom_right_x - top_left_x,
bottom_right_y - top_left_y,
facecolor="none",
edgecolor="red",
linewidth=1,
)
# Add the bounding box to the image
ax1.add_patch(rect)
ax1.set_xlabel(
"Predicted: "
+ str(top_left_x)
+ ", "
+ str(top_left_y)
+ ", "
+ str(bottom_right_x)
+ ", "
+ str(bottom_right_y)
)
top_left_x, top_left_y = int(y_test[i][0] * w), int(y_test[i][1] * h)
bottom_right_x, bottom_right_y = int(y_test[i][2] * w), int(y_test[i][3] * h)
box_truth = top_left_x, top_left_y, bottom_right_x, bottom_right_y
mean_iou += bounding_box_intersection_over_union(box_predicted, box_truth)
# Create the bounding box
rect = patches.Rectangle(
(top_left_x, top_left_y),
bottom_right_x - top_left_x,
bottom_right_y - top_left_y,
facecolor="none",
edgecolor="red",
linewidth=1,
)
# Add the bounding box to the image
ax2.add_patch(rect)
ax2.set_xlabel(
"Target: "
+ str(top_left_x)
+ ", "
+ str(top_left_y)
+ ", "
+ str(bottom_right_x)
+ ", "
+ str(bottom_right_y)
+ "\n"
+ "IoU"
+ str(bounding_box_intersection_over_union(box_predicted, box_truth))
)
i = i + 1
print("mean_iou: " + str(mean_iou / len(x_test[:10])))
plt.show()
"""
This example demonstrates that a pure Transformer can be trained
to predict the bounding boxes of an object in a given image,
thus extending the use of Transformers to object detection tasks.
The model can be improved further by tuning hyper-parameters and pre-training.
""" | keras-core/examples/keras_io/vision/object_detection_using_vision_transformer.py/0 | {
"file_path": "keras-core/examples/keras_io/vision/object_detection_using_vision_transformer.py",
"repo_id": "keras-core",
"token_count": 6402
} | 25 |
"""
Title: Understanding masking & padding
Authors: Scott Zhu, Francois Chollet
Date created: 2019/07/16
Last modified: 2023/06/25
Description: Complete guide to using mask-aware sequence layers in Keras.
Accelerator: None
"""
"""
## Setup
"""
import numpy as np
import keras_core as keras
from keras_core import ops
from keras_core import layers
"""
## Introduction
**Masking** is a way to tell sequence-processing layers that certain timesteps
in an input are missing, and thus should be skipped when processing the data.
**Padding** is a special form of masking where the masked steps are at the start or
the end of a sequence. Padding comes from the need to encode sequence data into
contiguous batches: in order to make all sequences in a batch fit a given standard
length, it is necessary to pad or truncate some sequences.
Let's take a close look.
"""
"""
## Padding sequence data
When processing sequence data, it is very common for individual samples to have
different lengths. Consider the following example (text tokenized as words):
```
[
["Hello", "world", "!"],
["How", "are", "you", "doing", "today"],
["The", "weather", "will", "be", "nice", "tomorrow"],
]
```
After vocabulary lookup, the data might be vectorized as integers, e.g.:
```
[
  [71, 1331, 4231],
[73, 8, 3215, 55, 927],
[83, 91, 1, 645, 1253, 927],
]
```
The data is a nested list where individual samples have length 3, 5, and 6,
respectively. Since the input data for a deep learning model must be a single tensor
(of shape e.g. `(batch_size, 6, vocab_size)` in this case), samples that are shorter
than the longest item need to be padded with some placeholder value (alternatively,
one might also truncate long samples before padding short samples).
Keras provides a utility function to truncate and pad Python lists to a common length:
`keras.utils.pad_sequences`.
"""
raw_inputs = [
[711, 632, 71],
[73, 8, 3215, 55, 927],
[83, 91, 1, 645, 1253, 927],
]
# By default, this will pad using 0s; it is configurable via the
# "value" parameter.
# Note that you could use "pre" padding (at the beginning) or
# "post" padding (at the end).
# We recommend using "post" padding when working with RNN layers
# (in order to be able to use the
# CuDNN implementation of the layers).
padded_inputs = keras.utils.pad_sequences(raw_inputs, padding="post")
print(padded_inputs)
"""
## Masking
Now that all samples have a uniform length, the model must be informed that some part
of the data is actually padding and should be ignored. That mechanism is **masking**.
There are three ways to introduce input masks in Keras models:
- Add a `keras.layers.Masking` layer.
- Configure a `keras.layers.Embedding` layer with `mask_zero=True`.
- Pass a `mask` argument manually when calling layers that support this argument (e.g.
RNN layers).
"""
"""
## Mask-generating layers: `Embedding` and `Masking`
Under the hood, these layers will create a mask tensor (2D tensor with shape `(batch,
sequence_length)`), and attach it to the tensor output returned by the `Masking` or
`Embedding` layer.
"""
embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
masked_output = embedding(padded_inputs)
print(masked_output._keras_mask)
masking_layer = layers.Masking()
# Simulate the embedding lookup by expanding the 2D input to 3D,
# with embedding dimension of 10.
unmasked_embedding = ops.cast(
ops.tile(ops.expand_dims(padded_inputs, axis=-1), [1, 1, 10]),
dtype="float32",
)
masked_embedding = masking_layer(unmasked_embedding)
print(masked_embedding._keras_mask)
"""
As you can see from the printed result, the mask is a 2D boolean tensor with shape
`(batch_size, sequence_length)`, where each individual `False` entry indicates that
the corresponding timestep should be ignored during processing.
"""
"""
## Mask propagation in the Functional API and Sequential API
When using the Functional API or the Sequential API, a mask generated by an `Embedding`
or `Masking` layer will be propagated through the network for any layer that is
capable of using it (for example, RNN layers). Keras will automatically fetch the
mask corresponding to an input and pass it to any layer that knows how to use it.
For instance, in the following Sequential model, the `LSTM` layer will automatically
receive a mask, which means it will ignore padded values:
"""
model = keras.Sequential(
[
layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True),
layers.LSTM(32),
]
)
"""
This is also the case for the following Functional API model:
"""
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
outputs = layers.LSTM(32)(x)
model = keras.Model(inputs, outputs)
"""
## Passing mask tensors directly to layers
"""
"""
Layers that can handle masks (such as the `LSTM` layer) have a `mask` argument in their
`__call__` method.
Meanwhile, layers that produce a mask (e.g. `Embedding`) expose a `compute_mask(input,
previous_mask)` method which you can call.
Thus, you can pass the output of the `compute_mask()` method of a mask-producing layer
to the `__call__` method of a mask-consuming layer, like this:
"""
class MyLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.embedding = layers.Embedding(
input_dim=5000, output_dim=16, mask_zero=True
)
self.lstm = layers.LSTM(32)
def call(self, inputs):
x = self.embedding(inputs)
# Note that you could also prepare a `mask` tensor manually.
# It only needs to be a boolean tensor
# with the right shape, i.e. (batch_size, timesteps).
mask = self.embedding.compute_mask(inputs)
output = self.lstm(
x, mask=mask
) # The layer will ignore the masked values
return output
layer = MyLayer()
x = np.random.random((32, 10)) * 100
x = x.astype("int32")
layer(x)
"""
## Supporting masking in your custom layers
"""
"""
Sometimes, you may need to write layers that generate a mask (like `Embedding`), or
layers that need to modify the current mask.
For instance, any layer that produces a tensor with a different time dimension than its
input, such as a `Concatenate` layer that concatenates on the time dimension, will
need to modify the current mask so that downstream layers will be able to properly
take masked timesteps into account.
To do this, your layer should implement the `layer.compute_mask()` method, which
produces a new mask given the input and the current mask.
Here is an example of a `TemporalSplit` layer that needs to modify the current mask.
"""
class TemporalSplit(keras.layers.Layer):
"""Split the input tensor into 2 tensors along the time dimension."""
def call(self, inputs):
        # Expect the input to be 3D and the mask to be 2D; split the input
        # tensor into 2 subtensors along the time axis (axis 1).
return ops.split(inputs, 2, axis=1)
def compute_mask(self, inputs, mask=None):
        # Also split the mask into 2 if it is present.
if mask is None:
return None
return ops.split(mask, 2, axis=1)
first_half, second_half = TemporalSplit()(masked_embedding)
print(first_half._keras_mask)
print(second_half._keras_mask)
"""
Here is another example of a `CustomEmbedding` layer that is capable of generating a
mask from input values:
"""
class CustomEmbedding(keras.layers.Layer):
def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs):
super().__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.mask_zero = mask_zero
def build(self, input_shape):
self.embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
initializer="random_normal",
dtype="float32",
)
def call(self, inputs):
inputs = ops.cast(inputs, "int32")
return ops.take(self.embeddings, inputs)
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return ops.not_equal(inputs, 0)
layer = CustomEmbedding(10, 32, mask_zero=True)
x = np.random.random((3, 10)) * 9
x = x.astype("int32")
y = layer(x)
mask = layer.compute_mask(x)
print(mask)
"""
Note: For more details about format limitations related to masking, see the
[serialization guide](/guides/serialization_and_saving).
"""
"""
## Opting-in to mask propagation on compatible layers
Most layers don't modify the time dimension, so they don't need to modify the current mask.
However, they may still want to be able to **propagate** the current mask, unchanged,
to the next layer. **This is an opt-in behavior.** By default, a custom layer will
destroy the current mask (since the framework has no way to tell whether propagating
the mask is safe to do).
If you have a custom layer that does not modify the time dimension, and if you want it
to be able to propagate the current input mask, you should set `self.supports_masking
= True` in the layer constructor. In this case, the default behavior of
`compute_mask()` is to just pass the current mask through.
Here's an example of a layer that is whitelisted for mask propagation:
"""
class MyActivation(keras.layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Signal that the layer is safe for mask propagation
self.supports_masking = True
def call(self, inputs):
return ops.relu(inputs)
"""
You can now use this custom layer in-between a mask-generating layer (like `Embedding`)
and a mask-consuming layer (like `LSTM`), and it will pass the mask along so that it
reaches the mask-consuming layer.
"""
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
x = MyActivation()(x) # Will pass the mask along
print("Mask found:", x._keras_mask)
outputs = layers.LSTM(32)(x) # Will receive the mask
model = keras.Model(inputs, outputs)
y = model(np.random.randint(0, 5000, size=(32, 100)))
"""
## Writing layers that need mask information
Some layers are mask *consumers*: they accept a `mask` argument in `call` and use it to
determine whether to skip certain time steps.
To write such a layer, you can simply add a `mask=None` argument in your `call`
signature. The mask associated with the inputs will be passed to your layer whenever
it is available.
Here's a simple example: a layer that computes a softmax over the time dimension
(axis 1) of an input sequence, while discarding masked timesteps.
"""
class TemporalSoftmax(keras.layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def call(self, inputs, mask=None):
assert mask is not None
broadcast_float_mask = ops.expand_dims(ops.cast(mask, "float32"), -1)
inputs_exp = ops.exp(inputs) * broadcast_float_mask
        # Normalize over the time axis (axis 1); masked steps contribute 0.
        inputs_sum = ops.sum(inputs_exp, axis=1, keepdims=True)
return inputs_exp / inputs_sum
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=10, output_dim=32, mask_zero=True)(inputs)
x = layers.Dense(1)(x)
outputs = TemporalSoftmax()(x)
model = keras.Model(inputs, outputs)
y = model(np.random.randint(0, 10, size=(32, 100)))
"""
## Summary
That is all you need to know about padding & masking in Keras. To recap:
- "Masking" is how layers are able to know when to skip / ignore certain timesteps in
sequence inputs.
- Some layers are mask-generators: `Embedding` can generate a mask from input values
(if `mask_zero=True`), and so can the `Masking` layer.
- Some layers are mask-consumers: they expose a `mask` argument in their `__call__`
method. This is the case for RNN layers.
- In the Functional API and Sequential API, mask information is propagated
automatically.
- When using layers in a standalone way, you can pass the `mask` arguments to layers
manually.
- You can easily write layers that modify the current mask, that generate a new mask,
or that consume the mask associated with the inputs.
"""
| keras-core/guides/understanding_masking_and_padding.py/0 | {
"file_path": "keras-core/guides/understanding_masking_and_padding.py",
"repo_id": "keras-core",
"token_count": 4064
} | 26 |
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import backend
from keras_core import testing
from keras_core.applications import convnext
from keras_core.applications import densenet
from keras_core.applications import efficientnet
from keras_core.applications import efficientnet_v2
from keras_core.applications import inception_resnet_v2
from keras_core.applications import inception_v3
from keras_core.applications import mobilenet
from keras_core.applications import mobilenet_v2
from keras_core.applications import mobilenet_v3
from keras_core.applications import nasnet
from keras_core.applications import resnet
from keras_core.applications import resnet_v2
from keras_core.applications import vgg16
from keras_core.applications import vgg19
from keras_core.applications import xception
from keras_core.saving import serialization_lib
from keras_core.utils import file_utils
from keras_core.utils import image_utils
try:
import PIL
except ImportError:
PIL = None
MODEL_LIST = [
# vgg
(vgg16.VGG16, 512, vgg16),
(vgg19.VGG19, 512, vgg19),
# xception
(xception.Xception, 2048, xception),
# inception
(inception_v3.InceptionV3, 2048, inception_v3),
(inception_resnet_v2.InceptionResNetV2, 1536, inception_resnet_v2),
# mobilenet
(mobilenet.MobileNet, 1024, mobilenet),
(mobilenet_v2.MobileNetV2, 1280, mobilenet_v2),
(mobilenet_v3.MobileNetV3Small, 576, mobilenet_v3),
(mobilenet_v3.MobileNetV3Large, 960, mobilenet_v3),
# efficientnet
(efficientnet.EfficientNetB0, 1280, efficientnet),
(efficientnet.EfficientNetB1, 1280, efficientnet),
(efficientnet.EfficientNetB2, 1408, efficientnet),
(efficientnet.EfficientNetB3, 1536, efficientnet),
(efficientnet.EfficientNetB4, 1792, efficientnet),
(efficientnet.EfficientNetB5, 2048, efficientnet),
(efficientnet.EfficientNetB6, 2304, efficientnet),
(efficientnet.EfficientNetB7, 2560, efficientnet),
(efficientnet_v2.EfficientNetV2B0, 1280, efficientnet_v2),
(efficientnet_v2.EfficientNetV2B1, 1280, efficientnet_v2),
(efficientnet_v2.EfficientNetV2B2, 1408, efficientnet_v2),
(efficientnet_v2.EfficientNetV2B3, 1536, efficientnet_v2),
(efficientnet_v2.EfficientNetV2S, 1280, efficientnet_v2),
(efficientnet_v2.EfficientNetV2M, 1280, efficientnet_v2),
(efficientnet_v2.EfficientNetV2L, 1280, efficientnet_v2),
# densenet
(densenet.DenseNet121, 1024, densenet),
(densenet.DenseNet169, 1664, densenet),
(densenet.DenseNet201, 1920, densenet),
# convnext
(convnext.ConvNeXtTiny, 768, convnext),
(convnext.ConvNeXtSmall, 768, convnext),
(convnext.ConvNeXtBase, 1024, convnext),
(convnext.ConvNeXtLarge, 1536, convnext),
(convnext.ConvNeXtXLarge, 2048, convnext),
# nasnet
(nasnet.NASNetMobile, 1056, nasnet),
(nasnet.NASNetLarge, 4032, nasnet),
# resnet
(resnet.ResNet50, 2048, resnet),
(resnet.ResNet101, 2048, resnet),
(resnet.ResNet152, 2048, resnet),
(resnet_v2.ResNet50V2, 2048, resnet_v2),
(resnet_v2.ResNet101V2, 2048, resnet_v2),
(resnet_v2.ResNet152V2, 2048, resnet_v2),
]
# Add names for `named_parameters`.
MODEL_LIST = [(e[0].__name__, *e) for e in MODEL_LIST]
def _get_elephant(target_size):
# For models that don't include a Flatten step,
# the default is to accept variable-size inputs
# even when loading ImageNet weights (since it is possible).
# In this case, default to 299x299.
TEST_IMAGE_PATH = (
"https://storage.googleapis.com/tensorflow/"
"keras-applications/tests/elephant.jpg"
)
if target_size[0] is None:
target_size = (299, 299)
test_image = file_utils.get_file("elephant.jpg", TEST_IMAGE_PATH)
img = image_utils.load_img(test_image, target_size=tuple(target_size))
x = image_utils.img_to_array(img)
return np.expand_dims(x, axis=0)
@pytest.mark.skipif(
os.environ.get("SKIP_APPLICATIONS_TESTS"),
reason="Env variable set to skip.",
)
@pytest.mark.requires_trainable_backend
class ApplicationsTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(MODEL_LIST)
def test_application_notop_variable_input_channels(self, app, last_dim, _):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
# Test compatibility with 1 channel
if backend.image_data_format() == "channels_first":
input_shape = (1, None, None)
else:
input_shape = (None, None, 1)
model = app(weights=None, include_top=False, input_shape=input_shape)
output_shape = list(model.outputs[0].shape)
self.assertEqual(output_shape, [None, None, None, last_dim])
# Test compatibility with 4 channels
if backend.image_data_format() == "channels_first":
input_shape = (4, None, None)
else:
input_shape = (None, None, 4)
model = app(weights=None, include_top=False, input_shape=input_shape)
output_shape = list(model.outputs[0].shape)
self.assertEqual(output_shape, [None, None, None, last_dim])
@parameterized.named_parameters(MODEL_LIST)
@pytest.mark.skipif(PIL is None, reason="Requires PIL.")
def test_application_base(self, app, _, app_module):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
# Can be instantiated with default arguments
model = app(weights="imagenet")
# Can run a correct inference on a test image
x = _get_elephant(model.input_shape[1:3])
x = app_module.preprocess_input(x)
preds = model.predict(x)
names = [p[1] for p in app_module.decode_predictions(preds)[0]]
# Test correct label is in top 3 (weak correctness test).
self.assertIn("African_elephant", names[:3])
# Can be serialized and deserialized
config = serialization_lib.serialize_keras_object(model)
reconstructed_model = serialization_lib.deserialize_keras_object(config)
self.assertEqual(len(model.weights), len(reconstructed_model.weights))
@parameterized.named_parameters(MODEL_LIST)
def test_application_notop_custom_input_shape(self, app, last_dim, _):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
model = app(weights=None, include_top=False, input_shape=(123, 123, 3))
output_shape = list(model.outputs[0].shape)
self.assertEqual(output_shape[-1], last_dim)
@parameterized.named_parameters(MODEL_LIST)
def test_application_pooling(self, app, last_dim, _):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
model = app(weights=None, include_top=False, pooling="max")
output_shape = list(model.outputs[0].shape)
self.assertEqual(output_shape, [None, last_dim])
@parameterized.named_parameters(MODEL_LIST)
def test_application_classifier_activation(self, app, *_):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
model = app(
weights=None, include_top=True, classifier_activation="softmax"
)
last_layer_act = model.layers[-1].activation.__name__
self.assertEqual(last_layer_act, "softmax")
| keras-core/keras_core/applications/applications_test.py/0 | {
"file_path": "keras-core/keras_core/applications/applications_test.py",
"repo_id": "keras-core",
"token_count": 3224
} | 27 |
import numpy as np
from keras_core import backend
from keras_core import initializers
from keras_core.backend.common.variables import AutocastScope
from keras_core.backend.common.variables import KerasVariable
from keras_core.backend.common.variables import standardize_shape
from keras_core.testing import test_case
class VariablesTest(test_case.TestCase):
def test_deferred_initialization(self):
with backend.StatelessScope():
v = backend.Variable(
initializer=initializers.RandomNormal(), shape=(2, 2)
)
self.assertEqual(v._value, None)
# Variables can nevertheless be accessed
_ = v + 1
self.assertEqual(v._value.shape, (2, 2))
with self.assertRaisesRegex(ValueError, "while in a stateless scope"):
with backend.StatelessScope():
v = backend.Variable(initializer=0)
def test_deferred_assignment(self):
with backend.StatelessScope() as scope:
v = backend.Variable(
initializer=initializers.RandomNormal(), shape=(2, 2)
)
self.assertEqual(v._value, None)
v.assign(np.zeros((2, 2)))
v.assign_add(2 * np.ones((2, 2)))
v.assign_sub(np.ones((2, 2)))
out = scope.get_current_value(v)
self.assertAllClose(out, np.ones((2, 2)))
def test_autocasting(self):
v = backend.Variable(
initializer=initializers.RandomNormal(),
shape=(2, 2),
dtype="float32",
)
self.assertEqual(v.dtype, "float32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
print("open scope")
with AutocastScope("float16"):
self.assertEqual(
backend.standardize_dtype(v.value.dtype), "float16"
)
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
# Test non-float variables are not affected
v = backend.Variable(
initializer=initializers.Ones(),
shape=(2, 2),
dtype="int32",
trainable=False,
)
self.assertEqual(v.dtype, "int32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")
with AutocastScope("float16"):
self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")
def test_standardize_dtype_with_torch_dtype(self):
import torch
x = torch.randn(4, 4)
backend.standardize_dtype(x.dtype)
def test_name_validation(self):
# Test when name is not a string
with self.assertRaisesRegex(
ValueError, "Argument `name` must be a string"
):
KerasVariable(initializer=initializers.RandomNormal(), name=12345)
# Test when name contains a '/'
with self.assertRaisesRegex(ValueError, "cannot contain character `/`"):
KerasVariable(
initializer=initializers.RandomNormal(), name="invalid/name"
)
def test_standardize_shape_with_none(self):
with self.assertRaisesRegex(
ValueError, "Undefined shapes are not supported."
):
standardize_shape(None)
def test_standardize_shape_with_non_iterable(self):
with self.assertRaisesRegex(
ValueError, "Cannot convert '42' to a shape."
):
standardize_shape(42)
def test_standardize_shape_with_valid_input(self):
shape = [3, 4, 5]
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, (3, 4, 5))
# TODO
# (3.9,torch) FAILED keras_core/backend/common/variables_test.py
# ::VariablesTest::test_standardize_shape_with_non_integer_entry:
# - AssertionError "Cannot convert '\(3, 4, 'a'\)' to a shape.
# " does not match "invalid literal for int() with base 10: 'a'"
# def test_standardize_shape_with_non_integer_entry(self):
# with self.assertRaisesRegex(
# ValueError,
# "Cannot convert '\\(3, 4, 'a'\\)' to a shape. Found invalid",
# ):
# standardize_shape([3, 4, "a"])
def test_standardize_shape_with_negative_entry(self):
with self.assertRaisesRegex(
ValueError,
"Cannot convert '\\(3, 4, -5\\)' to a shape. Negative dimensions",
):
standardize_shape([3, 4, -5])
def test_autocast_scope_with_non_float_dtype(self):
with self.assertRaisesRegex(
ValueError,
"`AutocastScope` can only be used with a floating-point",
):
_ = AutocastScope("int32")
| keras-core/keras_core/backend/common/variables_test.py/0 | {
"file_path": "keras-core/keras_core/backend/common/variables_test.py",
"repo_id": "keras-core",
"token_count": 2145
} | 28 |
import jax
import numpy as np
from keras_core.backend.numpy.core import convert_to_tensor
from keras_core.utils.module_utils import scipy
RESIZE_INTERPOLATIONS = (
"bilinear",
"nearest",
"lanczos3",
"lanczos5",
"bicubic",
)
def resize(
image,
size,
interpolation="bilinear",
antialias=False,
data_format="channels_last",
):
if interpolation not in RESIZE_INTERPOLATIONS:
raise ValueError(
"Invalid value for argument `interpolation`. Expected of one "
f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}"
)
    if len(size) != 2:
raise ValueError(
"Argument `size` must be a tuple of two elements "
f"(height, width). Received: size={size}"
)
size = tuple(size)
if len(image.shape) == 4:
if data_format == "channels_last":
size = (image.shape[0],) + size + (image.shape[-1],)
else:
size = (image.shape[0], image.shape[1]) + size
elif len(image.shape) == 3:
if data_format == "channels_last":
size = size + (image.shape[-1],)
else:
size = (image.shape[0],) + size
else:
raise ValueError(
"Invalid input rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
return np.array(
jax.image.resize(image, size, method=interpolation, antialias=antialias)
)
AFFINE_TRANSFORM_INTERPOLATIONS = { # map to order
"nearest": 0,
"bilinear": 1,
}
AFFINE_TRANSFORM_FILL_MODES = {
"constant",
"nearest",
"wrap",
"mirror",
"reflect",
}
def affine_transform(
image,
transform,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format="channels_last",
):
if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys():
raise ValueError(
"Invalid value for argument `interpolation`. Expected of one "
f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: "
f"interpolation={interpolation}"
)
if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
raise ValueError(
"Invalid value for argument `fill_mode`. Expected of one "
f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
)
transform = convert_to_tensor(transform)
if len(image.shape) not in (3, 4):
raise ValueError(
"Invalid image rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
if len(transform.shape) not in (1, 2):
raise ValueError(
"Invalid transform rank: expected rank 1 (single transform) "
"or rank 2 (batch of transforms). Received input with shape: "
f"transform.shape={transform.shape}"
)
# scipy.ndimage.map_coordinates lacks support for half precision.
input_dtype = image.dtype
if input_dtype == "float16":
image = image.astype("float32")
# unbatched case
need_squeeze = False
if len(image.shape) == 3:
image = np.expand_dims(image, axis=0)
need_squeeze = True
if len(transform.shape) == 1:
transform = np.expand_dims(transform, axis=0)
if data_format == "channels_first":
image = np.transpose(image, (0, 2, 3, 1))
batch_size = image.shape[0]
# get indices
meshgrid = np.meshgrid(
*[np.arange(size) for size in image.shape[1:]], indexing="ij"
)
indices = np.concatenate(
[np.expand_dims(x, axis=-1) for x in meshgrid], axis=-1
)
indices = np.tile(indices, (batch_size, 1, 1, 1, 1))
    # Swap the x- and y-related coefficients: the Keras transform vector is
    # given in (x, y) order, while the coordinates below index arrays in
    # (row, col), i.e. (y, x), order.
a0 = transform[:, 0].copy()
a2 = transform[:, 2].copy()
b1 = transform[:, 4].copy()
b2 = transform[:, 5].copy()
transform[:, 0] = b1
transform[:, 2] = b2
transform[:, 4] = a0
transform[:, 5] = a2
# deal with transform
transform = np.pad(transform, pad_width=[[0, 0], [0, 1]], constant_values=1)
transform = np.reshape(transform, (batch_size, 3, 3))
offset = transform[:, 0:2, 2].copy()
offset = np.pad(offset, pad_width=[[0, 0], [0, 1]])
transform[:, 0:2, 2] = 0
# transform the indices
coordinates = np.einsum("Bhwij, Bjk -> Bhwik", indices, transform)
coordinates = np.moveaxis(coordinates, source=-1, destination=1)
coordinates += np.reshape(a=offset, newshape=(*offset.shape, 1, 1, 1))
# apply affine transformation
affined = np.stack(
[
map_coordinates(
image[i],
coordinates[i],
order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
fill_mode=fill_mode,
fill_value=fill_value,
)
for i in range(batch_size)
],
axis=0,
)
if data_format == "channels_first":
affined = np.transpose(affined, (0, 3, 1, 2))
if need_squeeze:
affined = np.squeeze(affined, axis=0)
if input_dtype == "float16":
affined = affined.astype(input_dtype)
return affined
MAP_COORDINATES_FILL_MODES = {
"constant",
"nearest",
"wrap",
"mirror",
"reflect",
}
def map_coordinates(
input, coordinates, order, fill_mode="constant", fill_value=0.0
):
if fill_mode not in MAP_COORDINATES_FILL_MODES:
raise ValueError(
"Invalid value for argument `fill_mode`. Expected one of "
f"{set(MAP_COORDINATES_FILL_MODES.keys())}. Received: "
f"fill_mode={fill_mode}"
)
if order not in range(2):
raise ValueError(
"Invalid value for argument `order`. Expected one of "
f"{[0, 1]}. Received: order={order}"
)
# SciPy's implementation of map_coordinates handles boundaries incorrectly,
# unless mode='reflect'. For order=1, this only affects interpolation
# outside the bounds of the original array.
# https://github.com/scipy/scipy/issues/2640
padding = [
(
max(-np.floor(c.min()).astype(int) + 1, 0),
max(np.ceil(c.max()).astype(int) + 1 - size, 0),
)
for c, size in zip(coordinates, input.shape)
]
shifted_coords = [c + p[0] for p, c in zip(padding, coordinates)]
pad_mode = {
"nearest": "edge",
"mirror": "reflect",
"reflect": "symmetric",
}.get(fill_mode, fill_mode)
if fill_mode == "constant":
padded = np.pad(
input, padding, mode=pad_mode, constant_values=fill_value
)
else:
padded = np.pad(input, padding, mode=pad_mode)
result = scipy.ndimage.map_coordinates(
padded, shifted_coords, order=order, mode=fill_mode, cval=fill_value
)
return result
| keras-core/keras_core/backend/numpy/image.py/0 | {
"file_path": "keras-core/keras_core/backend/numpy/image.py",
"repo_id": "keras-core",
"token_count": 3129
} | 29 |
import builtins
import functools
import math
import warnings
import tensorflow as tf
from tensorflow.experimental import numpy as tfnp
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
from keras_core.backend import config
from keras_core.backend.tensorflow.core import convert_to_tensor
def add(x1, x2):
if isinstance(x1, tf.SparseTensor) or isinstance(x2, tf.SparseTensor):
return tf.sparse.add(x1, x2)
return tfnp.add(x1, x2)
def bincount(x, weights=None, minlength=None):
if minlength is not None:
x = tf.cast(x, tf.int32)
if isinstance(x, tf.SparseTensor):
result = tf.sparse.bincount(
x,
weights=weights,
minlength=minlength,
axis=-1,
)
if x.shape.rank == 1:
output_shape = (minlength,)
else:
batch_size = tf.shape(result)[0]
output_shape = (batch_size, minlength)
return tf.SparseTensor(
indices=result.indices,
values=result.values,
dense_shape=output_shape,
)
return tf.math.bincount(x, weights=weights, minlength=minlength, axis=-1)
def einsum(subscripts, *operands, **kwargs):
return tfnp.einsum(subscripts, *operands, **kwargs)
def subtract(x1, x2):
if isinstance(x1, tf.SparseTensor) or isinstance(x2, tf.SparseTensor):
if isinstance(x2, tf.SparseTensor):
return tf.sparse.add(x1, tf.sparse.map_values(tf.negative, x2))
else:
return tf.sparse.add(x1, tf.negative(x2))
return tfnp.subtract(x1, x2)
def matmul(x1, x2):
def with_combined_batch_dimensions(a, b, fn_3d):
batch_shape = (
b.shape[:-2] if isinstance(b, tf.SparseTensor) else a.shape[:-2]
)
batch_size = math.prod(batch_shape)
a_3d = reshape(a, [batch_size] + a.shape[-2:])
b_3d = reshape(b, [batch_size] + b.shape[-2:])
result = fn_3d(a_3d, b_3d)
return reshape(result, batch_shape + result.shape[1:])
def sparse_sparse_matmul(a, b):
dtype = a.values.dtype
# Convert SparseTensors to CSR SparseMatrix.
a_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
a.indices, a.values, a.dense_shape
)
b_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
b.indices, b.values, b.dense_shape
)
# Compute the CSR SparseMatrix matrix multiplication.
result_csr = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
a_csr, b_csr, dtype
)
# Convert the CSR SparseMatrix to a SparseTensor.
res = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
result_csr, dtype
)
return tf.SparseTensor(res.indices, res.values, res.dense_shape)
def embedding_lookup_sparse_dense_matmul(a, b):
# We need at least one id per rows for embedding_lookup_sparse,
# otherwise there will be missing rows in the output.
a, _ = tf.sparse.fill_empty_rows(a, 0)
# We need to split x1 into separate ids and weights tensors. The ids
# should be the column indices of x1 and the values of the weights
# can continue to be the actual x1. The column arrangement of ids
# and weights does not matter as we sum over columns. See details in
# the documentation for sparse_ops.sparse_tensor_dense_matmul.
ids = tf.SparseTensor(
indices=a.indices,
values=a.indices[:, 1],
dense_shape=a.dense_shape,
)
return tf.nn.embedding_lookup_sparse(b, ids, a, combiner="sum")
# Either a or b is sparse
def sparse_dense_matmul_3d(a, b):
return tf.map_fn(
lambda x: tf.sparse.sparse_dense_matmul(x[0], x[1]),
elems=(a, b),
fn_output_signature=a.dtype,
)
x1_sparse = isinstance(x1, tf.SparseTensor)
x2_sparse = isinstance(x2, tf.SparseTensor)
if x1_sparse and x2_sparse:
if x1.shape.rank <= 3:
return sparse_sparse_matmul(x1, x2)
else:
return with_combined_batch_dimensions(x1, x2, sparse_sparse_matmul)
elif x1_sparse or x2_sparse:
# Sparse * dense or dense * sparse
sparse_rank = x1.shape.rank if x1_sparse else x2.shape.rank
# Special case: embedding_lookup_sparse for sparse * dense and rank 2
if x1_sparse and sparse_rank == 2:
return embedding_lookup_sparse_dense_matmul(x1, x2)
elif sparse_rank == 2:
return tf.sparse.sparse_dense_matmul(x1, x2)
elif sparse_rank == 3:
return sparse_dense_matmul_3d(x1, x2)
else:
return with_combined_batch_dimensions(
x1, x2, sparse_dense_matmul_3d
)
return tfnp.matmul(x1, x2)
def multiply(x1, x2):
if isinstance(x1, tf.SparseTensor):
if isinstance(x2, tf.SparseTensor):
ones_like_int8 = functools.partial(tf.ones_like, dtype=tf.int8)
zeros_like_int8 = functools.partial(tf.zeros_like, dtype=tf.int8)
# compute the intersection of indices in the form of a sparse tensor
# containing ones as values
ones1 = tf.sparse.map_values(ones_like_int8, x1)
ones2 = tf.sparse.map_values(ones_like_int8, x2)
# tf.sets.intersection ignores the last dimension when comparing,
# so we need to add a dummy extra dimension and then remove it
intersection = tf.sparse.reshape(
tf.sets.intersection(
tf.sparse.expand_dims(ones1, axis=-1),
tf.sparse.expand_dims(ones2, axis=-1),
),
x1.dense_shape,
)
# compute the masks to remove indices in x1 and x2 that are not part
# of the intersection, then trim x1 and x2
zeros1 = tf.sparse.map_values(zeros_like_int8, x1)
zeros2 = tf.sparse.map_values(zeros_like_int8, x2)
mask1 = tf.sparse.add(zeros1, intersection)
mask2 = tf.sparse.add(zeros2, intersection)
x1_trimmed = tf.sparse.retain(x1, tf.cast(mask1.values, tf.bool))
x2_trimmed = tf.sparse.retain(x2, tf.cast(mask2.values, tf.bool))
# now it is an element-wise multiplication on the values
return tf.sparse.map_values(tf.multiply, x1_trimmed, x2_trimmed)
else:
return x1 * x2
elif isinstance(x2, tf.SparseTensor):
return x2 * x1
return tfnp.multiply(x1, x2)
def mean(x, axis=None, keepdims=False):
return tfnp.mean(x, axis=axis, keepdims=keepdims)
def max(x, axis=None, keepdims=False, initial=None):
# The TensorFlow numpy API implementation doesn't support `initial` so we
# handle it manually here.
if initial is not None:
return tf.math.maximum(
tfnp.max(x, axis=axis, keepdims=keepdims), initial
)
# TensorFlow returns -inf by default for an empty list, but for consistency
# with other backends and the numpy API we want to throw in this case.
if tf.executing_eagerly():
size_x = size(x)
tf.assert_greater(
size_x,
tf.constant(0, dtype=size_x.dtype),
message="Cannot compute the max of an empty tensor.",
)
return tfnp.max(x, axis=axis, keepdims=keepdims)
def ones(shape, dtype="float32"):
return tf.ones(shape, dtype=dtype)
def zeros(shape, dtype="float32"):
return tf.zeros(shape, dtype=dtype)
def absolute(x):
return tfnp.absolute(x)
def abs(x):
return absolute(x)
def all(x, axis=None, keepdims=False):
return tfnp.all(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
return tfnp.any(x, axis=axis, keepdims=keepdims)
def amax(x, axis=None, keepdims=False):
return tfnp.amax(x, axis=axis, keepdims=keepdims)
def amin(x, axis=None, keepdims=False):
return tfnp.amin(x, axis=axis, keepdims=keepdims)
def append(
x1,
x2,
axis=None,
):
return tfnp.append(x1, x2, axis=axis)
def arange(start, stop=None, step=1, dtype=None):
# tfnp.arange has trouble with dynamic Tensors in compiled function.
# tf.range does not.
if dtype is None:
if hasattr(start, "dtype"):
dtype = start.dtype
elif isinstance(start, int):
dtype = "int32"
else:
dtype = config.floatx()
return tf.range(start, stop, delta=step, dtype=dtype)
def arccos(x):
return tfnp.arccos(x)
def arccosh(x):
return tfnp.arccosh(x)
def arcsin(x):
return tfnp.arcsin(x)
def arcsinh(x):
return tfnp.arcsinh(x)
def arctan(x):
return tfnp.arctan(x)
def arctan2(x1, x2):
return tfnp.arctan2(x1, x2)
def arctanh(x):
return tfnp.arctanh(x)
def argmax(x, axis=None):
return tfnp.argmax(x, axis=axis)
def argmin(x, axis=None):
return tfnp.argmin(x, axis=axis)
def argsort(x, axis=-1):
return tfnp.argsort(x, axis=axis)
def array(x, dtype=None):
return tfnp.array(x, dtype=dtype)
def average(x, axis=None, weights=None):
if not isinstance(axis, (list, tuple)):
axis = (axis,)
for a in axis:
# `tfnp.average` does not handle multiple axes.
x = tfnp.average(x, weights=weights, axis=a)
return x
def broadcast_to(x, shape):
return tfnp.broadcast_to(x, shape)
def ceil(x):
return tfnp.ceil(x)
def clip(x, x_min, x_max):
return tfnp.clip(x, x_min, x_max)
def concatenate(xs, axis=0):
sparse_count = builtins.sum(isinstance(x, tf.SparseTensor) for x in xs)
if sparse_count:
if sparse_count == len(xs):
return tf.sparse.concat(axis=axis, sp_inputs=xs)
else:
xs = [
tf.sparse.to_dense(x) if isinstance(x, tf.SparseTensor) else x
for x in xs
]
return tfnp.concatenate(xs, axis=axis)
def conjugate(x):
return tfnp.conjugate(x)
def conj(x):
return conjugate(x)
def copy(x):
return tfnp.copy(x)
def cos(x):
return tfnp.cos(x)
def cosh(x):
return tfnp.cosh(x)
def count_nonzero(x, axis=None):
return tfnp.count_nonzero(x, axis=axis)
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
return tfnp.cross(
x1,
x2,
axisa=axisa,
axisb=axisb,
axisc=axisc,
axis=axis,
)
def cumprod(x, axis=None):
return tfnp.cumprod(x, axis=axis)
def cumsum(x, axis=None):
return tfnp.cumsum(x, axis=axis)
def diag(x, k=0):
return tfnp.diag(x, k=k)
def diagonal(x, offset=0, axis1=0, axis2=1):
return tfnp.diagonal(
x,
offset=offset,
axis1=axis1,
axis2=axis2,
)
def digitize(x, bins):
bins = list(bins)
if isinstance(x, tf.RaggedTensor):
return tf.ragged.map_flat_values(
lambda y: tf.raw_ops.Bucketize(input=y, boundaries=bins), x
)
elif isinstance(x, tf.SparseTensor):
return tf.SparseTensor(
indices=tf.identity(x.indices),
values=tf.raw_ops.Bucketize(input=x.values, boundaries=bins),
dense_shape=tf.identity(x.dense_shape),
)
x = convert_to_tensor(x)
return tf.raw_ops.Bucketize(input=x, boundaries=bins)
def dot(x, y):
return tfnp.dot(x, y)
def empty(shape, dtype="float32"):
return tfnp.empty(shape, dtype=dtype)
def equal(x1, x2):
return tfnp.equal(x1, x2)
def exp(x):
return tfnp.exp(x)
def expand_dims(x, axis):
if isinstance(x, tf.SparseTensor):
return tf.sparse.expand_dims(x, axis)
return tfnp.expand_dims(x, axis)
def expm1(x):
return tfnp.expm1(x)
def flip(x, axis=None):
return tfnp.flip(x, axis=axis)
def floor(x):
return tfnp.floor(x)
def full(shape, fill_value, dtype=None):
return tfnp.full(shape, fill_value, dtype=dtype)
def full_like(x, fill_value, dtype=None):
return tfnp.full_like(x, fill_value, dtype=dtype)
def greater(x1, x2):
return tfnp.greater(x1, x2)
def greater_equal(x1, x2):
return tfnp.greater_equal(x1, x2)
def hstack(xs):
return tfnp.hstack(xs)
def identity(n, dtype="float32"):
return tfnp.identity(n, dtype=dtype)
def imag(x):
return tfnp.imag(x)
def isclose(x1, x2):
return tfnp.isclose(x1, x2)
def isfinite(x):
return tfnp.isfinite(x)
def isinf(x):
return tfnp.isinf(x)
def isnan(x):
return tfnp.isnan(x)
def less(x1, x2):
return tfnp.less(x1, x2)
def less_equal(x1, x2):
return tfnp.less_equal(x1, x2)
def linspace(
start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
return tfnp.linspace(
start,
stop,
num=num,
endpoint=endpoint,
retstep=retstep,
dtype=dtype,
axis=axis,
)
def log(x):
return tfnp.log(x)
def log10(x):
return tfnp.log10(x)
def log1p(x):
return tfnp.log1p(x)
def log2(x):
return tfnp.log2(x)
def logaddexp(x1, x2):
return tfnp.logaddexp(x1, x2)
def logical_and(x1, x2):
return tfnp.logical_and(x1, x2)
def logical_not(x):
return tfnp.logical_not(x)
def logical_or(x1, x2):
return tfnp.logical_or(x1, x2)
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
return tfnp.logspace(
start,
stop,
num=num,
endpoint=endpoint,
base=base,
dtype=dtype,
axis=axis,
)
def maximum(x1, x2):
if isinstance(x1, tf.SparseTensor):
if isinstance(x2, tf.SparseTensor):
return tf.sparse.maximum(x1, x2)
else:
x1 = tf.sparse.to_dense(x1)
elif isinstance(x2, tf.SparseTensor):
x2 = tf.sparse.to_dense(x2)
return tfnp.maximum(x1, x2)
def meshgrid(*x, indexing="xy"):
return tfnp.meshgrid(*x, indexing=indexing)
def min(x, axis=None, keepdims=False, initial=None):
# The TensorFlow numpy API implementation doesn't support `initial` so we
# handle it manually here.
if initial is not None:
return tf.math.minimum(
tfnp.min(x, axis=axis, keepdims=keepdims), initial
)
# TensorFlow returns inf by default for an empty list, but for consistency
# with other backends and the numpy API we want to throw in this case.
if tf.executing_eagerly():
size_x = size(x)
tf.assert_greater(
size_x,
tf.constant(0, dtype=size_x.dtype),
message="Cannot compute the min of an empty tensor.",
)
return tfnp.min(x, axis=axis, keepdims=keepdims)
def minimum(x1, x2):
if isinstance(x1, tf.SparseTensor):
if isinstance(x2, tf.SparseTensor):
return tf.sparse.minimum(x1, x2)
else:
x1 = tf.sparse.to_dense(x1)
elif isinstance(x2, tf.SparseTensor):
x2 = tf.sparse.to_dense(x2)
return tfnp.minimum(x1, x2)
def mod(x1, x2):
return tfnp.mod(x1, x2)
def moveaxis(x, source, destination):
return tfnp.moveaxis(x, source=source, destination=destination)
def nan_to_num(x):
# Replace NaN with 0
x = tf.where(tf.math.is_nan(x), 0, x)
    # Replace positive infinity with dtype.max
x = tf.where(tf.math.is_inf(x) & (x > 0), x.dtype.max, x)
# Replace negative infinity with dtype.min
x = tf.where(tf.math.is_inf(x) & (x < 0), x.dtype.min, x)
return x
def ndim(x):
return tfnp.ndim(x)
def nonzero(x):
return tfnp.nonzero(x)
def not_equal(x1, x2):
return tfnp.not_equal(x1, x2)
def ones_like(x, dtype=None):
return tfnp.ones_like(x, dtype=dtype)
def zeros_like(x, dtype=None):
return tf.zeros_like(x, dtype=dtype)
def outer(x1, x2):
return tfnp.outer(x1, x2)
def pad(x, pad_width, mode="constant"):
return tfnp.pad(x, pad_width, mode=mode)
def prod(x, axis=None, keepdims=False, dtype=None):
return tfnp.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
def ravel(x):
return tfnp.ravel(x)
def real(x):
return tfnp.real(x)
def reciprocal(x):
return tfnp.reciprocal(x)
def repeat(x, repeats, axis=None):
# tfnp.repeat has trouble with dynamic Tensors in compiled function.
# tf.repeat does not.
return tf.repeat(x, repeats, axis=axis)
def reshape(x, new_shape):
if isinstance(x, tf.SparseTensor):
return tf.sparse.reshape(x, new_shape)
return tfnp.reshape(x, new_shape)
def roll(x, shift, axis=None):
return tfnp.roll(x, shift, axis=axis)
def sign(x):
return tfnp.sign(x)
def sin(x):
return tfnp.sin(x)
def sinh(x):
return tfnp.sinh(x)
def size(x):
return tfnp.size(x)
def sort(x, axis=-1):
return tfnp.sort(x, axis=axis)
def split(x, indices_or_sections, axis=0):
return tfnp.split(x, indices_or_sections, axis=axis)
def stack(x, axis=0):
return tfnp.stack(x, axis=axis)
def std(x, axis=None, keepdims=False):
return tfnp.std(x, axis=axis, keepdims=keepdims)
def swapaxes(x, axis1, axis2):
return tfnp.swapaxes(x, axis1=axis1, axis2=axis2)
def take(x, indices, axis=None):
if isinstance(indices, tf.SparseTensor):
if x.dtype not in (tf.float16, tf.float32, tf.float64, tf.bfloat16):
warnings.warn(
"`take` with the TensorFlow backend does not support "
f"`x.dtype={x.dtype}` when `indices` is a sparse tensor; "
"densifying `indices`."
)
return tfnp.take(x, tf.sparse.to_dense(indices), axis=axis)
if axis is None:
x = tf.reshape(x, (-1,))
elif axis != 0:
warnings.warn(
"`take` with the TensorFlow backend does not support "
f"`axis={axis}` when `indices` is a sparse tensor; "
"densifying `indices`."
)
return tfnp.take(x, tf.sparse.to_dense(indices), axis=axis)
return tf.nn.safe_embedding_lookup_sparse(
embedding_weights=x,
sparse_ids=tf.sparse.expand_dims(indices, axis=-1),
default_id=0,
)
return tfnp.take(x, indices, axis=axis)
def take_along_axis(x, indices, axis=None):
return tfnp.take_along_axis(x, indices, axis=axis)
def tan(x):
return tfnp.tan(x)
def tanh(x):
return tfnp.tanh(x)
def tensordot(x1, x2, axes=2):
return tfnp.tensordot(x1, x2, axes=axes)
def round(x, decimals=0):
return tfnp.round(x, decimals=decimals)
def tile(x, repeats):
# The TFNP implementation is buggy, we roll our own.
x = convert_to_tensor(x)
repeats = tf.reshape(convert_to_tensor(repeats, dtype="int32"), [-1])
repeats_size = tf.size(repeats)
repeats = tf.pad(
repeats,
[[tf.maximum(x.shape.rank - repeats_size, 0), 0]],
constant_values=1,
)
x_shape = tf.pad(
tf.shape(x),
[[tf.maximum(repeats_size - x.shape.rank, 0), 0]],
constant_values=1,
)
x = tf.reshape(x, x_shape)
return tf.tile(x, repeats)
def trace(x, offset=0, axis1=0, axis2=1):
return tfnp.trace(x, offset=offset, axis1=axis1, axis2=axis2)
def tri(N, M=None, k=0, dtype="float32"):
return tfnp.tri(N, M=M, k=k, dtype=dtype)
def tril(x, k=0):
return tfnp.tril(x, k=k)
def triu(x, k=0):
return tfnp.triu(x, k=k)
def vdot(x1, x2):
return tfnp.vdot(x1, x2)
def vstack(xs):
return tfnp.vstack(xs)
def where(condition, x1, x2):
return tfnp.where(condition, x1, x2)
def divide(x1, x2):
return tfnp.divide(x1, x2)
def true_divide(x1, x2):
return tfnp.true_divide(x1, x2)
def power(x1, x2):
return tfnp.power(x1, x2)
def negative(x):
return tfnp.negative(x)
def square(x):
return tfnp.square(x)
def sqrt(x):
x = convert_to_tensor(x)
if tf.as_dtype(x.dtype).is_integer:
x = tf.cast(x, dtype=config.floatx())
return tfnp.sqrt(x)
def squeeze(x, axis=None):
if isinstance(x, tf.SparseTensor):
new_shape = list(x.shape)
gather_indices = list(range(len(new_shape)))
if axis is None:
for i in range(len(new_shape) - 1, -1, -1):
if new_shape[i] == 1:
del new_shape[i]
del gather_indices[i]
else:
if new_shape[axis] != 1:
raise ValueError(
f"Cannot squeeze axis {axis}, because the "
"dimension is not 1."
)
del new_shape[axis]
del gather_indices[axis]
new_indices = tf.gather(x.indices, gather_indices, axis=1)
return tf.SparseTensor(new_indices, x.values, tuple(new_shape))
return tfnp.squeeze(x, axis=axis)
def transpose(x, axes=None):
if isinstance(x, tf.SparseTensor):
return tf.sparse.transpose(x, perm=axes)
return tfnp.transpose(x, axes=axes)
def var(x, axis=None, keepdims=False):
return tfnp.var(x, axis=axis, keepdims=keepdims)
def sum(x, axis=None, keepdims=False):
return tfnp.sum(x, axis=axis, keepdims=keepdims)
def eye(N, M=None, k=0, dtype="float32"):
return tfnp.eye(N, M=M, k=k, dtype=dtype)
def floor_divide(x1, x2):
return tfnp.floor_divide(x1, x2)
def logical_xor(x1, x2):
return tfnp.logical_xor(x1, x2)
| keras-core/keras_core/backend/tensorflow/numpy.py/0 | {
"file_path": "keras-core/keras_core/backend/tensorflow/numpy.py",
"repo_id": "keras-core",
"token_count": 10493
} | 30 |
from keras_core.backend.torch.optimizers.torch_optimizer import TorchOptimizer
| keras-core/keras_core/backend/torch/optimizers/__init__.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/optimizers/__init__.py",
"repo_id": "keras-core",
"token_count": 26
} | 31 |
"""CIFAR10 small images classification dataset."""
import os
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.datasets.cifar import load_batch
from keras_core.utils.file_utils import get_file
@keras_core_export("keras_core.datasets.cifar10.load_data")
def load_data():
"""Loads the CIFAR10 dataset.
This is a dataset of 50,000 32x32 color training images and 10,000 test
images, labeled over 10 categories. See more info at the
[CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).
The classes are:
| Label | Description |
|:-----:|-------------|
| 0 | airplane |
| 1 | automobile |
| 2 | bird |
| 3 | cat |
| 4 | deer |
| 5 | dog |
| 6 | frog |
| 7 | horse |
| 8 | ship |
| 9 | truck |
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
    **`x_train`**: `uint8` NumPy array of RGB image data with shape
`(50000, 32, 32, 3)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of labels (integers in range 0-9)
with shape `(50000, 1)` for the training data.
    **`x_test`**: `uint8` NumPy array of RGB image data with shape
`(10000, 32, 32, 3)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of labels (integers in range 0-9)
with shape `(10000, 1)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
assert x_train.shape == (50000, 32, 32, 3)
assert x_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
```
"""
dirname = "cifar-10-batches-py"
origin = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
path = get_file(
fname=dirname,
origin=origin,
untar=True,
file_hash=( # noqa: E501
"6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
),
)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype="uint8")
y_train = np.empty((num_train_samples,), dtype="uint8")
for i in range(1, 6):
fpath = os.path.join(path, "data_batch_" + str(i))
(
x_train[(i - 1) * 10000 : i * 10000, :, :, :],
y_train[(i - 1) * 10000 : i * 10000],
) = load_batch(fpath)
fpath = os.path.join(path, "test_batch")
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if backend.image_data_format() == "channels_last":
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
x_test = x_test.astype(x_train.dtype)
y_test = y_test.astype(y_train.dtype)
return (x_train, y_train), (x_test, y_test)
| keras-core/keras_core/datasets/cifar10.py/0 | {
"file_path": "keras-core/keras_core/datasets/cifar10.py",
"repo_id": "keras-core",
"token_count": 1444
} | 32 |
import math
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.backend import random
from keras_core.initializers.initializer import Initializer
from keras_core.saving import serialization_lib
@keras_core_export(
[
"keras_core.initializers.RandomNormal",
"keras_core.initializers.random_normal",
]
)
class RandomNormal(Initializer):
"""Random normal initializer.
Draws samples from a normal distribution for given parameters.
Examples:
>>> # Standalone usage:
>>> initializer = RandomNormal(mean=0.0, stddev=1.0)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = RandomNormal(mean=0.0, stddev=1.0)
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
mean: A python scalar or a scalar keras tensor. Mean of the random
values to generate.
stddev: A python scalar or a scalar keras tensor. Standard deviation of
the random values to generate.
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self._init_seed = seed
self.seed = seed or random.make_default_seed()
super().__init__()
def __call__(self, shape, dtype=None):
return random.normal(
shape=shape,
mean=self.mean,
stddev=self.stddev,
seed=self.seed,
dtype=dtype,
)
def get_config(self):
seed_config = serialization_lib.serialize_keras_object(self._init_seed)
return {"mean": self.mean, "stddev": self.stddev, "seed": seed_config}
@keras_core_export(
[
"keras_core.initializers.TruncatedNormal",
"keras_core.initializers.truncated_normal",
]
)
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
The values generated are similar to values from a
`RandomNormal` initializer, except that values more
than two standard deviations from the mean are
discarded and re-drawn.
Examples:
>>> # Standalone usage:
>>> initializer = TruncatedNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = TruncatedNormal(mean=0., stddev=1.)
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
mean: A python scalar or a scalar keras tensor. Mean of the random
values to generate.
stddev: A python scalar or a scalar keras tensor. Standard deviation of
the random values to generate.
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self._init_seed = seed
self.seed = seed or random.make_default_seed()
super().__init__()
def __call__(self, shape, dtype=None):
return random.truncated_normal(
shape=shape,
mean=self.mean,
stddev=self.stddev,
seed=self.seed,
dtype=dtype,
)
def get_config(self):
seed_config = serialization_lib.serialize_keras_object(self._init_seed)
return {"mean": self.mean, "stddev": self.stddev, "seed": seed_config}
@keras_core_export(
[
"keras_core.initializers.RandomUniform",
"keras_core.initializers.random_uniform",
]
)
class RandomUniform(Initializer):
"""Random uniform initializer.
Draws samples from a uniform distribution with the given parameters.
Examples:
>>> # Standalone usage:
>>> initializer = RandomUniform(minval=0.0, maxval=1.0)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = RandomUniform(minval=0.0, maxval=1.0)
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
minval: A python scalar or a scalar keras tensor. Lower bound of the
range of random values to generate (inclusive).
maxval: A python scalar or a scalar keras tensor. Upper bound of the
range of random values to generate (exclusive).
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self._init_seed = seed
self.seed = seed or random.make_default_seed()
super().__init__()
def __call__(self, shape, dtype=None):
return random.uniform(
shape=shape,
minval=self.minval,
maxval=self.maxval,
seed=self.seed,
dtype=dtype,
)
def get_config(self):
seed_config = serialization_lib.serialize_keras_object(self._init_seed)
return {
"minval": self.minval,
"maxval": self.maxval,
"seed": seed_config,
}
@keras_core_export(
[
"keras_core.initializers.VarianceScaling",
"keras_core.initializers.variance_scaling",
]
)
class VarianceScaling(Initializer):
"""Initializer that adapts its scale to the shape of its input tensors.
With `distribution="truncated_normal"` or `"untruncated_normal"`, samples are
drawn from a truncated/untruncated normal distribution with a mean of zero
and a standard deviation (after truncation, if used) `stddev = sqrt(scale /
n)`, where `n` is:
- number of input units in the weight tensor, if `mode="fan_in"`
- number of output units, if `mode="fan_out"`
- average of the numbers of input and output units, if `mode="fan_avg"`
With `distribution="uniform"`, samples are drawn from a uniform distribution
within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.
Examples:
>>> # Standalone usage:
>>> initializer = VarianceScaling(
scale=0.1, mode='fan_in', distribution='uniform')
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = VarianceScaling(
scale=0.1, mode='fan_in', distribution='uniform')
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
scale: Scaling factor (positive float).
mode: One of `"fan_in"`, `"fan_out"`, `"fan_avg"`.
distribution: Random distribution to use.
One of `"truncated_normal"`, `"untruncated_normal"`, or `"uniform"`.
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
"""
def __init__(
self,
scale=1.0,
mode="fan_in",
distribution="truncated_normal",
seed=None,
):
if scale <= 0.0:
raise ValueError(
"Argument `scale` must be positive float. "
f"Received: scale={scale}"
)
allowed_modes = {"fan_in", "fan_out", "fan_avg"}
if mode not in allowed_modes:
raise ValueError(
f"Invalid `mode` argument: {mode}. "
f"Please use one of {allowed_modes}"
)
distribution = distribution.lower()
if distribution == "normal":
distribution = "truncated_normal"
allowed_distributions = {
"uniform",
"truncated_normal",
"untruncated_normal",
}
if distribution not in allowed_distributions:
raise ValueError(
f"Invalid `distribution` argument: {distribution}."
f"Please use one of {allowed_distributions}"
)
self.scale = scale
self.mode = mode
self.distribution = distribution
self._init_seed = seed
self.seed = seed or random.make_default_seed()
def __call__(self, shape, dtype=None):
scale = self.scale
fan_in, fan_out = compute_fans(shape)
if self.mode == "fan_in":
scale /= max(1.0, fan_in)
elif self.mode == "fan_out":
scale /= max(1.0, fan_out)
else:
scale /= max(1.0, (fan_in + fan_out) / 2.0)
if self.distribution == "truncated_normal":
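# The magic constant below is the standard deviation of a unit normal
# truncated to [-2, 2]; dividing by it compensates for the variance
# lost to truncation, so samples end up with stddev sqrt(scale).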
stddev = math.sqrt(scale) / 0.87962566103423978
return random.truncated_normal(
shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed
)
elif self.distribution == "untruncated_normal":
stddev = math.sqrt(scale)
return random.normal(
shape, mean=0.0, stddev=stddev, dtype=dtype, seed=self.seed
)
else:
limit = math.sqrt(3.0 * scale)
return random.uniform(
shape, minval=-limit, maxval=limit, dtype=dtype, seed=self.seed
)
def get_config(self):
seed_config = serialization_lib.serialize_keras_object(self._init_seed)
return {
"scale": self.scale,
"mode": self.mode,
"distribution": self.distribution,
"seed": seed_config,
}
@keras_core_export(
[
"keras_core.initializers.GlorotUniform",
"keras_core.initializers.glorot_uniform",
]
)
class GlorotUniform(VarianceScaling):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input
units in the weight tensor and `fan_out` is the number of output units).
Examples:
>>> # Standalone usage:
>>> initializer = GlorotUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = GlorotUniform()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
Reference:
- [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
"""
def __init__(self, seed=None):
super().__init__(
scale=1.0, mode="fan_avg", distribution="uniform", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_core_export(
[
"keras_core.initializers.GlorotNormal",
"keras_core.initializers.glorot_normal",
]
)
class GlorotNormal(VarianceScaling):
"""The Glorot normal initializer, also called Xavier normal initializer.
Draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of
input units in the weight tensor and `fan_out` is the number of output units
in the weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = GlorotNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = GlorotNormal()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
Reference:
- [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
"""
def __init__(self, seed=None):
super().__init__(
scale=1.0,
mode="fan_avg",
distribution="truncated_normal",
seed=seed,
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_core_export(
[
"keras_core.initializers.LecunNormal",
"keras_core.initializers.lecun_normal",
]
)
class LecunNormal(VarianceScaling):
"""Lecun normal initializer.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in
the weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = LecunNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = LecunNormal()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
Reference:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
def __init__(self, seed=None):
super().__init__(
scale=1.0, mode="fan_in", distribution="truncated_normal", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_core_export(
[
"keras_core.initializers.LecunUniform",
"keras_core.initializers.lecun_uniform",
]
)
class LecunUniform(VarianceScaling):
"""Lecun uniform initializer.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = LecunUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = LecunUniform()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
Reference:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
def __init__(self, seed=None):
super().__init__(
scale=1.0, mode="fan_in", distribution="uniform", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_core_export(
["keras_core.initializers.HeNormal", "keras_core.initializers.he_normal"]
)
class HeNormal(VarianceScaling):
"""He normal initializer.
It draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in
the weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = HeNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = HeNormal()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
Reference:
- [He et al., 2015](https://arxiv.org/abs/1502.01852)
"""
def __init__(self, seed=None):
super().__init__(
scale=2.0, mode="fan_in", distribution="truncated_normal", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_core_export(
["keras_core.initializers.HeUniform", "keras_core.initializers.he_uniform"]
)
class HeUniform(VarianceScaling):
"""He uniform variance scaling initializer.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = HeUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = HeUniform()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras_core.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.backend.SeedGenerator`.
Reference:
- [He et al., 2015](https://arxiv.org/abs/1502.01852)
"""
def __init__(self, seed=None):
super().__init__(
scale=2.0, mode="fan_in", distribution="uniform", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
def compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple.
Returns:
A tuple of integer scalars: `(fan_in, fan_out)`.
"""
shape = tuple(shape)
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return int(fan_in), int(fan_out)
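# Worked example (illustration only): for a Conv2D kernel of shape
# (3, 3, 64, 128), the receptive field size is 3 * 3 = 9, so
# fan_in = 64 * 9 = 576 and fan_out = 128 * 9 = 1152.
#
# >>> compute_fans((3, 3, 64, 128))
# (576, 1152)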
@keras_core_export(
[
"keras_core.initializers.OrthogonalInitializer",
"keras_core.initializers.Orthogonal",
"keras_core.initializers.orthogonal",
]
)
class OrthogonalInitializer(Initializer):
"""Initializer that generates an orthogonal matrix.
If the shape of the tensor to initialize is two-dimensional, it is
initialized with an orthogonal matrix obtained from the QR decomposition of
a matrix of random numbers drawn from a normal distribution. If the matrix
has fewer rows than columns then the output will have orthogonal rows.
Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Examples:
>>> # Standalone usage:
>>> initializer = keras_core.initializers.Orthogonal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = keras_core.initializers.Orthogonal()
>>> layer = keras_core.layers.Dense(3, kernel_initializer=initializer)
Args:
gain: Multiplicative factor to apply to the orthogonal matrix.
seed: A Python integer. Used to make the behavior of the initializer
deterministic.
Reference:
- [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
"""
def __init__(self, gain=1.0, seed=None):
self.gain = gain
self._init_seed = seed
self.seed = seed or random.make_default_seed()
def __call__(self, shape, dtype=None):
if len(shape) < 2:
raise ValueError(
"The tensor to initialize must be "
"at least two-dimensional. Received: "
f"shape={shape} of rank {len(shape)}."
)
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
# Generate a random matrix
a = random.normal(flat_shape, seed=self.seed, dtype=dtype)
# Compute the QR factorization
q, r = ops.qr(a)
# Make Q uniformly distributed by sign-correcting with the diagonal of R
d = ops.diag(r)
q *= ops.sign(d)
if num_rows < num_cols:
q = ops.transpose(q)
return self.gain * ops.reshape(q, shape)
def get_config(self):
seed_config = serialization_lib.serialize_keras_object(self._init_seed)
return {"gain": self.gain, "seed": seed_config}
| keras-core/keras_core/initializers/random_initializers.py/0 | {
"file_path": "keras-core/keras_core/initializers/random_initializers.py",
"repo_id": "keras-core",
"token_count": 9908
} | 33 |
import numpy as np
import pytest
from keras_core import backend
from keras_core import layers
from keras_core.testing import test_case
class EmbeddingTest(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_embedding_basics(self):
self.run_layer_test(
layers.Embedding,
{"input_dim": 4, "output_dim": 3},
input_shape=(2,),
input_dtype="int32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.Embedding,
{"input_dim": 5, "output_dim": 4, "mask_zero": True},
input_shape=(2, 3),
input_dtype="int64",
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_sparse(self):
self.run_layer_test(
layers.Embedding,
{"input_dim": 5, "output_dim": 4},
input_shape=(2, 3),
input_dtype="int32",
input_sparse=True,
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
def test_correctness(self):
layer = layers.Embedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array([2, 1, 0]))
self.assertAllClose(out, np.array([[3.0, 3.0], [2.0, 2.0], [0.0, 0.0]]))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_correctness_sparse(self):
import tensorflow as tf
layer = layers.Embedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
x = tf.SparseTensor(
indices=[[0, 0], [1, 2]], values=[2, 1], dense_shape=(2, 3)
)
self.assertAllClose(
layer(x),
np.array(
[
[[3.0, 3.0], [0.0, 0.0], [0.0, 0.0]],
[[0.0, 0.0], [0.0, 0.0], [2.0, 2.0]],
]
),
)
def test_masking(self):
layer = layers.Embedding(input_dim=3, output_dim=2, mask_zero=True)
layer.build()
out = layer.compute_mask(np.array(([2, 1, 0])))
self.assertAllClose(out, np.array([True, True, False]))
| keras-core/keras_core/layers/core/embedding_test.py/0 | {
"file_path": "keras-core/keras_core/layers/core/embedding_test.py",
"repo_id": "keras-core",
"token_count": 1640
} | 34 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.merging.base_merge import Merge
@keras_core_export("keras_core.layers.Average")
class Average(Merge):
"""Averages a list of inputs element-wise..
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras_core.layers.Average()([x1, x2])
Usage in a Keras model:
>>> input1 = keras_core.layers.Input(shape=(16,))
>>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras_core.layers.Input(shape=(32,))
>>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras_core.layers.average([x1, x2])`
>>> y = keras_core.layers.Average()([x1, x2])
>>> out = keras_core.layers.Dense(4)(y)
>>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.add(output, inputs[i])
return output / len(inputs)
@keras_core_export("keras_core.layers.average")
def average(inputs, **kwargs):
"""Functional interface to the `keras_core.layers.Average` layer.
Args:
inputs: A list of input tensors, all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the element-wise average of the inputs with the same
shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras_core.layers.average([x1, x2])
Usage in a Keras model:
>>> input1 = keras_core.layers.Input(shape=(16,))
>>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras_core.layers.Input(shape=(32,))
>>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
>>> y = keras_core.layers.average([x1, x2])
>>> out = keras_core.layers.Dense(4)(y)
>>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)
"""
return Average(**kwargs)(inputs)
| keras-core/keras_core/layers/merging/average.py/0 | {
"file_path": "keras-core/keras_core/layers/merging/average.py",
"repo_id": "keras-core",
"token_count": 954
} | 35 |
from keras_core import initializers
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers import Wrapper
from keras_core.layers.input_spec import InputSpec
from keras_core.utils.numerical_utils import normalize
@keras_core_export("keras_core.layers.SpectralNormalization")
class SpectralNormalization(Wrapper):
"""Performs spectral normalization on the weights of a target layer.
This wrapper controls the Lipschitz constant of the weights of a layer by
constraining their spectral norm, which can stabilize the training of GANs.
Args:
layer: A `keras_core.layers.Layer` instance that
has either a `kernel` (e.g. `Conv2D`, `Dense`...)
or an `embeddings` attribute (`Embedding` layer).
power_iterations: int, the number of iterations during normalization.
**kwargs: Base wrapper keyword arguments.
Examples:
Wrap `keras_core.layers.Conv2D`:
>>> x = np.random.rand(1, 10, 10, 1)
>>> conv2d = SpectralNormalization(keras_core.layers.Conv2D(2, 2))
>>> y = conv2d(x)
>>> y.shape
(1, 9, 9, 2)
Wrap `keras_core.layers.Dense`:
>>> x = np.random.rand(1, 10, 10, 1)
>>> dense = SpectralNormalization(keras_core.layers.Dense(10))
>>> y = dense(x)
>>> y.shape
(1, 10, 10, 10)
Reference:
- [Spectral Normalization for GAN](https://arxiv.org/abs/1802.05957).
"""
def __init__(self, layer, power_iterations=1, **kwargs):
super().__init__(layer, **kwargs)
if power_iterations <= 0:
raise ValueError(
"`power_iterations` should be greater than zero. Received: "
f"`power_iterations={power_iterations}`"
)
self.power_iterations = power_iterations
def build(self, input_shape):
super().build(input_shape)
self.input_spec = InputSpec(shape=[None] + list(input_shape[1:]))
if hasattr(self.layer, "kernel"):
self.kernel = self.layer.kernel
elif hasattr(self.layer, "embeddings"):
self.kernel = self.layer.embeddings
else:
raise ValueError(
f"{type(self.layer).__name__} object has no attribute 'kernel' "
"nor 'embeddings'"
)
self.kernel_shape = self.kernel.shape
self.vector_u = self.add_weight(
shape=(1, self.kernel_shape[-1]),
initializer=initializers.TruncatedNormal(stddev=0.02),
trainable=False,
name="vector_u",
dtype=self.kernel.dtype,
)
def call(self, inputs, training=False):
if training:
self.normalize_weights()
output = self.layer(inputs)
return ops.cast(output, inputs.dtype)
def compute_output_shape(self, input_shape):
return self.layer.compute_output_shape(input_shape)
def normalize_weights(self):
"""Generate spectral normalized weights.
This method will update the value of `self.kernel` with the
spectral normalized value, so that the layer is ready for `call()`.
"""
weights = ops.reshape(self.kernel, [-1, self.kernel_shape[-1]])
vector_u = self.vector_u
# Skip the update if the weights are all exactly zero.
if ops.any(ops.not_equal(weights, 0.0)):
for _ in range(self.power_iterations):
vector_v = normalize(
ops.matmul(vector_u, ops.transpose(weights)), axis=None
)
vector_u = normalize(ops.matmul(vector_v, weights), axis=None)
# vector_u = tf.stop_gradient(vector_u)
# vector_v = tf.stop_gradient(vector_v)
sigma = ops.matmul(
ops.matmul(vector_v, weights), ops.transpose(vector_u)
)
self.vector_u.assign(ops.cast(vector_u, self.vector_u.dtype))
self.kernel.assign(
ops.cast(
ops.reshape(self.kernel / sigma, self.kernel_shape),
self.kernel.dtype,
)
)
def get_config(self):
config = {"power_iterations": self.power_iterations}
base_config = super().get_config()
return {**base_config, **config}
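# NumPy sketch of the power iteration used in `normalize_weights` above
# (illustration only; `_l2_normalize` is a hypothetical helper, not a
# library function):
#
# def spectral_sigma(w, u, iters=1):
#     # w: (rows, cols) reshaped kernel, u: (1, cols) persistent vector.
#     for _ in range(iters):
#         v = _l2_normalize(u @ w.T)  # (1, rows)
#         u = _l2_normalize(v @ w)    # (1, cols)
#     return (v @ w @ u.T)[0, 0]      # estimate of the largest singular value
#
# Dividing the kernel by this sigma constrains its spectral norm to ~1.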
| keras-core/keras_core/layers/normalization/spectral_normalization.py/0 | {
"file_path": "keras-core/keras_core/layers/normalization/spectral_normalization.py",
"repo_id": "keras-core",
"token_count": 1931
} | 36 |
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
from keras_core.utils import argument_validation
from keras_core.utils import backend_utils
from keras_core.utils import tf_utils
from keras_core.utils.module_utils import tensorflow as tf
@keras_core_export("keras_core.layers.HashedCrossing")
class HashedCrossing(Layer):
"""A preprocessing layer which crosses features using the "hashing trick".
This layer performs crosses of categorical features using the "hashing
trick". Conceptually, the transformation can be thought of as:
`hash(concatenate(features)) % num_bins`.
This layer currently only performs crosses of scalar inputs and batches of
scalar inputs. Valid input shapes are `(batch_size, 1)`, `(batch_size,)` and
`()`.
**Note:** This layer relies on TensorFlow ops internally
(`tf.sparse.cross_hashed`). It cannot
be used as part of the compiled computation graph of a model with
any backend other than TensorFlow.
It can however be used with any backend when running eagerly.
It can also always be used as part of an input preprocessing pipeline
with any backend (outside the model itself), which is how we recommend
using this layer.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
num_bins: Number of hash bins.
output_mode: Specification for the output of the layer. Values can be
`"int"`, or `"one_hot"` configuring the layer as follows:
- `"int"`: Return the integer bin indices directly.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as `num_bins`, containing a 1 at the input's
bin index. Defaults to `"int"`.
sparse: Boolean. Only applicable to `"one_hot"` mode and only valid
when using the TensorFlow backend. If `True`, returns
a `SparseTensor` instead of a dense `Tensor`. Defaults to `False`.
**kwargs: Keyword arguments to construct a layer.
Examples:
**Crossing two scalar features.**
>>> layer = keras_core.layers.HashedCrossing(
... num_bins=5)
>>> feat1 = np.array(['A', 'B', 'A', 'B', 'A'])
>>> feat2 = np.array([101, 101, 101, 102, 102])
>>> layer((feat1, feat2))
array([1, 4, 1, 1, 3])
**Crossing and one-hotting two scalar features.**
>>> layer = keras_core.layers.HashedCrossing(
... num_bins=5, output_mode='one_hot')
>>> feat1 = np.array(['A', 'B', 'A', 'B', 'A'])
>>> feat2 = np.array([101, 101, 101, 102, 102])
>>> layer((feat1, feat2))
array([[0., 1., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 1., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0.]], dtype=float32)
"""
def __init__(
self,
num_bins,
output_mode="int",
sparse=False,
name=None,
dtype=None,
**kwargs,
):
if not tf.available:
raise ImportError(
"Layer HashedCrossing requires TensorFlow. "
"Install it via `pip install tensorflow`."
)
if output_mode == "int" and dtype is None:
dtype = "int64"
super().__init__(name=name, dtype=dtype)
if sparse and backend.backend() != "tensorflow":
raise ValueError(
"`sparse` can only be set to True with the "
"TensorFlow backend."
)
argument_validation.validate_string_arg(
output_mode,
allowable_strings=("int", "one_hot"),
caller_name=self.__class__.__name__,
arg_name="output_mode",
)
self.num_bins = num_bins
self.output_mode = output_mode
self.sparse = sparse
self._allow_non_tensor_positional_args = True
self._convert_input_args = False
self.supports_jit = False
def compute_output_shape(self, input_shape):
if (
not len(input_shape) == 2
or not isinstance(input_shape[0], tuple)
or not isinstance(input_shape[1], tuple)
):
raise ValueError(
"Expected as input a list/tuple of 2 tensors. "
f"Received input_shape={input_shape}"
)
if input_shape[0][-1] != input_shape[1][-1]:
raise ValueError(
"Expected the two input tensors to have identical shapes. "
f"Received input_shape={input_shape}"
)
if not input_shape:
if self.output_mode == "int":
return ()
return (self.num_bins,)
if self.output_mode == "int":
return input_shape[0]
if self.output_mode == "one_hot" and input_shape[0][-1] != 1:
return tuple(input_shape[0]) + (self.num_bins,)
return tuple(input_shape[0])[:-1] + (self.num_bins,)
def call(self, inputs):
self._check_at_least_two_inputs(inputs)
inputs = [tf_utils.ensure_tensor(x) for x in inputs]
self._check_input_shape_and_type(inputs)
# Uprank to rank 2 for the cross_hashed op.
rank = len(inputs[0].shape)
if rank < 2:
inputs = [tf_utils.expand_dims(x, -1) for x in inputs]
if rank < 1:
inputs = [tf_utils.expand_dims(x, -1) for x in inputs]
# Perform the cross and convert to dense
outputs = tf.sparse.cross_hashed(inputs, self.num_bins)
outputs = tf.sparse.to_dense(outputs)
# Fix output shape and downrank to match input rank.
if rank == 2:
# tf.sparse.cross_hashed output shape will always be None on the
# last dimension. Given our input shape restrictions, we want to
# force shape 1 instead.
outputs = tf.reshape(outputs, [-1, 1])
elif rank == 1:
outputs = tf.reshape(outputs, [-1])
elif rank == 0:
outputs = tf.reshape(outputs, [])
# Encode outputs.
outputs = tf_utils.encode_categorical_inputs(
outputs,
output_mode=self.output_mode,
depth=self.num_bins,
sparse=self.sparse,
dtype=self.compute_dtype,
)
if (
backend.backend() != "tensorflow"
and not backend_utils.in_tf_graph()
):
outputs = backend.convert_to_tensor(outputs, dtype=self.dtype)
return outputs
def get_config(self):
return {
"num_bins": self.num_bins,
"output_mode": self.output_mode,
"sparse": self.sparse,
"name": self.name,
"dtype": self.dtype,
}
def _check_at_least_two_inputs(self, inputs):
if not isinstance(inputs, (list, tuple)):
raise ValueError(
"`HashedCrossing` should be called on a list or tuple of "
f"inputs. Received: inputs={inputs}"
)
if len(inputs) < 2:
raise ValueError(
"`HashedCrossing` should be called on at least two inputs. "
f"Received: inputs={inputs}"
)
def _check_input_shape_and_type(self, inputs):
first_shape = tuple(inputs[0].shape)
rank = len(first_shape)
if rank > 2 or (rank == 2 and first_shape[-1] != 1):
raise ValueError(
"All `HashedCrossing` inputs should have shape `()`, "
"`(batch_size)` or `(batch_size, 1)`. "
f"Received: inputs={inputs}"
)
if not all(tuple(x.shape) == first_shape for x in inputs[1:]):
raise ValueError(
"All `HashedCrossing` inputs should have equal shape. "
f"Received: inputs={inputs}"
)
if any(
isinstance(x, (tf.RaggedTensor, tf.SparseTensor)) for x in inputs
):
raise ValueError(
"All `HashedCrossing` inputs should be dense tensors. "
f"Received: inputs={inputs}"
)
if not all(
tf.as_dtype(x.dtype).is_integer or x.dtype == tf.string
for x in inputs
):
raise ValueError(
"All `HashedCrossing` inputs should have an integer or "
f"string dtype. Received: inputs={inputs}"
)
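# Conceptual sketch of the "hashing trick" cross (hypothetical helper; the
# real op is TensorFlow's FarmHash-based `tf.sparse.cross_hashed`, so the
# actual bin indices will differ):
#
# def naive_cross_hash(feat1, feat2, num_bins):
#     return [hash((a, b)) % num_bins for a, b in zip(feat1, feat2)]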
| keras-core/keras_core/layers/preprocessing/hashed_crossing.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/hashed_crossing.py",
"repo_id": "keras-core",
"token_count": 3965
} | 37 |
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer
from keras_core.random.seed_generator import SeedGenerator
HORIZONTAL = "horizontal"
VERTICAL = "vertical"
HORIZONTAL_AND_VERTICAL = "horizontal_and_vertical"
@keras_core_export("keras_core.layers.RandomFlip")
class RandomFlip(TFDataLayer):
"""A preprocessing layer which randomly flips images during training.
This layer will flip the images horizontally and/or vertically based on the
`mode` attribute. At inference time, the output will be identical to the
input. Call the layer with `training=True` to flip the input.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
mode: String indicating which flip mode to use. Can be `"horizontal"`,
`"vertical"`, or `"horizontal_and_vertical"`. `"horizontal"` is a
left-right flip and `"vertical"` is a top-bottom flip. Defaults to
`"horizontal_and_vertical"`
seed: Integer. Used to create a random seed.
**kwargs: Base layer keyword arguments, such as
`name` and `dtype`.
"""
def __init__(self, mode=HORIZONTAL_AND_VERTICAL, seed=None, **kwargs):
super().__init__(**kwargs)
self.seed = seed
self.generator = SeedGenerator(seed)
self.mode = mode
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
def _randomly_flip_inputs(self, inputs):
inputs_shape = self.backend.shape(inputs)
unbatched = len(inputs_shape) == 3
if unbatched:
inputs = self.backend.numpy.expand_dims(inputs, axis=0)
inputs_shape = self.backend.shape(inputs)
batch_size = inputs_shape[0]
flipped_outputs = inputs
seed_generator = self._get_seed_generator(self.backend._backend)
if self.mode == HORIZONTAL or self.mode == HORIZONTAL_AND_VERTICAL:
flipped_outputs = self.backend.numpy.where(
self.backend.random.uniform(
shape=(batch_size, 1, 1, 1), seed=seed_generator
)
<= 0.5,
self.backend.numpy.flip(flipped_outputs, axis=-2),
flipped_outputs,
)
if self.mode == VERTICAL or self.mode == HORIZONTAL_AND_VERTICAL:
flipped_outputs = self.backend.numpy.where(
self.backend.random.uniform(
shape=(batch_size, 1, 1, 1), seed=seed_generator
)
<= 0.5,
self.backend.numpy.flip(flipped_outputs, axis=-3),
flipped_outputs,
)
if unbatched:
flipped_outputs = self.backend.numpy.squeeze(
flipped_outputs, axis=0
)
return flipped_outputs
def call(self, inputs, training=True):
inputs = self.backend.cast(inputs, self.compute_dtype)
if training:
return self._randomly_flip_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"seed": self.seed, "mode": self.mode})
return config
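# Minimal NumPy sketch of the per-sample flip logic above (illustration
# only; not the layer's exact RNG behavior):
#
# def random_horizontal_flip(batch, rng):
#     # batch: (batch_size, height, width, channels)
#     coin = rng.uniform(size=(batch.shape[0], 1, 1, 1)) <= 0.5
#     return np.where(coin, np.flip(batch, axis=-2), batch)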
| keras-core/keras_core/layers/preprocessing/random_flip.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/random_flip.py",
"repo_id": "keras-core",
"token_count": 1701
} | 38 |
import tree
import keras_core.backend
from keras_core.layers.layer import Layer
from keras_core.random.seed_generator import SeedGenerator
from keras_core.utils import backend_utils
from keras_core.utils import tracking
class TFDataLayer(Layer):
"""Layer that can safely used in a tf.data pipeline.
The `call()` method must solely rely on `self.backend` ops.
Only supports a single input tensor argument.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.backend = backend_utils.DynamicBackend()
self._allow_non_tensor_positional_args = True
def __call__(self, inputs, **kwargs):
if backend_utils.in_tf_graph() and not isinstance(
inputs, keras_core.KerasTensor
):
# We're in a TF graph, e.g. a tf.data pipeline.
self.backend.set_backend("tensorflow")
inputs = tree.map_structure(
lambda x: self.backend.convert_to_tensor(
x, dtype=self.compute_dtype
),
inputs,
)
switch_convert_input_args = False
if self._convert_input_args:
self._convert_input_args = False
switch_convert_input_args = True
try:
outputs = super().__call__(inputs, **kwargs)
finally:
self.backend.reset()
if switch_convert_input_args:
self._convert_input_args = True
return outputs
return super().__call__(inputs, **kwargs)
@tracking.no_automatic_dependency_tracking
def _get_seed_generator(self, backend=None):
if backend is None or backend == keras_core.backend.backend():
return self.generator
if not hasattr(self, "_backend_generators"):
self._backend_generators = {}
if backend in self._backend_generators:
return self._backend_generators[backend]
seed_generator = SeedGenerator(self.seed, backend=self.backend)
self._backend_generators[backend] = seed_generator
return seed_generator
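# Usage sketch (assumes TensorFlow is installed; `images` is a placeholder
# array): a TFDataLayer subclass can be mapped over a tf.data pipeline even
# when the active Keras backend is JAX or Torch, because `__call__` above
# temporarily switches `self.backend` to "tensorflow" while tracing the
# tf.data graph.
#
# import tensorflow as tf
# ds = tf.data.Dataset.from_tensor_slices(images)
# ds = ds.map(keras_core.layers.RandomFlip("horizontal"))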
| keras-core/keras_core/layers/preprocessing/tf_data_layer.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/tf_data_layer.py",
"repo_id": "keras-core",
"token_count": 970
} | 39 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import layers
from keras_core import ops
from keras_core import testing
class Cropping2DTest(testing.TestCase, parameterized.TestCase):
@parameterized.product(
(
# different cropping values
{"cropping": ((1, 2), (3, 4)), "expected_ranges": ((1, 5), (3, 5))},
# same cropping values with 2 tuples
{"cropping": ((2, 2), (2, 2)), "expected_ranges": ((2, 5), (2, 7))},
# same cropping values with 1 tuple
{"cropping": (2, 2), "expected_ranges": ((2, 5), (2, 7))},
# same cropping values with an integer
{"cropping": 2, "expected_ranges": ((2, 5), (2, 7))},
# cropping right only in both dimensions
{"cropping": ((0, 2), (0, 4)), "expected_ranges": ((0, 5), (0, 5))},
# cropping left only in both dimensions
{"cropping": ((1, 0), (3, 0)), "expected_ranges": ((1, 7), (3, 9))},
# cropping left only in rows dimension
{"cropping": ((1, 0), (3, 4)), "expected_ranges": ((1, 7), (3, 5))},
# cropping left only in cols dimension
{"cropping": ((1, 2), (3, 0)), "expected_ranges": ((1, 5), (3, 9))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
@pytest.mark.requires_trainable_backend
def test_cropping_2d(self, cropping, data_format, expected_ranges):
if data_format == "channels_first":
inputs = np.random.rand(3, 5, 7, 9)
expected_output = ops.convert_to_tensor(
inputs[
:,
:,
expected_ranges[0][0] : expected_ranges[0][1],
expected_ranges[1][0] : expected_ranges[1][1],
]
)
else:
inputs = np.random.rand(3, 7, 9, 5)
expected_output = ops.convert_to_tensor(
inputs[
:,
expected_ranges[0][0] : expected_ranges[0][1],
expected_ranges[1][0] : expected_ranges[1][1],
:,
]
)
self.run_layer_test(
layers.Cropping2D,
init_kwargs={"cropping": cropping, "data_format": data_format},
input_data=inputs,
expected_output=expected_output,
)
def test_cropping_2d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, 7, None, 5))
cropped = layers.Cropping2D(((1, 2), (3, 4)))(input_layer)
self.assertEqual(cropped.shape, (1, 4, None, 5))
@parameterized.product(
(
{"cropping": ((3, 6), (0, 0))},
{"cropping": ((0, 0), (5, 4))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_cropping_2d_errors_if_cropping_more_than_available(
self, cropping, data_format
):
input_layer = layers.Input(batch_shape=(3, 7, 9, 5))
with self.assertRaises(ValueError):
layers.Cropping2D(cropping=cropping, data_format=data_format)(
input_layer
)
def test_cropping_2d_errors_if_cropping_argument_invalid(self):
with self.assertRaises(ValueError):
layers.Cropping2D(cropping=(1,))
with self.assertRaises(ValueError):
layers.Cropping2D(cropping=(1, 2, 3))
with self.assertRaises(ValueError):
layers.Cropping2D(cropping="1")
with self.assertRaises(ValueError):
layers.Cropping2D(cropping=((1, 2), (3, 4, 5)))
with self.assertRaises(ValueError):
layers.Cropping2D(cropping=((1, 2), (3, -4)))
with self.assertRaises(ValueError):
layers.Cropping2D(cropping=((1, 2), "3"))
| keras-core/keras_core/layers/reshaping/cropping2d_test.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/cropping2d_test.py",
"repo_id": "keras-core",
"token_count": 2021
} | 40 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import backend
from keras_core import layers
from keras_core import testing
class UpSampling3dTest(testing.TestCase, parameterized.TestCase):
@parameterized.product(
data_format=["channels_first", "channels_last"],
length_dim1=[2, 3],
length_dim2=[2],
length_dim3=[3],
)
@pytest.mark.requires_trainable_backend
def test_upsampling_3d(
self, data_format, length_dim1, length_dim2, length_dim3
):
num_samples = 2
stack_size = 2
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
if data_format == "channels_first":
inputs = np.random.rand(
num_samples,
stack_size,
input_len_dim1,
input_len_dim2,
input_len_dim3,
)
else:
inputs = np.random.rand(
num_samples,
input_len_dim1,
input_len_dim2,
input_len_dim3,
stack_size,
)
# basic test
if data_format == "channels_first":
expected_output_shape = (2, 2, 20, 22, 24)
else:
expected_output_shape = (2, 20, 22, 24, 2)
self.run_layer_test(
layers.UpSampling3D,
init_kwargs={"size": (2, 2, 2), "data_format": data_format},
input_shape=inputs.shape,
expected_output_shape=expected_output_shape,
expected_output_dtype="float32",
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
layer = layers.UpSampling3D(
size=(length_dim1, length_dim2, length_dim3),
data_format=data_format,
)
layer.build(inputs.shape)
np_output = layer(inputs=backend.Variable(inputs))
if data_format == "channels_first":
assert np_output.shape[2] == length_dim1 * input_len_dim1
assert np_output.shape[3] == length_dim2 * input_len_dim2
assert np_output.shape[4] == length_dim3 * input_len_dim3
else: # channels_last
assert np_output.shape[1] == length_dim1 * input_len_dim1
assert np_output.shape[2] == length_dim2 * input_len_dim2
assert np_output.shape[3] == length_dim3 * input_len_dim3
# compare with numpy
if data_format == "channels_first":
expected_out = np.repeat(inputs, length_dim1, axis=2)
expected_out = np.repeat(expected_out, length_dim2, axis=3)
expected_out = np.repeat(expected_out, length_dim3, axis=4)
else: # channels_last
expected_out = np.repeat(inputs, length_dim1, axis=1)
expected_out = np.repeat(expected_out, length_dim2, axis=2)
expected_out = np.repeat(expected_out, length_dim3, axis=3)
self.assertAllClose(np_output, expected_out)
def test_upsampling_3d_correctness(self):
input_shape = (2, 1, 2, 1, 3)
x = np.arange(np.prod(input_shape)).reshape(input_shape)
self.assertAllClose(
layers.UpSampling3D(size=(2, 2, 2))(x),
np.array(
[
[
[
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
],
[
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
],
],
[
[
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
],
[
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[6.0, 7.0, 8.0], [6.0, 7.0, 8.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
[[9.0, 10.0, 11.0], [9.0, 10.0, 11.0]],
],
],
]
),
)
| keras-core/keras_core/layers/reshaping/up_sampling3d_test.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/up_sampling3d_test.py",
"repo_id": "keras-core",
"token_count": 2928
} | 41 |
import numpy as np
import pytest
from keras_core import initializers
from keras_core import layers
from keras_core import testing
class ConvLSTM3DTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.ConvLSTM3D,
init_kwargs={"filters": 5, "kernel_size": 3, "padding": "same"},
input_shape=(3, 2, 4, 4, 4, 3),
expected_output_shape=(3, 4, 4, 4, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM3D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"recurrent_dropout": 0.5,
},
input_shape=(3, 2, 8, 8, 8, 3),
call_kwargs={"training": True},
expected_output_shape=(3, 6, 6, 6, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM3D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"return_sequences": True,
},
input_shape=(3, 2, 8, 8, 8, 3),
expected_output_shape=(3, 2, 6, 6, 6, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = (
np.arange(1920).reshape((2, 3, 4, 4, 4, 5)).astype("float32") / 100
)
layer = layers.ConvLSTM3D(
filters=2,
kernel_size=3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[
[
[
[0.99149036, 0.99149036],
[0.99180907, 0.99180907],
],
[[0.99258363, 0.99258363], [0.9927925, 0.9927925]],
],
[
[
[0.99413764, 0.99413764],
[0.99420583, 0.99420583],
],
[[0.9943788, 0.9943788], [0.9944278, 0.9944278]],
],
],
[
[
[[0.9950547, 0.9950547], [0.9950547, 0.9950547]],
[[0.9950547, 0.9950547], [0.9950547, 0.9950547]],
],
[
[[0.9950547, 0.9950547], [0.9950547, 0.9950547]],
[[0.9950547, 0.9950547], [0.9950547, 0.9950547]],
],
],
]
),
output,
)
| keras-core/keras_core/layers/rnn/conv_lstm3d_test.py/0 | {
"file_path": "keras-core/keras_core/layers/rnn/conv_lstm3d_test.py",
"repo_id": "keras-core",
"token_count": 2142
} | 42 |
from keras_core.api_export import keras_core_export
from keras_core.losses.loss import Loss
from keras_core.losses.losses import BinaryCrossentropy
from keras_core.losses.losses import CategoricalCrossentropy
from keras_core.losses.losses import CategoricalHinge
from keras_core.losses.losses import CosineSimilarity
from keras_core.losses.losses import Hinge
from keras_core.losses.losses import Huber
from keras_core.losses.losses import KLDivergence
from keras_core.losses.losses import LogCosh
from keras_core.losses.losses import LossFunctionWrapper
from keras_core.losses.losses import MeanAbsoluteError
from keras_core.losses.losses import MeanAbsolutePercentageError
from keras_core.losses.losses import MeanSquaredError
from keras_core.losses.losses import MeanSquaredLogarithmicError
from keras_core.losses.losses import Poisson
from keras_core.losses.losses import SparseCategoricalCrossentropy
from keras_core.losses.losses import SquaredHinge
from keras_core.losses.losses import binary_crossentropy
from keras_core.losses.losses import categorical_crossentropy
from keras_core.losses.losses import categorical_hinge
from keras_core.losses.losses import cosine_similarity
from keras_core.losses.losses import hinge
from keras_core.losses.losses import huber
from keras_core.losses.losses import kl_divergence
from keras_core.losses.losses import log_cosh
from keras_core.losses.losses import mean_absolute_error
from keras_core.losses.losses import mean_absolute_percentage_error
from keras_core.losses.losses import mean_squared_error
from keras_core.losses.losses import mean_squared_logarithmic_error
from keras_core.losses.losses import poisson
from keras_core.losses.losses import sparse_categorical_crossentropy
from keras_core.losses.losses import squared_hinge
from keras_core.saving import serialization_lib
ALL_OBJECTS = {
# Base
Loss,
LossFunctionWrapper,
# Probabilistic
KLDivergence,
Poisson,
BinaryCrossentropy,
CategoricalCrossentropy,
SparseCategoricalCrossentropy,
# Regression
MeanSquaredError,
MeanAbsoluteError,
MeanAbsolutePercentageError,
MeanSquaredLogarithmicError,
CosineSimilarity,
LogCosh,
Huber,
# Hinge
Hinge,
SquaredHinge,
CategoricalHinge,
# Probabilistic
kl_divergence,
poisson,
binary_crossentropy,
categorical_crossentropy,
sparse_categorical_crossentropy,
# Regression
mean_squared_error,
mean_absolute_error,
mean_absolute_percentage_error,
mean_squared_logarithmic_error,
cosine_similarity,
log_cosh,
huber,
# Hinge
hinge,
squared_hinge,
categorical_hinge,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{
"bce": binary_crossentropy,
"BCE": binary_crossentropy,
"kld": kl_divergence,
"KLD": kl_divergence,
"mae": mean_absolute_error,
"MAE": mean_absolute_error,
"mse": mean_squared_error,
"MSE": mean_squared_error,
"mape": mean_absolute_percentage_error,
"MAPE": mean_absolute_percentage_error,
"msle": mean_squared_logarithmic_error,
"MSLE": mean_squared_logarithmic_error,
}
)
@keras_core_export("keras_core.losses.serialize")
def serialize(loss):
"""Serializes loss function or `Loss` instance.
Args:
loss: A Keras `Loss` instance or a loss function.
Returns:
Loss configuration dictionary.
"""
return serialization_lib.serialize_keras_object(loss)
@keras_core_export("keras_core.losses.deserialize")
def deserialize(name, custom_objects=None):
"""Deserializes a serialized loss class/function instance.
Args:
name: Loss configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `Loss` instance or a loss function.
"""
return serialization_lib.deserialize_keras_object(
name,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_core_export("keras_core.losses.get")
def get(identifier):
"""Retrieves a Keras loss as a `function`/`Loss` class instance.
The `identifier` may be the string name of a loss function or `Loss` class.
>>> loss = losses.get("categorical_crossentropy")
>>> type(loss)
<class 'function'>
>>> loss = losses.get("CategoricalCrossentropy")
>>> type(loss)
<class '...CategoricalCrossentropy'>
You can also specify `config` of the loss to this function by passing dict
containing `class_name` and `config` as an identifier. Also note that the
`class_name` must map to a `Loss` class
>>> identifier = {"class_name": "CategoricalCrossentropy",
... "config": {"from_logits": True}}
>>> loss = losses.get(identifier)
>>> type(loss)
<class '...CategoricalCrossentropy'>
Args:
identifier: A loss identifier. One of None or string name of a loss
function/class or loss configuration dictionary or a loss function
or a loss class instance.
Returns:
A Keras loss as a `function`/ `Loss` class instance.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = deserialize(identifier)
else:
obj = identifier
if callable(obj):
return obj
else:
raise ValueError(f"Could not interpret loss identifier: {identifier}")
| keras-core/keras_core/losses/__init__.py/0 | {
"file_path": "keras-core/keras_core/losses/__init__.py",
"repo_id": "keras-core",
"token_count": 2171
} | 43 |
from keras_core import backend
from keras_core import initializers
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.utils.naming import auto_name
from keras_core.utils.tracking import Tracker
@keras_core_export(["keras_core.Metric", "keras_core.metrics.Metric"])
class Metric:
"""Encapsulates metric logic and state.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
```python
m = SomeMetric(...)
for input in ...:
m.update_state(input)
print('Final result: ', m.result())
```
Usage with `compile()` API:
```python
model = keras_core.Sequential()
model.add(keras_core.layers.Dense(64, activation='relu'))
model.add(keras_core.layers.Dense(64, activation='relu'))
model.add(keras_core.layers.Dense(10, activation='softmax'))
model.compile(optimizer=keras_core.optimizers.RMSprop(0.01),
loss=keras_core.losses.CategoricalCrossentropy(),
metrics=[keras_core.metrics.CategoricalAccuracy()])
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
model.fit(data, labels, epochs=10)
```
To be implemented by subclasses:
* `__init__()`: All state variables should be created in this method by
calling `self.add_variable()` like: `self.var = self.add_variable(...)`
* `update_state()`: Has all updates to the state variables like:
`self.var.assign(...)`.
* `result()`: Computes and returns a scalar value or a dict of scalar values
for the metric from the state variables.
Example subclass implementation:
```python
class BinaryTruePositives(Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_variable(
shape=(),
initializer='zeros',
name='true_positives'
)
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = ops.cast(y_true, "bool")
y_pred = ops.cast(y_pred, "bool")
values = ops.logical_and(
ops.equal(y_true, True), ops.equal(y_pred, True))
values = ops.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, self.dtype)
sample_weight = ops.broadcast_to(
sample_weight, ops.shape(values)
)
values = ops.multiply(values, sample_weight)
self.true_positives.assign(self.true_positives + ops.sum(values))
def result(self):
return self.true_positives
```
"""
def __init__(self, dtype=None, name=None):
self.name = name or auto_name(self.__class__.__name__)
self._dtype = dtype
self._metrics = []
self._variables = []
self._tracker = Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
self._variables,
),
"metrics": (lambda x: isinstance(x, Metric), self._metrics),
}
)
def reset_state(self):
"""Reset all of the metric state variables.
This function is called between epochs/steps,
when a metric is evaluated during training.
"""
for v in self.variables:
v.assign(ops.zeros(v.shape, dtype=v.dtype))
def update_state(self, *args, **kwargs):
"""Accumulate statistics for the metric."""
raise NotImplementedError
def stateless_update_state(self, metric_variables, *args, **kwargs):
if len(metric_variables) != len(self.variables):
raise ValueError(
"Argument `metric_variables` must be a list of tensors "
f"corresponding 1:1 to {self.__class__.__name__}().variables. "
f"Received list with length {len(metric_variables)}, but "
f"expected {len(self.variables)} variables."
)
# Gather variable mapping
mapping = list(zip(self.variables, metric_variables))
# Call in stateless scope
with backend.StatelessScope(state_mapping=mapping) as scope:
self.update_state(*args, **kwargs)
# Gather updated variables
metric_variables = []
for v in self.variables:
new_v = scope.get_current_value(v)
if new_v is not None:
metric_variables.append(new_v)
else:
metric_variables.append(v)
return metric_variables
def result(self):
"""Compute the current metric value.
Returns:
A scalar tensor, or a dictionary of scalar tensors.
"""
raise NotImplementedError
def stateless_result(self, metric_variables):
if len(metric_variables) != len(self.variables):
raise ValueError(
"Argument `metric_variables` must be a list of tensors "
f"corresponding 1:1 to {self.__class__.__name__}().variables. "
f"Received list with length {len(metric_variables)}, but "
f"expected {len(self.variables)} variables."
)
# Gather variable mapping
mapping = list(zip(self.variables, metric_variables))
# Call in stateless scope
with backend.StatelessScope(state_mapping=mapping):
res = self.result()
return res
@property
def dtype(self):
return self._dtype
def add_variable(self, shape, initializer, dtype=None, name=None):
self._check_super_called()
with backend.name_scope(self.name, caller=self):
initializer = initializers.get(initializer)
variable = backend.Variable(
initializer=initializer,
shape=shape,
dtype=dtype,
trainable=False,
name=name,
)
# Prevent double-tracking
self._tracker.add_to_store("variables", variable)
return variable
def add_weight(self, shape=(), initializer=None, dtype=None, name=None):
# Backwards compatibility alias
return self.add_variable(
shape=shape, initializer=initializer, dtype=dtype, name=name
)
@property
def variables(self):
variables = self._variables[:]
for metric in self._metrics:
variables.extend(metric._variables)
return variables
def __call__(self, *args, **kwargs):
self._check_super_called()
self.update_state(*args, **kwargs)
return self.result()
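    # Note: calling a metric instance is shorthand for an update followed
    # by a read, e.g. (illustrative):
    #
    #     acc = keras_core.metrics.CategoricalAccuracy()
    #     value = acc(y_true, y_pred)  # update_state() then result()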
def get_config(self):
"""Return the serializable config of the metric."""
return {"name": self.name, "dtype": self.dtype}
@classmethod
def from_config(cls, config):
return cls(**config)
def __setattr__(self, name, value):
# Track Variables, Layers, Metrics
if hasattr(self, "_tracker"):
value = self._tracker.track(value)
return super().__setattr__(name, value)
def _check_super_called(self):
if not hasattr(self, "_tracker"):
raise RuntimeError(
"You forgot to call `super().__init__()` "
"in the `__init__()` method. Go add it!"
)
| keras-core/keras_core/metrics/metric.py/0 | {
"file_path": "keras-core/keras_core/metrics/metric.py",
"repo_id": "keras-core",
"token_count": 3413
} | 44 |
import inspect
import json
import os
import warnings
from keras_core import backend
from keras_core import utils
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
from keras_core.legacy.saving import legacy_h5_format
from keras_core.models.variable_mapping import map_trackable_variables
from keras_core.saving import saving_api
from keras_core.saving import saving_lib
from keras_core.trainers import trainer as base_trainer
from keras_core.utils import io_utils
from keras_core.utils import summary_utils
from keras_core.utils import traceback_utils
if backend.backend() == "tensorflow":
from keras_core.backend.tensorflow.trainer import (
TensorFlowTrainer as Trainer,
)
elif backend.backend() == "jax":
from keras_core.backend.jax.trainer import JAXTrainer as Trainer
elif backend.backend() == "torch":
from keras_core.backend.torch.trainer import TorchTrainer as Trainer
elif backend.backend() == "numpy":
from keras_core.backend.numpy.trainer import NumpyTrainer as Trainer
else:
raise RuntimeError(
f"Backend '{backend.backend()}' must implement the Trainer class."
)
@keras_core_export(["keras_core.Model", "keras_core.models.Model"])
class Model(Trainer, Layer):
"""A model grouping layers into an object with training/inference features.
There are three ways to instantiate a `Model`:
## With the "Functional API"
You start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
inputs = keras_core.Input(shape=(37,))
x = keras_core.layers.Dense(32, activation="relu")(inputs)
outputs = keras_core.layers.Dense(5, activation="softmax")(x)
model = keras_core.Model(inputs=inputs, outputs=outputs)
```
Note: Only dicts, lists, and tuples of input tensors are supported. Nested
    inputs are not supported (e.g. lists of lists or dicts of dicts).
A new Functional API model can also be created by using the
intermediate tensors. This enables you to quickly extract sub-components
of the model.
Example:
```python
inputs = keras_core.Input(shape=(None, None, 3))
processed = keras_core.layers.RandomCrop(width=128, height=128)(inputs)
conv = keras_core.layers.Conv2D(filters=32, kernel_size=3)(processed)
pooling = keras_core.layers.GlobalAveragePooling2D()(conv)
feature = keras_core.layers.Dense(10)(pooling)
full_model = keras_core.Model(inputs, feature)
backbone = keras_core.Model(processed, conv)
activations = keras_core.Model(conv, feature)
```
Note that the `backbone` and `activations` models are not
created with `keras_core.Input` objects, but with the tensors that originate
from `keras_core.Input` objects. Under the hood, the layers and weights will
    be shared across these models, so that the user can train the `full_model`, and
use `backbone` or `activations` to do feature extraction.
The inputs and outputs of the model can be nested structures of tensors as
well, and the created models are standard Functional API models that support
all the existing APIs.
## By subclassing the `Model` class
In that case, you should define your
layers in `__init__()` and you should implement the model's forward pass
in `call()`.
```python
class MyModel(keras_core.Model):
def __init__(self):
super().__init__()
self.dense1 = keras_core.layers.Dense(32, activation="relu")
self.dense2 = keras_core.layers.Dense(5, activation="softmax")
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call()`, which you can use to specify
a different behavior in training and inference:
```python
class MyModel(keras_core.Model):
def __init__(self):
super().__init__()
self.dense1 = keras_core.layers.Dense(32, activation="relu")
self.dense2 = keras_core.layers.Dense(5, activation="softmax")
self.dropout = keras_core.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
    Once the model is created, you can configure the model with losses and
    metrics via `model.compile()`, train the model with `model.fit()`, or
    use the model for prediction with `model.predict()`.
## With the `Sequential` class
In addition, `keras_core.Sequential` is a special case of model where
the model is purely a stack of single-input, single-output layers.
```python
model = keras_core.Sequential([
keras_core.Input(shape=(None, None, 3)),
keras_core.layers.Conv2D(filters=32, kernel_size=3),
])
```
"""
def __new__(cls, *args, **kwargs):
# Signature detection for usage of `Model` as a `Functional`
if functional_init_arguments(args, kwargs) and cls == Model:
from keras_core.models import functional
return functional.Functional(*args, **kwargs)
return super().__new__(cls)
def __init__(self, *args, **kwargs):
Trainer.__init__(self)
from keras_core.models import functional
# Signature detection for usage of a `Model` subclass
# as a `Functional` subclass
if functional_init_arguments(args, kwargs):
inject_functional_model_class(self.__class__)
functional.Functional.__init__(self, *args, **kwargs)
else:
Layer.__init__(self, *args, **kwargs)
def call(self, inputs, training=False):
raise NotImplementedError
@property
def layers(self):
return list(self._flatten_layers(include_self=False, recursive=False))
@layers.setter
def layers(self, _):
raise AttributeError(
"`Model.layers` attribute is reserved and should not be used. "
"Please use another name."
)
@traceback_utils.filter_traceback
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
        Providing both `name` and `index` raises a `ValueError`.
Indices are based on order of horizontal graph traversal (bottom-up).
Args:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
"""
if index is not None and name is not None:
raise ValueError(
"Provide only a layer name or a layer index. Received: "
f"index={index}, name={name}."
)
if index is not None:
if len(self.layers) <= index:
raise ValueError(
f"Was asked to retrieve layer at index {index}"
f" but model only has {len(self.layers)}"
" layers."
)
else:
return self.layers[index]
if name is not None:
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError(
f"No such layer: {name}. Existing layers are: "
f"{list(layer.name for layer in self.layers)}."
)
raise ValueError(
"Provide either a layer name or layer index at `get_layer`."
)
@traceback_utils.filter_traceback
def summary(
self,
line_length=None,
positions=None,
print_fn=None,
expand_nested=False,
show_trainable=False,
layer_range=None,
):
"""Prints a string summary of the network.
Args:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided, becomes
`[0.3, 0.6, 0.70, 1.]`. Defaults to `None`.
            print_fn: Print function to use. By default, prints to `stdout`.
                If writing to `stdout` doesn't work in your environment,
                pass `print_fn=print` instead.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
expand_nested: Whether to expand the nested models.
Defaults to `False`.
show_trainable: Whether to show if a layer is trainable.
Defaults to `False`.
layer_range: a list or tuple of 2 strings,
which is the starting layer name and ending layer name
(both inclusive) indicating the range of layers to be printed
in summary. It also accepts regex patterns instead of exact
                name. In that case, the start layer will be the first
                element matching `layer_range[0]`, and the end layer will
                be the last element matching `layer_range[1]`.
                Defaults to `None`, which considers all layers of the model.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
summary_utils.print_summary(
self,
line_length=line_length,
positions=positions,
print_fn=print_fn,
expand_nested=expand_nested,
show_trainable=show_trainable,
layer_range=layer_range,
)
@traceback_utils.filter_traceback
def save(self, filepath, overwrite=True, **kwargs):
"""Saves a model as a `.keras` file.
Args:
filepath: `str` or `pathlib.Path` object.
Path where to save the model. Must end in `.keras`.
overwrite: Whether we should overwrite any existing model
at the target location, or instead ask the user
via an interactive prompt.
save_format: Format to use, as a string. Only the `"keras"`
format is supported at this time.
Example:
```python
model = keras_core.Sequential(
[
keras_core.layers.Dense(5, input_shape=(3,)),
keras_core.layers.Softmax(),
],
)
model.save("model.keras")
loaded_model = keras_core.saving.load_model("model.keras")
        x = keras_core.random.uniform((10, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
```
Note that `model.save()` is an alias for
`keras_core.saving.save_model()`.
The saved `.keras` file contains:
- The model's configuration (architecture)
- The model's weights
- The model's optimizer's state (if any)
Thus models can be reinstantiated in the exact same state.
"""
include_optimizer = kwargs.pop("include_optimizer", True)
save_format = kwargs.pop("save_format", None)
if kwargs:
raise ValueError(
"The following argument(s) are not supported: "
f"{list(kwargs.keys())}"
)
if save_format:
if str(filepath).endswith((".h5", ".hdf5")) or str(
filepath
).endswith(".keras"):
warnings.warn(
"The `save_format` argument is deprecated in Keras Core. "
"We recommend removing this argument as it can be inferred "
"from the file path. "
f"Received: save_format={save_format}"
)
else:
raise ValueError(
"The `save_format` argument is deprecated in Keras Core. "
"Please remove this argument and pass a file path with "
"either `.keras` or `.h5` extension."
f"Received: save_format={save_format}"
)
try:
exists = os.path.exists(filepath)
except TypeError:
exists = False
if exists and not overwrite:
proceed = io_utils.ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
if str(filepath).endswith(".keras"):
saving_lib.save_model(self, filepath)
elif str(filepath).endswith((".h5", ".hdf5")):
# Deprecation warnings
warnings.warn(
"You are saving your model as an HDF5 file via `model.save()`. "
"This file format is considered legacy. "
"We recommend using instead the native Keras format, "
"e.g. `model.save('my_model.keras')`."
)
legacy_h5_format.save_model_to_hdf5(
self, filepath, overwrite, include_optimizer
)
else:
raise ValueError(
"Invalid filepath extension for saving. "
"Please add either a `.keras` extension for the native Keras "
f"format (recommended) or a `.h5` extension. "
"Use `tf.saved_model.save()` if you want to export a "
"SavedModel for use with TFLite/TFServing/etc. "
f"Received: filepath={filepath}."
)
@traceback_utils.filter_traceback
def save_weights(self, filepath, overwrite=True):
"""Saves all layer weights to a `.weights.h5` file.
Args:
filepath: `str` or `pathlib.Path` object.
Path where to save the model. Must end in `.weights.h5`.
overwrite: Whether we should overwrite any existing model
at the target location, or instead ask the user
via an interactive prompt.
"""
if not str(filepath).endswith(".weights.h5"):
raise ValueError(
"The filename must end in `.weights.h5`. "
f"Received: filepath={filepath}"
)
try:
exists = os.path.exists(filepath)
except TypeError:
exists = False
if exists and not overwrite:
proceed = io_utils.ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
saving_lib.save_weights_only(self, filepath)
@traceback_utils.filter_traceback
def load_weights(self, filepath, skip_mismatch=False, **kwargs):
"""Load weights from a file saved via `save_weights()`.
Weights are loaded based on the network's
topology. This means the architecture should be the same as when the
weights were saved. Note that layers that don't have weights are not
taken into account in the topological ordering, so adding or removing
layers is fine as long as they don't have weights.
**Partial weight loading**
If you have modified your model, for instance by adding a new layer
(with weights) or by changing the shape of the weights of a layer,
you can choose to ignore errors and continue loading
by setting `skip_mismatch=True`. In this case any layer with
mismatching weights will be skipped. A warning will be displayed
for each skipped layer.
Args:
filepath: String, path to the weights file to load.
It can either be a `.weights.h5` file
or a legacy `.h5` weights file.
skip_mismatch: Boolean, whether to skip loading of layers where
there is a mismatch in the number of weights, or a mismatch in
the shape of the weights.
"""
saving_api.load_weights(
self, filepath, skip_mismatch=skip_mismatch, **kwargs
)
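    # Example round trip (hypothetical file names):
    #
    #     model.save_weights("model.weights.h5")
    #     model.load_weights("model.weights.h5")
    #     # Tolerate layers whose weights changed shape or were added:
    #     model.load_weights("model.weights.h5", skip_mismatch=True)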
def build_from_config(self, config):
if not config:
return
if "input_shape" in config:
# Case: all inputs are in the first arg (possibly nested).
if utils.is_default(self.build):
status = self._build_by_run_for_single_pos_arg(
config["input_shape"]
)
else:
try:
self.build(config["input_shape"])
status = True
                except Exception:
status = False
self._build_shapes_dict = config
elif "shapes_dict" in config:
# Case: inputs were recorded as multiple keyword arguments.
if utils.is_default(self.build):
status = self._build_for_kwargs(config["shapes_dict"])
else:
try:
self.build(**config["shapes_dict"])
status = True
                except Exception:
status = False
self._build_shapes_dict = config["shapes_dict"]
if not status:
warnings.warn(
f"Model '{self.name}' had a build config, but the model "
"cannot be built automatically in "
"`build_from_config(config)`. "
"You should implement "
"`def build_from_config(self, config)`, "
"and you might also want to implement the method "
" that generates the config at saving time, "
"`def get_build_config(self)`. "
"The method `build_from_config()` is meant to "
"create the state of the model (i.e. its variables) "
"upon deserialization.",
stacklevel=2,
)
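    # A minimal sketch of the pairing mentioned in the warning above (the
    # attribute name `_input_shape` is illustrative, not part of the API):
    #
    #     def get_build_config(self):
    #         return {"input_shape": self._input_shape}
    #
    #     def build_from_config(self, config):
    #         self.build(config["input_shape"])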
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={...})`.
Args:
**kwargs: Additional keyword arguments to be passed to
`json.dumps()`.
Returns:
A JSON string.
"""
from keras_core.saving import serialization_lib
model_config = serialization_lib.serialize_keras_object(self)
return json.dumps(model_config, **kwargs)
def export(self, filepath, format="tf_saved_model"):
"""[TF backend only]* Create a TF SavedModel artifact for inference
(e.g. via TF-Serving).
**Note:** This can currently only be used with the TF backend.
This method lets you export a model to a lightweight SavedModel artifact
that contains the model's forward pass only (its `call()` method)
and can be served via e.g. TF-Serving. The forward pass is registered
under the name `serve()` (see example below).
The original code of the model (including any custom layers you may
have used) is *no longer* necessary to reload the artifact -- it is
entirely standalone.
Args:
filepath: `str` or `pathlib.Path` object. Path where to save
the artifact.
Example:
```python
# Create the artifact
model.export("path/to/location")
# Later, in a different process / environment...
reloaded_artifact = tf.saved_model.load("path/to/location")
predictions = reloaded_artifact.serve(input_data)
```
If you would like to customize your serving endpoints, you can
use the lower-level `keras_core.export.ExportArchive` class. The
`export()` method relies on `ExportArchive` internally.
"""
from keras_core.export import export_lib
export_lib.export_model(self, filepath)
@classmethod
def from_config(cls, config, custom_objects=None):
from keras_core.models.functional import Functional
functional_config_keys = [
"name",
"layers",
"input_layers",
"output_layers",
]
is_functional_config = all(
key in config for key in functional_config_keys
)
argspec = inspect.getfullargspec(cls.__init__)
functional_init_args = inspect.getfullargspec(Functional.__init__).args[
1:
]
revivable_as_functional = (
cls in {Functional, Model}
or argspec.args[1:] == functional_init_args
or (argspec.varargs == "args" and argspec.varkw == "kwargs")
)
if is_functional_config and revivable_as_functional:
# Revive Functional model
# (but not Functional subclasses with a custom __init__)
from keras_core.models.functional import functional_from_config
return functional_from_config(
cls, config, custom_objects=custom_objects
)
# Either the model has a custom __init__, or the config
# does not contain all the information necessary to
# revive a Functional model. This happens when the user creates
        # subclassed models where `get_config()` returns insufficient
        # information to be considered a Functional model.
        # In this case, we fall back to passing the full config into the
# constructor of the class.
try:
return cls(**config)
except TypeError as e:
raise TypeError(
"Unable to revive model from config. When overriding "
"the `get_config()` method, make sure that the "
"returned config contains all items used as arguments "
f"in the constructor to {cls}, "
"which is the default behavior. "
"You can override this default behavior by defining a "
"`from_config(cls, config)` class method to specify "
"how to create an "
f"instance of {cls.__name__} from its config.\n\n"
f"Received config={config}\n\n"
f"Error encountered during deserialization: {e}"
)
def _get_variable_map(self):
store = {}
map_trackable_variables(self, store=store, visited_trackables=set())
return store
@keras_core_export("keras_core.models.model_from_json")
def model_from_json(json_string, custom_objects=None):
"""Parses a JSON model configuration string and returns a model instance.
Usage:
>>> model = keras_core.Sequential([
... keras_core.layers.Dense(5, input_shape=(3,)),
... keras_core.layers.Softmax()])
>>> config = model.to_json()
>>> loaded_model = keras_core.models.model_from_json(config)
Args:
json_string: JSON string encoding a model configuration.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
"""
from keras_core.saving import serialization_lib
model_config = json.loads(json_string)
return serialization_lib.deserialize_keras_object(
model_config, custom_objects=custom_objects
)
def functional_init_arguments(args, kwargs):
return (
(len(args) == 2)
or (len(args) == 1 and "outputs" in kwargs)
or ("inputs" in kwargs and "outputs" in kwargs)
)
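# For reference, the constructor call patterns detected above
# (illustrative):
#
#     Model(inputs_tensor, outputs_tensor)           # len(args) == 2
#     Model(inputs_tensor, outputs=outputs_tensor)   # 1 arg + "outputs"
#     Model(inputs=inputs_tensor, outputs=outputs_tensor)
#
# Anything else is treated as a plain subclassed `Model`.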
def inject_functional_model_class(cls):
"""Inject `Functional` into the hierarchy of this class if needed."""
from keras_core.models import functional
if cls == Model:
return functional.Functional
# In case there is any multiple inheritance, we stop injecting the
    # class if the Keras `Model` is not in its class hierarchy.
if cls == object:
return object
cls.__bases__ = tuple(
inject_functional_model_class(base) for base in cls.__bases__
)
# Trigger any `__new__` class swapping that needed to happen on `Functional`
# but did not because functional was not in the class hierarchy.
cls.__new__(cls)
return cls
Model.fit.__doc__ = base_trainer.Trainer.fit.__doc__
Model.predict.__doc__ = base_trainer.Trainer.predict.__doc__
Model.evaluate.__doc__ = base_trainer.Trainer.evaluate.__doc__
Model.train_on_batch.__doc__ = base_trainer.Trainer.train_on_batch.__doc__
Model.test_on_batch.__doc__ = base_trainer.Trainer.test_on_batch.__doc__
Model.predict_on_batch.__doc__ = base_trainer.Trainer.predict_on_batch.__doc__
| keras-core/keras_core/models/model.py/0 | {
"file_path": "keras-core/keras_core/models/model.py",
"repo_id": "keras-core",
"token_count": 10660
} | 45 |
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_core import backend
from keras_core import testing
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.ops import nn as knn
class NNOpsDynamicShapeTest(testing.TestCase, parameterized.TestCase):
def test_relu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.relu(x).shape, (None, 2, 3))
def test_relu6(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.relu6(x).shape, (None, 2, 3))
def test_sigmoid(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.sigmoid(x).shape, (None, 2, 3))
def test_softplus(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.softplus(x).shape, (None, 2, 3))
def test_softsign(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.softsign(x).shape, (None, 2, 3))
def test_silu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.silu(x).shape, (None, 2, 3))
def test_log_sigmoid(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.log_sigmoid(x).shape, (None, 2, 3))
def test_leaky_relu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.leaky_relu(x).shape, (None, 2, 3))
def test_hard_sigmoid(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.hard_sigmoid(x).shape, (None, 2, 3))
def test_elu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.elu(x).shape, (None, 2, 3))
def test_selu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.selu(x).shape, (None, 2, 3))
def test_gelu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.gelu(x).shape, (None, 2, 3))
def test_softmax(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.softmax(x).shape, (None, 2, 3))
self.assertEqual(knn.softmax(x, axis=1).shape, (None, 2, 3))
self.assertEqual(knn.softmax(x, axis=-1).shape, (None, 2, 3))
def test_log_softmax(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.log_softmax(x).shape, (None, 2, 3))
self.assertEqual(knn.log_softmax(x, axis=1).shape, (None, 2, 3))
self.assertEqual(knn.log_softmax(x, axis=-1).shape, (None, 2, 3))
def test_max_pool(self):
x = KerasTensor([None, 8, 3])
self.assertEqual(knn.max_pool(x, 2, 1).shape, (None, 7, 3))
self.assertEqual(
knn.max_pool(x, 2, 2, padding="same").shape, (None, 4, 3)
)
x = KerasTensor([None, 8, None, 3])
self.assertEqual(knn.max_pool(x, 2, 1).shape, (None, 7, None, 3))
self.assertEqual(
knn.max_pool(x, 2, 2, padding="same").shape, (None, 4, None, 3)
)
self.assertEqual(
knn.max_pool(x, (2, 2), (2, 2), padding="same").shape,
(None, 4, None, 3),
)
def test_average_pool(self):
x = KerasTensor([None, 8, 3])
self.assertEqual(knn.average_pool(x, 2, 1).shape, (None, 7, 3))
self.assertEqual(
knn.average_pool(x, 2, 2, padding="same").shape, (None, 4, 3)
)
x = KerasTensor([None, 8, None, 3])
self.assertEqual(knn.average_pool(x, 2, 1).shape, (None, 7, None, 3))
self.assertEqual(
knn.average_pool(x, 2, 2, padding="same").shape, (None, 4, None, 3)
)
self.assertEqual(
knn.average_pool(x, (2, 2), (2, 2), padding="same").shape,
(None, 4, None, 3),
)
def test_multi_hot(self):
x = KerasTensor([None, 3, 1])
self.assertEqual(knn.multi_hot(x, 5).shape, (None, 1, 5))
self.assertEqual(knn.multi_hot(x, 5, 1).shape, (None, 3, 1))
self.assertEqual(knn.multi_hot(x, 5, 2).shape, (None, 5, 1))
@parameterized.product(dtype=["float32", "int32"])
def test_multi_hot_dtype(self, dtype):
# dtype tests
x = np.arange(5)
out = knn.multi_hot(x, 5, axis=0, dtype=dtype)
self.assertEqual(backend.standardize_dtype(out.dtype), dtype)
def test_conv(self):
# Test 1D conv.
inputs_1d = KerasTensor([None, 20, 3])
kernel = KerasTensor([4, 3, 2])
self.assertEqual(
knn.conv(inputs_1d, kernel, 1, padding="valid").shape, (None, 17, 2)
)
self.assertEqual(
knn.conv(inputs_1d, kernel, 1, padding="same").shape, (None, 20, 2)
)
self.assertEqual(
knn.conv(inputs_1d, kernel, (2,), dilation_rate=2).shape,
(None, 7, 2),
)
# Test 2D conv.
inputs_2d = KerasTensor([None, 10, None, 3])
kernel = KerasTensor([2, 2, 3, 2])
self.assertEqual(
knn.conv(inputs_2d, kernel, 1, padding="valid").shape,
(None, 9, None, 2),
)
self.assertEqual(
knn.conv(inputs_2d, kernel, 1, padding="same").shape,
(None, 10, None, 2),
)
self.assertEqual(
knn.conv(inputs_2d, kernel, (2, 1), dilation_rate=(2, 1)).shape,
(None, 4, None, 2),
)
# Test 3D conv.
inputs_3d = KerasTensor([None, 8, None, 8, 3])
kernel = KerasTensor([3, 3, 3, 3, 2])
self.assertEqual(
knn.conv(inputs_3d, kernel, 1, padding="valid").shape,
(None, 6, None, 6, 2),
)
self.assertEqual(
knn.conv(inputs_3d, kernel, (2, 1, 2), padding="same").shape,
(None, 4, None, 4, 2),
)
self.assertEqual(
knn.conv(
inputs_3d, kernel, 1, padding="valid", dilation_rate=(1, 2, 2)
).shape,
(None, 6, None, 4, 2),
)
def test_depthwise_conv(self):
# Test 1D depthwise conv.
inputs_1d = KerasTensor([None, 20, 3])
kernel = KerasTensor([4, 3, 1])
self.assertEqual(
knn.depthwise_conv(inputs_1d, kernel, 1, padding="valid").shape,
(None, 17, 3),
)
self.assertEqual(
knn.depthwise_conv(inputs_1d, kernel, (1,), padding="same").shape,
(None, 20, 3),
)
self.assertEqual(
knn.depthwise_conv(inputs_1d, kernel, 2, dilation_rate=2).shape,
(None, 7, 3),
)
# Test 2D depthwise conv.
inputs_2d = KerasTensor([None, 10, 10, 3])
kernel = KerasTensor([2, 2, 3, 1])
self.assertEqual(
knn.depthwise_conv(inputs_2d, kernel, 1, padding="valid").shape,
(None, 9, 9, 3),
)
self.assertEqual(
knn.depthwise_conv(inputs_2d, kernel, (1, 2), padding="same").shape,
(None, 10, 5, 3),
)
self.assertEqual(
knn.depthwise_conv(inputs_2d, kernel, 2, dilation_rate=2).shape,
(None, 4, 4, 3),
)
self.assertEqual(
knn.depthwise_conv(
inputs_2d, kernel, 2, dilation_rate=(2, 1)
).shape,
(None, 4, 5, 3),
)
def test_separable_conv(self):
# Test 1D separable conv.
inputs_1d = KerasTensor([None, 20, 3])
kernel = KerasTensor([4, 3, 2])
pointwise_kernel = KerasTensor([1, 6, 5])
self.assertEqual(
knn.separable_conv(
inputs_1d, kernel, pointwise_kernel, 1, padding="valid"
).shape,
(None, 17, 5),
)
self.assertEqual(
knn.separable_conv(
inputs_1d, kernel, pointwise_kernel, 1, padding="same"
).shape,
(None, 20, 5),
)
self.assertEqual(
knn.separable_conv(
inputs_1d, kernel, pointwise_kernel, 2, dilation_rate=2
).shape,
(None, 7, 5),
)
# Test 2D separable conv.
inputs_2d = KerasTensor([None, 10, 10, 3])
kernel = KerasTensor([2, 2, 3, 2])
pointwise_kernel = KerasTensor([1, 1, 6, 5])
self.assertEqual(
knn.separable_conv(
inputs_2d, kernel, pointwise_kernel, 1, padding="valid"
).shape,
(None, 9, 9, 5),
)
self.assertEqual(
knn.separable_conv(
inputs_2d, kernel, pointwise_kernel, (1, 2), padding="same"
).shape,
(None, 10, 5, 5),
)
self.assertEqual(
knn.separable_conv(
inputs_2d, kernel, pointwise_kernel, 2, dilation_rate=(2, 1)
).shape,
(None, 4, 5, 5),
)
def test_conv_transpose(self):
inputs_1d = KerasTensor([None, 4, 3])
kernel = KerasTensor([2, 5, 3])
self.assertEqual(
knn.conv_transpose(inputs_1d, kernel, 2).shape, (None, 8, 5)
)
self.assertEqual(
knn.conv_transpose(inputs_1d, kernel, 2, padding="same").shape,
(None, 8, 5),
)
self.assertEqual(
knn.conv_transpose(
inputs_1d, kernel, 5, padding="valid", output_padding=4
).shape,
(None, 21, 5),
)
inputs_2d = KerasTensor([None, 4, 4, 3])
kernel = KerasTensor([2, 2, 5, 3])
self.assertEqual(
knn.conv_transpose(inputs_2d, kernel, 2).shape, (None, 8, 8, 5)
)
self.assertEqual(
knn.conv_transpose(inputs_2d, kernel, (2, 2), padding="same").shape,
(None, 8, 8, 5),
)
self.assertEqual(
knn.conv_transpose(
inputs_2d, kernel, (5, 5), padding="valid", output_padding=4
).shape,
(None, 21, 21, 5),
)
def test_one_hot(self):
x = KerasTensor([None, 3, 1])
self.assertEqual(knn.one_hot(x, 5).shape, (None, 3, 1, 5))
self.assertEqual(knn.one_hot(x, 5, 1).shape, (None, 5, 3, 1))
self.assertEqual(knn.one_hot(x, 5, 2).shape, (None, 3, 5, 1))
@parameterized.product(dtype=["float32", "int32"])
def test_one_hot_dtype(self, dtype):
# dtype tests
x = np.arange(5)
out = knn.one_hot(x, 5, axis=0, dtype=dtype)
self.assertEqual(backend.standardize_dtype(out.dtype), dtype)
def test_moments(self):
x = KerasTensor([None, 3, 4])
self.assertEqual(knn.moments(x, axes=[0])[0].shape, (3, 4))
self.assertEqual(knn.moments(x, axes=[0, 1])[0].shape, (4,))
self.assertEqual(
knn.moments(x, axes=[0, 1], keepdims=True)[0].shape, (1, 1, 4)
)
self.assertEqual(knn.moments(x, axes=[1])[0].shape, (None, 4))
self.assertEqual(knn.moments(x, axes=[1, 2])[0].shape, (None,))
self.assertEqual(
knn.moments(x, axes=[1, 2], keepdims=True)[0].shape, (None, 1, 1)
)
class NNOpsStaticShapeTest(testing.TestCase):
def test_relu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.relu(x).shape, (1, 2, 3))
def test_relu6(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.relu6(x).shape, (1, 2, 3))
def test_sigmoid(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.sigmoid(x).shape, (1, 2, 3))
def test_softplus(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.softplus(x).shape, (1, 2, 3))
def test_softsign(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.softsign(x).shape, (1, 2, 3))
def test_silu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.silu(x).shape, (1, 2, 3))
def test_log_sigmoid(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.log_sigmoid(x).shape, (1, 2, 3))
def test_leaky_relu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.leaky_relu(x).shape, (1, 2, 3))
def test_hard_sigmoid(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.hard_sigmoid(x).shape, (1, 2, 3))
def test_elu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.elu(x).shape, (1, 2, 3))
def test_selu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.selu(x).shape, (1, 2, 3))
def test_gelu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.gelu(x).shape, (1, 2, 3))
def test_softmax(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.softmax(x).shape, (1, 2, 3))
self.assertEqual(knn.softmax(x, axis=1).shape, (1, 2, 3))
self.assertEqual(knn.softmax(x, axis=-1).shape, (1, 2, 3))
def test_log_softmax(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.log_softmax(x).shape, (1, 2, 3))
self.assertEqual(knn.log_softmax(x, axis=1).shape, (1, 2, 3))
self.assertEqual(knn.log_softmax(x, axis=-1).shape, (1, 2, 3))
def test_max_pool(self):
x = KerasTensor([1, 8, 3])
self.assertEqual(knn.max_pool(x, 2, 1).shape, (1, 7, 3))
self.assertEqual(knn.max_pool(x, 2, 2, padding="same").shape, (1, 4, 3))
x = KerasTensor([1, 8, 8, 3])
self.assertEqual(knn.max_pool(x, 2, 1).shape, (1, 7, 7, 3))
self.assertEqual(
knn.max_pool(x, 2, 2, padding="same").shape, (1, 4, 4, 3)
)
self.assertEqual(
knn.max_pool(x, (2, 2), (2, 2), padding="same").shape, (1, 4, 4, 3)
)
def test_average_pool(self):
x = KerasTensor([1, 8, 3])
self.assertEqual(knn.average_pool(x, 2, 1).shape, (1, 7, 3))
self.assertEqual(
knn.average_pool(x, 2, 2, padding="same").shape, (1, 4, 3)
)
x = KerasTensor([1, 8, 8, 3])
self.assertEqual(knn.average_pool(x, 2, 1).shape, (1, 7, 7, 3))
self.assertEqual(
knn.average_pool(x, 2, 2, padding="same").shape, (1, 4, 4, 3)
)
self.assertEqual(
knn.average_pool(x, (2, 2), (2, 2), padding="same").shape,
(1, 4, 4, 3),
)
def test_conv(self):
# Test 1D conv.
inputs_1d = KerasTensor([2, 20, 3])
kernel = KerasTensor([4, 3, 2])
self.assertEqual(
knn.conv(inputs_1d, kernel, 1, padding="valid").shape, (2, 17, 2)
)
self.assertEqual(
knn.conv(inputs_1d, kernel, 1, padding="same").shape, (2, 20, 2)
)
self.assertEqual(
knn.conv(inputs_1d, kernel, (2,), dilation_rate=2).shape, (2, 7, 2)
)
# Test 2D conv.
inputs_2d = KerasTensor([2, 10, 10, 3])
kernel = KerasTensor([2, 2, 3, 2])
self.assertEqual(
knn.conv(inputs_2d, kernel, 1, padding="valid").shape, (2, 9, 9, 2)
)
self.assertEqual(
knn.conv(inputs_2d, kernel, 1, padding="same").shape, (2, 10, 10, 2)
)
self.assertEqual(
knn.conv(inputs_2d, kernel, (2, 1), dilation_rate=(2, 1)).shape,
(2, 4, 9, 2),
)
# Test 3D conv.
inputs_3d = KerasTensor([2, 8, 8, 8, 3])
kernel = KerasTensor([3, 3, 3, 3, 2])
self.assertEqual(
knn.conv(inputs_3d, kernel, 1, padding="valid").shape,
(2, 6, 6, 6, 2),
)
self.assertEqual(
knn.conv(inputs_3d, kernel, (2, 1, 2), padding="same").shape,
(2, 4, 8, 4, 2),
)
self.assertEqual(
knn.conv(
inputs_3d, kernel, 1, padding="valid", dilation_rate=(1, 2, 2)
).shape,
(2, 6, 4, 4, 2),
)
def test_depthwise_conv(self):
# Test 1D depthwise conv.
inputs_1d = KerasTensor([2, 20, 3])
kernel = KerasTensor([4, 3, 1])
self.assertEqual(
knn.depthwise_conv(inputs_1d, kernel, 1, padding="valid").shape,
(2, 17, 3),
)
self.assertEqual(
knn.depthwise_conv(inputs_1d, kernel, (1,), padding="same").shape,
(2, 20, 3),
)
self.assertEqual(
knn.depthwise_conv(inputs_1d, kernel, 2, dilation_rate=2).shape,
(2, 7, 3),
)
# Test 2D depthwise conv.
inputs_2d = KerasTensor([2, 10, 10, 3])
kernel = KerasTensor([2, 2, 3, 1])
self.assertEqual(
knn.depthwise_conv(inputs_2d, kernel, 1, padding="valid").shape,
(2, 9, 9, 3),
)
self.assertEqual(
knn.depthwise_conv(inputs_2d, kernel, (1, 2), padding="same").shape,
(2, 10, 5, 3),
)
self.assertEqual(
knn.depthwise_conv(inputs_2d, kernel, 2, dilation_rate=2).shape,
(2, 4, 4, 3),
)
self.assertEqual(
knn.depthwise_conv(
inputs_2d, kernel, 2, dilation_rate=(2, 1)
).shape,
(2, 4, 5, 3),
)
def test_separable_conv(self):
# Test 1D separable conv.
inputs_1d = KerasTensor([2, 20, 3])
kernel = KerasTensor([4, 3, 2])
pointwise_kernel = KerasTensor([1, 6, 5])
self.assertEqual(
knn.separable_conv(
inputs_1d, kernel, pointwise_kernel, 1, padding="valid"
).shape,
(2, 17, 5),
)
self.assertEqual(
knn.separable_conv(
inputs_1d, kernel, pointwise_kernel, 1, padding="same"
).shape,
(2, 20, 5),
)
self.assertEqual(
knn.separable_conv(
inputs_1d, kernel, pointwise_kernel, 2, dilation_rate=2
).shape,
(2, 7, 5),
)
# Test 2D separable conv.
inputs_2d = KerasTensor([2, 10, 10, 3])
kernel = KerasTensor([2, 2, 3, 2])
pointwise_kernel = KerasTensor([1, 1, 6, 5])
self.assertEqual(
knn.separable_conv(
inputs_2d, kernel, pointwise_kernel, 1, padding="valid"
).shape,
(2, 9, 9, 5),
)
self.assertEqual(
knn.separable_conv(
inputs_2d, kernel, pointwise_kernel, (1, 2), padding="same"
).shape,
(2, 10, 5, 5),
)
self.assertEqual(
knn.separable_conv(
inputs_2d, kernel, pointwise_kernel, 2, dilation_rate=(2, 1)
).shape,
(2, 4, 5, 5),
)
def test_conv_transpose(self):
inputs_1d = KerasTensor([2, 4, 3])
kernel = KerasTensor([2, 5, 3])
self.assertEqual(
knn.conv_transpose(inputs_1d, kernel, 2).shape, (2, 8, 5)
)
self.assertEqual(
knn.conv_transpose(inputs_1d, kernel, 2, padding="same").shape,
(2, 8, 5),
)
self.assertEqual(
knn.conv_transpose(
inputs_1d, kernel, 5, padding="valid", output_padding=4
).shape,
(2, 21, 5),
)
inputs_2d = KerasTensor([2, 4, 4, 3])
kernel = KerasTensor([2, 2, 5, 3])
self.assertEqual(
knn.conv_transpose(inputs_2d, kernel, 2).shape, (2, 8, 8, 5)
)
self.assertEqual(
knn.conv_transpose(inputs_2d, kernel, (2, 2), padding="same").shape,
(2, 8, 8, 5),
)
self.assertEqual(
knn.conv_transpose(
inputs_2d, kernel, (5, 5), padding="valid", output_padding=4
).shape,
(2, 21, 21, 5),
)
def test_batched_and_unbatched_inputs_multi_hot(self):
x = KerasTensor([2, 3, 1])
        unbatched_input = KerasTensor([5])
self.assertEqual(knn.multi_hot(unbatched_input, 5, -1).shape, (5,))
self.assertEqual(knn.multi_hot(x, 5).shape, (2, 1, 5))
self.assertEqual(knn.multi_hot(x, 5, 1).shape, (2, 3, 1))
self.assertEqual(knn.multi_hot(x, 5, 2).shape, (2, 5, 1))
def test_one_hot(self):
x = KerasTensor([2, 3, 1])
self.assertEqual(knn.one_hot(x, 5).shape, (2, 3, 1, 5))
self.assertEqual(knn.one_hot(x, 5, 1).shape, (2, 5, 3, 1))
self.assertEqual(knn.one_hot(x, 5, 2).shape, (2, 3, 5, 1))
def test_binary_crossentropy(self):
x1 = KerasTensor([2, 3, 1])
x2 = KerasTensor([2, 3, 1])
self.assertEqual(knn.binary_crossentropy(x1, x2).shape, (2, 3, 1))
def test_categorical_crossentropy(self):
x1 = KerasTensor([2, 3, 4])
x2 = KerasTensor([2, 3, 4])
self.assertEqual(knn.categorical_crossentropy(x1, x2).shape, (2, 3))
def test_sparse_categorical_crossentropy(self):
x1 = KerasTensor([2, 3], dtype="int32")
x2 = KerasTensor([2, 3, 4])
self.assertEqual(
knn.sparse_categorical_crossentropy(x1, x2).shape, (2, 3)
)
def test_moments(self):
x = KerasTensor([2, 3, 4])
self.assertEqual(knn.moments(x, axes=[0])[0].shape, (3, 4))
self.assertEqual(knn.moments(x, axes=[0, 1])[0].shape, (4,))
self.assertEqual(
knn.moments(x, axes=[0, 1], keepdims=True)[0].shape, (1, 1, 4)
)
class NNOpsCorrectnessTest(testing.TestCase, parameterized.TestCase):
def test_relu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(knn.relu(x), [0, 0, 1, 2, 3])
def test_relu6(self):
x = np.array([-1, 0, 1, 2, 3, 4, 5, 6, 7], dtype=np.float32)
self.assertAllClose(knn.relu6(x), [0, 0, 1, 2, 3, 4, 5, 6, 6])
def test_sigmoid(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.sigmoid(x), [0.26894143, 0.5, 0.7310586, 0.880797, 0.95257413]
)
def test_softplus(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.softplus(x),
[0.31326166, 0.6931472, 1.3132616, 2.126928, 3.0485873],
)
def test_softsign(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(knn.softsign(x), [-0.5, 0, 0.5, 0.6666667, 0.75])
def test_silu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.silu(x),
[-0.26894143, 0, 0.7310586, 1.7615942, 2.8577223],
)
def test_log_sigmoid(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.log_sigmoid(x),
[-1.3132616, -0.6931472, -0.31326166, -0.126928, -0.04858732],
)
def test_leaky_relu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.leaky_relu(x),
[-0.2, 0, 1, 2, 3],
)
def test_hard_sigmoid(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.hard_sigmoid(x),
[0.33333334, 0.5, 0.6666667, 0.8333334, 1.0],
)
def test_elu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.elu(x),
[-0.63212055, 0, 1, 2, 3],
)
self.assertAllClose(
knn.elu(x, alpha=0.5),
[-0.31606027, 0, 1, 2, 3],
)
def test_selu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.selu(x),
[-1.1113307, 0.0, 1.050701, 2.101402, 3.152103],
)
def test_gelu(self):
x = np.array([-1, 0, 1, 2, 3], dtype=np.float32)
self.assertAllClose(
knn.gelu(x),
[-0.15880796, 0.0, 0.841192, 1.9545977, 2.9963627],
)
def test_softmax(self):
x = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
self.assertAllClose(
knn.softmax(x, axis=None), # Reduce on all axes.
[[0.045015, 0.122364, 0.33262], [0.045015, 0.122364, 0.33262]],
)
self.assertAllClose(
knn.softmax(x, axis=0),
[[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
)
self.assertAllClose(
knn.softmax(x, axis=-1),
[
[0.09003057, 0.24472848, 0.66524094],
[0.09003057, 0.24472848, 0.66524094],
],
)
self.assertAllClose(
knn.softmax(x), # Default axis should be -1.
[
[0.09003057, 0.24472848, 0.66524094],
[0.09003057, 0.24472848, 0.66524094],
],
)
def test_log_softmax(self):
x = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
self.assertAllClose(
knn.log_softmax(x, axis=None), # Reduce on all axes.
[
[-3.100753, -2.100753, -1.100753],
[-3.100753, -2.100753, -1.100753],
],
)
self.assertAllClose(
knn.log_softmax(x, axis=0),
[
[-0.693147, -0.693147, -0.693147],
[-0.693147, -0.693147, -0.693147],
],
)
self.assertAllClose(
knn.log_softmax(x, axis=-1),
[
[-2.407606, -1.407606, -0.407606],
[-2.407606, -1.407606, -0.407606],
],
)
self.assertAllClose(
knn.log_softmax(x), # Default axis should be -1.
[
[-2.407606, -1.407606, -0.407606],
[-2.407606, -1.407606, -0.407606],
],
)
def test_max_pool(self):
# Test 1D max pooling.
x = np.arange(120, dtype=float).reshape([2, 20, 3])
self.assertAllClose(
knn.max_pool(x, 2, 1, padding="valid"),
tf.nn.max_pool1d(x, 2, 1, padding="VALID"),
)
self.assertAllClose(
knn.max_pool(x, 2, 2, padding="same"),
tf.nn.max_pool1d(x, 2, 2, padding="SAME"),
)
# Test 2D max pooling.
x = np.arange(540, dtype=float).reshape([2, 10, 9, 3])
self.assertAllClose(
knn.max_pool(x, 2, 1, padding="valid"),
tf.nn.max_pool2d(x, 2, 1, padding="VALID"),
)
self.assertAllClose(
knn.max_pool(x, 2, (2, 1), padding="same"),
tf.nn.max_pool2d(x, 2, (2, 1), padding="SAME"),
)
def test_average_pool_valid_padding(self):
        # Test 1D average pooling.
x = np.arange(120, dtype=float).reshape([2, 20, 3])
self.assertAllClose(
knn.average_pool(x, 2, 1, padding="valid"),
tf.nn.avg_pool1d(x, 2, 1, padding="VALID"),
)
        # Test 2D average pooling.
x = np.arange(540, dtype=float).reshape([2, 10, 9, 3])
self.assertAllClose(
knn.average_pool(x, 2, 1, padding="valid"),
tf.nn.avg_pool2d(x, 2, 1, padding="VALID"),
)
@pytest.mark.skipif(
backend.backend() == "torch",
reason="Torch outputs differently from TF when using `same` padding.",
)
def test_average_pool_same_padding(self):
        # Test 1D average pooling.
x = np.arange(120, dtype=float).reshape([2, 20, 3])
self.assertAllClose(
knn.average_pool(x, 2, 2, padding="same"),
tf.nn.avg_pool1d(x, 2, 2, padding="SAME"),
)
        # Test 2D average pooling.
x = np.arange(540, dtype=float).reshape([2, 10, 9, 3])
self.assertAllClose(
knn.average_pool(x, 2, (2, 1), padding="same"),
tf.nn.avg_pool2d(x, 2, (2, 1), padding="SAME"),
)
@parameterized.product(
strides=(1, 2, 3),
padding=("valid", "same"),
dilation_rate=(1, 2),
)
def test_conv_1d(self, strides, padding, dilation_rate):
if strides > 1 and dilation_rate > 1:
pytest.skip("Unsupported configuration")
inputs_1d = np.arange(120, dtype=float).reshape([2, 20, 3])
kernel = np.arange(24, dtype=float).reshape([4, 3, 2])
outputs = knn.conv(
inputs_1d,
kernel,
strides=strides,
padding=padding,
dilation_rate=dilation_rate,
)
expected = tf.nn.conv1d(
inputs_1d,
kernel,
strides,
padding=padding.upper(),
dilations=dilation_rate,
)
self.assertAllClose(outputs, expected)
def test_conv_2d(self):
inputs_2d = np.arange(600, dtype=float).reshape([2, 10, 10, 3])
kernel = np.arange(24, dtype=float).reshape([2, 2, 3, 2])
outputs = knn.conv(inputs_2d, kernel, 1, padding="valid")
expected = tf.nn.conv2d(inputs_2d, kernel, 1, padding="VALID")
self.assertAllClose(outputs, expected)
outputs = knn.conv(inputs_2d, kernel, (1, 2), padding="valid")
expected = tf.nn.conv2d(inputs_2d, kernel, (1, 2), padding="VALID")
self.assertAllClose(outputs, expected)
outputs = knn.conv(inputs_2d, kernel, (1, 2), padding="same")
expected = tf.nn.conv2d(inputs_2d, kernel, (1, 2), padding="SAME")
self.assertAllClose(outputs, expected)
outputs = knn.conv(inputs_2d, kernel, 2, padding="same")
expected = tf.nn.conv2d(inputs_2d, kernel, 2, padding="SAME")
self.assertAllClose(outputs, expected)
# Test group > 1.
inputs_2d = np.ones([2, 10, 10, 4])
kernel = np.ones([2, 2, 2, 6])
outputs = knn.conv(
inputs_2d, kernel, 2, padding="same", dilation_rate=1
)
expected = tf.nn.conv2d(
inputs_2d, kernel, 2, padding="SAME", dilations=1
)
self.assertAllClose(outputs, expected)
outputs = knn.conv(
inputs_2d,
kernel,
1,
padding="same",
dilation_rate=(2, 1),
)
expected = tf.nn.conv2d(
inputs_2d,
kernel,
1,
padding="SAME",
dilations=(2, 1),
)
self.assertAllClose(outputs, expected)
def test_conv_3d(self):
inputs_3d = np.arange(3072, dtype=float).reshape([2, 8, 8, 8, 3])
kernel = np.arange(162, dtype=float).reshape([3, 3, 3, 3, 2])
outputs = knn.conv(inputs_3d, kernel, 1, padding="valid")
expected = tf.nn.conv3d(
inputs_3d, kernel, (1, 1, 1, 1, 1), padding="VALID"
)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
outputs = knn.conv(
inputs_3d,
kernel,
(1, 1, 1),
padding="valid",
dilation_rate=(1, 1, 1),
)
expected = tf.nn.conv3d(
inputs_3d,
kernel,
(1, 1, 1, 1, 1),
padding="VALID",
dilations=(1, 1, 1, 1, 1),
)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
outputs = knn.conv(inputs_3d, kernel, 2, padding="valid")
expected = tf.nn.conv3d(
inputs_3d, kernel, (1, 2, 2, 2, 1), padding="VALID"
)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
outputs = knn.conv(inputs_3d, kernel, 2, padding="same")
expected = tf.nn.conv3d(
inputs_3d, kernel, (1, 2, 2, 2, 1), padding="SAME"
)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
def test_depthwise_conv_2d(self):
inputs_2d = np.arange(600, dtype=float).reshape([2, 10, 10, 3])
kernel = np.arange(24, dtype=float).reshape([2, 2, 3, 2])
outputs = knn.depthwise_conv(inputs_2d, kernel, 1, padding="valid")
expected = tf.nn.depthwise_conv2d(
inputs_2d, kernel, (1, 1, 1, 1), padding="VALID"
)
self.assertAllClose(outputs, expected)
outputs = knn.depthwise_conv(inputs_2d, kernel, (1, 1), padding="valid")
expected = tf.nn.depthwise_conv2d(
inputs_2d, kernel, (1, 1, 1, 1), padding="VALID"
)
self.assertAllClose(outputs, expected)
outputs = knn.depthwise_conv(inputs_2d, kernel, (2, 2), padding="same")
expected = tf.nn.depthwise_conv2d(
inputs_2d, kernel, (1, 2, 2, 1), padding="SAME"
)
self.assertAllClose(outputs, expected)
outputs = knn.depthwise_conv(
inputs_2d, kernel, 1, padding="same", dilation_rate=(2, 2)
)
expected = tf.nn.depthwise_conv2d(
inputs_2d, kernel, (1, 1, 1, 1), padding="SAME", dilations=(2, 2)
)
self.assertAllClose(outputs, expected)
def test_separable_conv_2d(self):
        # Test 2D separable conv.
inputs_2d = np.arange(600, dtype=float).reshape([2, 10, 10, 3])
depthwise_kernel = np.arange(24, dtype=float).reshape([2, 2, 3, 2])
pointwise_kernel = np.arange(72, dtype=float).reshape([1, 1, 6, 12])
outputs = knn.separable_conv(
inputs_2d, depthwise_kernel, pointwise_kernel, 1, padding="valid"
)
expected = tf.nn.separable_conv2d(
inputs_2d,
depthwise_kernel,
pointwise_kernel,
(1, 1, 1, 1),
padding="VALID",
)
self.assertAllClose(outputs, expected)
outputs = knn.separable_conv(
inputs_2d,
depthwise_kernel,
pointwise_kernel,
(1, 1),
padding="valid",
)
expected = tf.nn.separable_conv2d(
inputs_2d,
depthwise_kernel,
pointwise_kernel,
(1, 1, 1, 1),
padding="VALID",
)
self.assertAllClose(outputs, expected)
outputs = knn.separable_conv(
inputs_2d, depthwise_kernel, pointwise_kernel, 2, padding="same"
)
expected = tf.nn.separable_conv2d(
inputs_2d,
depthwise_kernel,
pointwise_kernel,
(1, 2, 2, 1),
padding="SAME",
)
self.assertAllClose(outputs, expected)
outputs = knn.separable_conv(
inputs_2d,
depthwise_kernel,
pointwise_kernel,
1,
padding="same",
dilation_rate=(2, 2),
)
expected = tf.nn.separable_conv2d(
inputs_2d,
depthwise_kernel,
pointwise_kernel,
(1, 1, 1, 1),
padding="SAME",
dilations=(2, 2),
)
self.assertAllClose(outputs, expected)
def test_conv_transpose_1d(self):
inputs_1d = np.arange(24, dtype=float).reshape([2, 4, 3])
kernel = np.arange(30, dtype=float).reshape([2, 5, 3])
outputs = knn.conv_transpose(inputs_1d, kernel, 2, padding="valid")
expected = tf.nn.conv_transpose(
inputs_1d, kernel, [2, 8, 5], 2, padding="VALID"
)
self.assertAllClose(outputs, expected)
outputs = knn.conv_transpose(inputs_1d, kernel, 2, padding="same")
expected = tf.nn.conv_transpose(
inputs_1d, kernel, [2, 8, 5], 2, padding="SAME"
)
self.assertAllClose(outputs, expected)
def test_conv_transpose_2d(self):
inputs_2d = np.arange(96, dtype=float).reshape([2, 4, 4, 3])
kernel = np.arange(60, dtype=float).reshape([2, 2, 5, 3])
outputs = knn.conv_transpose(inputs_2d, kernel, (2, 2), padding="valid")
expected = tf.nn.conv_transpose(
inputs_2d, kernel, [2, 8, 8, 5], (2, 2), padding="VALID"
)
self.assertAllClose(outputs, expected)
outputs = knn.conv_transpose(inputs_2d, kernel, 2, padding="same")
expected = tf.nn.conv_transpose(
inputs_2d, kernel, [2, 8, 8, 5], 2, padding="SAME"
)
self.assertAllClose(outputs, expected)
def test_one_hot(self):
# Test 1D one-hot.
indices_1d = np.array([0, 1, 2, 3])
self.assertAllClose(
knn.one_hot(indices_1d, 4), tf.one_hot(indices_1d, 4)
)
self.assertAllClose(
knn.one_hot(indices_1d, 4, axis=0),
tf.one_hot(indices_1d, 4, axis=0),
)
# Test 2D one-hot.
indices_2d = np.array([[0, 1], [2, 3]])
self.assertAllClose(
knn.one_hot(indices_2d, 4), tf.one_hot(indices_2d, 4)
)
self.assertAllClose(
knn.one_hot(indices_2d, 4, axis=2),
tf.one_hot(indices_2d, 4, axis=2),
)
self.assertAllClose(
knn.one_hot(indices_2d, 4, axis=1),
tf.one_hot(indices_2d, 4, axis=1),
)
# Test 1D one-hot with negative inputs
indices_1d = np.array([0, -1, -1, 3])
self.assertAllClose(
knn.one_hot(indices_1d, 4), tf.one_hot(indices_1d, 4)
)
def test_binary_crossentropy(self):
# Test with from_logits=False
target = np.array([[0.1], [0.9], [0.2], [1.0]])
output = np.array([[0.1], [0.2], [0.3], [0.4]])
result = knn.binary_crossentropy(target, output, from_logits=False)
self.assertAllClose(
result,
np.array([[0.32508277], [1.47080801], [0.52613434], [0.91629048]]),
)
# Test with from_logits=True
target = np.array([[0.1], [0.9], [0.2], [1.0]])
output = np.array([[0.1], [0.2], [0.3], [0.4]])
result = knn.binary_crossentropy(target, output, from_logits=True)
self.assertAllClose(
result,
np.array([[0.73439666], [0.61813887], [0.79435524], [0.51301525]]),
)
# Test with output clipping
target = np.array([[0.1], [0.9], [0.2], [1.0]])
output = np.array([[0.99], [-0.2], [0.9], [-0.4]])
result = knn.binary_crossentropy(target, output, from_logits=True)
self.assertAllClose(
result,
np.array([[1.206961], [0.778139], [1.061154], [0.913015]]),
)
def test_categorical_crossentropy(self):
target = np.array(
[
[0.33008796, 0.0391289, 0.9503603],
[0.80376694, 0.92363342, 0.19147756],
]
)
output = np.array(
[
[0.23446431, 0.35822914, 0.06683268],
[0.3413979, 0.05420256, 0.81619654],
]
)
# Test from_logits=False
result = knn.categorical_crossentropy(
target, output, from_logits=False, axis=-1
)
self.assertAllClose(result, np.array([2.54095299, 3.96374412]))
# Test axis
result = knn.categorical_crossentropy(
target, output, from_logits=False, axis=0
)
self.assertAllClose(
result, np.array([0.71683073, 1.87988172, 2.46810762])
)
# Test from_logits=True
result = knn.categorical_crossentropy(
target, output, from_logits=True, axis=-1
)
self.assertAllClose(result, np.array([1.59419954, 2.49880593]))
# Test with output clipping
output = np.array(
[
[1.23446431, -0.35822914, 1.06683268],
[0.3413979, -0.05420256, 0.81619654],
]
)
result = knn.categorical_crossentropy(
target, output, from_logits=True, axis=-1
)
self.assertAllClose(result, np.array([1.16825923, 2.55436813]))
def test_sparse_categorical_crossentropy(self):
target = np.array([0, 1, 2])
output = np.array(
[[0.9, 0.05, 0.05], [0.05, 0.89, 0.06], [0.05, 0.01, 0.94]]
)
result = knn.sparse_categorical_crossentropy(target, output)
self.assertAllClose(result, [0.105361, 0.116534, 0.061875])
output = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
result = knn.sparse_categorical_crossentropy(
target, output, from_logits=True
)
self.assertAllClose(result, [0.001822, 0.000459, 0.169846])
def test_multi_hot(self):
# Test 1D multi-hot.
indices_1d = np.array([0, 1, 2, 3])
expected_output_1d = np.array([1, 1, 1, 1])
self.assertAllClose(knn.multi_hot(indices_1d, 4), expected_output_1d)
# Test 2D multi-hot.
indices_2d = np.array([[0, 1], [2, 3]])
expected_output_2d = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
self.assertAllClose(knn.multi_hot(indices_2d, 4), expected_output_2d)
# Test 1D multi-hot with negative inputs
indices_1d = np.array([0, -1, -1, 3])
expected_output_1d = np.array([1, 0, 0, 1])
self.assertAllClose(knn.multi_hot(indices_1d, 4), expected_output_1d)
def test_moments(self):
# Test 1D moments
x = np.array([0, 1, 2, 3, 4, 100, -200]).astype(np.float32)
mean, variance = knn.moments(x, axes=[0])
self.assertAllClose(mean, np.mean(x), atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, np.var(x), atol=1e-5, rtol=1e-5)
# Test batch statistics for 4D moments (batch, height, width, channels)
x = np.random.uniform(size=(2, 28, 28, 3)).astype(np.float32)
mean, variance = knn.moments(x, axes=[0])
self.assertAllClose(mean, np.mean(x, axis=0), atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, np.var(x, axis=0), atol=1e-5, rtol=1e-5)
# Test global statistics for 4D moments (batch, height, width, channels)
x = np.random.uniform(size=(2, 28, 28, 3)).astype(np.float32)
mean, variance = knn.moments(x, axes=[0, 1, 2])
expected_mean = np.mean(x, axis=(0, 1, 2))
expected_variance = np.var(x, axis=(0, 1, 2))
self.assertAllClose(mean, expected_mean, atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, expected_variance, atol=1e-5, rtol=1e-5)
# Test keepdims
x = np.random.uniform(size=(2, 28, 28, 3)).astype(np.float32)
mean, variance = knn.moments(x, axes=[0, 1, 2], keepdims=True)
expected_mean = np.mean(x, axis=(0, 1, 2), keepdims=True)
expected_variance = np.var(x, axis=(0, 1, 2), keepdims=True)
self.assertAllClose(mean, expected_mean, atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, expected_variance, atol=1e-5, rtol=1e-5)
# Test float16 which causes overflow
x = np.array(
[-741.0, 353.2, 1099.0, -1807.0, 502.8, -83.4, 333.5, -130.9],
dtype=np.float16,
)
mean, variance = knn.moments(x, axes=[0])
expected_mean = np.mean(x.astype(np.float32)).astype(np.float16)
        # The output variance is clipped to the max value of np.float16
        # because it overflows.
expected_variance = np.finfo(np.float16).max
self.assertAllClose(mean, expected_mean, atol=1e-5, rtol=1e-5)
self.assertAllClose(variance, expected_variance, atol=1e-5, rtol=1e-5)
| keras-core/keras_core/ops/nn_test.py/0 | {
"file_path": "keras-core/keras_core/ops/nn_test.py",
"repo_id": "keras-core",
"token_count": 23433
} | 46 |
from keras_core import initializers
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.optimizers import optimizer
@keras_core_export(["keras_core.optimizers.Adagrad"])
class Adagrad(optimizer.Optimizer):
"""Optimizer that implements the Adagrad algorithm.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
Args:
learning_rate: A float, a
`keras_core.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adagrad`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use `1.0`.
initial_accumulator_value: Floating point value. Starting value for the
            accumulators (per-parameter sums of squared gradients). Must be
            non-negative.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Duchi et al., 2011](
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
"""
def __init__(
self,
learning_rate=0.001,
initial_accumulator_value=0.1,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
name="adagrad",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
name=name,
**kwargs,
)
self.initial_accumulator_value = initial_accumulator_value
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._accumulators = []
initializer = initializers.Constant(self.initial_accumulator_value)
for var in var_list:
self._accumulators.append(
self.add_variable(
shape=var.shape,
initializer=initializer,
dtype=var.dtype,
name="accumulator",
)
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
accumulator = self._accumulators[self._get_variable_index(variable)]
accumulator.assign(accumulator + gradient * gradient)
variable.assign(
variable - (lr * gradient / ops.sqrt(accumulator + self.epsilon))
)
def get_config(self):
config = super().get_config()
config.update(
{
"initial_accumulator_value": self.initial_accumulator_value,
"epsilon": self.epsilon,
}
)
return config
Adagrad.__doc__ = Adagrad.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| keras-core/keras_core/optimizers/adagrad.py/0 | {
"file_path": "keras-core/keras_core/optimizers/adagrad.py",
"repo_id": "keras-core",
"token_count": 1642
} | 47 |
# flake8: noqa
import numpy as np
from keras_core import backend
from keras_core import ops
from keras_core import testing
from keras_core.optimizers.nadam import Nadam
class NadamTest(testing.TestCase):
def test_config(self):
optimizer = Nadam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Nadam(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.4686, 1.4686, 2.4686, 3.4686], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Nadam(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Nadam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281, 0.4281],
[-0.1738, -0.1731, -0.1726, -0.1723, -0.1721, -0.172, -0.1719, -0.1718, -0.1718, -0.1717],
[-0.7115, -0.7103, -0.7096, -0.7092, -0.709, -0.7088, -0.7086, -0.7085, -0.7085, -0.7084],
[-1.2335, -1.2322, -1.2313, -1.2309, -1.2306, -1.2304, -1.2302, -1.2301, -1.23, -1.2299],
[-1.7492, -1.7478, -1.7469, -1.7464, -1.7461, -1.7459, -1.7457, -1.7456, -1.7455, -1.7454]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Nadam(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
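        # [100.0, 100.0] has L2 norm 100 * sqrt(2); clipping to norm 1
        # rescales each component to 1 / sqrt(2) = sqrt(2) / 2.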
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Nadam(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| keras-core/keras_core/optimizers/nadam_test.py/0 | {
"file_path": "keras-core/keras_core/optimizers/nadam_test.py",
"repo_id": "keras-core",
"token_count": 1802
} | 48 |
import numpy as np
from keras_core import backend
from keras_core import regularizers
from keras_core import testing
# TODO: serialization tests
class RegularizersTest(testing.TestCase):
def test_l1(self):
value = np.random.random((4, 4))
x = backend.Variable(value)
y = regularizers.L1(0.1)(x)
self.assertAllClose(y, 0.1 * np.sum(np.abs(value)))
def test_l2(self):
value = np.random.random((4, 4))
x = backend.Variable(value)
y = regularizers.L2(0.1)(x)
self.assertAllClose(y, 0.1 * np.sum(np.square(value)))
def test_l1_l2(self):
value = np.random.random((4, 4))
x = backend.Variable(value)
y = regularizers.L1L2(l1=0.1, l2=0.2)(x)
self.assertAllClose(
y, 0.1 * np.sum(np.abs(value)) + 0.2 * np.sum(np.square(value))
)
def test_orthogonal_regularizer(self):
value = np.random.random((4, 4))
x = backend.Variable(value)
regularizers.OrthogonalRegularizer(factor=0.1, mode="rows")(x)
# TODO
def test_get_method(self):
obj = regularizers.get("l1l2")
self.assertIsInstance(obj, regularizers.L1L2)
obj = regularizers.get("l1")
self.assertIsInstance(obj, regularizers.L1)
obj = regularizers.get("l2")
self.assertIsInstance(obj, regularizers.L2)
obj = regularizers.get("orthogonal_regularizer")
self.assertIsInstance(obj, regularizers.OrthogonalRegularizer)
obj = regularizers.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
regularizers.get("typo")
| keras-core/keras_core/regularizers/regularizers_test.py/0 | {
"file_path": "keras-core/keras_core/regularizers/regularizers_test.py",
"repo_id": "keras-core",
"token_count": 755
} | 49 |
import numpy as np
import pytest
from absl.testing import parameterized
import keras_core
from keras_core import backend
from keras_core import initializers
from keras_core import layers
from keras_core import losses
from keras_core import metrics
from keras_core import ops
from keras_core import optimizers
from keras_core import testing
from keras_core.callbacks.callback import Callback
from keras_core.optimizers.rmsprop import RMSprop
if backend.backend() == "jax":
from keras_core.backend.jax.trainer import JAXTrainer as Trainer
elif backend.backend() == "torch":
from keras_core.backend.torch.trainer import TorchTrainer as Trainer
elif backend.backend() == "tensorflow":
from keras_core.backend.tensorflow.trainer import (
TensorFlowTrainer as Trainer,
)
elif backend.backend() == "numpy":
from keras_core.backend.numpy.trainer import NumpyTrainer as Trainer
else:
raise ImportError(f"Invalid backend: {backend.backend()}")
# A model is just a layer mixed in with a Trainer.
class ExampleModel(Trainer, layers.Dense):
def __init__(self, units):
layers.Dense.__init__(
self,
units=units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
Trainer.__init__(self)
class StructModel(Trainer, layers.Layer):
def __init__(self, units):
layers.Layer.__init__(self)
Trainer.__init__(self)
self.dense_1 = layers.Dense(
units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
self.dense_2 = layers.Dense(
units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
def call(self, x):
return {
"y_one": self.dense_1(x["x_one"]),
"y_two": self.dense_2(x["x_two"]),
}
class ListModel(Trainer, layers.Layer):
def __init__(self, units):
layers.Layer.__init__(self)
Trainer.__init__(self)
self.dense_1 = layers.Dense(
units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
self.dense_2 = layers.Dense(
units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
def call(self, x):
assert isinstance(x, (list, tuple))
return self.dense_1(x[0]) + self.dense_2(x[1])
class TrainingTestingLayer(Trainer, layers.Layer):
def __init__(self, **kwargs):
layers.Layer.__init__(self, **kwargs)
Trainer.__init__(self)
def call(self, x, training=False):
if training:
return x
return x * 0
class TestTrainer(testing.TestCase, parameterized.TestCase):
@pytest.mark.requires_trainable_backend
def test_metric_tracking(self):
class ModelWithMetric(Trainer, layers.Dense):
def __init__(self, units):
layers.Dense.__init__(
self,
units=units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
Trainer.__init__(self)
self.my_metric = metrics.MeanSquaredError(name="my_metric")
model = ModelWithMetric(units=3)
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
x = np.ones((2, 4))
y = np.zeros((2, 3))
# Fit the model to make sure compile_metrics are built
model.fit(x, y, batch_size=2, epochs=1)
# The model should have 3 metrics: loss_tracker, compile_metrics,
# my_metric.
self.assertEqual(len(model.metrics), 3)
self.assertEqual(model.metrics[0], model._loss_tracker)
self.assertEqual(model.metrics[1], model.my_metric)
self.assertEqual(model.metrics[2], model._compile_metrics)
# All metrics should have their weights created
self.assertEqual(len(model._loss_tracker.variables), 2)
self.assertEqual(len(model._compile_metrics.variables), 2)
self.assertEqual(len(model.my_metric.variables), 2)
# And those weights are tracked at the model level
self.assertEqual(len(model.metrics_variables), 6)
self.assertLen(model.non_trainable_variables, 0)
# Models with only weighted_metrics should have the same 3 metrics
model_weighted = ModelWithMetric(units=3)
model_weighted.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
weighted_metrics=[metrics.MeanSquaredError()],
)
model_weighted.fit(
x,
y,
batch_size=2,
epochs=1,
sample_weight=np.ones(2),
)
self.assertEqual(len(model_weighted.metrics), 3)
@parameterized.named_parameters(
[
("eager", True, False, False),
("graph_fn", False, False, False),
("jit", False, True, False),
("steps_per_epoch_eager", True, False, True),
("steps_per_epoch_graph_fn", False, False, True),
("steps_per_epoch_jit", False, True, True),
]
)
@pytest.mark.requires_trainable_backend
def test_fit_flow(self, run_eagerly, jit_compile, use_steps_per_epoch):
if not run_eagerly and not jit_compile and use_steps_per_epoch:
if backend.backend() == "tensorflow":
self.skipTest(
"TODO: Graph mode without XLA in TF backend leads to "
"unexpected logs, need further checks."
)
model = ExampleModel(units=3)
epochs = 3
batch_size = 20
steps_per_epoch = 7
dataset_size = batch_size * (steps_per_epoch - 2)
x = np.ones((dataset_size, 4))
y = np.zeros((dataset_size, 3))
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=run_eagerly,
jit_compile=jit_compile,
)
history = model.fit(
x,
y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch if use_steps_per_epoch else None,
epochs=epochs,
)
history = history.history
self.assertIn("loss", history)
self.assertIn("mean_squared_error", history)
self.assertAllClose(
history["mean_squared_error"],
[14.402393, 10.991339, 8.388159],
atol=6.1051628e-1,
)
@parameterized.named_parameters(
[
("eager", True, False),
("graph_fn", False, False),
("jit", False, True),
]
)
def test_evaluate_flow(self, run_eagerly, jit_compile):
model = ExampleModel(units=3)
x = np.ones((100, 4))
y = np.zeros((100, 3))
batch_size = 16
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=run_eagerly,
jit_compile=jit_compile,
)
output = model.evaluate(x, y, batch_size=batch_size)
self.assertAllClose(output, [16.0, 16.0])
output = model.evaluate(x, y, batch_size=batch_size, return_dict=True)
self.assertIsInstance(output, dict)
self.assertIn("loss", output)
self.assertIn("mean_squared_error", output)
self.assertAllClose(output["mean_squared_error"], 16.0)
@parameterized.named_parameters(
[
("eager", True, False),
("graph_fn", False, False),
("jit", False, True),
]
)
def test_predict_flow(self, run_eagerly, jit_compile):
# Test basic example
model = ExampleModel(units=3)
model.run_eagerly = run_eagerly
model.jit_compile = jit_compile
x = np.ones((100, 4))
batch_size = 16
outputs = model.predict(x, batch_size=batch_size)
self.assertAllClose(outputs, 4 * np.ones((100, 3)))
# Test with input/output structs
model = StructModel(units=3)
model.run_eagerly = run_eagerly
model.jit_compile = jit_compile
x = {
"x_one": np.ones((100, 4)),
"x_two": np.ones((100, 4)),
}
batch_size = 16
outputs = model.predict(x, batch_size=batch_size)
self.assertIsInstance(outputs, dict)
self.assertEqual(len(outputs), 2)
self.assertAllClose(outputs["y_one"], 4 * np.ones((100, 3)))
self.assertAllClose(outputs["y_two"], 4 * np.ones((100, 3)))
@pytest.mark.skipif(
backend.backend() != "jax",
reason="Memory optimization is only implemented in JAX",
)
def test_fit_eval_flow_for_jax_model_weights(self):
model = ExampleModel(units=3)
epochs = 3
batch_size = 20
steps_per_epoch = 7
dataset_size = batch_size * (steps_per_epoch - 2)
x = np.ones((dataset_size, 4))
y = np.zeros((dataset_size, 3))
class ModelWeightCheck(Callback):
def __init__(self):
super().__init__()
                # Note that we access the model via `self._model`, since
                # accessing `self.model` would trigger a sync of the jax
                # training state back to the model.
def on_train_batch_begin(self, batch, logs=None):
for v in self._model.trainable_variables:
assert v._value is None
for v in self._model.non_trainable_variables:
assert v._value is None
for v in self._model.optimizer.variables:
assert v._value is None
for v in self._model.metrics_variables:
assert v._value is None
def on_test_batch_begin(self, batch, logs=None):
for v in self._model.non_trainable_variables:
assert v._value is None
for v in self._model.metrics_variables:
assert v._value is None
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
model.fit(
x,
y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[ModelWeightCheck()],
)
model.evaluate(
x,
y,
batch_size=batch_size,
callbacks=[ModelWeightCheck()],
)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() == "torch",
reason="`steps_per_execution` not implemented for torch yet",
)
def test_steps_per_execution_steps_count(self):
class StepCount(Callback):
def __init__(self):
super().__init__()
self.count = 0
self.batches = [0, 3, 6]
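                # 100 samples / batch_size 16 -> 7 batches per epoch; with
                # steps_per_execution=3, `on_batch_begin` fires on batches
                # 0, 3 and 6 only.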
def on_batch_begin(self, batch, logs=None):
assert batch == self.batches[self.count]
self.count += 1
x = np.ones((100, 4))
y = np.ones((100, 1))
batch_size = 16
model = ExampleModel(units=1)
model.compile(
loss="mse",
optimizer="adam",
steps_per_execution=3,
)
step_count = StepCount()
model.fit(x=x, y=y, batch_size=16, callbacks=[step_count], verbose=0)
self.assertEqual(step_count.count, 3)
model_2 = ExampleModel(units=1)
model_2.compile(loss="mse", optimizer="adam", steps_per_execution=1)
model_2.fit(x=x, y=y, batch_size=batch_size, verbose=0)
self.assertAllClose(model.get_weights(), model_2.get_weights())
self.assertAllClose(
model.predict(x, batch_size=batch_size),
model_2.predict(x, batch_size=batch_size),
)
self.assertAllClose(model.evaluate(x, y), model_2.evaluate(x, y))
@pytest.mark.skipif(
backend.backend() == "torch",
reason="`steps_per_execution` not implemented for torch yet",
)
def test_steps_per_execution_steps_count_without_training(self):
class StepCount(Callback):
def __init__(self):
super().__init__()
self.test_count = 0
self.predict_count = 0
self.batches = [0, 3, 6]
def on_test_batch_begin(self, batch, logs=None):
assert batch == self.batches[self.test_count]
self.test_count += 1
def on_predict_batch_begin(self, batch, logs=None):
assert batch == self.batches[self.predict_count]
self.predict_count += 1
x = np.ones((100, 4))
y = np.ones((100, 1))
batch_size = 16
model = ExampleModel(units=1)
model.compile(loss="mse", steps_per_execution=3)
step_count = StepCount()
model.predict(x, batch_size=batch_size, callbacks=[step_count])
self.assertEqual(step_count.predict_count, 3)
model.evaluate(x, y, batch_size=batch_size, callbacks=[step_count])
self.assertEqual(step_count.test_count, 3)
@pytest.mark.requires_trainable_backend
def test_adds_loss_scaling_optimizer(self):
model = TrainingTestingLayer(dtype="mixed_float16")
model.compile(optimizer="rmsprop", loss="mse")
x = np.ones((128, 1))
y = np.zeros((128, 1))
model.fit(x, y, batch_size=32)
self.assertIsInstance(model.optimizer, optimizers.LossScaleOptimizer)
model = TrainingTestingLayer(dtype="mixed_float16")
model.compile(optimizer="rmsprop", loss="mse", auto_scale_loss=False)
x = np.ones((128, 1))
y = np.zeros((128, 1))
model.fit(x, y, batch_size=32)
self.assertIsInstance(model.optimizer, RMSprop)
model = TrainingTestingLayer(dtype="mixed_bfloat16")
model.compile(optimizer="rmsprop", loss="mse")
x = np.ones((128, 1))
y = np.zeros((128, 1))
model.fit(x, y, batch_size=32)
self.assertIsInstance(model.optimizer, RMSprop)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() == "torch",
reason="half precision unsupported on torch CPU.",
)
def test_loss_scaling_prevents_underflow(self):
class DeepModel(Trainer, layers.Layer):
def __init__(self):
layers.Layer.__init__(self, dtype="mixed_float16")
Trainer.__init__(self)
self.layers = []
for _ in range(15):
                    # Sigmoid has small gradients, so stacking many layers
                    # will eventually underflow in float16.
self.layers.append(
layers.Dense(
1,
use_bias=False,
kernel_initializer="ones",
activation="sigmoid",
dtype="mixed_float16",
)
)
def call(self, x):
for layer in self.layers:
x = layer(x)
return x
loss = losses.MeanSquaredError()
# Blow up any gradient updates, so underflow is obvious.
optimizer = optimizers.SGD(learning_rate=1e9)
model = DeepModel()
model.compile(optimizer, loss=loss, auto_scale_loss=False)
model.fit(np.ones((1, 1)), np.ones((1, 1)), batch_size=1)
first_kernel = model.layers[0].kernel
# Without autoscaling, the first dense will not update.
self.assertEqual(first_kernel, np.ones_like(first_kernel))
# Blow up any gradient updates, so underflow is obvious.
optimizer = optimizers.SGD(learning_rate=1e9)
model = DeepModel()
model.compile(optimizer, loss=loss, auto_scale_loss=True)
model.fit(np.ones((1, 1)), np.ones((1, 1)), batch_size=1)
first_kernel = model.layers[0].kernel
# With autoscaling, the first dense will update.
self.assertNotEqual(first_kernel, np.ones_like(first_kernel))
@pytest.mark.requires_trainable_backend
def test_training_arg(self):
model = TrainingTestingLayer()
model.compile(optimizer="rmsprop", loss="mse")
x = np.ones((128, 1))
y = np.zeros((128, 1))
history = model.fit(x, y, batch_size=32)
self.assertAllClose(history.history["loss"], [1.0])
val_loss = model.evaluate(x, y, batch_size=32)
self.assertAllClose(val_loss, 0.0)
preds = model.predict(x)
self.assertAllClose(preds, np.zeros((128, 1)))
@parameterized.named_parameters(
[
("eager", True, False),
("graph_fn", False, False),
("jit", False, True),
]
)
@pytest.mark.requires_trainable_backend
def test_on_batch_methods(self, run_eagerly, jit_compile):
model = ExampleModel(units=3)
x = np.ones((100, 4))
y = np.zeros((100, 3))
sw = np.arange(100).reshape((100,)).astype("float32") / 50.0
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=run_eagerly,
jit_compile=jit_compile,
)
logs = model.train_on_batch(x, y)
self.assertIsInstance(logs, list)
self.assertEqual(len(logs), 2)
self.assertAlmostEqual(logs[0], 16.0)
logs = model.train_on_batch(x, y, return_dict=True)
self.assertIsInstance(logs, dict)
self.assertEqual(len(logs), 2)
self.assertAlmostEqual(logs["loss"], 15.579)
logs = model.test_on_batch(x, y)
self.assertIsInstance(logs, list)
self.assertEqual(len(logs), 2)
self.assertAlmostEqual(logs[0], 15.173)
logs = model.test_on_batch(x, y, return_dict=True)
self.assertIsInstance(logs, dict)
self.assertEqual(len(logs), 2)
self.assertAlmostEqual(logs["loss"], 14.97)
output = model.predict_on_batch(x)
self.assertIsInstance(output, np.ndarray)
self.assertAllClose(output[0], np.array([3.789511, 3.789511, 3.789511]))
# With sample weights
logs = model.train_on_batch(x, y, sw)
self.assertAlmostEqual(logs[0], 14.819)
logs = model.test_on_batch(x, y, sw)
self.assertAlmostEqual(logs[0], 14.595)
output = model.predict_on_batch(x)
self.assertAllClose(output[0], np.array([3.689468, 3.689468, 3.689468]))
# With class weights
logs = model.train_on_batch(x, y, class_weight={1: 0.3, 0: 0.2})
self.assertAlmostEqual(logs[0], 12.899)
@parameterized.named_parameters(
[
("eager", True, False),
("graph_fn", False, False),
("jit", False, True),
]
)
def test_on_batch_methods_without_training(self, run_eagerly, jit_compile):
model = ExampleModel(units=3)
x = np.ones((100, 4))
y = np.zeros((100, 3))
model.compile(
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=run_eagerly,
jit_compile=jit_compile,
)
output = model.predict_on_batch(x)
self.assertIsInstance(output, np.ndarray)
self.assertAllClose(output[0], np.array([4.0, 4.0, 4.0]))
logs = model.test_on_batch(x, y)
self.assertIsInstance(logs, list)
self.assertEqual(len(logs), 2)
self.assertAlmostEqual(logs[0], 16.0)
logs = model.test_on_batch(x, y, return_dict=True)
self.assertIsInstance(logs, dict)
self.assertEqual(len(logs), 2)
self.assertAlmostEqual(logs["loss"], 16.0)
def test_nested_input_predict(self):
# https://github.com/keras-team/keras-core/issues/325
class TupleInputModel(keras_core.Model):
def call(self, inputs):
a, b = inputs
return a + b
model = TupleInputModel()
x1, x2 = np.random.rand(2, 3, 4)
out = model.predict((x1, x2))
self.assertEqual(out.shape, (3, 4))
class DictInputModel(keras_core.Model):
def call(self, inputs):
return inputs["a"] + inputs["b"]
model = DictInputModel()
x1, x2 = np.random.rand(2, 3, 4)
out = model.predict({"a": x1, "b": x2})
self.assertEqual(out.shape, (3, 4))
@pytest.mark.requires_trainable_backend
def test_callback_methods_keys(self):
class CustomCallback(Callback):
def on_train_begin(self, logs=None):
keys = sorted(list(logs.keys()))
assert keys == []
def on_train_end(self, logs=None):
keys = sorted(list(logs.keys()))
assert keys == [
"loss",
"mean_absolute_error",
"val_loss",
"val_mean_absolute_error",
]
def on_epoch_begin(self, epoch, logs=None):
keys = sorted(list(logs.keys()))
assert keys == []
def on_epoch_end(self, epoch, logs=None):
keys = sorted(list(logs.keys()))
assert keys == [
"loss",
"mean_absolute_error",
"val_loss",
"val_mean_absolute_error",
]
def on_test_begin(self, logs=None):
keys = sorted(list(logs.keys()))
assert keys == []
def on_test_end(self, logs=None):
keys = sorted(list(logs.keys()))
assert keys == ["loss", "mean_absolute_error"]
def on_predict_begin(self, logs=None):
keys = sorted(list(logs.keys()))
assert keys == []
def on_predict_end(self, logs=None):
keys = sorted(list(logs.keys()))
assert keys == []
def on_train_batch_begin(self, batch, logs=None):
keys = sorted(list(logs.keys()))
assert keys == []
def on_train_batch_end(self, batch, logs=None):
keys = sorted(list(logs.keys()))
assert keys == ["loss", "mean_absolute_error"]
def on_test_batch_begin(self, batch, logs=None):
keys = sorted(list(logs.keys()))
assert keys == []
def on_test_batch_end(self, batch, logs=None):
keys = sorted(list(logs.keys()))
assert keys == ["loss", "mean_absolute_error"]
def on_predict_batch_begin(self, batch, logs=None):
keys = sorted(list(logs.keys()))
assert keys == []
def on_predict_batch_end(self, batch, logs=None):
keys = sorted(list(logs.keys()))
assert keys == ["outputs"]
model = ExampleModel(units=3)
model.compile(
optimizer="adam", loss="mse", metrics=["mean_absolute_error"]
)
x = np.ones((16, 4))
y = np.zeros((16, 3))
x_test = np.ones((16, 4))
y_test = np.zeros((16, 3))
model.fit(
x,
y,
callbacks=[CustomCallback()],
batch_size=4,
validation_data=(x_test, y_test),
)
model.evaluate(x_test, y_test, batch_size=4)
model.predict(x_test, batch_size=4)
@pytest.mark.requires_trainable_backend
def test_internal_only_loss(self):
class LossLayer(layers.Layer):
def call(self, x):
self.add_loss(ops.sum(x))
return x
model = keras_core.Sequential(
[
layers.Dense(2),
LossLayer(),
layers.Dense(1),
]
)
model.compile(optimizer="adam")
x = np.ones((16, 2))
y = np.zeros((16, 1))
model.fit(x, y, batch_size=4)
def get_layer(self):
class ExampleLayer(keras_core.Layer):
def call(self, x):
return x * 2
return ExampleLayer
def get_model(self):
class ExampleModel(keras_core.Model):
def call(self, x):
return x * 2
return ExampleModel
def get_functional(self):
ExampleLayer = self.get_layer()
class ExampleFunctional(keras_core.Functional):
def __init__(self, input_shape=(None,)):
inputs = keras_core.Input(input_shape)
outputs = ExampleLayer()(inputs)
super().__init__(inputs=inputs, outputs=outputs)
return ExampleFunctional
@parameterized.named_parameters(
[
{
"testcase_name": "model",
"model_class": "get_model",
},
{
"testcase_name": "layer",
"model_class": "get_layer",
},
{
"testcase_name": "functional",
"model_class": "get_functional",
},
]
)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
keras_core.backend.backend() != "tensorflow",
reason="Only tensorflow supports raggeds",
)
def test_trainer_with_raggeds(self, model_class):
from keras_core.utils.module_utils import tensorflow as tf
def loss_fn(y, y_pred, sample_weight=None):
return 0
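        # `model_class` names one of the getter methods above; calling it
        # returns the class, which is then instantiated.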
model = getattr(self, model_class)()()
x = tf.ragged.constant([[1], [2, 3]])
# test forward pass
y = model(x)
self.assertEqual(type(y), tf.RaggedTensor)
# test training
if model_class in ["get_model", "get_functional"]:
model.compile(optimizer="adam", loss=loss_fn)
model.fit(x, x)
y = model.predict(x)
self.assertEqual(type(y), tf.RaggedTensor)
# test if everything works with the sequential model
model = keras_core.Sequential([model])
model.compile(optimizer="adam", loss=loss_fn)
model.fit(x, x)
y = model.predict(x)
self.assertEqual(type(y), tf.RaggedTensor)
def test_predict_dropout(self):
        # Test that `predict` with a seeded dropout op behaves consistently
        # across batches (the assertions below check the masks match).
inputs = layers.Input((20,))
outputs = layers.Dropout(0.5, seed=1337)(inputs, training=True)
model = keras_core.Model(inputs, outputs)
out1 = model.predict(np.ones((4, 20)), batch_size=2)
self.assertGreater(5, np.sum(np.abs(out1[:2, :] - out1[2:4, :])))
out2 = model.predict_on_batch(np.ones((2, 20)))
out3 = model.predict_on_batch(np.ones((2, 20)))
self.assertGreater(5, np.sum(np.abs(out2 - out3)))
@pytest.mark.requires_trainable_backend
def test_recompile(self):
inputs = layers.Input((2,))
outputs = layers.Dense(3)(inputs)
model = keras_core.Model(inputs, outputs)
model.compile(
optimizer="sgd", loss="mse", metrics=["mean_squared_error"]
)
history_1 = model.fit(np.ones((3, 2)), np.ones((3, 3))).history
eval_out_1 = model.evaluate(
np.ones((3, 2)), np.ones((3, 3)), return_dict=True
)
model.compile(
optimizer="sgd", loss="mse", metrics=["mean_absolute_error"]
)
history_2 = model.fit(np.ones((3, 2)), np.ones((3, 3))).history
eval_out_2 = model.evaluate(
np.ones((3, 2)), np.ones((3, 3)), return_dict=True
)
self.assertEqual(
sorted(list(history_1.keys())), ["loss", "mean_squared_error"]
)
self.assertEqual(
sorted(list(eval_out_1.keys())), ["loss", "mean_squared_error"]
)
self.assertEqual(
sorted(list(history_2.keys())), ["loss", "mean_absolute_error"]
)
self.assertEqual(
sorted(list(eval_out_2.keys())), ["loss", "mean_absolute_error"]
)
@pytest.mark.requires_trainable_backend
def test_nested_inputs(self):
model = ListModel(units=2)
out = model([np.ones((3, 2)), np.ones((3, 3))])
self.assertEqual(tuple(out.shape), (3, 2))
model.compile(optimizer="sgd", loss="mse", metrics=["mse"])
history = model.fit(
[np.ones((3, 2)), np.ones((3, 3))], np.ones((3, 2))
).history
self.assertAllClose(history["loss"], 16.0)
train_out = model.train_on_batch(
[np.ones((3, 2)), np.ones((3, 3))], np.ones((3, 2))
)
self.assertAllClose(train_out[0], 15.2200)
eval_out = model.evaluate(
[np.ones((3, 2)), np.ones((3, 3))], np.ones((3, 2))
)
self.assertAllClose(eval_out[0], 13.0321)
eval_out = model.test_on_batch(
[np.ones((3, 2)), np.ones((3, 3))], np.ones((3, 2))
)
self.assertAllClose(eval_out[0], 13.0321)
predict_out = model.predict([np.ones((3, 2)), np.ones((3, 3))])
self.assertEqual(predict_out.shape, (3, 2))
predict_out = model.predict_on_batch([np.ones((3, 2)), np.ones((3, 3))])
self.assertEqual(predict_out.shape, (3, 2))
@pytest.mark.requires_trainable_backend
def test_validation_data_infinite_generator(self):
# Test that you can pass an infinite generator to `validation_data`
# arg of fit() as well as a `validation_steps` argument and that
# validation only runs for the correct number of steps.
inputs = layers.Input((2,))
outputs = layers.Dense(3)(inputs)
model = keras_core.Model(inputs, outputs)
model.compile(optimizer="sgd", loss="mse", metrics=["mse"])
class Recorder(keras_core.callbacks.Callback):
def __init__(self):
self.train_counter = 0
self.val_counter = 0
def on_train_batch_end(self, *args, **kwargs):
self.train_counter += 1
def on_test_batch_end(self, *args, **kwargs):
self.val_counter += 1
def infinite_gen():
while True:
yield np.ones((2, 2)), np.ones((2, 3))
recorder = Recorder()
model.fit(
infinite_gen(),
validation_data=infinite_gen(),
steps_per_epoch=3,
validation_steps=4,
epochs=1,
shuffle=False,
callbacks=[recorder],
)
self.assertEqual(recorder.train_counter, 3)
self.assertEqual(recorder.val_counter, 4)
| keras-core/keras_core/trainers/trainer_test.py/0 | {
"file_path": "keras-core/keras_core/trainers/trainer_test.py",
"repo_id": "keras-core",
"token_count": 15600
} | 50 |
"""Utilities related to image handling."""
import io
import pathlib
import warnings
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
try:
from PIL import Image as pil_image
try:
pil_image_resampling = pil_image.Resampling
except AttributeError:
pil_image_resampling = pil_image
except ImportError:
pil_image = None
pil_image_resampling = None
if pil_image_resampling is not None:
PIL_INTERPOLATION_METHODS = {
"nearest": pil_image_resampling.NEAREST,
"bilinear": pil_image_resampling.BILINEAR,
"bicubic": pil_image_resampling.BICUBIC,
"hamming": pil_image_resampling.HAMMING,
"box": pil_image_resampling.BOX,
"lanczos": pil_image_resampling.LANCZOS,
}
@keras_core_export(
[
"keras_core.utils.array_to_img",
"keras_core.preprocessing.image.array_to_img",
]
)
def array_to_img(x, data_format=None, scale=True, dtype=None):
"""Converts a 3D NumPy array to a PIL Image instance.
Usage:
```python
from PIL import Image
img = np.random.random(size=(100, 100, 3))
pil_img = keras_core.utils.array_to_img(img)
```
Args:
x: Input data, in any form that can be converted to a NumPy array.
data_format: Image data format, can be either `"channels_first"` or
`"channels_last"`. Defaults to `None`, in which case the global
setting `keras_core.backend.image_data_format()` is used (unless you
changed it, it defaults to `"channels_last"`).
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively. Defaults to `True`.
dtype: Dtype to use. `None` means the global setting
`keras_core.backend.floatx()` is used (unless you changed it, it
defaults to `"float32"`). Defaults to `None`.
Returns:
A PIL Image instance.
"""
data_format = backend.standardize_data_format(data_format)
if dtype is None:
dtype = backend.floatx()
if pil_image is None:
raise ImportError(
"Could not import PIL.Image. "
"The use of `array_to_img` requires PIL."
)
x = np.asarray(x, dtype=dtype)
if x.ndim != 3:
raise ValueError(
"Expected image array to have rank 3 (single image). "
f"Got array with shape: {x.shape}"
)
# Original NumPy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == "channels_first":
x = x.transpose(1, 2, 0)
if scale:
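        # Shift the minimum to 0, then rescale so the maximum maps to 255.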
x = x - np.min(x)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 4:
# RGBA
return pil_image.fromarray(x.astype("uint8"), "RGBA")
elif x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype("uint8"), "RGB")
elif x.shape[2] == 1:
# grayscale
if np.max(x) > 255:
# 32-bit signed integer grayscale image. PIL mode "I"
return pil_image.fromarray(x[:, :, 0].astype("int32"), "I")
return pil_image.fromarray(x[:, :, 0].astype("uint8"), "L")
else:
raise ValueError(f"Unsupported channel number: {x.shape[2]}")
@keras_core_export(
[
"keras_core.utils.img_to_array",
"keras_core.preprocessing.image.img_to_array",
]
)
def img_to_array(img, data_format=None, dtype=None):
"""Converts a PIL Image instance to a NumPy array.
Usage:
```python
from PIL import Image
img_data = np.random.random(size=(100, 100, 3))
img = keras_core.utils.array_to_img(img_data)
array = keras_core.utils.image.img_to_array(img)
```
Args:
img: Input PIL Image instance.
data_format: Image data format, can be either `"channels_first"` or
`"channels_last"`. Defaults to `None`, in which case the global
setting `keras_core.backend.image_data_format()` is used (unless you
changed it, it defaults to `"channels_last"`).
dtype: Dtype to use. `None` means the global setting
`keras_core.backend.floatx()` is used (unless you changed it, it
defaults to `"float32"`).
Returns:
A 3D NumPy array.
"""
data_format = backend.standardize_data_format(data_format)
if dtype is None:
dtype = backend.floatx()
# NumPy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=dtype)
if len(x.shape) == 3:
if data_format == "channels_first":
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == "channels_first":
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError(f"Unsupported image shape: {x.shape}")
return x
@keras_core_export(
["keras_core.utils.save_img", "keras_core.preprocessing.image.save_img"]
)
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
"""Saves an image stored as a NumPy array to a path or file object.
Args:
path: Path or file object.
x: NumPy array.
data_format: Image data format, either `"channels_first"` or
`"channels_last"`.
file_format: Optional file format override. If omitted, the format to
use is determined from the filename extension. If a file object was
used instead of a filename, this parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
data_format = backend.standardize_data_format(data_format)
img = array_to_img(x, data_format=data_format, scale=scale)
if img.mode == "RGBA" and (file_format == "jpg" or file_format == "jpeg"):
warnings.warn(
"The JPG format does not support RGBA images, converting to RGB."
)
img = img.convert("RGB")
img.save(path, format=file_format, **kwargs)
@keras_core_export(
["keras_core.utils.load_img", "keras_core.preprocessing.image.load_img"]
)
def load_img(
path,
color_mode="rgb",
target_size=None,
interpolation="nearest",
keep_aspect_ratio=False,
):
"""Loads an image into PIL format.
Usage:
```python
image = keras_core.utils.load_img(image_path)
input_arr = keras_core.utils.img_to_array(image)
input_arr = np.array([input_arr]) # Convert single image to a batch.
predictions = model.predict(input_arr)
```
Args:
path: Path to image file.
color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`. Default: `"rgb"`.
The desired image format.
target_size: Either `None` (default to original size) or tuple of ints
`(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are `"nearest"`, `"bilinear"`, and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"`
is also supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also
supported. By default, `"nearest"` is used.
keep_aspect_ratio: Boolean, whether to resize images to a target
size without aspect ratio distortion. The image is cropped in
the center with target aspect ratio before resizing.
Returns:
A PIL Image instance.
"""
if pil_image is None:
raise ImportError(
"Could not import PIL.Image. The use of `load_img` requires PIL."
)
if isinstance(path, io.BytesIO):
img = pil_image.open(path)
elif isinstance(path, (pathlib.Path, bytes, str)):
if isinstance(path, pathlib.Path):
path = str(path.resolve())
with open(path, "rb") as f:
img = pil_image.open(io.BytesIO(f.read()))
else:
raise TypeError(
f"path should be path-like or io.BytesIO, not {type(path)}"
)
if color_mode == "grayscale":
# if image is not already an 8-bit, 16-bit or 32-bit grayscale image
# convert it to an 8-bit grayscale image.
if img.mode not in ("L", "I;16", "I"):
img = img.convert("L")
elif color_mode == "rgba":
if img.mode != "RGBA":
img = img.convert("RGBA")
elif color_mode == "rgb":
if img.mode != "RGB":
img = img.convert("RGB")
else:
raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in PIL_INTERPOLATION_METHODS:
raise ValueError(
"Invalid interpolation method {} specified. Supported "
"methods are {}".format(
interpolation,
", ".join(PIL_INTERPOLATION_METHODS.keys()),
)
)
resample = PIL_INTERPOLATION_METHODS[interpolation]
if keep_aspect_ratio:
width, height = img.size
target_width, target_height = width_height_tuple
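                # Pick the largest crop matching the target aspect ratio:
                # keep one full dimension and trim the other to fit.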
crop_height = (width * target_height) // target_width
crop_width = (height * target_width) // target_height
                # Clamp the crop so it never exceeds the input
                # height / width.
crop_height = min(height, crop_height)
crop_width = min(width, crop_width)
crop_box_hstart = (height - crop_height) // 2
crop_box_wstart = (width - crop_width) // 2
crop_box_wend = crop_box_wstart + crop_width
crop_box_hend = crop_box_hstart + crop_height
crop_box = [
crop_box_wstart,
crop_box_hstart,
crop_box_wend,
crop_box_hend,
]
img = img.resize(width_height_tuple, resample, box=crop_box)
else:
img = img.resize(width_height_tuple, resample)
return img
def smart_resize(
x,
size,
interpolation="bilinear",
data_format="channels_last",
backend_module=None,
):
"""Resize images to a target size without aspect ratio distortion.
Image datasets typically yield images that have each a different
size. However, these images need to be batched before they can be
processed by Keras layers. To be batched, images need to share the same
height and width.
You could simply do, in TF (or JAX equivalent):
```python
size = (200, 200)
ds = ds.map(lambda img: resize(img, size))
```
However, if you do this, you distort the aspect ratio of your images, since
in general they do not all have the same aspect ratio as `size`. This is
fine in many cases, but not always (e.g. for image generation models
this can be a problem).
Note that passing the argument `preserve_aspect_ratio=True` to `resize`
will preserve the aspect ratio, but at the cost of no longer respecting the
provided target size.
This calls for:
```python
size = (200, 200)
ds = ds.map(lambda img: smart_resize(img, size))
```
Your output images will actually be `(200, 200)`, and will not be distorted.
Instead, the parts of the image that do not fit within the target size
get cropped out.
The resizing process is:
1. Take the largest centered crop of the image that has the same aspect
ratio as the target size. For instance, if `size=(200, 200)` and the input
image has size `(340, 500)`, we take a crop of `(340, 340)` centered along
the width.
2. Resize the cropped image to the target size. In the example above,
we resize the `(340, 340)` crop to `(200, 200)`.
Args:
x: Input image or batch of images (as a tensor or NumPy array).
Must be in format `(height, width, channels)`
or `(batch_size, height, width, channels)`.
size: Tuple of `(height, width)` integer. Target size.
interpolation: String, interpolation to use for resizing.
Defaults to `'bilinear'`.
Supports `bilinear`, `nearest`, `bicubic`,
`lanczos3`, `lanczos5`.
data_format: `"channels_last"` or `"channels_first"`.
backend_module: Backend module to use (if different from the default
backend).
Returns:
Array with shape `(size[0], size[1], channels)`.
If the input image was a NumPy array, the output is a NumPy array,
and if it was a backend-native tensor,
the output is a backend-native tensor.
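
    Example (an illustrative sketch; assumes the module import path shown
    below and uses random placeholder data):

    ```python
    import numpy as np

    from keras_core.utils.image_utils import smart_resize

    images = np.random.rand(8, 340, 500, 3).astype("float32")
    out = smart_resize(images, size=(200, 200))
    assert out.shape == (8, 200, 200, 3)
    ```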
"""
backend_module = backend_module or backend
if len(size) != 2:
raise ValueError(
f"Expected `size` to be a tuple of 2 integers, but got: {size}."
)
img = backend_module.convert_to_tensor(x)
    if len(img.shape) < 3 or len(img.shape) > 4:
        raise ValueError(
            "Expected an image array with shape `(height, width, "
            "channels)`, or `(batch_size, height, width, channels)`, but "
            f"got input with incorrect rank, of shape {img.shape}."
        )
shape = backend_module.shape(img)
if data_format == "channels_last":
height, width = shape[-3], shape[-2]
else:
height, width = shape[-2], shape[-1]
target_height, target_width = size
    # Clamp the crop so it never exceeds the input height / width.
if isinstance(height, int) and isinstance(width, int):
# For JAX, we need to keep the slice indices as static integers
crop_height = int(float(width * target_height) / target_width)
crop_height = min(height, crop_height)
crop_width = int(float(height * target_width) / target_height)
crop_width = min(width, crop_width)
crop_box_hstart = int(float(height - crop_height) / 2)
crop_box_wstart = int(float(width - crop_width) / 2)
else:
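        # Dynamic (symbolic) shapes: compute the same crop geometry with
        # backend ops so the logic also works inside compiled graphs.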
crop_height = backend_module.cast(
backend_module.cast(width * target_height, "float32")
/ target_width,
"int32",
)
crop_height = backend_module.numpy.minimum(height, crop_height)
crop_height = backend_module.cast(crop_height, "int32")
crop_width = backend_module.cast(
backend_module.cast(height * target_width, "float32")
/ target_height,
"int32",
)
crop_width = backend_module.numpy.minimum(width, crop_width)
crop_width = backend_module.cast(crop_width, "int32")
crop_box_hstart = backend_module.cast(
backend_module.cast(height - crop_height, "float32") / 2, "int32"
)
crop_box_wstart = backend_module.cast(
backend_module.cast(width - crop_width, "float32") / 2, "int32"
)
if data_format == "channels_last":
if len(img.shape) == 4:
img = img[
:,
crop_box_hstart : crop_box_hstart + crop_height,
crop_box_wstart : crop_box_wstart + crop_width,
:,
]
else:
img = img[
crop_box_hstart : crop_box_hstart + crop_height,
crop_box_wstart : crop_box_wstart + crop_width,
:,
]
else:
if len(img.shape) == 4:
img = img[
:,
:,
crop_box_hstart : crop_box_hstart + crop_height,
crop_box_wstart : crop_box_wstart + crop_width,
]
else:
img = img[
:,
crop_box_hstart : crop_box_hstart + crop_height,
crop_box_wstart : crop_box_wstart + crop_width,
]
img = backend_module.image.resize(
img, size=size, interpolation=interpolation, data_format=data_format
)
if isinstance(x, np.ndarray):
return np.array(img)
return img
| keras-core/keras_core/utils/image_utils.py/0 | {
"file_path": "keras-core/keras_core/utils/image_utils.py",
"repo_id": "keras-core",
"token_count": 7425
} | 51 |
import numpy as np
from keras_core.api_export import keras_core_export
@keras_core_export(
[
"keras_core.utils.pad_sequences",
"keras_core.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
"""Pads sequences to the same length.
This function transforms a list (of length `num_samples`)
of sequences (lists of integers)
into a 2D NumPy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence in the list.
Sequences that are shorter than `num_timesteps`
are padded with `value` until they are `num_timesteps` long.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding or removing values from the beginning of the sequence is the
default.
>>> sequence = [[1], [2, 3], [4, 5, 6]]
>>> keras_core.utils.pad_sequences(sequence)
array([[0, 0, 1],
[0, 2, 3],
[4, 5, 6]], dtype=int32)
>>> keras_core.utils.pad_sequences(sequence, value=-1)
array([[-1, -1, 1],
[-1, 2, 3],
[ 4, 5, 6]], dtype=int32)
>>> keras_core.utils.pad_sequences(sequence, padding='post')
array([[1, 0, 0],
[2, 3, 0],
[4, 5, 6]], dtype=int32)
>>> keras_core.utils.pad_sequences(sequence, maxlen=2)
array([[0, 1],
[2, 3],
[5, 6]], dtype=int32)
Args:
sequences: List of sequences (each sequence is a list of integers).
maxlen: Optional Int, maximum length of all sequences. If not provided,
sequences will be padded to the length of the longest individual
sequence.
dtype: (Optional, defaults to `"int32"`). Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
padding: String, "pre" or "post" (optional, defaults to `"pre"`):
pad either before or after each sequence.
truncating: String, "pre" or "post" (optional, defaults to `"pre"`):
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float or String, padding value. (Optional, defaults to 0.)
Returns:
NumPy array with shape `(len(sequences), maxlen)`
"""
    if not hasattr(sequences, "__len__"):
        raise ValueError(
            "`sequences` must be a sized iterable (it must define "
            "`__len__`)."
        )
num_samples = len(sequences)
lengths = []
sample_shape = ()
flag = True
    # Take the sample shape from the first non-empty sequence;
    # consistency is checked in the main loop below.
for x in sequences:
try:
lengths.append(len(x))
if flag and len(x):
sample_shape = np.asarray(x).shape[1:]
flag = False
except TypeError as e:
raise ValueError(
"`sequences` must be a list of iterables. "
f"Found non-iterable: {str(x)}"
) from e
if maxlen is None:
maxlen = np.max(lengths)
is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(
dtype, np.unicode_
)
if isinstance(value, str) and dtype != object and not is_dtype_str:
raise ValueError(
f"`dtype` {dtype} is not compatible with `value`'s type: "
f"{type(value)}\nYou should set `dtype=object` for variable length "
"strings."
)
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == "pre":
trunc = s[-maxlen:]
elif truncating == "post":
trunc = s[:maxlen]
else:
raise ValueError(f'Truncating type "{truncating}" not understood')
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError(
f"Shape of sample {trunc.shape[1:]} of sequence at "
f"position {idx} is different from expected shape "
f"{sample_shape}"
)
if padding == "post":
x[idx, : len(trunc)] = trunc
elif padding == "pre":
x[idx, -len(trunc) :] = trunc
else:
raise ValueError(f'Padding type "{padding}" not understood')
return x
| keras-core/keras_core/utils/sequence_utils.py/0 | {
"file_path": "keras-core/keras_core/utils/sequence_utils.py",
"repo_id": "keras-core",
"token_count": 2075
} | 52 |
"""Script to create (and optionally install) a `.whl` archive for Keras Core.
Usage:
1. Create a `.whl` file in `dist/`:
```
python3 pip_build.py
```
2. Also install the new package immediately after:
```
python3 pip_build.py --install
```
"""
import argparse
import glob
import os
import pathlib
import shutil
import namex
# Needed because importing torch after TF causes the runtime to crash
import torch # noqa: F401
package = "keras_core"
build_directory = "tmp_build_dir"
dist_directory = "dist"
to_copy = ["setup.py", "README.md"]
def ignore_files(_, filenames):
return [f for f in filenames if f.endswith("_test.py")]
def copy_source_to_build_directory(root_path):
# Copy sources (`keras_core/` directory and setup files) to build
# directory
os.chdir(root_path)
os.mkdir(build_directory)
shutil.copytree(
package, os.path.join(build_directory, package), ignore=ignore_files
)
for fname in to_copy:
shutil.copy(fname, os.path.join(f"{build_directory}", fname))
os.chdir(build_directory)
def run_namex_conversion():
# Restructure the codebase so that source files live in `keras_core/src`
namex.convert_codebase(package, code_directory="src")
# Generate API __init__.py files in `keras_core/`
namex.generate_api_files(package, code_directory="src", verbose=True)
def create_legacy_directory():
# Make keras_core/_tf_keras/ by copying keras_core/
tf_keras_dirpath = os.path.join(package, "_tf_keras")
os.makedirs(tf_keras_dirpath)
with open(os.path.join(package, "__init__.py")) as f:
init_file = f.read()
init_file = init_file.replace(
"from keras_core import _legacy",
"from keras_core import _tf_keras",
)
with open(os.path.join(package, "__init__.py"), "w") as f:
f.write(init_file)
with open(os.path.join(tf_keras_dirpath, "__init__.py"), "w") as f:
f.write(init_file)
for dirname in os.listdir(package):
dirpath = os.path.join(package, dirname)
if os.path.isdir(dirpath) and dirname not in (
"_legacy",
"_tf_keras",
"src",
):
shutil.copytree(
dirpath,
os.path.join(tf_keras_dirpath, dirname),
ignore=ignore_files,
)
# Copy keras_core/_legacy/ file contents to keras_core/_tf_keras/
legacy_submodules = [
path[:-3]
for path in os.listdir(os.path.join(package, "src", "legacy"))
if path.endswith(".py")
]
legacy_submodules += [
path
for path in os.listdir(os.path.join(package, "src", "legacy"))
if os.path.isdir(os.path.join(package, "src", "legacy", path))
]
for root, _, fnames in os.walk(os.path.join(package, "_legacy")):
for fname in fnames:
if fname.endswith(".py"):
legacy_fpath = os.path.join(root, fname)
tf_keras_root = root.replace("/_legacy", "/_tf_keras")
core_api_fpath = os.path.join(
root.replace("/_legacy", ""), fname
)
if not os.path.exists(tf_keras_root):
os.makedirs(tf_keras_root)
tf_keras_fpath = os.path.join(tf_keras_root, fname)
with open(legacy_fpath) as f:
legacy_contents = f.read()
legacy_contents = legacy_contents.replace(
"keras_core._legacy", "keras_core._tf_keras"
)
if os.path.exists(core_api_fpath):
with open(core_api_fpath) as f:
core_api_contents = f.read()
core_api_contents = core_api_contents.replace(
"from keras_core import _tf_keras\n", ""
)
for legacy_submodule in legacy_submodules:
core_api_contents = core_api_contents.replace(
f"from keras_core import {legacy_submodule}\n",
"",
)
core_api_contents = core_api_contents.replace(
f"keras_core.{legacy_submodule}",
f"keras_core._tf_keras.{legacy_submodule}",
)
legacy_contents = core_api_contents + "\n" + legacy_contents
with open(tf_keras_fpath, "w") as f:
f.write(legacy_contents)
# Delete keras_core/_legacy/
shutil.rmtree(os.path.join(package, "_legacy"))
def export_version_string(__version__):
# Make sure to export the __version__ string
with open(os.path.join(package, "__init__.py")) as f:
init_contents = f.read()
with open(os.path.join(package, "__init__.py"), "w") as f:
f.write(init_contents + "\n\n" + f'__version__ = "{__version__}"\n')
def build_and_save_output(root_path, __version__):
# Build the package
os.system("python3 -m build")
# Save the dist files generated by the build process
os.chdir(root_path)
if not os.path.exists(dist_directory):
os.mkdir(dist_directory)
for fpath in glob.glob(
os.path.join(build_directory, dist_directory, "*.*")
):
shutil.copy(fpath, dist_directory)
# Find the .whl file path
whl_path = None
for fname in os.listdir(dist_directory):
if __version__ in fname and fname.endswith(".whl"):
whl_path = os.path.abspath(os.path.join(dist_directory, fname))
print(f"Build successful. Wheel file available at {whl_path}")
return whl_path
def build(root_path):
if os.path.exists(build_directory):
raise ValueError(f"Directory already exists: {build_directory}")
try:
copy_source_to_build_directory(root_path)
run_namex_conversion()
create_legacy_directory()
from keras_core.src.version import __version__ # noqa: E402
export_version_string(__version__)
return build_and_save_output(root_path, __version__)
finally:
# Clean up: remove the build directory (no longer needed)
shutil.rmtree(build_directory)
def install_whl(whl_fpath):
print(f"Installing wheel file: {whl_fpath}")
os.system(f"pip3 install {whl_fpath} --force-reinstall --no-dependencies")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--install",
action="store_true",
help="Whether to install the generated wheel file.",
)
args = parser.parse_args()
root_path = pathlib.Path(__file__).parent.resolve()
whl_path = build(root_path)
if whl_path and args.install:
install_whl(whl_path)
| keras-core/pip_build.py/0 | {
"file_path": "keras-core/pip_build.py",
"repo_id": "keras-core",
"token_count": 3233
} | 53 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import ChannelShuffle
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
class OldChannelShuffle(BaseImageAugmentationLayer):
"""Shuffle channels of an input image.
Input shape:
        Expected images are in the `[0, 255]` pixel range.
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
groups: Number of groups to divide the input channels, defaults to 3.
seed: Integer. Used to create a random seed.
Call arguments:
        inputs: Tensor representing images of shape
            `(batch_size, width, height, channels)` or
            `(width, height, channels)`, with dtype
            `tf.float32` / `tf.uint8`.
training: A boolean argument that determines whether the call should be
run in inference mode or training mode, defaults to True.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
channel_shuffle = keras_cv.layers.ChannelShuffle()
augmented_images = channel_shuffle(images)
```
"""
def __init__(self, groups=3, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.groups = groups
self.seed = seed
def augment_image(self, image, transformation=None, **kwargs):
shape = tf.shape(image)
height, width = shape[0], shape[1]
num_channels = image.shape[2]
        if num_channels % self.groups != 0:
            raise ValueError(
                "The number of input channels should be "
                "divisible by the number of groups. "
                f"Received: channels={num_channels}, groups={self.groups}"
            )
channels_per_group = num_channels // self.groups
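        # Split the channels into `groups`, move the group axis to the
        # front, shuffle whole groups, then restore the
        # (height, width, channels) layout.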
image = tf.reshape(
image, [height, width, self.groups, channels_per_group]
)
image = tf.transpose(image, perm=[2, 0, 1, 3])
image = tf.random.shuffle(image, seed=self.seed)
image = tf.transpose(image, perm=[1, 2, 3, 0])
image = tf.reshape(image, [height, width, num_channels])
return image
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update({"groups": self.groups, "seed": self.seed})
return config
def compute_output_shape(self, input_shape):
return input_shape
class ChannelShuffleTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
image_shape = (1, 32, 32, 3)
groups = 3
fixed_seed = 2023 # magic number
image = tf.random.uniform(shape=image_shape)
layer = ChannelShuffle(groups=groups, seed=fixed_seed)
old_layer = OldChannelShuffle(groups=groups, seed=fixed_seed)
output = layer(image)
old_output = old_layer(image)
self.assertNotAllClose(image, output)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 3000, 4000, 5000, 10000]
results = {}
aug_candidates = [ChannelShuffle, OldChannelShuffle]
aug_args = {"groups": 3}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_channel_shuffle.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_channel_shuffle.py",
"repo_id": "keras-cv",
"token_count": 2952
} | 54 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import core
from keras_cv.layers import RandomlyZoomedCrop
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomlyZoomedCrop(BaseImageAugmentationLayer):
"""Randomly crops a part of an image and zooms it by a provided amount size.
This implementation takes a distortion-oriented approach, which means the
amount of distortion in the image is proportional to the `zoom_factor`
    argument. To do this, we first sample random values for `zoom_factor` and
    `aspect_ratio_factor`. We then deduce a `crop_size` which abides by the
    calculated aspect ratio. Finally, we do the actual cropping operation and
resize the image to `(height, width)`.
Args:
height: The height of the output shape.
width: The width of the output shape.
zoom_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. Represents the area relative to the original
image of the cropped image before resizing it to `(height, width)`.
aspect_ratio_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. Aspect ratio means the ratio of width to
height of the cropped image. In the context of this layer, the
aspect ratio sampled represents a value to distort the aspect ratio
by.
Represents the lower and upper bound for the aspect ratio of the
cropped image before resizing it to `(height, width)`. For most
tasks, this should be `(3/4, 4/3)`. To perform a no-op provide the
value `(1.0, 1.0)`.
interpolation: (Optional) A string specifying the sampling method for
resizing, defaults to "bilinear".
seed: (Optional) Used to create a random seed, defaults to None.
"""
def __init__(
self,
height,
width,
zoom_factor,
aspect_ratio_factor,
interpolation="bilinear",
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height = height
self.width = width
self.aspect_ratio_factor = preprocessing_utils.parse_factor(
aspect_ratio_factor,
min_value=0.0,
max_value=None,
param_name="aspect_ratio_factor",
seed=seed,
)
self.zoom_factor = preprocessing_utils.parse_factor(
zoom_factor,
min_value=0.0,
max_value=None,
param_name="zoom_factor",
seed=seed,
)
self._check_class_arguments(
height, width, zoom_factor, aspect_ratio_factor
)
self.force_output_dense_images = True
self.interpolation = interpolation
self.seed = seed
def get_random_transformation(
self, image=None, label=None, bounding_box=None, **kwargs
):
zoom_factor = self.zoom_factor()
aspect_ratio = self.aspect_ratio_factor()
original_height = tf.cast(tf.shape(image)[-3], tf.float32)
original_width = tf.cast(tf.shape(image)[-2], tf.float32)
crop_size = (
tf.round(self.height / zoom_factor),
tf.round(self.width / zoom_factor),
)
new_height = crop_size[0] / tf.sqrt(aspect_ratio)
new_width = crop_size[1] * tf.sqrt(aspect_ratio)
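        # Dividing the height and multiplying the width by sqrt(aspect_ratio)
        # keeps the crop area fixed while multiplying the width/height ratio
        # by exactly `aspect_ratio`.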
height_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, original_height - new_height),
maxval=tf.maximum(0.0, original_height - new_height),
dtype=tf.float32,
)
width_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, original_width - new_width),
maxval=tf.maximum(0.0, original_width - new_width),
dtype=tf.float32,
)
new_height = new_height / original_height
new_width = new_width / original_width
height_offset = height_offset / original_height
width_offset = width_offset / original_width
return (new_height, new_width, height_offset, width_offset)
def call(self, inputs, training=True):
if training:
return super().call(inputs, training)
else:
inputs = self._ensure_inputs_are_compute_dtype(inputs)
inputs, meta_data = self._format_inputs(inputs)
output = inputs
# self._resize() returns valid results for both batched and
            # unbatched inputs
output["images"] = self._resize(inputs["images"])
return self._format_output(output, meta_data)
def augment_image(self, image, transformation, **kwargs):
image_shape = tf.shape(image)
height = tf.cast(image_shape[-3], tf.float32)
width = tf.cast(image_shape[-2], tf.float32)
image = tf.expand_dims(image, axis=0)
new_height, new_width, height_offset, width_offset = transformation
transform = OldRandomlyZoomedCrop._format_transform(
[
new_width,
0.0,
width_offset * width,
0.0,
new_height,
height_offset * height,
0.0,
0.0,
]
)
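        # The 8-vector above follows the tf.raw_ops.ImageProjectiveTransformV3
        # convention [a0, a1, a2, b0, b1, b2, c0, c1]: an output pixel (x, y)
        # samples input pixel ((a0*x + a1*y + a2) / k, (b0*x + b1*y + b2) / k)
        # with k = c0*x + c1*y + 1. Here it encodes a pure scale-and-shift.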
image = preprocessing_utils.transform(
images=image,
transforms=transform,
output_shape=(self.height, self.width),
interpolation=self.interpolation,
fill_mode="reflect",
)
return tf.squeeze(image, axis=0)
@staticmethod
def _format_transform(transform):
transform = tf.convert_to_tensor(transform, dtype=tf.float32)
return transform[tf.newaxis]
def _resize(self, image):
outputs = keras.preprocessing.image.smart_resize(
image, (self.height, self.width)
)
# smart_resize will always output float32, so we need to re-cast.
return tf.cast(outputs, self.compute_dtype)
def _check_class_arguments(
self, height, width, zoom_factor, aspect_ratio_factor
):
        if not isinstance(height, int):
            raise ValueError(
                f"`height` must be an integer. Received height={height}"
            )
        if not isinstance(width, int):
            raise ValueError(
                f"`width` must be an integer. Received width={width}"
            )
if (
not isinstance(zoom_factor, (tuple, list, core.FactorSampler))
or isinstance(zoom_factor, float)
or isinstance(zoom_factor, int)
):
raise ValueError(
"`zoom_factor` must be tuple of two positive floats"
" or keras_cv.core.FactorSampler instance. Received "
f"zoom_factor={zoom_factor}"
)
if (
not isinstance(
aspect_ratio_factor, (tuple, list, core.FactorSampler)
)
or isinstance(aspect_ratio_factor, float)
or isinstance(aspect_ratio_factor, int)
):
raise ValueError(
"`aspect_ratio_factor` must be tuple of two positive floats or "
"keras_cv.core.FactorSampler instance. Received "
f"aspect_ratio_factor={aspect_ratio_factor}"
)
def augment_target(self, augment_target, **kwargs):
return augment_target
def get_config(self):
config = super().get_config()
config.update(
{
"height": self.height,
"width": self.width,
"zoom_factor": self.zoom_factor,
"aspect_ratio_factor": self.aspect_ratio_factor,
"interpolation": self.interpolation,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
if isinstance(config["zoom_factor"], dict):
config["zoom_factor"] = keras.utils.deserialize_keras_object(
config["zoom_factor"]
)
if isinstance(config["aspect_ratio_factor"], dict):
config["aspect_ratio_factor"] = (
keras.utils.deserialize_keras_object(
config["aspect_ratio_factor"]
)
)
return cls(**config)
def _crop_and_resize(self, image, transformation, method=None):
image = tf.expand_dims(image, axis=0)
boxes = transformation
# See bit.ly/tf_crop_resize for more details
augmented_image = tf.image.crop_and_resize(
image, # image shape: [B, H, W, C]
boxes, # boxes: (1, 4) in this case; represents area
# to be cropped from the original image
[0], # box_indices: maps boxes to images along batch axis
# [0] since there is only one image
(self.height, self.width), # output size
method=method or self.interpolation,
)
return tf.squeeze(augmented_image, axis=0)
class RandomlyZoomedCropTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
image_shape = (1, 64, 64, 3)
height, width = 32, 32
fixed_zoom_factor = (0.8, 0.8)
fixed_aspect_ratio_factor = (3.0 / 4.0, 3.0 / 4.0)
fixed_seed = 2023
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomlyZoomedCrop(
height,
width,
fixed_zoom_factor,
fixed_aspect_ratio_factor,
seed=fixed_seed,
)
old_layer = OldRandomlyZoomedCrop(
height,
width,
fixed_zoom_factor,
fixed_aspect_ratio_factor,
seed=fixed_seed,
)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomlyZoomedCrop, OldRandomlyZoomedCrop]
aug_args = {
"height": 16,
"width": 16,
"zoom_factor": (0.8, 1.2),
"aspect_ratio_factor": (3.0 / 4.0, 4.0 / 3.0),
}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.raw_ops.ImageProjectiveTransformV3 on XLA
# for more information please refer:
# https://github.com/tensorflow/tensorflow/issues/55194
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_randomly_zoomed_crop.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_randomly_zoomed_crop.py",
"repo_id": "keras-cv",
"token_count": 6050
} | 55 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
random_crop_and_resize_demo.py shows how to use the RandomCropAndResize
preprocessing layer for object detection.
"""
import demo_utils
import tensorflow as tf
from keras_cv.layers import preprocessing
IMG_SIZE = (256, 256)
def main():
dataset = demo_utils.load_voc_dataset(bounding_box_format="rel_xyxy")
    random_crop_and_resize = preprocessing.RandomCropAndResize(
target_size=IMG_SIZE,
crop_area_factor=(0.5, 0.5),
aspect_ratio_factor=(0.5, 0.5),
bounding_box_format="rel_xyxy",
)
    result = dataset.map(
        random_crop_and_resize, num_parallel_calls=tf.data.AUTOTUNE
    )
demo_utils.visualize_data(result, bounding_box_format="rel_xyxy")
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/bounding_box/random_crop_and_resize_demo.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/bounding_box/random_crop_and_resize_demo.py",
"repo_id": "keras-cv",
"token_count": 442
} | 56 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
from absl import flags
from tensorflow import keras
from tensorflow.keras import callbacks
from tensorflow.keras import layers
from tensorflow.keras import metrics
from tensorflow.keras import optimizers
from keras_cv import losses
from keras_cv import models
from keras_cv import training
from keras_cv.datasets import imagenet
flags.DEFINE_string(
"model_name", None, "The name of the model in KerasCV.models to use."
)
flags.DEFINE_string(
"imagenet_path", None, "Directory from which to load Imagenet."
)
flags.DEFINE_string(
"backup_path", None, "Directory which will be used for training backups."
)
flags.DEFINE_string(
"weights_path",
None,
"Directory which will be used to store weight checkpoints.",
)
flags.DEFINE_string(
"tensorboard_path",
None,
"Directory which will be used to store tensorboard logs.",
)
flags.DEFINE_integer(
"batch_size", 256, "Batch size for training and evaluation."
)
flags.DEFINE_boolean(
"use_xla", True, "whether to use XLA (jit_compile) for training."
)
flags.DEFINE_float(
"initial_learning_rate",
0.1,
"Initial learning rate which will reduce on plateau.",
)
flags.DEFINE_boolean(
"include_probe",
True,
"Whether to include probing during training.",
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
if FLAGS.model_name not in models.__dict__:
raise ValueError(f"Invalid model name: {FLAGS.model_name}")
NUM_CLASSES = 1000
IMAGE_SIZE = (224, 224)
EPOCHS = 250
train_ds = imagenet.load(
split="train",
tfrecord_path=FLAGS.imagenet_path,
batch_size=FLAGS.batch_size,
img_size=IMAGE_SIZE,
shuffle=True,
shuffle_buffer=2000,
reshuffle_each_iteration=True,
)
# For TPU training, use tf.distribute.TPUStrategy()
# MirroredStrategy is best for a single machine with multiple GPUs
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = models.__dict__[FLAGS.model_name]
model = model(
include_rescaling=True,
include_top=False,
input_shape=IMAGE_SIZE + (3,),
pooling="avg",
)
trainer = training.SimCLRTrainer(
encoder=model,
augmenter=training.SimCLRAugmenter(
value_range=(0, 255), target_size=IMAGE_SIZE
),
probe=layers.Dense(NUM_CLASSES, name="linear_probe"),
)
optimizer = optimizers.SGD(
learning_rate=FLAGS.initial_learning_rate,
momentum=0.9,
global_clipnorm=10,
)
loss_fn = losses.SimCLRLoss(temperature=0.5, reduction="none")
probe_loss = keras.losses.CategoricalCrossentropy(
reduction="none", from_logits=True
)
with strategy.scope():
training_metrics = [
metrics.CategoricalAccuracy(name="probe_accuracy"),
metrics.TopKCategoricalAccuracy(name="probe_top5_accuracy", k=5),
]
training_callbacks = [
callbacks.EarlyStopping(monitor="probe_accuracy", patience=20),
callbacks.BackupAndRestore(FLAGS.backup_path),
callbacks.ModelCheckpoint(FLAGS.weights_path, save_weights_only=True),
callbacks.TensorBoard(log_dir=FLAGS.tensorboard_path),
]
if FLAGS.include_probe:
training_callbacks += [
callbacks.ReduceLROnPlateau(
monitor="probe_accuracy",
factor=0.1,
patience=5,
min_lr=0.0001,
min_delta=0.005,
)
]
trainer.compile(
encoder_optimizer=optimizer,
encoder_loss=loss_fn,
probe_optimizer=optimizers.Adam(global_clipnorm=10),
probe_metrics=training_metrics,
probe_loss=probe_loss,
jit_compile=FLAGS.use_xla,
)
trainer.fit(
train_ds,
epochs=EPOCHS,
callbacks=training_callbacks,
)
| keras-cv/examples/training/contrastive/imagenet/simclr_training.py/0 | {
"file_path": "keras-cv/examples/training/contrastive/imagenet/simclr_training.py",
"repo_id": "keras-cv",
"token_count": 1661
} | 57 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random as python_random
from keras_cv.backend import keras
from keras_cv.backend.config import keras_3
if keras_3():
from keras.random import * # noqa: F403, F401
else:
from keras_core.random import * # noqa: F403, F401
def _make_default_seed():
return python_random.randint(1, int(1e9))
class SeedGenerator:
def __new__(cls, seed=None, **kwargs):
if keras_3():
return keras.random.SeedGenerator(seed=seed, **kwargs)
return super().__new__(cls)
def __init__(self, seed=None):
if seed is None:
seed = _make_default_seed()
self._initial_seed = seed
self._current_seed = [0, seed]
def next(self, ordered=True):
self._current_seed[0] += 1
return self._current_seed[:]
def get_config(self):
return {"seed": self._initial_seed}
@classmethod
def from_config(cls, config):
return cls(**config)
def _draw_seed(seed):
if keras_3():
# Keras 3 seed can be directly passed to random functions
return seed
if isinstance(seed, SeedGenerator):
init_seed = seed.next()
else:
if seed is None:
seed = _make_default_seed()
init_seed = [0, seed]
return init_seed
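# A minimal usage sketch of the seed plumbing above (values illustrative):
#
#   gen = SeedGenerator(seed=1337)
#   a = uniform((2, 2), seed=gen)  # advances the counter on each call
#   b = uniform((2, 2), seed=gen)  # a different draw from the same generator
#
# Under Keras 2, `_draw_seed` turns `gen` into the `[counter, seed]` pair
# that the `tf.random.stateless_*` ops below expect.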
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
seed = _draw_seed(seed)
kwargs = {}
if dtype:
kwargs["dtype"] = dtype
if keras_3():
return keras.random.normal(
shape,
mean=mean,
stddev=stddev,
seed=seed,
**kwargs,
)
else:
import tensorflow as tf
return tf.random.stateless_normal(
shape,
mean=mean,
stddev=stddev,
seed=seed,
**kwargs,
)
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
init_seed = _draw_seed(seed)
kwargs = {}
if dtype:
kwargs["dtype"] = dtype
if keras_3():
return keras.random.uniform(
shape,
minval=minval,
maxval=maxval,
seed=init_seed,
**kwargs,
)
else:
import tensorflow as tf
return tf.random.stateless_uniform(
shape,
minval=minval,
maxval=maxval,
seed=init_seed,
**kwargs,
)
def shuffle(x, axis=0, seed=None):
init_seed = _draw_seed(seed)
if keras_3():
return keras.random.shuffle(x=x, axis=axis, seed=init_seed)
else:
import tensorflow as tf
return tf.random.stateless_shuffle(x=x, axis=axis, seed=init_seed)
def categorical(logits, num_samples, dtype=None, seed=None):
init_seed = _draw_seed(seed)
kwargs = {}
if dtype:
kwargs["dtype"] = dtype
if keras_3():
return keras.random.categorical(
logits=logits,
num_samples=num_samples,
seed=init_seed,
**kwargs,
)
else:
import tensorflow as tf
return tf.random.stateless_categorical(
logits=logits,
num_samples=num_samples,
seed=init_seed,
**kwargs,
)
| keras-cv/keras_cv/backend/random.py/0 | {
"file_path": "keras-cv/keras_cv/backend/random.py",
"repo_id": "keras-cv",
"token_count": 1790
} | 58 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import keras_cv.bounding_box.validate_format as validate_format
from keras_cv import backend
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
@keras_cv_export("keras_cv.bounding_box.to_ragged")
def to_ragged(bounding_boxes, sentinel=-1, dtype=tf.float32):
"""converts a Dense padded bounding box `tf.Tensor` to a `tf.RaggedTensor`.
Bounding boxes are ragged tensors in most use cases. Converting them to a
dense tensor makes it easier to work with Tensorflow ecosystem.
This function can be used to filter out the masked out bounding boxes by
checking for padded sentinel value of the class_id axis of the
bounding_boxes.
Usage:
```python
bounding_boxes = {
"boxes": tf.constant([[2, 3, 4, 5], [0, 1, 2, 3]]),
"classes": tf.constant([[-1, 1]]),
}
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
print(bounding_boxes)
# {
# "boxes": [[0, 1, 2, 3]],
# "classes": [[1]]
# }
```
Args:
bounding_boxes: a Tensor of bounding boxes. May be batched, or
unbatched.
sentinel: The value indicating that a bounding box does not exist at the
current index, and the corresponding box is padding, defaults to -1.
dtype: the data type to use for the underlying Tensors.
Returns:
dictionary of `tf.RaggedTensor` or 'tf.Tensor' containing the filtered
bounding boxes.
"""
    if not backend.supports_ragged():
raise NotImplementedError(
"`bounding_box.to_ragged` was called using a backend which does "
"not support ragged tensors. "
f"Current backend: {keras.backend.backend()}."
)
info = validate_format.validate_format(bounding_boxes)
if info["ragged"]:
return bounding_boxes
boxes = bounding_boxes.get("boxes")
classes = bounding_boxes.get("classes")
confidence = bounding_boxes.get("confidence", None)
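    # Real boxes are the ones whose class is not the padding sentinel; the
    # same row-wise mask is applied to boxes, classes, and (optionally)
    # confidence so the resulting ragged tensors stay aligned.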
mask = classes != sentinel
boxes = tf.ragged.boolean_mask(boxes, mask)
classes = tf.ragged.boolean_mask(classes, mask)
if confidence is not None:
confidence = tf.ragged.boolean_mask(confidence, mask)
if isinstance(boxes, tf.Tensor):
boxes = tf.RaggedTensor.from_tensor(boxes)
if isinstance(classes, tf.Tensor) and len(classes.shape) > 1:
classes = tf.RaggedTensor.from_tensor(classes)
if confidence is not None:
if isinstance(confidence, tf.Tensor) and len(confidence.shape) > 1:
confidence = tf.RaggedTensor.from_tensor(confidence)
result = bounding_boxes.copy()
result["boxes"] = tf.cast(boxes, dtype)
result["classes"] = tf.cast(classes, dtype)
if confidence is not None:
result["confidence"] = tf.cast(confidence, dtype)
return result
| keras-cv/keras_cv/bounding_box/to_ragged.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/to_ragged.py",
"repo_id": "keras-cv",
"token_count": 1271
} | 59 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer to convert Waymo Open Dataset proto to model inputs."""
from typing import Any
from typing import Dict
from typing import List
from typing import Sequence
from typing import Tuple
import numpy as np
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.utils import assert_waymo_open_dataset_installed
try:
from waymo_open_dataset import dataset_pb2
from waymo_open_dataset.utils import box_utils
from waymo_open_dataset.utils import frame_utils
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
except ImportError:
waymo_open_dataset = None
from keras_cv.datasets.waymo import struct
from keras_cv.layers.object_detection_3d import voxel_utils
WOD_FRAME_OUTPUT_SIGNATURE = {
"frame_id": tf.TensorSpec((), tf.int64),
"timestamp_offset": tf.TensorSpec((), tf.float32),
"timestamp_micros": tf.TensorSpec((), tf.int64),
"pose": tf.TensorSpec([4, 4], tf.float32),
"point_xyz": tf.TensorSpec([None, 3], tf.float32),
"point_feature": tf.TensorSpec([None, 4], tf.float32),
"point_mask": tf.TensorSpec([None], tf.bool),
"point_range_image_row_col_sensor_id": tf.TensorSpec([None, 3], tf.float32),
# Please refer to Waymo Open Dataset label proto for definitions.
"label_box": tf.TensorSpec([None, 7], tf.float32),
"label_box_id": tf.TensorSpec([None], tf.int64),
"label_box_meta": tf.TensorSpec([None, 4], tf.float32),
"label_box_class": tf.TensorSpec([None], tf.int32),
"label_box_density": tf.TensorSpec([None], tf.int32),
"label_box_detection_difficulty": tf.TensorSpec([None], tf.int32),
"label_box_mask": tf.TensorSpec([None], tf.bool),
"label_point_class": tf.TensorSpec([None], tf.int32),
"label_point_nlz": tf.TensorSpec([None], tf.int32),
}
# Maximum number of points from all lidars excluding the top lidar. Please refer
# to https://arxiv.org/pdf/1912.04838.pdf Figure 1 for sensor layouts.
_MAX_NUM_NON_TOP_LIDAR_POINTS = 30000
def _decode_range_images(frame) -> Dict[int, List[tf.Tensor]]:
"""Decodes range images from a Waymo Open Dataset frame.
Please refer to https://arxiv.org/pdf/1912.04838.pdf for more details.
Args:
frame: a Waymo Open Dataset frame.
Returns:
A dictionary mapping from sensor ID to list of range images ordered by
return indices.
"""
range_images = {}
for lidar in frame.lasers:
range_image_str_tensor = tf.io.decode_compressed(
lidar.ri_return1.range_image_compressed, "ZLIB"
)
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
ri_tensor = tf.reshape(
tf.convert_to_tensor(value=ri.data, dtype=tf.float32), ri.shape.dims
)
range_images[lidar.name] = [ri_tensor]
if lidar.name == dataset_pb2.LaserName.TOP:
range_image_str_tensor = tf.io.decode_compressed(
lidar.ri_return2.range_image_compressed, "ZLIB"
)
ri = dataset_pb2.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
ri_tensor = tf.reshape(
tf.convert_to_tensor(value=ri.data, dtype=tf.float32),
ri.shape.dims,
)
range_images[lidar.name].append(ri_tensor)
return range_images
def _get_range_image_top_pose(frame) -> tf.Tensor:
"""Extracts range image pose tensor.
Args:
frame: a Waymo Open Dataset frame.
Returns:
Pose tensors for the range image.
"""
_, _, _, ri_pose = frame_utils.parse_range_image_and_camera_projection(
frame
)
assert ri_pose
ri_pose_tensor = tf.reshape(
tf.convert_to_tensor(value=ri_pose.data), ri_pose.shape.dims
)
# [H, W, 3, 3]
ri_pose_tensor_rotation = transform_utils.get_rotation_matrix(
ri_pose_tensor[..., 0], ri_pose_tensor[..., 1], ri_pose_tensor[..., 2]
)
ri_pose_tensor_translation = ri_pose_tensor[..., 3:]
ri_pose_tensor = transform_utils.get_transform(
ri_pose_tensor_rotation, ri_pose_tensor_translation
)
return ri_pose_tensor
def _get_point_top_lidar(
range_image: Sequence[tf.Tensor], frame
) -> struct.PointTensors:
"""Gets point related tensors for the top lidar.
Please refer to https://arxiv.org/pdf/1912.04838.pdf Table 2 for lidar
specifications.
Args:
range_image: range image tensors. The range image is:
[range, intensity, elongation, is_in_nlz].
frame: a Waymo Open Dataset frame.
Returns:
Point tensors.
"""
assert len(range_image) == 2
xyz_list = []
feature_list = []
row_col_list = []
nlz_list = []
has_second_return_list = []
is_second_return_list = []
# Extracts frame pose tensor.
frame_pose_tensor = tf.convert_to_tensor(
value=np.reshape(np.array(frame.pose.transform), [4, 4])
)
# Extracts range image pose tensor.
ri_pose_tensor = _get_range_image_top_pose(frame)
# Extracts calibration data.
calibration = _get_lidar_calibration(frame, dataset_pb2.LaserName.TOP)
extrinsic = tf.reshape(np.array(calibration.extrinsic.transform), [4, 4])
beam_inclinations = tf.constant(calibration.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
for i in range(2):
ri_tensor = range_image[i]
mask = ri_tensor[:, :, 0] > 0
mask_idx = tf.cast(tf.where(mask), dtype=tf.int32)
xyz = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(ri_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(beam_inclinations, axis=0),
pixel_pose=tf.expand_dims(ri_pose_tensor, axis=0),
frame_pose=tf.expand_dims(frame_pose_tensor, axis=0),
)
xyz = tf.gather_nd(tf.squeeze(xyz, axis=0), mask_idx)
feature = tf.gather_nd(ri_tensor[:, :, 1:3], mask_idx)
nlz = tf.gather_nd(ri_tensor[:, :, -1] > 0, mask_idx)
xyz_list.append(xyz)
feature_list.append(feature)
nlz_list.append(nlz)
row_col_list.append(mask_idx)
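            # Dual-return bookkeeping: first-return points record whether a
            # second return exists at the same range-image pixel; points from
            # the second return are flagged as such.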
if i == 0:
has_second_return = range_image[1][:, :, 0] > 0
has_second_return_list.append(
tf.gather_nd(has_second_return, mask_idx)
)
is_second_return_list.append(
tf.zeros([mask_idx.shape[0]], dtype=tf.bool)
)
else:
has_second_return_list.append(
tf.zeros([mask_idx.shape[0]], dtype=tf.bool)
)
is_second_return_list.append(
tf.ones([mask_idx.shape[0]], dtype=tf.bool)
)
xyz = tf.concat(xyz_list, axis=0)
feature = tf.concat(feature_list, axis=0)
row_col = tf.concat(row_col_list, axis=0)
nlz = tf.concat(nlz_list, axis=0)
has_second_return = tf.cast(
tf.concat(has_second_return_list, axis=0), dtype=tf.float32
)
is_second_return = tf.cast(
tf.concat(is_second_return_list, axis=0), dtype=tf.float32
)
# Complete feature: intensity, elongation, has_second, is_second.
feature = tf.concat(
[
feature,
has_second_return[:, tf.newaxis],
is_second_return[:, tf.newaxis],
],
axis=-1,
)
sensor_id = (
tf.ones([xyz.shape[0], 1], dtype=tf.int32) * dataset_pb2.LaserName.TOP
)
ri_row_col_sensor_id = tf.concat([row_col, sensor_id], axis=-1)
return struct.PointTensors(
point_xyz=xyz,
point_feature=feature,
point_range_image_row_col_sensor_id=ri_row_col_sensor_id,
label_point_nlz=nlz,
)
def _get_lidar_calibration(frame, name: int):
"""Gets lidar calibration for a given lidar."""
calibration = None
for c in frame.context.laser_calibrations:
if c.name == name:
calibration = c
assert calibration is not None
return calibration
def _downsample(point: struct.PointTensors, n: int) -> struct.PointTensors:
"""Randomly samples up to n points from the given point_tensor."""
num_points = point.point_xyz.shape[0]
if num_points <= n:
return point
mask = tf.range(start=0, limit=num_points, dtype=tf.int32)
mask = tf.random.shuffle(mask)
mask_index = mask[:n]
def _gather(t: tf.Tensor) -> tf.Tensor:
return tf.gather(t, mask_index)
tensors = {key: _gather(value) for key, value in vars(point).items()}
return struct.PointTensors(**tensors)
def _get_point_lidar(
ris: Dict[int, List[tf.Tensor]],
frame,
max_num_points: int,
) -> struct.PointTensors:
"""Gets point related tensors for non-top lidar.
The main differences from top lidar extraction are related to second return
and point down sampling.
Args:
ris: Mapping from lidar ID to range image tensor. The ri format is [range,
intensity, elongation, is_in_nlz].
frame: a Waymo Open Dataset frame.
max_num_points: maximum number of points from non-top lidar.
Returns:
Point related tensors.
"""
xyz_list = []
feature_list = []
nlz_list = []
ri_row_col_sensor_id_list = []
for sensor_id in ris.keys():
ri_tensor = ris[sensor_id]
assert len(ri_tensor) == 1, f"{sensor_id}"
ri_tensor = ri_tensor[0]
calibration = _get_lidar_calibration(frame, sensor_id)
extrinsic = tf.reshape(
np.array(calibration.extrinsic.transform), [4, 4]
)
beam_inclinations = range_image_utils.compute_inclination(
tf.constant(
[
calibration.beam_inclination_min,
calibration.beam_inclination_max,
]
),
height=ri_tensor.shape[0],
)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
xyz = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(ri_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(beam_inclinations, axis=0),
)
mask = ri_tensor[:, :, 0] > 0
mask_idx = tf.cast(tf.where(mask), dtype=tf.int32)
xyz = tf.gather_nd(tf.squeeze(xyz, axis=0), mask_idx)
feature = tf.gather_nd(ri_tensor[:, :, 1:3], mask_idx)
feature = tf.concat(
[feature, tf.zeros([feature.shape[0], 2], dtype=tf.float32)],
axis=-1,
)
nlz = tf.gather_nd(ri_tensor[:, :, -1] > 0, mask_idx)
xyz_list.append(xyz)
feature_list.append(feature)
nlz_list.append(nlz)
ri_row_col_sensor_id_list.append(
tf.concat(
[
mask_idx,
sensor_id * tf.ones([nlz.shape[0], 1], dtype=tf.int32),
],
axis=-1,
)
)
xyz = tf.concat(xyz_list, axis=0)
feature = tf.concat(feature_list, axis=0)
nlz = tf.concat(nlz_list, axis=0)
ri_row_col_sensor_id = tf.concat(ri_row_col_sensor_id_list, axis=0)
point_tensors = struct.PointTensors(
point_xyz=xyz,
point_feature=feature,
point_range_image_row_col_sensor_id=ri_row_col_sensor_id,
label_point_nlz=nlz,
)
point_tensors = _downsample(point_tensors, max_num_points)
return point_tensors
def _get_point(frame, max_num_lidar_points: int) -> struct.PointTensors:
"""Gets point related tensors from a Waymo Open Dataset frame.
Args:
frame: a Waymo Open Dataset frame.
max_num_lidar_points: maximum number of points from non-top lidars.
Returns:
Point related tensors.
"""
range_images = _decode_range_images(frame)
point_top_lidar = _get_point_top_lidar(
range_images[dataset_pb2.LaserName.TOP], frame
)
range_images.pop(dataset_pb2.LaserName.TOP)
point_tensors_lidar = _get_point_lidar(
range_images, frame, max_num_lidar_points
)
merged = {}
for key in vars(point_tensors_lidar).keys():
merged[key] = tf.concat(
[getattr(point_tensors_lidar, key), getattr(point_top_lidar, key)],
axis=0,
)
return struct.PointTensors(**merged)
def _get_point_label_box(
frame,
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Extracts 3D box labels from a Waymo Open Dataset frame.
Args:
frame: a Waymo Open Dataset frame.
Returns:
box_3d: [M, 7] 3d boxes.
box_meta: [M, 4] speed and accel for each box.
box_class: [M] object class of each box.
box_id: [M] unique ID of each box.
box_density: [M] number of points in each box.
box_detection_difficulty: [M] difficulty level for detection.
"""
box_3d_list = []
box_meta_list = []
box_class_list = []
box_id_list = []
box_density_list = []
box_detection_difficulty_list = []
for label in frame.laser_labels:
model_object_type = label.type
density = label.num_lidar_points_in_box
detection_difficulty = label.detection_difficulty_level
if model_object_type == 0:
continue
b = label.box
box_3d_list.extend(
[
b.center_x,
b.center_y,
b.center_z,
b.length,
b.width,
b.height,
b.heading,
]
)
meta = label.metadata
box_meta_list.extend(
[
meta.speed_x,
meta.speed_y,
meta.accel_x,
meta.accel_y,
]
)
box_class_list.append(model_object_type)
box_id = tf.bitcast(
tf.fingerprint(
tf.expand_dims(label.id.encode(encoding="ascii"), 0)
)[0],
tf.int64,
)
box_id_list.append(box_id)
box_density_list.append(density)
box_detection_difficulty_list.append(detection_difficulty)
box_3d = tf.reshape(tf.constant(box_3d_list, dtype=tf.float32), [-1, 7])
box_meta = tf.reshape(tf.constant(box_meta_list, dtype=tf.float32), [-1, 4])
box_class = tf.constant(box_class_list, dtype=tf.int32)
box_id = tf.stack(box_id_list)
box_density = tf.constant(box_density_list, dtype=tf.int32)
box_detection_difficulty = tf.constant(
box_detection_difficulty_list, dtype=tf.int32
)
return (
box_3d,
box_meta,
box_class,
box_id,
box_density,
box_detection_difficulty,
)
def _get_box_class_per_point(
box: tf.Tensor, box_class: tf.Tensor, point_xyz: tf.Tensor
) -> tf.Tensor:
"""Extracts point labels.
Args:
box: [M, 7] box tensor.
box_class: [M] class of each box.
point_xyz: [N, 3] points.
Returns:
point_box_class: [N] box class of each point.
"""
n = point_xyz.shape[0]
m = box.shape[0]
if m == 0:
return tf.zeros([n], dtype=tf.int32)
# [N, M]
point_in_box = box_utils.is_within_box_3d(point_xyz, box)
# [N]
point_in_any_box = tf.math.reduce_any(point_in_box, axis=-1)
# [N]
point_box_idx = tf.math.argmax(point_in_box, axis=-1, output_type=tf.int32)
# [N]
point_box_class = tf.where(
point_in_any_box, tf.gather(box_class, point_box_idx), 0
)
return point_box_class
def _get_point_label(frame, point_xyz: tf.Tensor) -> struct.LabelTensors:
"""Extracts labels.
Args:
frame: an open dataset frame.
point_xyz: [N, 3] tensor representing point xyz.
Returns:
Label tensors.
"""
(
box_3d,
box_meta,
box_class,
box_id,
box_density,
box_detection_difficulty,
) = _get_point_label_box(frame)
point_box_class = _get_box_class_per_point(box_3d, box_class, point_xyz)
box_mask = tf.math.greater(box_class, 0)
return struct.LabelTensors(
label_box=box_3d,
label_box_id=box_id,
label_box_meta=box_meta,
label_box_class=box_class,
label_box_density=box_density,
label_box_detection_difficulty=box_detection_difficulty,
label_box_mask=box_mask,
label_point_class=point_box_class,
)
def _point_vehicle_to_global(
point_vehicle_xyz: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms points from vehicle to global frame.
Args:
point_vehicle_xyz: [..., N, 3] vehicle xyz.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The points in global frame.
"""
rot = sdc_pose[..., 0:3, 0:3]
loc = sdc_pose[..., 0:3, 3]
return (
tf.linalg.matmul(point_vehicle_xyz, rot, transpose_b=True)
+ loc[..., tf.newaxis, :]
)
def _point_global_to_vehicle(
point_xyz: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms points from global to vehicle frame.
Args:
point_xyz: [..., N, 3] global xyz.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The points in vehicle frame.
"""
rot = sdc_pose[..., 0:3, 0:3]
loc = sdc_pose[..., 0:3, 3]
return (
tf.linalg.matmul(point_xyz, rot)
+ voxel_utils.inv_loc(rot, loc)[..., tf.newaxis, :]
)
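# Both helpers above use the row-vector convention: with pose [R | t],
# global = vehicle @ R^T + t, so vehicle = (global - t) @ R. The call to
# `voxel_utils.inv_loc(rot, loc)` is assumed to return the translation of
# the inverse pose (equivalent to -t @ R), which makes the two functions
# exact inverses of each other.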
def _box_3d_vehicle_to_global(
box_3d: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms 3D boxes from vehicle to global frame.
Args:
box_3d: [..., N, 7] 3d boxes in vehicle frame.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The boxes in global frame.
"""
center = box_3d[..., 0:3]
dim = box_3d[..., 3:6]
heading = box_3d[..., 6]
new_center = _point_vehicle_to_global(center, sdc_pose)
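    # atan2(R[1, 0], R[0, 0]) extracts the yaw (rotation about +z) of the
    # pose; headings transform additively under a rigid transform.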
new_heading = (
heading
+ tf.atan2(sdc_pose[..., 1, 0], sdc_pose[..., 0, 0])[..., tf.newaxis]
)
return tf.concat([new_center, dim, new_heading[..., tf.newaxis]], axis=-1)
def _box_3d_global_to_vehicle(
box_3d: tf.Tensor, sdc_pose: tf.Tensor
) -> tf.Tensor:
"""Transforms 3D boxes from global to vehicle frame.
Args:
box_3d: [..., N, 7] 3d boxes in global frame.
sdc_pose: [..., 4, 4] the SDC pose.
Returns:
The boxes in vehicle frame.
"""
center = box_3d[..., 0:3]
dim = box_3d[..., 3:6]
heading = box_3d[..., 6]
new_center = _point_global_to_vehicle(center, sdc_pose)
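    # atan2(R[0, 1], R[0, 0]) equals minus the pose yaw, undoing the heading
    # offset applied in the vehicle-to-global direction.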
new_heading = (
heading
+ tf.atan2(sdc_pose[..., 0, 1], sdc_pose[..., 0, 0])[..., tf.newaxis]
)
return tf.concat([new_center, dim, new_heading[..., tf.newaxis]], axis=-1)
@keras_cv_export("keras_cv.datasets.waymo.build_tensors_from_wod_frame")
def build_tensors_from_wod_frame(frame) -> Dict[str, tf.Tensor]:
"""Builds tensors from a Waymo Open Dataset frame.
This function is to convert range image to point cloud. User can also work
with range image directly with frame_utils functions from
waymo_open_dataset.
Args:
frame: a Waymo Open Dataset frame.
Returns:
Flat dictionary of tensors.
"""
assert_waymo_open_dataset_installed(
"keras_cv.datasets.waymo.build_tensors_from_wod_frame()"
)
frame_id_bytes = "{}_{}".format(
frame.context.name, frame.timestamp_micros
).encode(encoding="ascii")
frame_id = tf.bitcast(
tf.fingerprint(tf.expand_dims(frame_id_bytes, 0))[0], tf.int64
)
timestamp_micros = tf.constant(frame.timestamp_micros, dtype=tf.int64)
pose = tf.convert_to_tensor(
value=np.reshape(np.array(frame.pose.transform), [4, 4]),
dtype_hint=tf.float32,
)
point_tensors = _get_point(frame, _MAX_NUM_NON_TOP_LIDAR_POINTS)
point_label_tensors = _get_point_label(frame, point_tensors.point_xyz)
# Transforms lidar frames to global coordinates.
point_tensors.point_xyz = _point_vehicle_to_global(
point_tensors.point_xyz, pose
)
point_label_tensors.label_box = _box_3d_vehicle_to_global(
point_label_tensors.label_box, pose
)
# Constructs final results.
num_points = point_tensors.point_xyz.shape[0]
return {
"frame_id": frame_id,
"timestamp_offset": tf.constant(0.0, dtype=tf.float32),
"timestamp_micros": timestamp_micros,
"pose": pose,
"point_xyz": point_tensors.point_xyz,
"point_feature": point_tensors.point_feature,
"point_mask": tf.ones([num_points], dtype=tf.bool),
"point_range_image_row_col_sensor_id": point_tensors.point_range_image_row_col_sensor_id, # noqa: E501
"label_box": point_label_tensors.label_box,
"label_box_id": point_label_tensors.label_box_id,
"label_box_meta": point_label_tensors.label_box_meta,
"label_box_class": point_label_tensors.label_box_class,
"label_box_density": point_label_tensors.label_box_density,
"label_box_detection_difficulty": point_label_tensors.label_box_detection_difficulty, # noqa: E501
"label_box_mask": point_label_tensors.label_box_mask,
"label_point_class": point_label_tensors.label_point_class,
"label_point_nlz": point_tensors.label_point_nlz,
}
@keras_cv_export("keras_cv.datasets.waymo.pad_or_trim_tensors")
def pad_or_trim_tensors(
frame: Dict[str, tf.Tensor], max_num_point=199600, max_num_label_box=1000
) -> Dict[str, tf.Tensor]:
"""Pad or trim tensors from a frame to have uniform shapes.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame.
max_num_point: maximum number of lidar points to process.
max_num_label_box: maximum number of label boxes to process.
Returns:
A dictionary of feature tensors with uniform shapes.
"""
def _pad_fn(t: tf.Tensor, max_counts: int) -> tf.Tensor:
shape = [max_counts] + t.shape.as_list()[1:]
return voxel_utils._pad_or_trim_to(t, shape)
point_tensor_keys = {
"point_xyz",
"point_feature",
"point_range_image_row_col_sensor_id",
"point_mask",
"label_point_class",
"label_point_nlz",
}
box_tensor_keys = {
"label_box",
"label_box_id",
"label_box_meta",
"label_box_class",
"label_box_density",
"label_box_detection_difficulty",
"label_box_mask",
}
for key in point_tensor_keys:
t = frame[key]
if t is not None:
frame[key] = _pad_fn(t, max_num_point)
for key in box_tensor_keys:
t = frame[key]
if t is not None:
frame[key] = _pad_fn(t, max_num_label_box)
return frame
@keras_cv_export("keras_cv.datasets.waymo.transform_to_vehicle_frame")
def transform_to_vehicle_frame(
frame: Dict[str, tf.Tensor]
) -> Dict[str, tf.Tensor]:
"""Transform tensors in a frame from global coordinates to vehicle
coordinates.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame in
global frame.
Returns:
A dictionary of feature tensors in vehicle frame.
"""
assert_waymo_open_dataset_installed(
"keras_cv.datasets.waymo.transform_to_vehicle_frame()"
)
def _transform_to_vehicle_frame(
point_global_xyz: tf.Tensor,
point_mask: tf.Tensor,
box_global: tf.Tensor,
box_mask: tf.Tensor,
sdc_pose: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
point_vehicle_xyz = _point_global_to_vehicle(point_global_xyz, sdc_pose)
point_vehicle_xyz = tf.where(
point_mask[..., tf.newaxis], point_vehicle_xyz, 0.0
)
box_vehicle = _box_3d_global_to_vehicle(box_global, sdc_pose)
box_vehicle = tf.where(box_mask[..., tf.newaxis], box_vehicle, 0.0)
return point_vehicle_xyz, box_vehicle
point_vehicle_xyz, box_vehicle = _transform_to_vehicle_frame(
frame["point_xyz"],
frame["point_mask"],
frame["label_box"],
frame["label_box_mask"],
frame["pose"],
)
frame["point_xyz"] = point_vehicle_xyz
frame["label_box"] = box_vehicle
# Override pose as the points and boxes are in the vehicle frame.
frame["pose"] = tf.eye(4)
if frame["label_point_nlz"] is not None:
frame["point_mask"] = tf.logical_and(
frame["point_mask"],
tf.logical_not(tf.cast(frame["label_point_nlz"], tf.bool)),
)
return frame
@keras_cv_export("keras_cv.datasets.waymo.convert_to_center_pillar_inputs")
def convert_to_center_pillar_inputs(
frame: Dict[str, tf.Tensor]
) -> Dict[str, Any]:
"""Converts an input frame into CenterPillar input format.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame
Returns:
A dictionary of two tensor dictionaries with keys "point_clouds"
and "3d_boxes".
"""
point_clouds = {
"point_xyz": frame["point_xyz"],
"point_feature": frame["point_feature"],
"point_mask": frame["point_mask"],
}
boxes = {
"boxes": frame["label_box"],
"classes": frame["label_box_class"],
"difficulty": frame["label_box_detection_difficulty"],
"mask": frame["label_box_mask"],
}
y = {
"point_clouds": point_clouds,
"3d_boxes": boxes,
}
return y
@keras_cv_export("keras_cv.datasets.waymo.build_tensors_for_augmentation")
def build_tensors_for_augmentation(
frame: Dict[str, tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Builds tensors for data augmentation from an input frame.
Args:
frame: a dictionary of feature tensors from a Waymo Open Dataset frame
Returns:
A dictionary of two tensors with keys "point_clouds" and "bounding_boxes"
and values which are tensors of shapes [num points, num features] and
[num boxes, num features]).
"""
assert_waymo_open_dataset_installed(
"keras_cv.datasets.waymo.build_tensors_for_augmentation()"
)
point_cloud = tf.concat(
[
frame["point_xyz"][tf.newaxis, ...],
frame["point_feature"][tf.newaxis, ...],
tf.cast(frame["point_mask"], tf.float32)[tf.newaxis, :, tf.newaxis],
],
axis=-1,
)
boxes = tf.concat(
[
frame["label_box"][tf.newaxis, :],
tf.cast(frame["label_box_class"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
tf.cast(frame["label_box_mask"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
tf.cast(frame["label_box_density"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
tf.cast(frame["label_box_detection_difficulty"], tf.float32)[
tf.newaxis, :, tf.newaxis
],
],
axis=-1,
)
return {
"point_clouds": tf.squeeze(point_cloud, axis=0),
"bounding_boxes": tf.squeeze(boxes, axis=0),
}
| keras-cv/keras_cv/datasets/waymo/transformer.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/waymo/transformer.py",
"repo_id": "keras-cv",
"token_count": 13086
} | 60 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
BN_AXIS = 3
CONV_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 2.0,
"mode": "fan_out",
"distribution": "truncated_normal",
},
}
@keras_cv_export("keras_cv.layers.MBConvBlock")
class MBConvBlock(keras.layers.Layer):
def __init__(
self,
input_filters: int,
output_filters: int,
expand_ratio=1,
kernel_size=3,
strides=1,
se_ratio=0.0,
bn_momentum=0.9,
activation="swish",
survival_probability: float = 0.8,
**kwargs
):
"""
Implementation of the MBConv block (Mobile Inverted Residual Bottleneck)
from:
[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381v4).
MBConv blocks are common blocks used in mobile-oriented and efficient
architectures, present in architectures such as MobileNet, EfficientNet,
MaxViT, etc.
        MBConv blocks follow a narrow-wide-narrow structure - expanding with a
        1x1 convolution, applying a depthwise convolution, and narrowing back
        with a 1x1 convolution - which is a more efficient operation than the
        conventional wide-narrow-wide structure.
As they're frequently used for models to be deployed to edge devices,
they're implemented as a layer for ease of use and re-use.
Args:
input_filters: int, the number of input filters
output_filters: int, the optional number of output filters after
Squeeze-Excitation
expand_ratio: default 1, the ratio by which input_filters are
multiplied to expand the structure in the middle expansion phase
kernel_size: default 3, the kernel_size to apply to the expansion
phase convolutions
strides: default 1, the strides to apply to the expansion phase
convolutions
se_ratio: default 0.0, Squeeze-Excitation happens before depthwise
convolution and before output convolution only if the se_ratio
is above 0. The filters used in this phase are chosen as the
maximum between 1 and input_filters*se_ratio
bn_momentum: default 0.9, the BatchNormalization momentum
activation: default "swish", the activation function used between
convolution operations
survival_probability: float, the optional dropout rate to apply
before the output convolution, defaults to 0.8
Returns:
A `tf.Tensor` representing a feature map, passed through the MBConv
block
Example usage:
```
inputs = tf.random.normal(shape=(1, 64, 64, 32), dtype=tf.float32)
layer = keras_cv.layers.MBConvBlock(input_filters=32, output_filters=32)
output = layer(inputs)
output.shape # TensorShape([1, 64, 64, 32])
```
""" # noqa: E501
super().__init__(**kwargs)
self.input_filters = input_filters
self.output_filters = output_filters
self.expand_ratio = expand_ratio
self.kernel_size = kernel_size
self.strides = strides
self.se_ratio = se_ratio
self.bn_momentum = bn_momentum
self.activation = activation
self.survival_probability = survival_probability
self.filters = self.input_filters * self.expand_ratio
self.filters_se = max(1, int(input_filters * se_ratio))
self.conv1 = keras.layers.Conv2D(
filters=self.filters,
kernel_size=1,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format="channels_last",
use_bias=False,
name=self.name + "expand_conv",
)
self.bn1 = keras.layers.BatchNormalization(
axis=BN_AXIS,
momentum=self.bn_momentum,
name=self.name + "expand_bn",
)
self.act = keras.layers.Activation(
self.activation, name=self.name + "activation"
)
self.depthwise = keras.layers.DepthwiseConv2D(
kernel_size=self.kernel_size,
strides=self.strides,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format="channels_last",
use_bias=False,
name=self.name + "dwconv2",
)
self.bn2 = keras.layers.BatchNormalization(
axis=BN_AXIS, momentum=self.bn_momentum, name=self.name + "bn"
)
self.se_conv1 = keras.layers.Conv2D(
self.filters_se,
1,
padding="same",
activation=self.activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=self.name + "se_reduce",
)
self.se_conv2 = keras.layers.Conv2D(
self.filters,
1,
padding="same",
activation="sigmoid",
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=self.name + "se_expand",
)
self.output_conv = keras.layers.Conv2D(
filters=self.output_filters,
kernel_size=1 if expand_ratio != 1 else kernel_size,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format="channels_last",
use_bias=False,
name=self.name + "project_conv",
)
self.bn3 = keras.layers.BatchNormalization(
axis=BN_AXIS,
momentum=self.bn_momentum,
name=self.name + "project_bn",
)
if self.survival_probability:
self.dropout = keras.layers.Dropout(
self.survival_probability,
noise_shape=(None, 1, 1, 1),
name=self.name + "drop",
)
def build(self, input_shape):
if self.name is None:
self.name = keras.backend.get_uid("block0")
def call(self, inputs):
# Expansion phase
if self.expand_ratio != 1:
x = self.conv1(inputs)
x = self.bn1(x)
x = self.act(x)
else:
x = inputs
# Depthwise conv
x = self.depthwise(x)
x = self.bn2(x)
x = self.act(x)
# Squeeze and excite
if 0 < self.se_ratio <= 1:
se = keras.layers.GlobalAveragePooling2D(
name=self.name + "se_squeeze"
)(x)
if BN_AXIS == 1:
se_shape = (self.filters, 1, 1)
else:
se_shape = (1, 1, self.filters)
se = keras.layers.Reshape(se_shape, name=self.name + "se_reshape")(
se
)
se = self.se_conv1(se)
se = self.se_conv2(se)
x = keras.layers.multiply([x, se], name=self.name + "se_excite")
# Output phase
x = self.output_conv(x)
x = self.bn3(x)
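        # Residual connection only when the block preserves spatial size and
        # channel count. With noise_shape=(None, 1, 1, 1), the dropout acts as
        # stochastic depth: entire examples have their residual branch
        # dropped, rather than individual activations.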
if self.strides == 1 and self.input_filters == self.output_filters:
if self.survival_probability:
x = self.dropout(x)
x = keras.layers.Add(name=self.name + "add")([x, inputs])
return x
def get_config(self):
config = {
"input_filters": self.input_filters,
"output_filters": self.output_filters,
"expand_ratio": self.expand_ratio,
"kernel_size": self.kernel_size,
"strides": self.strides,
"se_ratio": self.se_ratio,
"bn_momentum": self.bn_momentum,
"activation": self.activation,
"survival_probability": self.survival_probability,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/mbconv.py/0 | {
"file_path": "keras-cv/keras_cv/layers/mbconv.py",
"repo_id": "keras-cv",
"token_count": 4147
} | 61 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.backend import assert_tf_keras
from keras_cv.bounding_box import iou
from keras_cv.layers.object_detection import box_matcher
from keras_cv.layers.object_detection import sampling
from keras_cv.utils import target_gather
@keras.utils.register_keras_serializable(package="keras_cv")
class _ROISampler(keras.layers.Layer):
"""
Sample ROIs for loss related calculation.
With proposals (ROIs) and ground truth, it performs the following:
1) compute IOU similarity matrix
2) match each proposal to ground truth box based on IOU
    3) sample positive matches and negative matches, and return them
    `append_gt_boxes` augments proposals with ground truth boxes. This is
    useful in 2-stage detection networks during initialization, where the
    1st stage often cannot produce good proposals for the 2nd stage. Setting
    it to True will allow it to generate more reasonable proposals at the
    beginning.
`background_class` allow users to set the labels for background proposals.
Default is 0, where users need to manually shift the incoming `gt_classes`
if its range is [0, num_classes).
Args:
bounding_box_format: The format of bounding boxes to generate. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
roi_matcher: a `BoxMatcher` object that matches proposals with ground
truth boxes. The positive match must be 1 and negative match must be -1.
Such assumption is not being validated here.
positive_fraction: the positive ratio w.r.t `num_sampled_rois`, defaults
to 0.25.
        background_class: the class used to label sampled proposals that are
            matched to background rather than to a ground truth box.
num_sampled_rois: the number of sampled proposals per image for
further (loss) calculation, defaults to 256.
append_gt_boxes: boolean, whether gt_boxes will be appended to rois
before sample the rois, defaults to True.
""" # noqa: E501
def __init__(
self,
bounding_box_format: str,
roi_matcher: box_matcher.BoxMatcher,
positive_fraction: float = 0.25,
background_class: int = 0,
num_sampled_rois: int = 256,
append_gt_boxes: bool = True,
**kwargs,
):
assert_tf_keras("keras_cv.layers._ROISampler")
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.roi_matcher = roi_matcher
self.positive_fraction = positive_fraction
self.background_class = background_class
self.num_sampled_rois = num_sampled_rois
self.append_gt_boxes = append_gt_boxes
self.built = True
# for debugging.
self._positives = keras.metrics.Mean()
self._negatives = keras.metrics.Mean()
def call(
self,
rois: tf.Tensor,
gt_boxes: tf.Tensor,
gt_classes: tf.Tensor,
):
"""
Args:
rois: [batch_size, num_rois, 4]
gt_boxes: [batch_size, num_gt, 4]
gt_classes: [batch_size, num_gt, 1]
Returns:
sampled_rois: [batch_size, num_sampled_rois, 4]
sampled_gt_boxes: [batch_size, num_sampled_rois, 4]
sampled_box_weights: [batch_size, num_sampled_rois, 1]
sampled_gt_classes: [batch_size, num_sampled_rois, 1]
sampled_class_weights: [batch_size, num_sampled_rois, 1]
"""
if self.append_gt_boxes:
# num_rois += num_gt
rois = tf.concat([rois, gt_boxes], axis=1)
num_rois = rois.get_shape().as_list()[1]
if num_rois is None:
raise ValueError(
f"`rois` must have static shape, got {rois.get_shape()}"
)
if num_rois < self.num_sampled_rois:
raise ValueError(
"num_rois must be less than `num_sampled_rois` "
f"({self.num_sampled_rois}), got {num_rois}"
)
rois = bounding_box.convert_format(
rois, source=self.bounding_box_format, target="yxyx"
)
gt_boxes = bounding_box.convert_format(
gt_boxes, source=self.bounding_box_format, target="yxyx"
)
# [batch_size, num_rois, num_gt]
similarity_mat = iou.compute_iou(
rois, gt_boxes, bounding_box_format="yxyx", use_masking=True
)
# [batch_size, num_rois] | [batch_size, num_rois]
matched_gt_cols, matched_vals = self.roi_matcher(similarity_mat)
# [batch_size, num_rois]
positive_matches = tf.math.equal(matched_vals, 1)
negative_matches = tf.math.equal(matched_vals, -1)
self._positives.update_state(
tf.reduce_sum(tf.cast(positive_matches, tf.float32), axis=-1)
)
self._negatives.update_state(
tf.reduce_sum(tf.cast(negative_matches, tf.float32), axis=-1)
)
# [batch_size, num_rois, 1]
background_mask = tf.expand_dims(
tf.logical_not(positive_matches), axis=-1
)
# [batch_size, num_rois, 1]
matched_gt_classes = target_gather._target_gather(
gt_classes, matched_gt_cols
)
# also set all background matches to `background_class`
matched_gt_classes = tf.where(
background_mask,
tf.cast(
self.background_class * tf.ones_like(matched_gt_classes),
gt_classes.dtype,
),
matched_gt_classes,
)
# [batch_size, num_rois, 4]
matched_gt_boxes = target_gather._target_gather(
gt_boxes, matched_gt_cols
)
encoded_matched_gt_boxes = bounding_box._encode_box_to_deltas(
anchors=rois,
boxes=matched_gt_boxes,
anchor_format="yxyx",
box_format="yxyx",
variance=[0.1, 0.1, 0.2, 0.2],
)
# also set all background matches to 0 coordinates
encoded_matched_gt_boxes = tf.where(
background_mask,
tf.zeros_like(matched_gt_boxes),
encoded_matched_gt_boxes,
)
# [batch_size, num_rois]
sampled_indicators = sampling.balanced_sample(
positive_matches,
negative_matches,
self.num_sampled_rois,
self.positive_fraction,
)
# [batch_size, num_sampled_rois] in the range of [0, num_rois)
sampled_indicators, sampled_indices = tf.math.top_k(
sampled_indicators, k=self.num_sampled_rois, sorted=True
)
# [batch_size, num_sampled_rois, 4]
sampled_rois = target_gather._target_gather(rois, sampled_indices)
# [batch_size, num_sampled_rois, 4]
sampled_gt_boxes = target_gather._target_gather(
encoded_matched_gt_boxes, sampled_indices
)
# [batch_size, num_sampled_rois, 1]
sampled_gt_classes = target_gather._target_gather(
matched_gt_classes, sampled_indices
)
# [batch_size, num_sampled_rois, 1]
# all negative samples will be ignored in regression
sampled_box_weights = target_gather._target_gather(
tf.cast(positive_matches[..., tf.newaxis], gt_boxes.dtype),
sampled_indices,
)
# [batch_size, num_sampled_rois, 1]
sampled_indicators = sampled_indicators[..., tf.newaxis]
sampled_class_weights = tf.cast(sampled_indicators, gt_classes.dtype)
return (
sampled_rois,
sampled_gt_boxes,
sampled_box_weights,
sampled_gt_classes,
sampled_class_weights,
)
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"positive_fraction": self.positive_fraction,
"background_class": self.background_class,
"num_sampled_rois": self.num_sampled_rois,
"append_gt_boxes": self.append_gt_boxes,
"roi_matcher": self.roi_matcher.get_config(),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
roi_matcher_config = config.pop("roi_matcher")
roi_matcher = box_matcher.BoxMatcher(**roi_matcher_config)
return cls(roi_matcher=roi_matcher, **config)
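# A minimal usage sketch, assuming the TF-Keras backend required by
# `assert_tf_keras`; the matcher thresholds and box values below are
# illustrative, not tuned for any particular detector.
if __name__ == "__main__":
    # -1: negative, -2: ignored, 1: positive, as `_ROISampler` expects.
    matcher = box_matcher.BoxMatcher(
        thresholds=[0.3, 0.7], match_values=[-1, -2, 1]
    )
    sampler = _ROISampler(
        bounding_box_format="xyxy",
        roi_matcher=matcher,
        num_sampled_rois=64,
    )
    # Build valid xyxy boxes by adding a fixed size to random corners.
    corners = tf.random.uniform((2, 128, 2), maxval=64.0)
    rois = tf.concat([corners, corners + 32.0], axis=-1)
    gt_corners = tf.random.uniform((2, 8, 2), maxval=64.0)
    gt_boxes = tf.concat([gt_corners, gt_corners + 32.0], axis=-1)
    gt_classes = tf.random.uniform(
        (2, 8, 1), minval=1, maxval=10, dtype=tf.int32
    )
    outputs = sampler(rois, gt_boxes, gt_classes)
    # (2, 64, 4) rois, (2, 64, 4) encoded boxes, then weights/classes.
    print([o.shape for o in outputs])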
| keras-cv/keras_cv/layers/object_detection/roi_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_sampler.py",
"repo_id": "keras-cv",
"token_count": 4104
} | 62 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Also export the image KPLs from core keras, so that user can import all the
# image KPLs from one place.
from tensorflow.keras.layers import CenterCrop
from tensorflow.keras.layers import RandomHeight
from tensorflow.keras.layers import RandomWidth
from keras_cv.layers.preprocessing.aug_mix import AugMix
from keras_cv.layers.preprocessing.auto_contrast import AutoContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.channel_shuffle import ChannelShuffle
from keras_cv.layers.preprocessing.cut_mix import CutMix
from keras_cv.layers.preprocessing.equalization import Equalization
from keras_cv.layers.preprocessing.fourier_mix import FourierMix
from keras_cv.layers.preprocessing.grayscale import Grayscale
from keras_cv.layers.preprocessing.grid_mask import GridMask
from keras_cv.layers.preprocessing.jittered_resize import JitteredResize
from keras_cv.layers.preprocessing.mix_up import MixUp
from keras_cv.layers.preprocessing.mosaic import Mosaic
from keras_cv.layers.preprocessing.posterization import Posterization
from keras_cv.layers.preprocessing.rand_augment import RandAugment
from keras_cv.layers.preprocessing.random_apply import RandomApply
from keras_cv.layers.preprocessing.random_aspect_ratio import RandomAspectRatio
from keras_cv.layers.preprocessing.random_augmentation_pipeline import (
RandomAugmentationPipeline,
)
from keras_cv.layers.preprocessing.random_brightness import RandomBrightness
from keras_cv.layers.preprocessing.random_channel_shift import (
RandomChannelShift,
)
from keras_cv.layers.preprocessing.random_choice import RandomChoice
from keras_cv.layers.preprocessing.random_color_degeneration import (
RandomColorDegeneration,
)
from keras_cv.layers.preprocessing.random_color_jitter import RandomColorJitter
from keras_cv.layers.preprocessing.random_contrast import RandomContrast
from keras_cv.layers.preprocessing.random_crop import RandomCrop
from keras_cv.layers.preprocessing.random_crop_and_resize import (
RandomCropAndResize,
)
from keras_cv.layers.preprocessing.random_cutout import RandomCutout
from keras_cv.layers.preprocessing.random_flip import RandomFlip
from keras_cv.layers.preprocessing.random_gaussian_blur import (
RandomGaussianBlur,
)
from keras_cv.layers.preprocessing.random_hue import RandomHue
from keras_cv.layers.preprocessing.random_jpeg_quality import RandomJpegQuality
from keras_cv.layers.preprocessing.random_rotation import RandomRotation
from keras_cv.layers.preprocessing.random_saturation import RandomSaturation
from keras_cv.layers.preprocessing.random_sharpness import RandomSharpness
from keras_cv.layers.preprocessing.random_shear import RandomShear
from keras_cv.layers.preprocessing.random_translation import RandomTranslation
from keras_cv.layers.preprocessing.random_zoom import RandomZoom
from keras_cv.layers.preprocessing.repeated_augmentation import (
RepeatedAugmentation,
)
from keras_cv.layers.preprocessing.rescaling import Rescaling
from keras_cv.layers.preprocessing.resizing import Resizing
from keras_cv.layers.preprocessing.solarization import Solarization
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
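# A minimal sketch (assuming a TensorFlow runtime) of the single-import
# convenience described above: core-Keras re-exports such as `CenterCrop`
# and KerasCV-native layers such as `Grayscale` compose from one place.
if __name__ == "__main__":
    import tensorflow as tf

    images = tf.random.uniform((4, 64, 64, 3))
    pipeline = tf.keras.Sequential(
        [CenterCrop(32, 32), Grayscale(output_channels=1)]
    )
    print(pipeline(images).shape)  # (4, 32, 32, 1)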
| keras-cv/keras_cv/layers/preprocessing/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/__init__.py",
"repo_id": "keras-cv",
"token_count": 1195
} | 63 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class GrayscaleTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 52, 24, 3))
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, (2, 52, 24, 1))
self.assertEqual(xs2.shape, (2, 52, 24, 3))
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack([2 * tf.ones((10, 10, 3)), tf.ones((10, 10, 3))], axis=0),
tf.float32,
)
# test 1
layer = preprocessing.Grayscale(
output_channels=1,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs1 = augment(xs)
# test 2
layer = preprocessing.Grayscale(
output_channels=3,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs2 = augment(xs)
self.assertEqual(xs1.shape, (2, 10, 10, 1))
self.assertEqual(xs2.shape, (2, 10, 10, 3))
def test_non_square_image(self):
xs = tf.cast(
tf.stack([2 * tf.ones((52, 24, 3)), tf.ones((52, 24, 3))], axis=0),
tf.float32,
)
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, (2, 52, 24, 1))
self.assertEqual(xs2.shape, (2, 52, 24, 3))
def test_in_single_image(self):
xs = tf.cast(
tf.ones((52, 24, 3)),
dtype=tf.float32,
)
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, (52, 24, 1))
self.assertEqual(xs2.shape, (52, 24, 3))
| keras-cv/keras_cv/layers/preprocessing/grayscale_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/grayscale_test.py",
"repo_id": "keras-cv",
"token_count": 1376
} | 64 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import get_interpolation
from keras_cv.utils import parse_factor
@keras_cv_export("keras_cv.layers.RandomAspectRatio")
class RandomAspectRatio(BaseImageAugmentationLayer):
"""RandomAspectRatio randomly distorts the aspect ratio of the provided
image.
This is done on an element-wise basis, and as a consequence this layer
always returns a tf.RaggedTensor.
Args:
factor: a range of values in `(0, infinity)` that determines the
factor by which to distort the aspect ratio of each image.
interpolation: interpolation method used in the `Resize` op.
Supported values are `"nearest"` and `"bilinear"`.
Defaults to `"bilinear"`.
"""
def __init__(
self,
factor,
interpolation="bilinear",
bounding_box_format=None,
seed=None,
**kwargs
):
super().__init__(**kwargs)
self.interpolation = get_interpolation(interpolation)
self.factor = parse_factor(
factor,
min_value=0.0,
max_value=None,
seed=seed,
param_name="factor",
)
self.bounding_box_format = bounding_box_format
self.seed = seed
self.auto_vectorize = False
self.force_output_ragged_images = True
def get_random_transformation(self, **kwargs):
return self.factor(dtype=self.compute_dtype)
def compute_image_signature(self, images):
return tf.RaggedTensorSpec(
shape=(None, None, images.shape[-1]),
ragged_rank=1,
dtype=self.compute_dtype,
)
def augment_bounding_boxes(
self, bounding_boxes, transformation, image, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"Please provide a `bounding_box_format` when augmenting "
"bounding boxes with `RandomAspectRatio()`."
)
bounding_boxes = bounding_boxes.copy()
img_shape = tf.shape(image)
img_shape = tf.cast(img_shape, self.compute_dtype)
height, width = img_shape[0], img_shape[1]
height = height / transformation
width = width * transformation
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
image_shape=img_shape,
)
x, y, x2, y2 = tf.split(bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1)
x = x * transformation
x2 = x2 * transformation
y = y / transformation
y2 = y2 / transformation
boxes = tf.concat([x, y, x2, y2], axis=-1)
boxes = bounding_box.convert_format(
boxes,
source="xyxy",
target=self.bounding_box_format,
image_shape=tf.stack([height, width, 3], axis=0),
)
bounding_boxes["boxes"] = boxes
return bounding_boxes
def augment_image(self, image, transformation, **kwargs):
# Distort the aspect ratio: divide height and multiply width by the
# sampled transformation factor, then resize.
img_shape = tf.cast(tf.shape(image), self.compute_dtype)
height, width = img_shape[0], img_shape[1]
height = height / transformation
width = width * transformation
target_size = tf.cast(tf.stack([height, width]), tf.int32)
result = tf.image.resize(
image, size=target_size, method=self.interpolation
)
return tf.cast(result, self.compute_dtype)
def augment_label(self, label, transformation, **kwargs):
return label
def get_config(self):
config = {
"factor": self.factor,
"interpolation": self.interpolation,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
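# A minimal usage sketch, assuming a TensorFlow runtime; the exact
# output shapes depend on the sampled factor, so only the ragged
# structure is checked here.
if __name__ == "__main__":
    layer = RandomAspectRatio(factor=(0.9, 1.1))
    images = tf.random.uniform((4, 64, 64, 3))
    outputs = layer(images)
    # Always a tf.RaggedTensor: per-image heights and widths differ.
    print(outputs.shape)  # (4, None, None, 3)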
| keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio.py",
"repo_id": "keras-cv",
"token_count": 2007
} | 65 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv import layers as cv_layers
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
# In order to support both unbatched and batched inputs, the height
# and width axes are indexed from the end (negative indices).
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomCrop")
class RandomCrop(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly crops images.
This layer will randomly choose a location to crop images down to a target
size.
If an input image is smaller than the target size, the input will be
resized and cropped to return the largest possible window in the image that
matches the target aspect ratio.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
"""
def __init__(
self, height, width, seed=None, bounding_box_format=None, **kwargs
):
super().__init__(
**kwargs,
autocast=False,
seed=seed,
)
self.height = height
self.width = width
self.bounding_box_format = bounding_box_format
self.seed = seed
self.force_output_dense_images = True
def compute_ragged_image_signature(self, images):
ragged_spec = tf.RaggedTensorSpec(
shape=(self.height, self.width, images.shape[-1]),
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
def get_random_transformation_batch(self, batch_size, **kwargs):
tops = tf.cast(
self._random_generator.uniform(
shape=(batch_size, 1), minval=0, maxval=1
),
self.compute_dtype,
)
lefts = tf.cast(
self._random_generator.uniform(
shape=(batch_size, 1), minval=0, maxval=1
),
self.compute_dtype,
)
return {"tops": tops, "lefts": lefts}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
tops = transformation["tops"]
lefts = transformation["lefts"]
transformation = {
"tops": tf.expand_dims(tops, axis=0),
"lefts": tf.expand_dims(lefts, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
batch_size = tf.shape(images)[0]
channel = tf.shape(images)[-1]
heights, widths = self._get_image_shape(images)
h_diffs = heights - self.height
w_diffs = widths - self.width
# broadcast
h_diffs = (
tf.ones(
shape=(batch_size, self.height, self.width, channel),
dtype=tf.int32,
)
* h_diffs[:, tf.newaxis, tf.newaxis, :]
)
w_diffs = (
tf.ones(
shape=(batch_size, self.height, self.width, channel),
dtype=tf.int32,
)
* w_diffs[:, tf.newaxis, tf.newaxis, :]
)
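# Per-pixel select: crop when the image is at least as large as the
# target in both dimensions, otherwise fall back to a resize.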
return tf.where(
tf.math.logical_and(h_diffs >= 0, w_diffs >= 0),
self._crop_images(images, transformations),
self._resize_images(images),
)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformations, raw_images=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomCrop()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomCrop(bounding_box_format='xyxy')`"
)
if isinstance(bounding_boxes["boxes"], tf.RaggedTensor):
bounding_boxes = bounding_box.to_dense(
bounding_boxes, default_value=-1
)
batch_size = tf.shape(raw_images)[0]
heights, widths = self._get_image_shape(raw_images)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=raw_images,
)
h_diffs = heights - self.height
w_diffs = widths - self.width
# broadcast
num_bounding_boxes = tf.shape(bounding_boxes["boxes"])[-2]
h_diffs = (
tf.ones(
shape=(batch_size, num_bounding_boxes, 4),
dtype=tf.int32,
)
* h_diffs[:, tf.newaxis, :]
)
w_diffs = (
tf.ones(
shape=(batch_size, num_bounding_boxes, 4),
dtype=tf.int32,
)
* w_diffs[:, tf.newaxis, :]
)
boxes = tf.where(
tf.math.logical_and(h_diffs >= 0, w_diffs >= 0),
self._crop_bounding_boxes(
raw_images, bounding_boxes["boxes"], transformations
),
self._resize_bounding_boxes(
raw_images,
bounding_boxes["boxes"],
),
)
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="xyxy",
image_shape=(self.height, self.width, None),
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
image_shape=(self.height, self.width, None),
)
return bounding_boxes
def _get_image_shape(self, images):
if isinstance(images, tf.RaggedTensor):
heights = tf.reshape(images.row_lengths(), (-1, 1))
widths = tf.reshape(
tf.reduce_max(images.row_lengths(axis=2), 1), (-1, 1)
)
else:
batch_size = tf.shape(images)[0]
heights = tf.repeat(tf.shape(images)[H_AXIS], repeats=[batch_size])
heights = tf.reshape(heights, shape=(-1, 1))
widths = tf.repeat(tf.shape(images)[W_AXIS], repeats=[batch_size])
widths = tf.reshape(widths, shape=(-1, 1))
return tf.cast(heights, dtype=tf.int32), tf.cast(widths, dtype=tf.int32)
def _crop_images(self, images, transformations):
batch_size = tf.shape(images)[0]
heights, widths = self._get_image_shape(images)
heights = tf.cast(heights, dtype=self.compute_dtype)
widths = tf.cast(widths, dtype=self.compute_dtype)
tops = transformations["tops"]
lefts = transformations["lefts"]
x1s = lefts * (widths - self.width)
y1s = tops * (heights - self.height)
x2s = x1s + self.width
y2s = y1s + self.height
# normalize
x1s /= widths
y1s /= heights
x2s /= widths
y2s /= heights
boxes = tf.concat([y1s, x1s, y2s, x2s], axis=-1)
images = tf.image.crop_and_resize(
tf.cast(images, tf.float32),
tf.cast(boxes, tf.float32),
tf.range(batch_size),
[self.height, self.width],
method="nearest",
)
return tf.cast(images, dtype=self.compute_dtype)
def _resize_images(self, images):
resizing_layer = cv_layers.Resizing(self.height, self.width)
outputs = resizing_layer(images)
return tf.cast(outputs, dtype=self.compute_dtype)
def _crop_bounding_boxes(self, images, boxes, transformation):
tops = transformation["tops"]
lefts = transformation["lefts"]
heights, widths = self._get_image_shape(images)
heights = tf.cast(heights, dtype=self.compute_dtype)
widths = tf.cast(widths, dtype=self.compute_dtype)
# compute offsets for xyxy bounding_boxes
top_offsets = tf.cast(
tf.math.round(tops * (heights - self.height)),
dtype=self.compute_dtype,
)
left_offsets = tf.cast(
tf.math.round(lefts * (widths - self.width)),
dtype=self.compute_dtype,
)
x1s, y1s, x2s, y2s = tf.split(
tf.cast(boxes, self.compute_dtype), 4, axis=-1
)
x1s -= tf.expand_dims(left_offsets, axis=1)
y1s -= tf.expand_dims(top_offsets, axis=1)
x2s -= tf.expand_dims(left_offsets, axis=1)
y2s -= tf.expand_dims(top_offsets, axis=1)
outputs = tf.concat([x1s, y1s, x2s, y2s], axis=-1)
return outputs
def _resize_bounding_boxes(self, images, boxes):
heights, widths = self._get_image_shape(images)
heights = tf.cast(heights, dtype=self.compute_dtype)
widths = tf.cast(widths, dtype=self.compute_dtype)
x_scale = tf.cast(self.width / widths, dtype=self.compute_dtype)
y_scale = tf.cast(self.height / heights, dtype=self.compute_dtype)
x1s, y1s, x2s, y2s = tf.split(
tf.cast(boxes, self.compute_dtype), 4, axis=-1
)
outputs = tf.concat(
[
x1s * x_scale[:, tf.newaxis, :],
y1s * y_scale[:, tf.newaxis, :],
x2s * x_scale[:, tf.newaxis, :],
y2s * y_scale[:, tf.newaxis, :],
],
axis=-1,
)
return outputs
def get_config(self):
config = {
"height": self.height,
"width": self.width,
"seed": self.seed,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
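# A minimal usage sketch with bounding boxes, assuming a TensorFlow
# runtime; the box coordinates below are illustrative.
if __name__ == "__main__":
    images = tf.random.uniform((2, 128, 128, 3))
    bounding_boxes = {
        "boxes": tf.constant(
            [[[10.0, 10.0, 50.0, 50.0]], [[20.0, 20.0, 80.0, 90.0]]]
        ),
        "classes": tf.constant([[0.0], [1.0]]),
    }
    layer = RandomCrop(height=64, width=64, bounding_box_format="xyxy")
    outputs = layer(
        {"images": images, "bounding_boxes": bounding_boxes}
    )
    print(outputs["images"].shape)  # (2, 64, 64, 3)
    print(outputs["bounding_boxes"]["boxes"].shape)  # (2, 1, 4)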
| keras-cv/keras_cv/layers/preprocessing/random_crop.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_crop.py",
"repo_id": "keras-cv",
"token_count": 5385
} | 66 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomSaturation")
class RandomSaturation(VectorizedBaseImageAugmentationLayer):
"""Randomly adjusts the saturation on given images.
This layer will randomly increase/reduce the saturation for the input RGB
images.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image saturation is impacted. `factor=0.5` makes this layer perform
a no-op. `factor=0.0` makes the image fully grayscale.
`factor=1.0` makes the image fully saturated.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_saturation = keras_cv.layers.preprocessing.RandomSaturation()
augmented_images = random_saturation(images)
```
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
min_value=0.0,
max_value=1.0,
)
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
return self.factor(shape=(batch_size,))
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
images=image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations, **kwargs):
# Convert the factor range from [0, 1] to [0, +inf]. Note that
# `tf.image.adjust_saturation` applies the formula
# `output_saturation = input_saturation * factor`, so we map the
# sampled factor with `y = x / (1 - x)`.
# This ensures:
# y = +inf when x = 1 (full saturation)
# y = 1 when x = 0.5 (no augmentation)
# y = 0 when x = 0 (full grayscale)
# Convert the transformation to a tensor in case it is a float. When
# the transformation is 1.0, a plain Python float would raise a
# divide-by-zero error, but a tensor handles it correctly (yielding
# +inf).
transformations = tf.convert_to_tensor(transformations)
adjust_factors = transformations / (1 - transformations)
adjust_factors = tf.cast(adjust_factors, dtype=images.dtype)
images = tf.image.rgb_to_hsv(images)
s_channel = tf.multiply(
images[..., 1], adjust_factors[..., tf.newaxis, tf.newaxis]
)
s_channel = tf.clip_by_value(
s_channel, clip_value_min=0.0, clip_value_max=1.0
)
images = tf.stack([images[..., 0], s_channel, images[..., 2]], axis=-1)
images = tf.image.hsv_to_rgb(images)
return images
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
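# A minimal numeric check of the factor mapping documented in
# `augment_images` (`y = x / (1 - x)`), assuming a TensorFlow runtime:
# a constant factor of 0.5 maps to an adjustment of 1.0, so the layer
# should be a no-op up to floating-point error.
if __name__ == "__main__":
    images = tf.random.uniform((1, 8, 8, 3))
    no_op = RandomSaturation(factor=(0.5, 0.5))
    max_diff = tf.reduce_max(tf.abs(no_op(images) - images))
    print(float(max_diff))  # ~0.0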
| keras-cv/keras_cv/layers/preprocessing/random_saturation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_saturation.py",
"repo_id": "keras-cv",
"token_count": 1942
} | 67 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.Solarization")
class Solarization(VectorizedBaseImageAugmentationLayer):
"""Applies (max_value - pixel + min_value) for each pixel in the image.
When created without a `threshold` parameter, the layer applies
solarization to all values. When created with a specified `threshold`,
the layer only augments pixels that are above the `threshold` value.
Reference:
- [AutoAugment: Learning Augmentation Policies from Data](
https://arxiv.org/abs/1805.09501
)
- [RandAugment](https://arxiv.org/pdf/1909.13719.pdf)
Args:
value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`.
addition_factor: (Optional) A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, addition_factor)`. If specified, this value is
added to each pixel before solarization and thresholding. The
addition value should be scaled according to the value range
(0, 255), defaults to 0.0.
threshold_factor: (Optional) A tuple of two floats, a single float or
a `keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, threshold_factor)`. If specified, only pixel
values above this threshold will be solarized.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
print(images[0, 0, 0])
# [59 62 63]
# Note that images are Tensor with values in the range [0, 255]
solarization = Solarization(value_range=(0, 255))
images = solarization(images)
print(images[0, 0, 0])
# [196, 193, 192]
```
Call arguments:
images: Tensor of type int or float, with pixels in
range [0, 255] and shape [batch, height, width, channels]
or [height, width, channels].
"""
def __init__(
self,
value_range,
addition_factor=0.0,
threshold_factor=0.0,
seed=None,
**kwargs
):
super().__init__(seed=seed, **kwargs)
self.seed = seed
self.addition_factor = preprocessing.parse_factor(
addition_factor,
max_value=255,
seed=seed,
param_name="addition_factor",
)
self.threshold_factor = preprocessing.parse_factor(
threshold_factor,
max_value=255,
seed=seed,
param_name="threshold_factor",
)
self.value_range = value_range
def get_random_transformation_batch(self, batch_size, **kwargs):
return {
"additions": self.addition_factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
),
"thresholds": self.threshold_factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
),
}
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(image, transformation)
def augment_images(self, images, transformations, **kwargs):
thresholds = transformations["thresholds"]
additions = transformations["additions"]
images = preprocessing.transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
results = images + additions
results = tf.clip_by_value(results, 0, 255)
results = tf.where(results < thresholds, results, 255 - results)
results = preprocessing.transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return results
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def get_config(self):
config = {
"threshold_factor": self.threshold_factor,
"addition_factor": self.addition_factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["threshold_factor"], dict):
config["threshold_factor"] = keras.utils.deserialize_keras_object(
config["threshold_factor"]
)
if isinstance(config["addition_factor"], dict):
config["addition_factor"] = keras.utils.deserialize_keras_object(
config["addition_factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/solarization.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/solarization.py",
"repo_id": "keras-cv",
"token_count": 2553
} | 68 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from tensorflow import keras
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_point_feature_noise import ( # noqa: E501
FrustumRandomPointFeatureNoise,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
POINTCLOUD_LABEL_INDEX = base_augmentation_layer_3d.POINTCLOUD_LABEL_INDEX
class FrustumRandomPointFeatureNoiseTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0, theta_width=1, phi_width=1, max_noise_level=0.5
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
# bounding boxes and point clouds (x, y, z, class) are not modified.
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
self.assertAllClose(
inputs[POINT_CLOUDS][:, :, :POINTCLOUD_LABEL_INDEX],
outputs[POINT_CLOUDS][:, :, :POINTCLOUD_LABEL_INDEX],
)
def test_augment_specific_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = FrustumRandomPointFeatureNoise(
r_distance=10,
theta_width=np.pi,
phi_width=1.5 * np.pi,
max_noise_level=0.5,
)
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4, 5],
[10, 1, 2, 3, 4, 2],
[100, 100, 2, 3, 4, 1],
[-20, -20, 21, 1, 0, 2],
]
]
* 2
).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
# bounding boxes and point clouds (x, y, z, class) are not modified.
augmented_point_clouds = np.array(
[
[
[0, 1, 2, 3, 4, 5],
[10, 1, 2, 3, 4, 2],
[100, 100, 2, 3, 4, 1],
[-20, -20, 21, 1, 0, 1.3747642],
],
[
[0, 1, 2, 3, 4, 5],
[10, 1, 2, 3, 4, 2],
[100, 100, 2, 3, 4, 1],
[-20, -20, 21, 1, 0, 1.6563809],
],
]
).astype("float32")
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
# [-20, -20, 21, 1, 0, 2] is randomly selected as the frustum center.
# [0, 1, 2, 3, 4, 5] and [10, 1, 2, 3, 4, 2] are not changed due to less
# than r_distance. [100, 100, 2, 3, 4, 1] is not changed due to outside
# phi_width.
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
def test_augment_only_one_valid_point_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = FrustumRandomPointFeatureNoise(
r_distance=10,
theta_width=np.pi,
phi_width=1.5 * np.pi,
max_noise_level=0.5,
)
point_clouds = np.array(
[
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[100, 100, 2, 3, 4, 1],
[0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
# bounding boxes and point clouds (x, y, z, class) are not modified.
augmented_point_clouds = np.array(
[
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[100, 100, 2, 3, 4.119616, 0.619783],
[0, 0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[100, 100, 2, 3, 3.192014, 0.618371],
[0, 0, 0, 0, 0, 0],
],
]
).astype("float32")
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
# [100, 100, 2, 3, 4, 1] is selected as the frustum center because it is
# the only valid point.
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
def test_not_augment_max_noise_level0_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0, theta_width=1, phi_width=1, max_noise_level=0.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_not_augment_max_noise_level1_frustum_empty_point_clouds_and_bounding_boxes( # noqa: E501
self,
):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=10, theta_width=0, phi_width=0, max_noise_level=1.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_exclude_all_points(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0,
theta_width=1,
phi_width=1,
max_noise_level=1.0,
exclude_classes=1,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
exclude_classes = np.ones(shape=(2, 50, 1)).astype("float32")
point_clouds = np.concatenate([point_clouds, exclude_classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_exclude_the_first_half_points(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0,
theta_width=10,
phi_width=10,
max_noise_level=1.0,
exclude_classes=[1, 2],
)
point_clouds = np.random.random(size=(2, 10, 10)).astype("float32")
class_1 = np.ones(shape=(2, 2, 1)).astype("float32")
class_2 = np.ones(shape=(2, 3, 1)).astype("float32") * 2
classes = np.concatenate(
[class_1, class_2, np.zeros(shape=(2, 5, 1)).astype("float32")],
axis=1,
)
point_clouds = np.concatenate([point_clouds, classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(
inputs[POINT_CLOUDS][:, :5, :], outputs[POINT_CLOUDS][:, :5, :]
)
self.assertNotAllClose(
inputs[POINT_CLOUDS][:, 5:, :], outputs[POINT_CLOUDS][:, 5:, :]
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomPointFeatureNoise(
r_distance=0, theta_width=1, phi_width=1, max_noise_level=0.5
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise_test.py",
"repo_id": "keras-cv",
"token_count": 4481
} | 69 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from tensorflow import keras
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.random_drop_box import RandomDropBox
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
ADDITIONAL_POINT_CLOUDS = base_augmentation_layer_3d.ADDITIONAL_POINT_CLOUDS
ADDITIONAL_BOUNDING_BOXES = base_augmentation_layer_3d.ADDITIONAL_BOUNDING_BOXES
class RandomDropBoxTest(TestCase):
def test_drop_class1_box_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = RandomDropBox(label_index=1, max_drop_bounding_boxes=4)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Drop the first object bounding box [0, 0, 0, 4, 4, 4, 0, 1] and
# points.
augmented_point_clouds = np.array(
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[10, 1, 2, 3, 4],
[0, 0, 0, 0, 0],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 0, 0, 0, 0, 0, 0],
[20, 20, 20, 1, 1, 1, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_drop_both_boxes_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = RandomDropBox(max_drop_bounding_boxes=4)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Drop both object bounding boxes and points.
augmented_point_clouds = np.array(
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[10, 1, 2, 3, 4],
[0, 0, 0, 0, 0],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_not_drop_any_box_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(2)
add_layer = RandomDropBox(max_drop_bounding_boxes=0)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Do not drop any bounding box or point.
augmented_point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_batch_drop_one_of_the_box_point_clouds_and_bounding_boxes(self):
keras.utils.set_random_seed(4)
add_layer = RandomDropBox(max_drop_bounding_boxes=2)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
]
* 3
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
outputs = add_layer(inputs)
# Batch 0: drop the first bounding box [0, 0, 0, 4, 4, 4, 0, 1] and
# points,
# Batch 1,2: drop the second bounding box [20, 20, 20, 3, 3, 3, 0, 2]
# and points,
augmented_point_clouds = np.array(
[
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[10, 1, 2, 3, 4],
[0, 0, 0, 0, 0],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2,
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2,
[
[
[0, 1, 2, 3, 4],
[0, 0, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2,
]
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[
[0, 0, 0, 0, 0, 0, 0, 0],
[20, 20, 20, 3, 3, 3, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2,
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2,
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2,
]
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box_test.py",
"repo_id": "keras-cv",
"token_count": 7814
} | 70 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.layers.TransformerEncoder")
class TransformerEncoder(layers.Layer):
"""
Transformer encoder block implementation as a Keras Layer.
Args:
project_dim: the dimensionality of the projection of the encoder, and
output of the `MultiHeadAttention`
mlp_dim: the intermediate dimensionality of the MLP head before
projecting to `project_dim`
num_heads: the number of heads for the `MultiHeadAttention` layer
mlp_dropout: default 0.1, the dropout rate to apply between the layers
of the MLP head of the encoder
attention_dropout: default 0.1, the dropout rate to apply in the
MultiHeadAttention layer
activation: default `keras.activations.gelu`, the activation function
to apply in the MLP head - should be a function
layer_norm_epsilon: default 1e-06, the epsilon for `LayerNormalization`
layers
Basic usage:
```
project_dim = 1024
mlp_dim = 3072
num_heads = 4
encoded_patches = keras_cv.layers.PatchingAndEmbedding(
project_dim=project_dim,
patch_size=16)(img_batch)
trans_encoded = keras_cv.layers.TransformerEncoder(project_dim=project_dim,
mlp_dim = mlp_dim,
num_heads=num_heads)(encoded_patches)
print(trans_encoded.shape) # (1, 197, 1024)
```
"""
def __init__(
self,
project_dim,
num_heads,
mlp_dim,
mlp_dropout=0.1,
attention_dropout=0.1,
activation=keras.activations.gelu,
layer_norm_epsilon=1e-06,
**kwargs,
):
super().__init__(**kwargs)
self.project_dim = project_dim
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.mlp_dropout = mlp_dropout
self.attention_dropout = attention_dropout
self.activation = activation
self.layer_norm_epsilon = layer_norm_epsilon
self.mlp_units = [mlp_dim, project_dim]
self.layer_norm1 = layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.layer_norm2 = layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.attn = layers.MultiHeadAttention(
num_heads=self.num_heads,
key_dim=self.project_dim // self.num_heads,
dropout=self.attention_dropout,
)
self.dense1 = layers.Dense(self.mlp_units[0])
self.dense2 = layers.Dense(self.mlp_units[1])
def call(self, inputs):
"""Calls the Transformer Encoder on an input sequence.
Args:
inputs: A `tf.Tensor` of shape [batch, patch_num+1, project_dim]
Returns:
A `tf.Tensor` of shape [batch, patch_num+1, project_dim]
"""
if inputs.shape[-1] != self.project_dim:
raise ValueError(
"The input and output dimensionality must be the same, but the "
f"TransformerEncoder was provided with {inputs.shape[-1]} and "
f"{self.project_dim}"
)
x = self.layer_norm1(inputs)
x = self.attn(x, x)
x = layers.Dropout(self.mlp_dropout)(x)
x = layers.Add()([x, inputs])
y = self.layer_norm2(x)
y = self.dense1(y)
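# Apply the tanh-approximated GELU when the activation is gelu;
# other activations are called as-is.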
if self.activation == keras.activations.gelu:
y = self.activation(y, approximate=True)
else:
y = self.activation(y)
y = layers.Dropout(self.mlp_dropout)(y)
y = self.dense2(y)
y = layers.Dropout(self.mlp_dropout)(y)
output = layers.Add()([x, y])
return output
def get_config(self):
config = super().get_config()
activation = self.activation
if not isinstance(activation, (str, dict)):
activation = keras.activations.serialize(activation)
config.update(
{
"project_dim": self.project_dim,
"mlp_dim": self.mlp_dim,
"num_heads": self.num_heads,
"attention_dropout": self.attention_dropout,
"mlp_dropout": self.mlp_dropout,
"activation": activation,
"layer_norm_epsilon": self.layer_norm_epsilon,
}
)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
activation = config.pop("activation")
if isinstance(activation, (str, dict)):
activation = keras.activations.deserialize(activation)
return cls(activation=activation, **config)
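# A minimal standalone shape check, assuming a TensorFlow runtime; the
# sequence length of 197 mirrors a ViT with 196 patches plus one class
# token, and the last input axis must equal `project_dim`.
if __name__ == "__main__":
    import tensorflow as tf

    encoder = TransformerEncoder(project_dim=64, num_heads=2, mlp_dim=128)
    sequence = tf.random.uniform((1, 197, 64))
    print(encoder(sequence).shape)  # (1, 197, 64)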
| keras-cv/keras_cv/layers/transformer_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/transformer_encoder.py",
"repo_id": "keras-cv",
"token_count": 2358
} | 71 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.losses.iou_loss import IoULoss
from keras_cv.tests.test_case import TestCase
class IoUTest(TestCase):
def test_output_shape(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
iou_loss = IoULoss(bounding_box_format="xywh")
self.assertAllEqual(iou_loss(y_true, y_pred).shape, ())
def test_output_shape_reduction_none(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
iou_loss = IoULoss(bounding_box_format="xywh", reduction="none")
self.assertAllEqual(
iou_loss(y_true, y_pred).shape,
[
2,
],
)
def test_output_shape_relative(self):
y_true = [
[0.0, 0.0, 0.1, 0.1],
[0.0, 0.0, 0.2, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.2, 0.3, 0.3],
]
y_pred = [
[0.0, 0.0, 0.5, 0.6],
[0.0, 0.0, 0.7, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.1, 0.3, 0.3],
]
iou_loss = IoULoss(bounding_box_format="rel_xyxy")
self.assertAllEqual(iou_loss(y_true, y_pred).shape, ())
def test_output_value(self):
y_true = [
[0, 0, 1, 1],
[0, 0, 2, 3],
[4, 5, 3, 6],
[2, 2, 3, 3],
]
y_pred = [
[0, 0, 5, 6],
[0, 0, 7, 3],
[4, 5, 5, 6],
[2, 1, 3, 3],
]
iou_loss = IoULoss(bounding_box_format="xywh")
# -log(compute_iou(y_true, y_pred)) = 1.0363084
self.assertAllClose(iou_loss(y_true, y_pred), 1.0363084)
| keras-cv/keras_cv/losses/iou_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/iou_loss_test.py",
"repo_id": "keras-cv",
"token_count": 1225
} | 72 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.models import legacy
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetMBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetSBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetTinyBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetXLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone import (
CSPDarkNetBackbone,
)
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet121Backbone,
)
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet169Backbone,
)
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet201Backbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB0Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB1Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB2Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB3Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_aliases import ( # noqa: E501
EfficientNetLiteB4Backbone,
)
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_backbone import ( # noqa: E501
EfficientNetLiteBackbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B0Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B1Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B2Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B3Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B4Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B5Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B6Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B7Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B0Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B1Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B2Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2B3Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2LBackbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2MBackbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2SBackbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB0Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB1Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB2Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB3Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB4Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB5Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_aliases import (
MobileNetV3LargeBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_aliases import (
MobileNetV3SmallBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import (
MobileNetV3Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet18Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet34Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet50Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet101Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet152Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet18V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet34V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet50V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet101V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet152V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetBBackbone
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetHBackbone
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetLBackbone
from keras_cv.models.backbones.vit_det.vit_det_backbone import ViTDetBackbone
from keras_cv.models.classification.image_classifier import ImageClassifier
from keras_cv.models.feature_extractor.clip import CLIP
from keras_cv.models.object_detection.retinanet.retinanet import RetinaNet
from keras_cv.models.object_detection.yolo_v8.yolo_v8_backbone import (
YOLOV8Backbone,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_detector import (
YOLOV8Detector,
)
from keras_cv.models.segmentation import BASNet
from keras_cv.models.segmentation import DeepLabV3Plus
from keras_cv.models.segmentation import SAMMaskDecoder
from keras_cv.models.segmentation import SAMPromptEncoder
from keras_cv.models.segmentation import SegmentAnythingModel
from keras_cv.models.segmentation import TwoWayTransformer
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormer
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB0
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB1
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB2
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB3
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB4
from keras_cv.models.segmentation.segformer.segformer_aliases import SegFormerB5
from keras_cv.models.stable_diffusion import StableDiffusion
from keras_cv.models.stable_diffusion import StableDiffusionV2
| keras-cv/keras_cv/models/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/models/__init__.py",
"repo_id": "keras-cv",
"token_count": 3095
} | 73 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet121Backbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class DenseNetBackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_applications_model(self):
model = DenseNet121Backbone()
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
include_rescaling=False,
)
model_output = model(self.input_batch)
save_path = os.path.join(self.get_temp_dir(), "densenet_backbone.keras")
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, DenseNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = DenseNet121Backbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "densenet_alias_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
        # Note that these aliases are serialized as the base class.
self.assertIsInstance(restored_model, DenseNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = DenseNet121Backbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P2", "P3", "P4", "P5"]
        self.assertEqual(list(outputs.keys()), levels)
        self.assertEqual(
            outputs["P2"].shape,
            (None, input_size // 2**2, input_size // 2**2, 256),
        )
        self.assertEqual(
            outputs["P3"].shape,
            (None, input_size // 2**3, input_size // 2**3, 512),
        )
        self.assertEqual(
            outputs["P4"].shape,
            (None, input_size // 2**4, input_size // 2**4, 1024),
        )
        self.assertEqual(
            outputs["P5"].shape,
            (None, input_size // 2**5, input_size // 2**5, 1024),
        )
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = DenseNetBackbone(
stackwise_num_repeats=[6, 12, 24, 16],
input_shape=(None, None, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, None, None, 1024))
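# A hedged note on running the slow serialization tests above (illustrative;
# this assumes the repository's pytest configuration defines a `--run_large`
# flag for the `large` marker):
#
#   pytest keras_cv/models/backbones/densenet/densenet_backbone_test.py \
#       --run_large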
| keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 2063
} | 74 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNetV2 model preset configurations."""
backbone_presets_no_weights = {
"efficientnetv2_s": {
"metadata": {
"description": (
"EfficientNet architecture with 6 convolutional blocks."
),
"params": 20331360,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_s/2", # noqa: E501
},
"efficientnetv2_m": {
"metadata": {
"description": (
"EfficientNet architecture with 7 convolutional blocks."
),
"params": 53150388,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_m/2", # noqa: E501
},
"efficientnetv2_l": {
"metadata": {
"description": (
"EfficientNet architecture with 7 convolutional "
"blocks, but more filters the in `efficientnetv2_m`."
),
"params": 117746848,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_l/2", # noqa: E501
},
"efficientnetv2_b0": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`."
),
"params": 5919312,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b0/2", # noqa: E501
},
"efficientnetv2_b1": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`."
),
"params": 6931124,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b1/2", # noqa: E501
},
"efficientnetv2_b2": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`."
),
"params": 8769374,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b2/2", # noqa: E501
},
"efficientnetv2_b3": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.2` and `depth_coefficient=1.4`."
),
"params": 12930622,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b3/2", # noqa: E501
},
}
backbone_presets_with_weights = {
"efficientnetv2_s_imagenet": {
"metadata": {
"description": (
"EfficientNet architecture with 6 convolutional "
"blocks. Weights are initialized to pretrained imagenet "
"classification weights.Published weights are capable of "
"scoring 83.9%top 1 accuracy "
"and 96.7% top 5 accuracy on imagenet."
),
"params": 20331360,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_s_imagenet/2", # noqa: E501
},
"efficientnetv2_b0_imagenet": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`. "
"Weights are "
"initialized to pretrained imagenet classification weights. "
"Published weights are capable of scoring 77.1% top 1 accuracy "
"and 93.3% top 5 accuracy on imagenet."
),
"params": 5919312,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b0_imagenet/2", # noqa: E501
},
"efficientnetv2_b1_imagenet": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`. "
"Weights are "
"initialized to pretrained imagenet classification weights."
"Published weights are capable of scoring 79.1% top 1 accuracy "
"and 94.4% top 5 accuracy on imagenet."
),
"params": 6931124,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b1_imagenet/2", # noqa: E501
},
"efficientnetv2_b2_imagenet": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 6 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`. "
"Weights are initialized to pretrained "
"imagenet classification weights."
"Published weights are capable of scoring 80.1% top 1 accuracy "
"and 94.9% top 5 accuracy on imagenet."
),
"params": 8769374,
"official_name": "EfficientNetV2",
"path": "efficientnetv2",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b2_imagenet/2", # noqa: E501
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
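# A minimal usage sketch (not part of this module; assumes the standard
# `from_preset` constructor on `EfficientNetV2Backbone`, which accepts the
# keys defined above):
#
#   from keras_cv.models import EfficientNetV2Backbone
#
#   backbone = EfficientNetV2Backbone.from_preset("efficientnetv2_s_imagenet")
#   features = backbone(images)  # images: float tensor of shape (B, H, W, 3)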
| keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 3507
} | 75 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """ResNetBackbone (V1) model with {num_layers} layers.
Reference:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
    The difference between ResNetV1 and ResNetV2 rests in the structure of their
individual building blocks. In ResNetV2, the batch normalization and
ReLU activation precede the convolution layers, as opposed to ResNetV1 where
the batch normalization and ReLU activation are applied after the
convolution layers.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = ResNet{num_layers}Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.ResNet18Backbone")
class ResNet18Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet18", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet34Backbone")
class ResNet34Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet34", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet50Backbone")
class ResNet50Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet50", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"resnet50_imagenet": copy.deepcopy(
backbone_presets["resnet50_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.ResNet101Backbone")
class ResNet101Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet101", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.ResNet152Backbone")
class ResNet152Backbone(ResNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return ResNetBackbone.from_preset("resnet152", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(ResNet18Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=18))
setattr(ResNet34Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=34))
setattr(ResNet50Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=50))
setattr(ResNet101Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=101))
setattr(ResNet152Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=152))
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_aliases.py",
"repo_id": "keras-cv",
"token_count": 2951
} | 76 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VitDet model preset configurations."""
backbone_presets_no_weights = {
"vitdet_base": {
"metadata": {
"description": (
"Detectron2 ViT basebone with 12 "
"transformer encoders with embed dim 768 and attention layers"
" with 12 heads with global attention on encoders 2, 5, 8, "
"and 11."
),
"params": 89_670_912,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_base/2",
},
"vitdet_large": {
"metadata": {
"description": (
"Detectron2 ViT basebone with 24 "
"transformer encoders with embed dim "
"1024 and attention layers with 16 heads with global "
"attention on encoders 5, 11, 17, and 23."
),
"params": 308_278_272,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_large/2",
},
"vitdet_huge": {
"metadata": {
"description": (
"Detectron2 ViT basebone model "
"with 32 transformer encoders with embed dim "
"1280 and attention layers with 16 heads with global "
"attention on encoders 7, 15, 23, and 31."
),
"params": 637_026_048,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_huge/2",
},
}
backbone_presets_with_weights = {
"vitdet_base_sa1b": {
"metadata": {
"description": (
"A base Detectron2 ViT backbone trained on the SA1B dataset."
),
"params": 89_670_912,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_base_sa1b/2",
},
"vitdet_large_sa1b": {
"metadata": {
"description": (
"A large Detectron2 ViT backbone trained on the SA1B dataset."
),
"params": 308_278_272,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_large_sa1b/2",
},
"vitdet_huge_sa1b": {
"metadata": {
"description": (
"A huge Detectron2 ViT backbone trained on the SA1B dataset."
),
"params": 637_026_048,
"official_name": "VitDet",
"path": "vit_det",
},
"kaggle_handle": "kaggle://keras/vitdet/keras/vitdet_huge_sa1b/2",
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
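# A minimal usage sketch (illustrative only; assumes the standard
# `from_preset` constructor on `ViTDetBackbone`):
#
#   from keras_cv.models import ViTDetBackbone
#
#   # "vitdet_base_sa1b" loads the pretrained SA1B weights listed above.
#   backbone = ViTDetBackbone.from_preset("vitdet_base_sa1b")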
| keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1710
} | 77 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import regex as re
import tensorflow as tf
import tensorflow_text as tf_text
try:
import keras_nlp
from keras_nlp.tokenizers import BytePairTokenizer
except ImportError:
keras_nlp = None
# As Python and TF handle special spaces differently, we need to
# manually handle special spaces during string splitting.
SPECIAL_WHITESPACES = r"\x{a0}\x{2009}\x{202f}\x{3000}"
SPLIT_PATTERN_1 = (
r"'s|'t|'re|'ve|'m|'ll|'d"
+ r"|[\s{special_spaces}]+[\n\r\t\f६{special_spaces}]| ?\p{L}+|"
+ r" ?[\p{N}]+| ?[^\s\p{L}\p{N}{special_spaces}</w>]+"
)
SPLIT_PATTERN_1 = SPLIT_PATTERN_1.replace(
"{special_spaces}", SPECIAL_WHITESPACES
)
SPLIT_PATTERN_2 = rf"""[\s६{SPECIAL_WHITESPACES}]$"""
def split_strings_for_bpe(inputs, unsplittable_tokens=None):
    # We need to recreate the exact behavior of token presplitting in the
    # original gpt2 tokenizer, which uses a lookahead. As re2 does not
    # support lookahead matching, we use an alternative: insert a special
    # token "६" before the leading space of non-space characters and after
    # a trailing space, e.g., " keras" becomes "६ keras".
inputs = tf.strings.regex_replace(
inputs, rf"( )([^\s{SPECIAL_WHITESPACES}])", r"६\1\2"
)
inputs = tf.strings.regex_replace(
inputs, rf"(\s{SPECIAL_WHITESPACES})$", r"\1६"
)
inputs = tf.strings.regex_replace(inputs, r"\s", "")
if unsplittable_tokens:
alts = create_alts_for_unsplittable_tokens(unsplittable_tokens)
for token, alt in zip(unsplittable_tokens, alts):
escaped_token = re.escape(token)
inputs = tf_text.regex_split(inputs, escaped_token, escaped_token)
inputs = tf.strings.regex_replace(inputs, escaped_token, alt)
raw_tokens = tf_text.regex_split(inputs, SPLIT_PATTERN_1, SPLIT_PATTERN_1)
    # Second pass splits out the last whitespace char or "६".
raw_tokens = tf_text.regex_split(
raw_tokens, SPLIT_PATTERN_2, SPLIT_PATTERN_2
)
if unsplittable_tokens:
# Replace special tokens alternate with originals.
for token, alt in zip(unsplittable_tokens, alts):
escaped_alt = re.escape(alt)
raw_tokens = tf.strings.regex_replace(
raw_tokens, escaped_alt, token
)
# Add '</w>' to the end of each token
tokens_with_end_tag = tf.strings.regex_replace(
raw_tokens, r"(\p{L}+)", r"\1</w>"
)
while tokens_with_end_tag.shape.rank > 2:
tokens_with_end_tag = tokens_with_end_tag.merge_dims(1, 2)
return remove_strings_from_inputs(tokens_with_end_tag, "६")
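# Illustrative trace of `split_strings_for_bpe` (assumed behavior, not an
# executed doctest): for the input " keras is great",
#
#   "६ keras६ is६ great"  after "६" is inserted before each leading space
#   "६keras६is६great"     after all whitespace is removed
#   ["keras</w>", "is</w>", "great</w>"]
#                         after the two regex splits, the "</w>" tagging,
#                         and removal of the "६" placeholder strings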
def create_alts_for_unsplittable_tokens(unsplittable_tokens):
    # Create alternates for all special tokens that will not be split
    # during tokenization.
alts = []
prefix = "Ĵ"
# Trim out splitters.
replace_pattern = r"'|\s+|[^\p{L}\p{N}]+"
for token in unsplittable_tokens:
token = re.sub(replace_pattern, "", token)
alts.append(prefix + token)
return alts
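# For example (assumed behavior):
#
#   create_alts_for_unsplittable_tokens(["<|endoftext|>"])
#   # -> ["Ĵendoftext"], since "<", "|", and ">" match the splitter
#   #    pattern and are trimmed before the "Ĵ" prefix is prepended.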
def remove_strings_from_inputs(tensor, string_to_remove):
"""Remove certain strings from input tensor."""
non_empty_mask = tensor != string_to_remove
flatten_indexes = tf.where(non_empty_mask)
flatten_result = tf.gather_nd(tensor, flatten_indexes)
row_lengths = tf.reduce_sum(tf.cast(non_empty_mask, "int64"), axis=1)
result = tf.RaggedTensor.from_row_lengths(
values=flatten_result,
row_lengths=row_lengths,
)
return result
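# For example (assumed behavior): given the 2-D ragged tensor
# [["६", "keras</w>"], ["great</w>"]] and string_to_remove="६", the result
# is [["keras</w>"], ["great</w>"]], with row lengths recomputed to match.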
class CLIPTokenizer(BytePairTokenizer):
    def __init__(self, **kwargs):
        if keras_nlp is None:
            raise ValueError(
                "CLIPTokenizer requires keras-nlp. Please install it "
                "using pip: `pip install -U keras-nlp keras`"
            )
        super().__init__(**kwargs)
def _bpe_merge_and_update_cache(self, tokens):
"""Process unseen tokens and add to cache."""
words = self._transform_bytes(tokens)
tokenized_words = self._bpe_merge(words)
        # For each word, join all its tokens with a whitespace,
        # e.g., ["dragon", "fly"] => "dragon fly", for hashing purposes.
tokenized_words = tf.strings.reduce_join(
tokenized_words,
axis=1,
)
self.cache.insert(tokens, tokenized_words)
def tokenize(self, inputs):
self._check_vocabulary()
if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)):
inputs = tf.convert_to_tensor(inputs)
if self.add_prefix_space:
inputs = tf.strings.join([" ", inputs])
scalar_input = inputs.shape.rank == 0
if scalar_input:
inputs = tf.expand_dims(inputs, 0)
raw_tokens = split_strings_for_bpe(inputs, self.unsplittable_tokens)
token_row_splits = raw_tokens.row_splits
flat_tokens = raw_tokens.flat_values
# Check cache.
cache_lookup = self.cache.lookup(flat_tokens)
cache_mask = cache_lookup == ""
has_unseen_words = tf.math.reduce_any(
(cache_lookup == "") & (flat_tokens != "")
)
def process_unseen_tokens():
unseen_tokens = tf.boolean_mask(flat_tokens, cache_mask)
self._bpe_merge_and_update_cache(unseen_tokens)
return self.cache.lookup(flat_tokens)
        # If `has_unseen_words == True`, not all tokens are in the cache,
        # so we process the unseen tokens. Otherwise, return the cache lookup.
tokenized_words = tf.cond(
has_unseen_words,
process_unseen_tokens,
lambda: cache_lookup,
)
tokens = tf.strings.split(tokenized_words, sep=" ")
if self.compute_dtype != tf.string:
# Encode merged tokens.
tokens = self.token_to_id_map.lookup(tokens)
# Unflatten to match input.
tokens = tf.RaggedTensor.from_row_splits(
tokens.flat_values,
tf.gather(tokens.row_splits, token_row_splits),
)
# Convert to a dense output if `sequence_length` is set.
if self.sequence_length:
output_shape = tokens.shape.as_list()
output_shape[-1] = self.sequence_length
tokens = tokens.to_tensor(shape=output_shape)
        # Convert to a dense output if the input is scalar
if scalar_input:
tokens = tf.squeeze(tokens, 0)
tf.ensure_shape(tokens, shape=[self.sequence_length])
return tokens
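# A minimal usage sketch (illustrative only; "vocab.json" and "merges.txt"
# are placeholder paths, and the constructor arguments come from the
# keras-nlp `BytePairTokenizer` base class):
#
#   tokenizer = CLIPTokenizer(
#       vocabulary="vocab.json", merges="merges.txt", sequence_length=77
#   )
#   token_ids = tokenizer.tokenize(["a photo of a cat"])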
| keras-cv/keras_cv/models/feature_extractor/clip/clip_tokenizer.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_tokenizer.py",
"repo_id": "keras-cv",
"token_count": 3127
} | 78 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RegNet models for KerasCV.
References:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) (CVPR 2020)
- [Based on the Original keras.applications RegNet](https://github.com/keras-team/keras/blob/master/keras/applications/regnet.py)
""" # noqa: E501
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from tensorflow.keras import layers
from keras_cv.layers import SqueezeAndExcite2D
from keras_cv.models.legacy import utils
from keras_cv.models.legacy.weights import parse_weights
# The widths and depths are deduced from a quantized linear function. For
# more information, please refer to "Designing Network Design Spaces" by
# Radosavovic et al.
# BatchNorm momentum and epsilon values taken from original implementation.
MODEL_CONFIGS = {
"x002": {
"depths": [1, 1, 4, 7],
"widths": [24, 56, 152, 368],
"group_width": 8,
"default_size": 224,
"block_type": "X",
},
"x004": {
"depths": [1, 2, 7, 12],
"widths": [32, 64, 160, 384],
"group_width": 16,
"default_size": 224,
"block_type": "X",
},
"x006": {
"depths": [1, 3, 5, 7],
"widths": [48, 96, 240, 528],
"group_width": 24,
"default_size": 224,
"block_type": "X",
},
"x008": {
"depths": [1, 3, 7, 5],
"widths": [64, 128, 288, 672],
"group_width": 16,
"default_size": 224,
"block_type": "X",
},
"x016": {
"depths": [2, 4, 10, 2],
"widths": [72, 168, 408, 912],
"group_width": 24,
"default_size": 224,
"block_type": "X",
},
"x032": {
"depths": [2, 6, 15, 2],
"widths": [96, 192, 432, 1008],
"group_width": 48,
"default_size": 224,
"block_type": "X",
},
"x040": {
"depths": [2, 5, 14, 2],
"widths": [80, 240, 560, 1360],
"group_width": 40,
"default_size": 224,
"block_type": "X",
},
"x064": {
"depths": [2, 4, 10, 1],
"widths": [168, 392, 784, 1624],
"group_width": 56,
"default_size": 224,
"block_type": "X",
},
"x080": {
"depths": [2, 5, 15, 1],
"widths": [80, 240, 720, 1920],
"group_width": 120,
"default_size": 224,
"block_type": "X",
},
"x120": {
"depths": [2, 5, 11, 1],
"widths": [224, 448, 896, 2240],
"group_width": 112,
"default_size": 224,
"block_type": "X",
},
"x160": {
"depths": [2, 6, 13, 1],
"widths": [256, 512, 896, 2048],
"group_width": 128,
"default_size": 224,
"block_type": "X",
},
"x320": {
"depths": [2, 7, 13, 1],
"widths": [336, 672, 1344, 2520],
"group_width": 168,
"default_size": 224,
"block_type": "X",
},
"y002": {
"depths": [1, 1, 4, 7],
"widths": [24, 56, 152, 368],
"group_width": 8,
"default_size": 224,
"block_type": "Y",
},
"y004": {
"depths": [1, 3, 6, 6],
"widths": [48, 104, 208, 440],
"group_width": 8,
"default_size": 224,
"block_type": "Y",
},
"y006": {
"depths": [1, 3, 7, 4],
"widths": [48, 112, 256, 608],
"group_width": 16,
"default_size": 224,
"block_type": "Y",
},
"y008": {
"depths": [1, 3, 8, 2],
"widths": [64, 128, 320, 768],
"group_width": 16,
"default_size": 224,
"block_type": "Y",
},
"y016": {
"depths": [2, 6, 17, 2],
"widths": [48, 120, 336, 888],
"group_width": 24,
"default_size": 224,
"block_type": "Y",
},
"y032": {
"depths": [2, 5, 13, 1],
"widths": [72, 216, 576, 1512],
"group_width": 24,
"default_size": 224,
"block_type": "Y",
},
"y040": {
"depths": [2, 6, 12, 2],
"widths": [128, 192, 512, 1088],
"group_width": 64,
"default_size": 224,
"block_type": "Y",
},
"y064": {
"depths": [2, 7, 14, 2],
"widths": [144, 288, 576, 1296],
"group_width": 72,
"default_size": 224,
"block_type": "Y",
},
"y080": {
"depths": [2, 4, 10, 1],
"widths": [168, 448, 896, 2016],
"group_width": 56,
"default_size": 224,
"block_type": "Y",
},
"y120": {
"depths": [2, 5, 11, 1],
"widths": [224, 448, 896, 2240],
"group_width": 112,
"default_size": 224,
"block_type": "Y",
},
"y160": {
"depths": [2, 4, 11, 1],
"widths": [224, 448, 1232, 3024],
"group_width": 112,
"default_size": 224,
"block_type": "Y",
},
"y320": {
"depths": [2, 5, 12, 1],
"widths": [232, 696, 1392, 3712],
"group_width": 232,
"default_size": 224,
"block_type": "Y",
},
}
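# Sketch of how a single entry above is consumed (mirrors the stage loop in
# `RegNet.__init__` below; illustrative only):
#
#   cfg = MODEL_CONFIGS["x032"]
#   # 4 stages of 2, 6, 15, and 2 X-blocks with 96, 192, 432, and 1008
#   # output channels respectively, all built with group width 48.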
BASE_DOCSTRING = """This class represents the {name} architecture.
Reference:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)
(CVPR 2020)
For image classification use cases, see
[this page for detailed examples](https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
The naming of models is as follows: `RegNet<block_type><flops>` where
`block_type` is one of `(X, Y)` and `flops` signifies hundred million
    floating point operations. For example, RegNetY064 corresponds to RegNet with
Y block and 6.4 giga flops (64 hundred million flops).
Args:
include_rescaling: whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: Whether to include the fully-connected
layer at the top of the network.
num_classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True.
weights: One of `None` (random initialization), or the path to the weights
file to be loaded, defaults to `None`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, defaults to (None, None, 3).
It should have exactly 3 inputs channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`, defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to `"softmax"`.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
""" # noqa: E501
def apply_conv2d_bn(
x,
filters,
kernel_size,
strides=1,
use_bias=False,
groups=1,
padding="valid",
kernel_initializer="he_normal",
batch_norm=True,
activation="relu",
name="",
):
x = layers.Conv2D(
filters,
kernel_size,
strides=strides,
groups=groups,
use_bias=use_bias,
padding=padding,
kernel_initializer=kernel_initializer,
name=name,
)(x)
if batch_norm:
x = layers.BatchNormalization(
momentum=0.9, epsilon=1e-5, name=name + "_bn"
)(x)
if activation is not None:
x = layers.Activation(activation, name=name + f"_{activation}")(x)
return x
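# For example (illustrative values): a strided, grouped 3x3 convolution with
# batch norm and ReLU, as used inside the X/Y/Z blocks below:
#
#   x = apply_conv2d_bn(
#       x, filters=96, kernel_size=(3, 3), strides=2, groups=4,
#       padding="same", name="demo_conv_3x3",
#   )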
def apply_stem(x, name=None):
"""Implementation of RegNet stem.
(Common to all model variants)
Args:
x: Tensor, input tensor to the stem
name: name prefix
Returns:
Output tensor of the Stem
"""
if name is None:
name = "stem" + str(backend.get_uid("stem"))
x = apply_conv2d_bn(
x=x,
filters=32,
kernel_size=(3, 3),
strides=2,
padding="same",
name=name + "_stem_conv",
)
return x
def apply_x_block(
inputs, filters_in, filters_out, group_width, stride=1, name=None
):
"""Implementation of X Block.
References:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)
Args:
inputs: Tensor, input tensor to the block
filters_in: int, filters in the input tensor
filters_out: int, filters in the output tensor
group_width: int, group width
stride: int (or) tuple, stride of Conv layer
name: str, name prefix
Returns:
Output tensor of the block
"""
if name is None:
name = str(backend.get_uid("xblock"))
if filters_in != filters_out and stride == 1:
raise ValueError(
f"Input filters({filters_in}) and output "
f"filters({filters_out}) "
f"are not equal for stride {stride}. Input and output filters "
f"must be equal for stride={stride}."
)
# Declare layers
groups = filters_out // group_width
if stride != 1:
skip = apply_conv2d_bn(
x=inputs,
filters=filters_out,
kernel_size=(1, 1),
strides=stride,
activation=None,
name=name + "_skip_1x1",
)
else:
skip = inputs
# Build block
# conv_1x1_1
x = apply_conv2d_bn(
x=inputs,
filters=filters_out,
kernel_size=(1, 1),
name=name + "_conv_1x1_1",
)
# conv_3x3
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(3, 3),
strides=stride,
groups=groups,
padding="same",
name=name + "_conv_3x3",
)
# conv_1x1_2
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(1, 1),
activation=None,
name=name + "_conv_1x1_2",
)
x = layers.Activation("relu", name=name + "_exit_relu")(x + skip)
return x
def apply_y_block(
inputs,
filters_in,
filters_out,
group_width,
stride=1,
squeeze_excite_ratio=0.25,
name=None,
):
"""Implementation of Y Block.
References:
- [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)
Args:
inputs: Tensor, input tensor to the block
filters_in: int, filters in the input tensor
filters_out: int, filters in the output tensor
group_width: int, group width
stride: int (or) tuple, stride of Conv layer
squeeze_excite_ratio: float, expansion ratio for Squeeze and Excite block
name: str, name prefix
Returns:
Output tensor of the block
"""
if name is None:
name = str(backend.get_uid("yblock"))
if filters_in != filters_out and stride == 1:
raise ValueError(
f"Input filters({filters_in}) and output "
f"filters({filters_out}) "
f"are not equal for stride {stride}. Input and output filters "
f"must be equal for stride={stride}."
)
groups = filters_out // group_width
if stride != 1:
skip = apply_conv2d_bn(
x=inputs,
filters=filters_out,
kernel_size=(1, 1),
strides=stride,
activation=None,
name=name + "_skip_1x1",
)
else:
skip = inputs
# Build block
# conv_1x1_1
x = apply_conv2d_bn(
x=inputs,
filters=filters_out,
kernel_size=(1, 1),
name=name + "_conv_1x1_1",
)
# conv_3x3
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(3, 3),
strides=stride,
groups=groups,
padding="same",
name=name + "_conv_3x3",
)
# Squeeze-Excitation block
x = SqueezeAndExcite2D(
filters_out,
bottleneck_filters=filters_out * squeeze_excite_ratio,
name=name,
)(x)
# conv_1x1_2
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(1, 1),
activation=None,
name=name + "_conv_1x1_2",
)
x = layers.Activation("relu", name=name + "_exit_relu")(x + skip)
return x
def apply_z_block(
inputs,
filters_in,
filters_out,
group_width,
stride=1,
squeeze_excite_ratio=0.25,
bottleneck_ratio=0.25,
name=None,
):
"""Implementation of Z block.
References:
- [Fast and Accurate Model Scaling](https://arxiv.org/abs/2103.06877).
Args:
inputs: Tensor, input tensor to the block
filters_in: int, filters in the input tensor
filters_out: int, filters in the output tensor
group_width: int, group width
stride: int (or) tuple, stride
        squeeze_excite_ratio: float, expansion ratio for Squeeze and Excite block
bottleneck_ratio: float, inverted bottleneck ratio
name: str, name prefix
Returns:
Output tensor of the block
"""
if name is None:
name = str(backend.get_uid("zblock"))
if filters_in != filters_out and stride == 1:
raise ValueError(
f"Input filters({filters_in}) and output filters({filters_out})"
f"are not equal for stride {stride}. Input and output filters "
f"must be equal for stride={stride}."
)
groups = filters_out // group_width
inv_btlneck_filters = int(filters_out / bottleneck_ratio)
# Build block
# conv_1x1_1
x = apply_conv2d_bn(
x=inputs,
filters=inv_btlneck_filters,
kernel_size=(1, 1),
name=name + "_conv_1x1_1",
activation="silu",
)
# conv_3x3
x = apply_conv2d_bn(
x=x,
filters=inv_btlneck_filters,
kernel_size=(3, 3),
strides=stride,
groups=groups,
padding="same",
name=name + "_conv_3x3",
activation="silu",
)
# Squeeze-Excitation block
x = SqueezeAndExcite2D(
inv_btlneck_filters,
        bottleneck_filters=inv_btlneck_filters * squeeze_excite_ratio,
name=name,
)(x)
# conv_1x1_2
x = apply_conv2d_bn(
x=x,
filters=filters_out,
kernel_size=(1, 1),
activation=None,
name=name + "_conv_1x1_2",
)
if stride != 1:
return x
else:
return x + inputs
def apply_stage(
x, block_type, depth, group_width, filters_in, filters_out, name=None
):
"""Implementation of Stage in RegNet.
Args:
x: Tensor, input tensor to the stage
block_type: must be one of "X", "Y", "Z"
depth: int, depth of stage, number of blocks to use
group_width: int, group width of all blocks in this stage
filters_in: int, input filters to this stage
filters_out: int, output filters from this stage
name: str, name prefix
Returns:
Output tensor of the block
"""
if name is None:
name = str(backend.get_uid("stage"))
if block_type == "X":
x = apply_x_block(
x,
filters_in,
filters_out,
group_width,
stride=2,
name=f"{name}_XBlock_0",
)
for i in range(1, depth):
x = apply_x_block(
x,
filters_out,
filters_out,
group_width,
name=f"{name}_XBlock_{i}",
)
elif block_type == "Y":
x = apply_y_block(
x,
filters_in,
filters_out,
group_width,
stride=2,
name=name + "_YBlock_0",
)
for i in range(1, depth):
x = apply_y_block(
x,
filters_out,
filters_out,
group_width,
name=f"{name}_YBlock_{i}",
)
elif block_type == "Z":
x = apply_z_block(
x,
filters_in,
filters_out,
group_width,
stride=2,
name=f"{name}_ZBlock_0",
)
for i in range(1, depth):
x = apply_z_block(
x,
filters_out,
filters_out,
group_width,
name=f"{name}_ZBlock_{i}",
)
else:
raise NotImplementedError(
f"Block type `{block_type}` not recognized."
f"block_type must be one of (`X`, `Y`, `Z`). "
)
return x
def apply_head(x, num_classes=None, name=None, activation=None):
"""Implementation of classification head of RegNet.
Args:
x: Tensor, input to the head block
num_classes: int, number of classes for Dense layer
name: str, name prefix
Returns:
Output logits tensor.
"""
if name is None:
name = str(backend.get_uid("head"))
x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x)
x = layers.Dense(
num_classes, name=name + "head_dense", activation=activation
)(x)
return x
@keras.utils.register_keras_serializable(package="keras_cv.models")
class RegNet(keras.Model):
"""
This class represents the architecture of RegNet
Args:
depths: iterable, Contains depths for each individual stages.
widths: iterable, Contains output channel width of each individual
stages
group_width: int, Number of channels to be used in each group. See
grouped convolutions for more information.
block_type: Must be one of `{"X", "Y", "Z"}`. For more details see the
papers "Designing network design spaces" and "Fast and Accurate
Model Scaling"
        default_size: int, default input image size.
model_name: str, An optional name for the model.
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, Whether to include the fully-connected
layer at the top of the network.
num_classes: int, Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
weights: str, One of `None` (random initialization), or the path to the
weights file to be loaded, defaults to `None`.
input_tensor: Tensor, Optional Keras tensor (i.e. output of
`layers.Input()`) to use as image input for the model.
input_shape: Optional shape tuple, defaults to (None, None, 3).
It should have exactly 3 inputs channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`, defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer. Defaults to `"softmax"`.
"""
def __init__(
self,
depths,
widths,
group_width,
block_type,
include_rescaling,
include_top,
num_classes=None,
model_name="regnet",
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
classifier_activation="softmax",
**kwargs,
):
if not (weights is None or tf.io.gfile.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization) "
"or the path to the weights file to be loaded."
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
if include_top and pooling:
raise ValueError(
f"`pooling` must be `None` when `include_top=True`."
f"Received pooling={pooling} and include_top={include_top}. "
)
img_input = utils.parse_model_inputs(input_shape, input_tensor)
x = img_input
if include_rescaling:
x = layers.Rescaling(scale=1.0 / 255.0)(x)
x = apply_stem(x, name=model_name)
in_channels = x.shape[-1] # Output from Stem
NUM_STAGES = 4
for stage_index in range(NUM_STAGES):
depth = depths[stage_index]
out_channels = widths[stage_index]
x = apply_stage(
x,
block_type,
depth,
group_width,
in_channels,
out_channels,
name=model_name + "_Stage_" + str(stage_index),
)
in_channels = out_channels
if include_top:
x = apply_head(
x, num_classes=num_classes, activation=classifier_activation
)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
super().__init__(inputs=img_input, outputs=x, name=model_name, **kwargs)
# Load weights.
if weights is not None:
self.load_weights(weights)
self.depths = depths
self.widths = widths
self.group_width = group_width
self.block_type = block_type
self.include_rescaling = include_rescaling
self.include_top = include_top
self.num_classes = num_classes
self.model_name = model_name
self.input_tensor = input_tensor
self.pooling = pooling
self.classifier_activation = classifier_activation
def get_config(self):
return {
"depths": self.depths,
"widths": self.widths,
"group_width": self.group_width,
"block_type": self.block_type,
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"num_classes": self.num_classes,
"model_name": self.model_name,
"input_tensor": self.input_tensor,
"input_shape": self.input_shape[1:],
"pooling": self.pooling,
"classifier_activation": self.classifier_activation,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
# Instantiating variants
def RegNetX002(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx002",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x002"]["depths"],
MODEL_CONFIGS["x002"]["widths"],
MODEL_CONFIGS["x002"]["group_width"],
MODEL_CONFIGS["x002"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx002"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX004(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx004",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x004"]["depths"],
MODEL_CONFIGS["x004"]["widths"],
MODEL_CONFIGS["x004"]["group_width"],
MODEL_CONFIGS["x004"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx004"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX006(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx006",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x006"]["depths"],
MODEL_CONFIGS["x006"]["widths"],
MODEL_CONFIGS["x006"]["group_width"],
MODEL_CONFIGS["x006"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx006"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX008(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx008",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x008"]["depths"],
MODEL_CONFIGS["x008"]["widths"],
MODEL_CONFIGS["x008"]["group_width"],
MODEL_CONFIGS["x008"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx008"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX016(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx016",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x016"]["depths"],
MODEL_CONFIGS["x016"]["widths"],
MODEL_CONFIGS["x016"]["group_width"],
MODEL_CONFIGS["x016"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx016"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX032(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx032",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x032"]["depths"],
MODEL_CONFIGS["x032"]["widths"],
MODEL_CONFIGS["x032"]["group_width"],
MODEL_CONFIGS["x032"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx032"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX040(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx040",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x040"]["depths"],
MODEL_CONFIGS["x040"]["widths"],
MODEL_CONFIGS["x040"]["group_width"],
MODEL_CONFIGS["x040"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx040"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX064(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx064",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x064"]["depths"],
MODEL_CONFIGS["x064"]["widths"],
MODEL_CONFIGS["x064"]["group_width"],
MODEL_CONFIGS["x064"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx064"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX080(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx080",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x080"]["depths"],
MODEL_CONFIGS["x080"]["widths"],
MODEL_CONFIGS["x080"]["group_width"],
MODEL_CONFIGS["x080"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx080"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX120(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx120",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x120"]["depths"],
MODEL_CONFIGS["x120"]["widths"],
MODEL_CONFIGS["x120"]["group_width"],
MODEL_CONFIGS["x120"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx120"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX160(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx160",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x160"]["depths"],
MODEL_CONFIGS["x160"]["widths"],
MODEL_CONFIGS["x160"]["group_width"],
MODEL_CONFIGS["x160"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx160"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetX320(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnetx320",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["x320"]["depths"],
MODEL_CONFIGS["x320"]["widths"],
MODEL_CONFIGS["x320"]["group_width"],
MODEL_CONFIGS["x320"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnetx320"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY002(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety002",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y002"]["depths"],
MODEL_CONFIGS["y002"]["widths"],
MODEL_CONFIGS["y002"]["group_width"],
MODEL_CONFIGS["y002"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety002"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY004(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety004",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y004"]["depths"],
MODEL_CONFIGS["y004"]["widths"],
MODEL_CONFIGS["y004"]["group_width"],
MODEL_CONFIGS["y004"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety004"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY006(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety006",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y006"]["depths"],
MODEL_CONFIGS["y006"]["widths"],
MODEL_CONFIGS["y006"]["group_width"],
MODEL_CONFIGS["y006"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety006"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY008(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety008",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y008"]["depths"],
MODEL_CONFIGS["y008"]["widths"],
MODEL_CONFIGS["y008"]["group_width"],
MODEL_CONFIGS["y008"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety008"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY016(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety016",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y016"]["depths"],
MODEL_CONFIGS["y016"]["widths"],
MODEL_CONFIGS["y016"]["group_width"],
MODEL_CONFIGS["y016"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety016"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY032(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety032",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y032"]["depths"],
MODEL_CONFIGS["y032"]["widths"],
MODEL_CONFIGS["y032"]["group_width"],
MODEL_CONFIGS["y032"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety032"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY040(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety040",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y040"]["depths"],
MODEL_CONFIGS["y040"]["widths"],
MODEL_CONFIGS["y040"]["group_width"],
MODEL_CONFIGS["y040"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety040"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY064(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety064",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y064"]["depths"],
MODEL_CONFIGS["y064"]["widths"],
MODEL_CONFIGS["y064"]["group_width"],
MODEL_CONFIGS["y064"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety064"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY080(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety080",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y080"]["depths"],
MODEL_CONFIGS["y080"]["widths"],
MODEL_CONFIGS["y080"]["group_width"],
MODEL_CONFIGS["y080"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety080"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY120(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety120",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y120"]["depths"],
MODEL_CONFIGS["y120"]["widths"],
MODEL_CONFIGS["y120"]["group_width"],
MODEL_CONFIGS["y120"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety120"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY160(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety160",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y160"]["depths"],
MODEL_CONFIGS["y160"]["widths"],
MODEL_CONFIGS["y160"]["group_width"],
MODEL_CONFIGS["y160"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety160"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def RegNetY320(
*,
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_tensor=None,
input_shape=(None, None, 3),
pooling=None,
model_name="regnety320",
classifier_activation="softmax",
**kwargs,
):
return RegNet(
MODEL_CONFIGS["y320"]["depths"],
MODEL_CONFIGS["y320"]["widths"],
MODEL_CONFIGS["y320"]["group_width"],
MODEL_CONFIGS["y320"]["block_type"],
model_name=model_name,
include_top=include_top,
include_rescaling=include_rescaling,
weights=parse_weights(weights, include_top, "regnety320"),
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
RegNetX002.__doc__ = BASE_DOCSTRING.format(name="RegNetX002")
RegNetX004.__doc__ = BASE_DOCSTRING.format(name="RegNetX004")
RegNetX006.__doc__ = BASE_DOCSTRING.format(name="RegNetX006")
RegNetX008.__doc__ = BASE_DOCSTRING.format(name="RegNetX008")
RegNetX016.__doc__ = BASE_DOCSTRING.format(name="RegNetX016")
RegNetX032.__doc__ = BASE_DOCSTRING.format(name="RegNetX032")
RegNetX040.__doc__ = BASE_DOCSTRING.format(name="RegNetX040")
RegNetX064.__doc__ = BASE_DOCSTRING.format(name="RegNetX064")
RegNetX080.__doc__ = BASE_DOCSTRING.format(name="RegNetX080")
RegNetX120.__doc__ = BASE_DOCSTRING.format(name="RegNetX120")
RegNetX160.__doc__ = BASE_DOCSTRING.format(name="RegNetX160")
RegNetX320.__doc__ = BASE_DOCSTRING.format(name="RegNetX320")
RegNetY002.__doc__ = BASE_DOCSTRING.format(name="RegNetY002")
RegNetY004.__doc__ = BASE_DOCSTRING.format(name="RegNetY004")
RegNetY006.__doc__ = BASE_DOCSTRING.format(name="RegNetY006")
RegNetY008.__doc__ = BASE_DOCSTRING.format(name="RegNetY008")
RegNetY016.__doc__ = BASE_DOCSTRING.format(name="RegNetY016")
RegNetY032.__doc__ = BASE_DOCSTRING.format(name="RegNetY032")
RegNetY040.__doc__ = BASE_DOCSTRING.format(name="RegNetY040")
RegNetY064.__doc__ = BASE_DOCSTRING.format(name="RegNetY064")
RegNetY080.__doc__ = BASE_DOCSTRING.format(name="RegNetY080")
RegNetY120.__doc__ = BASE_DOCSTRING.format(name="RegNetY120")
RegNetY160.__doc__ = BASE_DOCSTRING.format(name="RegNetY160")
RegNetY320.__doc__ = BASE_DOCSTRING.format(name="RegNetY320")
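# Usage sketch (illustrative, not part of the original module): all of the
# constructors above share one signature, so any variant is built the same way.
# model = RegNetX080(include_rescaling=True, include_top=True, num_classes=10)
# preds = model(images)  # `images` is an assumed input batch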
| keras-cv/keras_cv/models/legacy/regnet.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/regnet.py",
"repo_id": "keras-cv",
"token_count": 21728
} | 79 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StableDiffusion Noise scheduler
Adapted from https://github.com/huggingface/diffusers/blob/v0.3.0/src/diffusers/schedulers/scheduling_ddpm.py#L56
""" # noqa: E501
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.backend import random
@keras_cv_export("keras_cv.models.stable_diffusion.NoiseScheduler")
class NoiseScheduler:
"""
Args:
train_timesteps: number of diffusion steps used to train the model.
beta_start: the starting `beta` value of inference.
beta_end: the final `beta` value.
beta_schedule: the beta schedule, a mapping from a beta range to a
            sequence of betas for stepping the model. Choose from `linear`
            or `scaled_linear` (the schedule used by latent diffusion).
variance_type: options to clip the variance used when adding noise to
the de-noised sample. Choose from `fixed_small`, `fixed_small_log`,
`fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
clip_sample: option to clip predicted sample between -1 and 1 for
numerical stability.
"""
def __init__(
self,
train_timesteps=1000,
beta_start=0.0001,
beta_end=0.02,
beta_schedule="linear",
variance_type="fixed_small",
clip_sample=True,
):
self.train_timesteps = train_timesteps
if beta_schedule == "linear":
self.betas = ops.linspace(beta_start, beta_end, train_timesteps)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
ops.linspace(beta_start**0.5, beta_end**0.5, train_timesteps)
** 2
)
else:
raise ValueError(f"Invalid beta schedule: {beta_schedule}.")
self.alphas = 1.0 - self.betas
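        # alphas_cumprod[t] holds the cumulative product usually written as
        # alpha_bar_t in the DDPM paper; it drives both `add_noise` and `step`.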
self.alphas_cumprod = ops.cumprod(self.alphas)
self.variance_type = variance_type
self.clip_sample = clip_sample
self.seed_generator = random.SeedGenerator(seed=42)
def _get_variance(self, timestep, predicted_variance=None):
alpha_prod = self.alphas_cumprod[timestep]
alpha_prod_prev = (
self.alphas_cumprod[timestep - 1] if timestep > 0 else 1.0
)
variance = (
(1 - alpha_prod_prev) / (1 - alpha_prod) * self.betas[timestep]
)
if self.variance_type == "fixed_small":
variance = ops.clip(variance, x_min=1e-20, x_max=1)
elif self.variance_type == "fixed_small_log":
variance = ops.log(ops.clip(variance, x_min=1e-20, x_max=1))
elif self.variance_type == "fixed_large":
variance = self.betas[timestep]
elif self.variance_type == "fixed_large_log":
variance = ops.log(self.betas[timestep])
elif self.variance_type == "learned":
return predicted_variance
elif self.variance_type == "learned_range":
min_log = variance
max_log = self.betas[timestep]
frac = (predicted_variance + 1) / 2
variance = frac * max_log + (1 - frac) * min_log
else:
raise ValueError(f"Invalid variance type: {self.variance_type}")
return variance
def step(
self,
model_output,
timestep,
sample,
predict_epsilon=True,
):
"""
Predict the sample at the previous timestep by reversing the SDE. Core
function to propagate the diffusion process from the learned model
outputs (usually the predicted noise).
Args:
model_output: a Tensor containing direct output from learned
diffusion model
timestep: current discrete timestep in the diffusion chain.
sample: a Tensor containing the current instance of sample being
created by diffusion process.
predict_epsilon: whether the model is predicting noise (epsilon) or
samples
Returns:
The predicted sample at the previous timestep
"""
        if model_output.shape[1] == sample.shape[1] * 2 and (
            self.variance_type in ("learned", "learned_range")
        ):
model_output, predicted_variance = ops.split(
model_output, sample.shape[1], axis=1
)
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod = self.alphas_cumprod[timestep]
alpha_prod_prev = (
self.alphas_cumprod[timestep - 1] if timestep > 0 else 1.0
)
beta_prod = 1 - alpha_prod
beta_prod_prev = 1 - alpha_prod_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf # noqa: E501
if predict_epsilon:
pred_original_sample = (
sample - beta_prod ** (0.5) * model_output
) / alpha_prod ** (0.5)
else:
pred_original_sample = model_output
# 3. Clip "predicted x_0"
if self.clip_sample:
            pred_original_sample = ops.clip(pred_original_sample, -1, 1)
# 4. Compute coefficients for pred_original_sample x_0 and current
# sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_original_sample_coeff = (
alpha_prod_prev ** (0.5) * self.betas[timestep]
) / beta_prod
current_sample_coeff = (
self.alphas[timestep] ** (0.5) * beta_prod_prev / beta_prod
)
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_prev_sample = (
pred_original_sample_coeff * pred_original_sample
+ current_sample_coeff * sample
)
# 6. Add noise
variance = 0
if timestep > 0:
noise = random.normal(model_output.shape, seed=self.seed_generator)
variance = (
self._get_variance(
timestep, predicted_variance=predicted_variance
)
** 0.5
) * noise
pred_prev_sample = pred_prev_sample + variance
return pred_prev_sample
def add_noise(
self,
original_samples,
noise,
timesteps,
):
sqrt_alpha_prod = ops.take(self.alphas_cumprod, timesteps) ** 0.5
sqrt_one_minus_alpha_prod = (
1 - ops.take(self.alphas_cumprod, timesteps)
) ** 0.5
for _ in range(3):
sqrt_alpha_prod = ops.expand_dims(sqrt_alpha_prod, axis=-1)
sqrt_one_minus_alpha_prod = ops.expand_dims(
sqrt_one_minus_alpha_prod, axis=-1
)
sqrt_alpha_prod = ops.cast(
sqrt_alpha_prod, dtype=original_samples.dtype
)
sqrt_one_minus_alpha_prod = ops.cast(
sqrt_one_minus_alpha_prod, dtype=noise.dtype
)
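        # Forward diffusion q(x_t | x_0): scale the clean sample by
        # sqrt(alpha_bar_t) and the noise by sqrt(1 - alpha_bar_t)
        # (cf. Eq. (4) of https://arxiv.org/pdf/2006.11239.pdf).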
noisy_samples = (
sqrt_alpha_prod * original_samples
+ sqrt_one_minus_alpha_prod * noise
)
return noisy_samples
def __len__(self):
return self.train_timesteps
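# Usage sketch (illustrative, not part of the original file). It assumes a
# `denoiser` callable mapping (noisy_sample, timestep) -> predicted noise;
# everything else uses the APIs defined above.
#
# scheduler = NoiseScheduler(train_timesteps=1000)
# x0 = ops.ones((4, 64, 64, 3))                      # clean training batch
# t = 500
# eps = random.normal((4, 64, 64, 3))
# x_t = scheduler.add_noise(x0, eps, [t])            # corrupt for training
# x_prev = scheduler.step(denoiser(x_t, t), t, x_t)  # one reverse step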
| keras-cv/keras_cv/models/stable_diffusion/noise_scheduler.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/noise_scheduler.py",
"repo_id": "keras-cv",
"token_count": 3706
} | 80 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from keras_cv import core
from keras_cv.backend import ops
_TF_INTERPOLATION_METHODS = {
"bilinear": tf.image.ResizeMethod.BILINEAR,
"nearest": tf.image.ResizeMethod.NEAREST_NEIGHBOR,
"bicubic": tf.image.ResizeMethod.BICUBIC,
"area": tf.image.ResizeMethod.AREA,
"lanczos3": tf.image.ResizeMethod.LANCZOS3,
"lanczos5": tf.image.ResizeMethod.LANCZOS5,
"gaussian": tf.image.ResizeMethod.GAUSSIAN,
"mitchellcubic": tf.image.ResizeMethod.MITCHELLCUBIC,
}
def get_interpolation(interpolation):
"""fetches a valid interpolation method from `tf.image.ResizeMethod`.
Args:
interpolation: string representing an interpolation method.
Raises:
NotImplementedError: if the method passed is not recognized
Returns:
An interpolation method from `tf.image.ResizeMethod`
"""
interpolation = interpolation.lower()
if interpolation not in _TF_INTERPOLATION_METHODS:
raise NotImplementedError(
"Value not recognized for `interpolation`: {}. Supported values "
"are: {}".format(interpolation, _TF_INTERPOLATION_METHODS.keys())
)
return _TF_INTERPOLATION_METHODS[interpolation]
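# Example: get_interpolation("Bilinear") returns tf.image.ResizeMethod.BILINEAR
# (the lookup is case-insensitive); an unknown name raises NotImplementedError.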
def transform_value_range(
images, original_range, target_range, dtype=tf.float32
):
"""transforms values in input tensor from original_range to target_range.
This function is intended to be used in preprocessing layers that
rely upon color values. This allows us to assume internally that
the input tensor is always in the range [0, 255].
Args:
images: the set of images to transform to the target range.
original_range: the value range to transform from.
target_range: the value range to transform to.
dtype: the dtype to compute the conversion with, defaults to tf.float32.
Returns:
a new Tensor with values in the target range.
Usage:
```python
original_range = [0, 1]
target_range = [0, 255]
images = keras_cv.utils.preprocessing.transform_value_range(
images,
original_range,
target_range
)
images = tf.math.minimum(images + 10, 255)
images = keras_cv.utils.preprocessing.transform_value_range(
images,
target_range,
original_range
)
```
"""
if (
original_range[0] == target_range[0]
and original_range[1] == target_range[1]
):
return images
images = tf.cast(images, dtype=dtype)
original_min_value, original_max_value = _unwrap_value_range(
original_range, dtype=dtype
)
target_min_value, target_max_value = _unwrap_value_range(
target_range, dtype=dtype
)
# images in the [0, 1] scale
images = (images - original_min_value) / (
original_max_value - original_min_value
)
scale_factor = target_max_value - target_min_value
return (images * scale_factor) + target_min_value
def _unwrap_value_range(value_range, dtype=tf.float32):
min_value, max_value = value_range
min_value = tf.cast(min_value, dtype=dtype)
max_value = tf.cast(max_value, dtype=dtype)
return min_value, max_value
def blend(image1: tf.Tensor, image2: tf.Tensor, factor: float) -> tf.Tensor:
"""Blend image1 and image2 using 'factor'.
    The `factor` is typically in the range [0, 1]. A value of 0.0 means only
image1 is used. A value of 1.0 means only image2 is used. A value between
0.0 and 1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type tf.float32 with value range [0, 255].
image2: An image Tensor of type tf.float32 with value range [0, 255].
        factor: A floating point value of 0.0 or above; values greater than
            1.0 extrapolate and the result is clipped to [0, 255].
Returns:
A blended image Tensor.
"""
difference = image2 - image1
scaled = factor * difference
temp = image1 + scaled
return tf.clip_by_value(temp, 0.0, 255.0)
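# Worked example (illustrative values): with pixels a = 100.0 and b = 220.0,
# blend(a, b, 0.0) -> 100.0, blend(a, b, 0.5) -> 160.0 (the midpoint of the
# blend), and blend(a, b, 1.5) -> 280.0 extrapolated, then clipped to 255.0.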
def parse_factor(
param, min_value=0.0, max_value=1.0, param_name="factor", seed=None
):
if isinstance(param, dict):
# For all classes missing a `from_config` implementation.
# (RandomHue, RandomShear, etc.)
# To be removed with addition of `keras.__internal__` namespace support
param = keras.utils.deserialize_keras_object(param)
if isinstance(param, core.FactorSampler):
return param
if isinstance(param, float) or isinstance(param, int):
param = (min_value, param)
if param[0] > param[1]:
raise ValueError(
f"`{param_name}[0] > {param_name}[1]`, `{param_name}[0]` must be "
f"<= `{param_name}[1]`. Got `{param_name}={param}`"
)
if (min_value is not None and param[0] < min_value) or (
max_value is not None and param[1] > max_value
):
raise ValueError(
f"`{param_name}` should be inside of range "
f"[{min_value}, {max_value}]. Got {param_name}={param}"
)
if param[0] == param[1]:
return core.ConstantFactorSampler(param[0])
return core.UniformFactorSampler(param[0], param[1], seed=seed)
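# Examples of how `parse_factor` normalizes its input (illustrative):
# parse_factor(0.5)        -> UniformFactorSampler(0.0, 0.5)
# parse_factor((0.2, 0.4)) -> UniformFactorSampler(0.2, 0.4)
# parse_factor((0.3, 0.3)) -> ConstantFactorSampler(0.3)
# Any existing FactorSampler instance is passed through unchanged.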
def random_inversion(random_generator):
"""Randomly returns a -1 or a 1 based on the provided random_generator.
This can be used by KPLs to randomly invert sampled values.
Args:
random_generator: a Keras random number generator. An instance can be
passed from the `self._random_generator` attribute of
a `BaseImageAugmentationLayer`.
Returns:
        either -1.0 or 1.0.
"""
negate = random_generator.uniform((), 0, 1, dtype=tf.float32) > 0.5
negate = tf.cond(negate, lambda: -1.0, lambda: 1.0)
return negate
def batch_random_inversion(random_generator, batch_size):
"""Same as `random_inversion` but for batched inputs."""
negate = random_generator.uniform((batch_size, 1), 0, 1, dtype=tf.float32)
negate = tf.where(negate > 0.5, -1.0, 1.0)
return negate
def get_rotation_matrix(angles, image_height, image_width, name=None):
"""Returns projective transform(s) for the given angle(s).
Args:
        angles: A scalar angle, in radians, to rotate all images by, or (for
            batches of images) a vector with an angle to rotate each image in
            the batch. The rank must be statically known (the shape is not
            `TensorShape(None)`).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be
given to operation `image_projective_transform_v2`. If one row of
transforms is [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the
*output* point `(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
"""
with backend.name_scope(name or "rotation_matrix"):
x_offset = (
(image_width - 1)
- (
tf.cos(angles) * (image_width - 1)
- tf.sin(angles) * (image_height - 1)
)
) / 2.0
y_offset = (
(image_height - 1)
- (
tf.sin(angles) * (image_width - 1)
+ tf.cos(angles) * (image_height - 1)
)
) / 2.0
num_angles = tf.shape(angles)[0]
return tf.concat(
values=[
tf.cos(angles)[:, None],
-tf.sin(angles)[:, None],
x_offset[:, None],
tf.sin(angles)[:, None],
tf.cos(angles)[:, None],
y_offset[:, None],
tf.zeros((num_angles, 2), tf.float32),
],
axis=1,
)
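# Worked example (illustrative): a 90-degree rotation (angles = [pi / 2], in
# radians) of a 4x4 image gives cos = 0 and sin = 1, so the transform row is
# [0, -1, x_offset, 1, 0, y_offset, 0, 0] with
# x_offset = (3 - (0 - 3)) / 2 = 3.0 and y_offset = (3 - (3 + 0)) / 2 = 0.0.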
def get_translation_matrix(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A matrix of 2-element lists representing `[dx, dy]`
to translate for each image (for a batch of images).
name: The name of the op.
Returns:
A tensor of shape `(num_images, 8)` projective transforms which can be
given to `transform`.
"""
with backend.name_scope(name or "translation_matrix"):
num_translations = tf.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
return tf.concat(
values=[
tf.ones((num_translations, 1), tf.float32),
tf.zeros((num_translations, 1), tf.float32),
-translations[:, 0, None],
tf.zeros((num_translations, 1), tf.float32),
tf.ones((num_translations, 1), tf.float32),
-translations[:, 1, None],
tf.zeros((num_translations, 2), tf.float32),
],
axis=1,
)
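# Worked example (illustrative): translations = [[2.0, 3.0]] yields the row
# [1, 0, -2, 0, 1, -3, 0, 0], so the output pixel (x, y) samples the input at
# (x - 2, y - 3), shifting the content right by 2 and down by 3 pixels.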
def transform(
images,
transforms,
fill_mode="reflect",
fill_value=0.0,
interpolation="bilinear",
output_shape=None,
name=None,
):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape
`(num_images, num_rows, num_columns, num_channels)` (NHWC). The rank
must be statically known (the shape is not `TensorShape(None)`).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
`k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the
transform mapping input points to output points. Note that gradients are
not backpropagated into transformation parameters.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
output_shape: Output dimension after the transform, `[height, width]`.
If `None`, output is the same size as input image.
name: The name of the op.
Fill mode behavior for each valid value is as follows:
- reflect (d c b a | a b c d | d c b a)
The input is extended by reflecting about the edge of the last pixel.
- constant (k k k k | a b c d | k k k k)
The input is extended by filling all
values beyond the edge with the same constant value k = 0.
- wrap (a b c d | a b c d | a b c d)
The input is extended by wrapping around to the opposite edge.
- nearest (a a a a | a b c d | d d d d)
The input is extended by the nearest pixel.
Input shape:
4D tensor with shape: `(samples, height, width, channels)`,
in `"channels_last"` format.
Output shape:
4D tensor with shape: `(samples, height, width, channels)`,
in `"channels_last"` format.
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
        TypeError: If `images` is an invalid type.
ValueError: If output shape is not 1-D int32 Tensor.
"""
with backend.name_scope(name or "transform"):
if output_shape is None:
output_shape = tf.shape(images)[1:3]
if not tf.executing_eagerly():
output_shape_value = tf.get_static_value(output_shape)
if output_shape_value is not None:
output_shape = output_shape_value
output_shape = tf.convert_to_tensor(
output_shape, tf.int32, name="output_shape"
)
if not output_shape.get_shape().is_compatible_with([2]):
raise ValueError(
"output_shape must be a 1-D Tensor of 2 elements: "
"new_height, new_width, instead got "
"{}".format(output_shape)
)
fill_value = tf.convert_to_tensor(
fill_value, tf.float32, name="fill_value"
)
return tf.raw_ops.ImageProjectiveTransformV3(
images=images,
output_shape=output_shape,
fill_value=fill_value,
transforms=transforms,
fill_mode=fill_mode.upper(),
interpolation=interpolation.upper(),
)
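# Usage sketch (illustrative): combining the helpers above to rotate a batch.
# images = tf.random.uniform((2, 64, 64, 3))
# matrices = get_rotation_matrix(
#     tf.constant([0.1, -0.1]), image_height=64, image_width=64
# )
# rotated = transform(images, matrices, interpolation="bilinear")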
def ensure_tensor(inputs, dtype=None):
"""Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
if not ops.is_tensor(inputs):
inputs = ops.convert_to_tensor(inputs, dtype)
if dtype is not None and inputs.dtype != dtype:
inputs = ops.cast(inputs, dtype)
return inputs
def check_fill_mode_and_interpolation(fill_mode, interpolation):
if fill_mode not in {"reflect", "wrap", "constant", "nearest"}:
raise NotImplementedError(
" Want fillmode to be one of `reflect`, `wrap`, "
"`constant` or `nearest`. Got `fill_mode` {}. ".format(fill_mode)
)
if interpolation not in {"nearest", "bilinear"}:
raise NotImplementedError(
"Unknown `interpolation` {}. Only `nearest` and "
"`bilinear` are supported.".format(interpolation)
)
| keras-cv/keras_cv/utils/preprocessing.py/0 | {
"file_path": "keras-cv/keras_cv/utils/preprocessing.py",
"repo_id": "keras-cv",
"token_count": 6001
} | 81 |