# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing stage tests."""
import time
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.engine import base_preprocessing_layer
from tf_keras.layers.preprocessing import preprocessing_stage
from tf_keras.layers.preprocessing import preprocessing_test_utils
from tf_keras.testing_infra import test_combinations
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class PreprocessingStageTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_adapt(self):
class PL(base_preprocessing_layer.PreprocessingLayer):
def __init__(self, **kwargs):
self.adapt_time = None
self.adapt_count = 0
super().__init__(**kwargs)
def adapt(self, data, reset_state=True):
self.adapt_time = time.time()
self.adapt_count += 1
def call(self, inputs):
return inputs + 1.0
# Test with NumPy array
stage = preprocessing_stage.PreprocessingStage(
[
PL(),
PL(),
PL(),
]
)
stage.adapt(np.ones((3, 4)))
self.assertEqual(stage.layers[0].adapt_count, 1)
self.assertEqual(stage.layers[1].adapt_count, 1)
self.assertEqual(stage.layers[2].adapt_count, 1)
self.assertLessEqual(
stage.layers[0].adapt_time, stage.layers[1].adapt_time
)
self.assertLessEqual(
stage.layers[1].adapt_time, stage.layers[2].adapt_time
)
# Check call
y = stage(tf.ones((3, 4)))
self.assertAllClose(y, np.ones((3, 4)) + 3.0)
# Test with dataset
adapt_data = tf.data.Dataset.from_tensor_slices(np.ones((3, 10)))
        adapt_data = adapt_data.batch(2)  # 2 batches (3 samples, batch size 2)
stage.adapt(adapt_data)
self.assertEqual(stage.layers[0].adapt_count, 2)
self.assertEqual(stage.layers[1].adapt_count, 2)
self.assertEqual(stage.layers[2].adapt_count, 2)
self.assertLess(stage.layers[0].adapt_time, stage.layers[1].adapt_time)
self.assertLess(stage.layers[1].adapt_time, stage.layers[2].adapt_time)
# Test error with bad data
with self.assertRaisesRegex(ValueError, "requires a "):
stage.adapt(None)
if __name__ == "__main__":
tf.test.main()
# ==== Source: tf-keras/tf_keras/layers/preprocessing/preprocessing_stage_test.py ====
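As a hedged illustration (an assumption drawn from the assertions in the test above, not from the PreprocessingStage source), the sequential adapt the test verifies amounts to fitting each layer in order and feeding the transformed data forward:

import numpy as np

def sequential_adapt(layers, data):
    """Hypothetical helper mirroring what the assertions above imply."""
    for layer in layers:
        layer.adapt(data)   # layer i adapts strictly before layer i + 1
        data = layer(data)  # downstream layers adapt on transformed data
    return data

# With the PL layers above (each adds 1.0), three layers map ones to 4.0,
# matching the `y` assertion in the test.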
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dropout layer."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes
class DropoutTest(test_combinations.TestCase):
def test_dropout(self):
test_utils.layer_test(
keras.layers.Dropout, kwargs={"rate": 0.5}, input_shape=(3, 2)
)
test_utils.layer_test(
keras.layers.Dropout,
kwargs={"rate": 0.5, "noise_shape": [3, 1]},
input_shape=(3, 2),
)
def test_dropout_supports_masking(self):
dropout = keras.layers.Dropout(0.5)
self.assertEqual(True, dropout.supports_masking)
def test_dropout_partial_noise_shape(self):
inputs = keras.Input(shape=(5, 10))
layer = keras.layers.Dropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
out = model(np.ones((20, 5, 10)), training=True)
out_np = keras.backend.get_value(out)
# Test that dropout mask is shared across second dim.
self.assertAllClose(out_np[:, 0, :], out_np[:, 1, :])
@test_utils.run_v2_only
def test_dropout_with_zero_rate(self):
inputs = np.ones((20, 5, 10))
dropout = keras.layers.Dropout(0.0, force_generator=True)
dropout.build((20, 5, 10))
# Make sure we don't use the RNG when the dropout rate is 0
# (for performance).
rng_state_var = tf.constant(
dropout._random_generator._generator._state_var
)
output = dropout(inputs, training=True)
self.assertAllClose(inputs, output)
self.assertAllClose(
rng_state_var, dropout._random_generator._generator._state_var
)
def test_dropout_with_saving(self):
inputs = keras.Input(shape=(5, 10))
layer = keras.layers.Dropout(0.5, force_generator=True)
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
train = model(np.ones((20, 5, 10)), training=True)
predict = model(np.ones((20, 5, 10)))
        # Make sure the weights from the tf.random.Generator are not present
        # in the model, which would cause weight-loading issues for existing
        # application models that contain a dropout layer.
self.assertEmpty(layer.get_weights())
self.assertEmpty(model.get_weights())
        # Make sure the layer applies dropout when training
self.assertNotAllClose(train, predict)
with self.subTest("savedmodel"):
model.save(
os.path.join(self.get_temp_dir(), "savedmodel"),
save_format="tf",
)
loaded_model = keras.models.load_model(
os.path.join(self.get_temp_dir(), "savedmodel")
)
predict2 = loaded_model(np.ones((20, 5, 10)))
self.assertAllClose(predict, predict2)
            # Make sure the model produces different dropout values after loading
train2 = loaded_model(np.ones((20, 5, 10)), training=True)
self.assertNotAllClose(train, train2)
self.assertIsNotNone(loaded_model.layers[1]._random_generator)
with self.subTest("keras_v3"):
if not tf.__internal__.tf2.enabled():
self.skipTest(
"TF2 must be enabled to use the new `.keras` saving."
)
model.save(
os.path.join(self.get_temp_dir(), "model.keras"),
save_format="keras_v3",
)
loaded_model = keras.models.load_model(
os.path.join(self.get_temp_dir(), "model.keras")
)
predict2 = loaded_model(np.ones((20, 5, 10)))
self.assertAllClose(predict, predict2)
            # Make sure the model produces different dropout values after loading
train2 = loaded_model(np.ones((20, 5, 10)), training=True)
self.assertNotAllClose(train, train2)
self.assertIsNotNone(loaded_model.layers[1]._random_generator)
with self.subTest("checkpoint"):
            # Also make sure the checkpoint doesn't contain any variables
            # from the dropout layer, to preserve backward compatibility.
checkpoint = tf.train.Checkpoint(model)
save_path = checkpoint.save(
os.path.join(self.get_temp_dir(), "checkpoint")
)
checkpoint_var_names = [
name_value_tuple[0]
for name_value_tuple in tf.train.list_variables(save_path)
]
for name in checkpoint_var_names:
self.assertNotIn("dropout", name)
# Make sure the checkpoint can be loaded
clone_model = keras.models.clone_model(model)
checkpoint = tf.train.Checkpoint(clone_model)
status = checkpoint.restore(
os.path.join(self.get_temp_dir(), "checkpoint-1")
)
self.assertTrue(status.assert_consumed())
self.assertTrue(status.assert_existing_objects_matched())
            # Make sure the output is different from the original model,
            # since the StateVar is not preserved.
train3 = clone_model(np.ones((20, 5, 10)), training=True)
self.assertNotAllClose(train3, train2)
@test_utils.run_v2_only
def test_state_variable_name(self):
inputs = keras.Input(shape=(5, 10))
layer = keras.layers.Dropout(
0.5, force_generator=True, name="dropout_layer"
)
layer(inputs)
self.assertEqual(
layer._random_generator._generator._state_var.name,
"dropout_layer/StateVar:0",
)
if __name__ == "__main__":
tf.test.main()
# ==== Source: tf-keras/tf_keras/layers/regularization/dropout_test.py ====
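A minimal NumPy sketch (assuming standard inverted dropout with a broadcastable noise shape) of why `noise_shape=(None, 1, None)` shares the mask across the second axis, as `test_dropout_partial_noise_shape` asserts:

import numpy as np

rng = np.random.default_rng(0)
x = np.ones((20, 5, 10))
rate = 0.5
# The mask is sampled with the noise shape and broadcast against the
# input, so a size-1 axis reuses the same mask along that axis.
keep = rng.random((20, 1, 10)) >= rate
y = np.where(keep, x / (1.0 - rate), 0.0)  # inverted-dropout scaling
assert np.allclose(y[:, 0, :], y[:, 1, :])  # mask shared across timesteps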
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for flatten layer."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes
class FlattenTest(test_combinations.TestCase):
def test_flatten(self):
test_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4)
)
# Test channels_first
inputs = np.random.random((10, 3, 5, 5)).astype("float32")
outputs = test_utils.layer_test(
keras.layers.Flatten,
kwargs={"data_format": "channels_first"},
input_data=inputs,
)
target_outputs = np.reshape(
np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3)
)
self.assertAllClose(outputs, target_outputs)
def test_flatten_scalar_channels(self):
test_utils.layer_test(keras.layers.Flatten, kwargs={}, input_shape=(3,))
# Test channels_first
inputs = np.random.random((10,)).astype("float32")
outputs = test_utils.layer_test(
keras.layers.Flatten,
kwargs={"data_format": "channels_first"},
input_data=inputs,
)
target_outputs = np.expand_dims(inputs, -1)
self.assertAllClose(outputs, target_outputs)
if __name__ == "__main__":
tf.test.main()
# ==== Source: tf-keras/tf_keras/layers/reshaping/flatten_test.py ====
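A short NumPy sketch mirroring the reference computation in the test above: with `data_format="channels_first"`, Flatten first transposes NCHW to NHWC and then flattens the non-batch dimensions:

import numpy as np

x = np.arange(2 * 3 * 2 * 2, dtype="float32").reshape((2, 3, 2, 2))  # NCHW
flat = np.reshape(np.transpose(x, (0, 2, 3, 1)), (2, -1))  # NHWC, flattened
print(flat.shape)  # (2, 12)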
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras recurrent layers."""
import tensorflow.compat.v2 as tf
from tf_keras.layers.rnn.abstract_rnn_cell import AbstractRNNCell
# Recurrent layers.
from tf_keras.layers.rnn.base_rnn import RNN
from tf_keras.layers.rnn.simple_rnn import SimpleRNN
from tf_keras.layers.rnn.simple_rnn import SimpleRNNCell
from tf_keras.layers.rnn.stacked_rnn_cells import StackedRNNCells
if tf.__internal__.tf2.enabled():
from tf_keras.layers.rnn.gru import GRU
from tf_keras.layers.rnn.gru import GRUCell
from tf_keras.layers.rnn.gru_v1 import GRU as GRUV1
from tf_keras.layers.rnn.gru_v1 import GRUCell as GRUCellV1
from tf_keras.layers.rnn.lstm import LSTM
from tf_keras.layers.rnn.lstm import LSTMCell
from tf_keras.layers.rnn.lstm_v1 import LSTM as LSTMV1
from tf_keras.layers.rnn.lstm_v1 import LSTMCell as LSTMCellV1
GRUV2 = GRU
GRUCellV2 = GRUCell
LSTMV2 = LSTM
LSTMCellV2 = LSTMCell
else:
from tf_keras.layers.rnn.gru import GRU as GRUV2
from tf_keras.layers.rnn.gru import GRUCell as GRUCellV2
from tf_keras.layers.rnn.gru_v1 import GRU
from tf_keras.layers.rnn.gru_v1 import GRUCell
from tf_keras.layers.rnn.lstm import LSTM as LSTMV2
from tf_keras.layers.rnn.lstm import LSTMCell as LSTMCellV2
from tf_keras.layers.rnn.lstm_v1 import LSTM
from tf_keras.layers.rnn.lstm_v1 import LSTMCell
GRUV1 = GRU
GRUCellV1 = GRUCell
LSTMV1 = LSTM
LSTMCellV1 = LSTMCell
# Wrapper functions.
from tf_keras.layers.rnn.base_wrapper import Wrapper
from tf_keras.layers.rnn.bidirectional import Bidirectional
# RNN Cell wrappers.
from tf_keras.layers.rnn.cell_wrappers import DeviceWrapper
from tf_keras.layers.rnn.cell_wrappers import DropoutWrapper
from tf_keras.layers.rnn.cell_wrappers import ResidualWrapper
# Convolutional-recurrent layers.
from tf_keras.layers.rnn.conv_lstm1d import ConvLSTM1D
from tf_keras.layers.rnn.conv_lstm2d import ConvLSTM2D
from tf_keras.layers.rnn.conv_lstm3d import ConvLSTM3D
from tf_keras.layers.rnn.cudnn_gru import CuDNNGRU
# cuDNN recurrent layers.
from tf_keras.layers.rnn.cudnn_lstm import CuDNNLSTM
from tf_keras.layers.rnn.time_distributed import TimeDistributed
# ==== Source: tf-keras/tf_keras/layers/rnn/__init__.py ====
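A hedged usage sketch of the aliasing contract established by the conditional imports above (assuming the module is importable as shown): the bare names track the running TF major version's default, while the suffixed aliases pin an implementation.

import tf_keras.layers.rnn as rnn

fused = rnn.LSTMV2(32)   # fused (TF2-style) implementation on both branches
legacy = rnn.LSTMV1(32)  # legacy (TF1-style) implementation on both branches
default = rnn.LSTM(32)   # resolves to V2 under TF2 and to V1 under TF1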
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional recurrent layers."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes
class ConvLSTM1DTest(test_combinations.TestCase):
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
data_format=["channels_first", "channels_last"],
return_sequences=[True, False],
)
)
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
filters = 3
num_samples = 1
input_channel = 2
input_num_row = 5
sequence_len = 2
if data_format == "channels_first":
inputs = np.random.rand(
num_samples, sequence_len, input_channel, input_num_row
)
else:
inputs = np.random.rand(
num_samples, sequence_len, input_num_row, input_channel
)
# test for return state:
x = keras.Input(batch_shape=inputs.shape)
kwargs = {
"data_format": data_format,
"return_sequences": return_sequences,
"return_state": True,
"stateful": True,
"filters": filters,
"kernel_size": num_row,
"padding": "valid",
}
layer = keras.layers.ConvLSTM1D(**kwargs)
layer.build(inputs.shape)
outputs = layer(x)
_, states = outputs[0], outputs[1:]
self.assertEqual(len(states), 2)
model = keras.models.Model(x, states[0])
state = model.predict(inputs)
self.assertAllClose(
keras.backend.eval(layer.states[0]), state, atol=1e-4
)
# test for output shape:
test_utils.layer_test(
keras.layers.ConvLSTM1D,
kwargs={
"data_format": data_format,
"return_sequences": return_sequences,
"filters": filters,
"kernel_size": num_row,
"padding": "valid",
},
input_shape=inputs.shape,
)
@test_combinations.run_all_keras_modes
class ConvLSTM2DTest(test_combinations.TestCase):
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
data_format=["channels_first", "channels_last"],
return_sequences=[True, False],
)
)
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
if data_format == "channels_first":
inputs = np.random.rand(
num_samples,
sequence_len,
input_channel,
input_num_row,
input_num_col,
)
else:
inputs = np.random.rand(
num_samples,
sequence_len,
input_num_row,
input_num_col,
input_channel,
)
# test for return state:
x = keras.Input(batch_shape=inputs.shape)
kwargs = {
"data_format": data_format,
"return_sequences": return_sequences,
"return_state": True,
"stateful": True,
"filters": filters,
"kernel_size": (num_row, num_col),
"padding": "valid",
}
layer = keras.layers.ConvLSTM2D(**kwargs)
layer.build(inputs.shape)
outputs = layer(x)
_, states = outputs[0], outputs[1:]
self.assertEqual(len(states), 2)
model = keras.models.Model(x, states[0])
state = model.predict(inputs)
self.assertAllClose(
keras.backend.eval(layer.states[0]), state, atol=1e-4
)
# test for output shape:
test_utils.layer_test(
keras.layers.ConvLSTM2D,
kwargs={
"data_format": data_format,
"return_sequences": return_sequences,
"filters": filters,
"kernel_size": (num_row, num_col),
"padding": "valid",
},
input_shape=inputs.shape,
)
def test_conv_lstm_statefulness(self):
# Tests for statefulness
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
inputs = np.random.rand(
num_samples,
sequence_len,
input_num_row,
input_num_col,
input_channel,
)
with self.cached_session():
model = keras.models.Sequential()
kwargs = {
"data_format": "channels_last",
"return_sequences": False,
"filters": filters,
"kernel_size": (num_row, num_col),
"stateful": True,
"batch_input_shape": inputs.shape,
"padding": "same",
}
layer = keras.layers.ConvLSTM2D(**kwargs)
model.add(layer)
model.compile(optimizer="sgd", loss="mse")
out1 = model.predict(np.ones_like(inputs))
# train once so that the states change
model.train_on_batch(
np.ones_like(inputs), np.random.random(out1.shape)
)
out2 = model.predict(np.ones_like(inputs))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones_like(inputs))
self.assertNotEqual(out3.max(), out2.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones_like(inputs))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones_like(inputs))
self.assertNotEqual(out4.max(), out5.max())
def test_conv_lstm_regularizers(self):
# check regularizers
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
inputs = np.random.rand(
num_samples,
sequence_len,
input_num_row,
input_num_col,
input_channel,
)
with self.cached_session():
kwargs = {
"data_format": "channels_last",
"return_sequences": False,
"kernel_size": (num_row, num_col),
"stateful": True,
"filters": filters,
"batch_input_shape": inputs.shape,
"kernel_regularizer": keras.regularizers.L1L2(l1=0.01),
"recurrent_regularizer": keras.regularizers.L1L2(l1=0.01),
"activity_regularizer": "l2",
"bias_regularizer": "l2",
"kernel_constraint": "max_norm",
"recurrent_constraint": "max_norm",
"bias_constraint": "max_norm",
"padding": "same",
}
layer = keras.layers.ConvLSTM2D(**kwargs)
layer.build(inputs.shape)
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones(inputs.shape)))
self.assertEqual(len(layer.losses), 4)
def test_conv_lstm_dropout(self):
# check dropout
with self.cached_session():
test_utils.layer_test(
keras.layers.ConvLSTM2D,
kwargs={
"data_format": "channels_last",
"return_sequences": False,
"filters": 2,
"kernel_size": (3, 3),
"padding": "same",
"dropout": 0.1,
"recurrent_dropout": 0.1,
},
input_shape=(1, 2, 5, 5, 2),
)
def test_conv_lstm_cloning(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.ConvLSTM2D(5, 3, input_shape=(None, 5, 5, 3))
)
test_inputs = np.random.random((2, 4, 5, 5, 3))
reference_outputs = model.predict(test_inputs)
weights = model.get_weights()
# Use a new graph to clone the model
with self.cached_session():
clone = keras.models.clone_model(model)
clone.set_weights(weights)
outputs = clone.predict(test_inputs)
self.assertAllClose(reference_outputs, outputs, atol=1e-5)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Skipping the test as OOM occurred with 1 GB budget.",
)
def test_conv_lstm_with_initial_state(self):
num_samples = 32
sequence_len = 5
encoder_inputs = keras.layers.Input((None, 32, 32, 3))
encoder = keras.layers.ConvLSTM2D(
filters=32,
kernel_size=(3, 3),
padding="same",
return_sequences=False,
return_state=True,
)
_, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
decoder_inputs = keras.layers.Input((None, 32, 32, 4))
decoder_lstm = keras.layers.ConvLSTM2D(
filters=32,
kernel_size=(3, 3),
padding="same",
return_sequences=False,
return_state=False,
)
decoder_outputs = decoder_lstm(
decoder_inputs, initial_state=encoder_states
)
output = keras.layers.Conv2D(
1, (3, 3), padding="same", activation="relu"
)(decoder_outputs)
model = keras.Model([encoder_inputs, decoder_inputs], output)
model.compile(
optimizer="sgd",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
x_1 = np.random.rand(num_samples, sequence_len, 32, 32, 3)
x_2 = np.random.rand(num_samples, sequence_len, 32, 32, 4)
y = np.random.rand(num_samples, 32, 32, 1)
model.fit([x_1, x_2], y)
model.predict([x_1, x_2])
@test_combinations.run_all_keras_modes
class ConvLSTM3DTest(test_combinations.TestCase):
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
data_format=["channels_first", "channels_last"],
return_sequences=[True, False],
)
)
def test_conv_lstm(self, data_format, return_sequences):
num_height = 3
num_width = 3
num_depth = 3
filters = 3
num_samples = 1
input_channel = 2
input_height = 5
input_width = 5
input_depth = 5
sequence_len = 2
if data_format == "channels_first":
inputs = np.random.rand(
num_samples,
sequence_len,
input_channel,
input_height,
input_width,
input_depth,
)
else:
inputs = np.random.rand(
num_samples,
sequence_len,
input_height,
input_width,
input_depth,
input_channel,
)
# test for return state:
x = keras.Input(batch_shape=inputs.shape)
kwargs = {
"data_format": data_format,
"return_sequences": return_sequences,
"return_state": True,
"stateful": True,
"filters": filters,
"kernel_size": (num_height, num_width, num_depth),
"padding": "same",
}
layer = keras.layers.ConvLSTM3D(**kwargs)
layer.build(inputs.shape)
outputs = layer(x)
_, states = outputs[0], outputs[1:]
self.assertEqual(len(states), 2)
model = keras.models.Model(x, states[0])
state = model.predict(inputs)
self.assertAllClose(
keras.backend.eval(layer.states[0]), state, atol=1e-4
)
# test for output shape:
test_utils.layer_test(
keras.layers.ConvLSTM3D,
kwargs={
"data_format": data_format,
"return_sequences": return_sequences,
"filters": filters,
"kernel_size": (num_height, num_width, num_depth),
"padding": "valid",
},
input_shape=inputs.shape,
)
if __name__ == "__main__":
tf.test.main()
# ==== Source: tf-keras/tf_keras/layers/rnn/conv_lstm_test.py ====
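A tiny self-contained analogy (a hypothetical class, not taken from the test) for the statefulness assertions in `test_conv_lstm_statefulness`: with `stateful=True`, carried state makes identical inputs produce different outputs until `reset_states()` is called.

class TinyStatefulCell:
    """Hypothetical scalar recurrence illustrating stateful semantics."""

    def __init__(self):
        self.h = 0.0

    def __call__(self, x):
        self.h = 0.5 * self.h + x  # state persists across calls
        return self.h

    def reset_states(self):
        self.h = 0.0

cell = TinyStatefulCell()
print(cell(1.0), cell(1.0))  # 1.0 then 1.5: same input, different output
cell.reset_states()
print(cell(1.0))             # 1.0 again after the reset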
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Long Short-Term Memory V1 layer."""
from tf_keras import activations
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.rnn import lstm
from tf_keras.layers.rnn import rnn_utils
from tf_keras.layers.rnn.base_rnn import RNN
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=["keras.layers.LSTMCell"])
class LSTMCell(lstm.LSTMCell):
"""Cell class for the LSTM layer.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="hard_sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
**kwargs
):
super().__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=kwargs.pop("implementation", 1),
**kwargs
)
@keras_export(v1=["keras.layers.LSTM"])
class LSTM(RNN):
"""Long Short-Term Memory layer - Hochreiter 1997.
Note that this cell is not optimized for performance on GPU. Please use
`tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
      Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked. An individual `True` entry indicates
that the corresponding timestep should be utilized, while a `False`
entry indicates that the corresponding timestep should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="hard_sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs
):
implementation = kwargs.pop("implementation", 1)
if implementation == 0:
logging.warning(
"`implementation=0` has been deprecated, "
"and now defaults to `implementation=1`."
"Please update your layer call."
)
if "enable_caching_device" in kwargs:
cell_kwargs = {
"enable_caching_device": kwargs.pop("enable_caching_device")
}
else:
cell_kwargs = {}
cell = LSTMCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
dtype=kwargs.get("dtype"),
trainable=kwargs.get("trainable", True),
name="lstm_cell",
**cell_kwargs
)
super().__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs
)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
return super().call(
inputs, mask=mask, training=training, initial_state=initial_state
)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"unit_forget_bias": self.unit_forget_bias,
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
"implementation": self.implementation,
}
config.update(rnn_utils.config_for_enable_caching_device(self.cell))
base_config = super().get_config()
del base_config["cell"]
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if "implementation" in config and config["implementation"] == 0:
config["implementation"] = 1
return cls(**config)
# ==== Source: tf-keras/tf_keras/layers/rnn/lstm_v1.py ====
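A reduced sketch (illustrative names, not the actual layer) of the serialization pattern used above: the wrapper exposes the cell's hyperparameters as read-only properties so `get_config()` can flatten them, dropping the nested "cell" entry, and `from_config()` rebuilds everything through `__init__`.

class Cell:
    """Reduced stand-in for LSTMCell: owns the hyperparameters."""

    def __init__(self, units):
        self.units = units

class Wrapper:
    """Reduced stand-in for the LSTM layer: delegates to its cell."""

    def __init__(self, units):
        self.cell = Cell(units)

    @property
    def units(self):
        return self.cell.units  # read-only delegation, as in the LSTM layer

    def get_config(self):
        return {"units": self.units}  # flat config, no nested "cell" entry

    @classmethod
    def from_config(cls, config):
        return cls(**config)

layer = Wrapper.from_config(Wrapper(32).get_config())
assert layer.units == 32  # the config round-trip preserves hyperparameters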
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the convolutional layer classes and their functional aliases."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow.compat.v2 as tf
from tf_keras import layers as keras_layers
from tf_keras.legacy_tf_layers import base
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=["keras.__internal__.legacy.layers.Conv1D"])
class Conv1D(keras_layers.Conv1D, base.Layer):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: An integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv1D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
conv = tf.compat.v1.layers.Conv1D(filters=3, kernel_size=3)
```
After:
```python
    conv = tf.keras.layers.Conv1D(filters=3, kernel_size=3)
```
@end_compatibility
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
data_format="channels_last",
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs
):
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.conv1d"])
def conv1d(
inputs,
filters,
kernel_size,
strides=1,
padding="valid",
data_format="channels_last",
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None,
):
"""Functional interface for 1D convolution (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Args:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: An integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv1D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.conv1d(x, filters=3, kernel_size=3)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
    y = tf.keras.layers.Conv1D(filters=3, kernel_size=3)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.conv1d` is deprecated and "
"will be removed in a future version. "
"Please Use `tf.keras.layers.Conv1D` instead.",
stacklevel=2,
)
layer = Conv1D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.Conv2D"])
class Conv2D(keras_layers.Conv2D, base.Layer):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv2D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
conv = tf.compat.v1.layers.Conv2D(filters=3, kernel_size=3)
```
After:
```python
    conv = tf.keras.layers.Conv2D(filters=3, kernel_size=3)
```
@end_compatibility
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs
):
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.conv2d"])
def conv2d(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None,
):
"""Functional interface for the 2D convolution layer.
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Args:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv2D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.conv2d(x, filters=3, kernel_size=3)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
    y = tf.keras.layers.Conv2D(filters=3, kernel_size=3)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.conv2d` is deprecated and "
"will be removed in a future version. "
"Please Use `tf.keras.layers.Conv2D` instead.",
stacklevel=2,
)
layer = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.Conv3D"])
class Conv3D(keras_layers.Conv3D, base.Layer):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
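A brief shape sketch using the equivalent TF2 layer (shapes are
illustrative):
```python
volumes = tf.ones((2, 16, 16, 16, 4))  # (batch, depth, height, width, channels)
layer = tf.keras.layers.Conv3D(filters=8, kernel_size=3, padding="same")
print(layer(volumes).shape)  # (2, 16, 16, 16, 8): channels become `filters`
```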
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth,
height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
dilation_rate: An integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv3D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
conv = tf.compat.v1.layers.Conv3D(filters=3, kernel_size=3)
```
After:
```python
conv = tf.keras.layers.Conv3D(filters=3, kernel_size=3)
```
@end_compatibility
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs
):
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.conv3d"])
def conv3d(
inputs,
filters,
kernel_size,
strides=(1, 1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None,
):
"""Functional interface for the 3D convolution layer.
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Args:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth,
height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
dilation_rate: An integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is `tf.keras.layers.Conv3D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.conv3d(x, filters=3, kernel_size=3)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.Conv3D(filters=3, kernel_size=3)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.conv3d` is deprecated and "
"will be removed in a future version. "
"Please Use `tf.keras.layers.Conv3D` instead.",
stacklevel=2,
)
layer = Conv3D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.SeparableConv1D"])
class SeparableConv1D(keras_layers.SeparableConv1D, base.Layer):
"""Depthwise separable 1D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final
output.
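As a rough sketch of the two kernels involved, using the equivalent TF2
layer (shapes are illustrative):
```python
layer = tf.keras.layers.SeparableConv1D(filters=8, kernel_size=3,
                                        depth_multiplier=2)
layer.build((None, 10, 4))  # (batch, length, channels)
print(layer.depthwise_kernel.shape)  # (3, 4, 2): 2 filters per input channel
print(layer.pointwise_kernel.shape)  # (1, 8, 8): mixes 4 * 2 channels into 8
```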
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution
kernel.
pointwise_initializer: An initializer for the pointwise convolution
kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.SeparableConv1D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
conv = tf.compat.v1.layers.SeparableConv1D(filters=3, kernel_size=3)
```
After:
```python
conv = tf.keras.layers.SeparableConv1D(filters=3, kernel_size=3)
```
@end_compatibility
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
data_format="channels_last",
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs
):
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.SeparableConv2D"])
class SeparableConv2D(keras_layers.SeparableConv2D, base.Layer):
"""Depthwise separable 2D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output. It then optionally applies an
activation function to produce the final output.
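The main appeal is the reduced weight count; a quick back-of-the-envelope
comparison for a 3x3 convolution taking 32 channels to 64 (bias excluded):
```python
regular = 3 * 3 * 32 * 64          # 18432 weights for a standard Conv2D
depthwise = 3 * 3 * 32 * 1         # 288 with depth_multiplier=1
pointwise = 1 * 1 * (32 * 1) * 64  # 2048 for the 1x1 mixing convolution
print(depthwise + pointwise)       # 2336, roughly 8x fewer weights
```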
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value
for all spatial dimensions.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution
kernel.
pointwise_initializer: An initializer for the pointwise convolution
kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.SeparableConv2D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
conv = tf.compat.v1.layers.SeparableConv2D(filters=3, kernel_size=3)
```
After:
```python
conv = tf.keras.layers.SeparableConv2D(filters=3, kernel_size=3)
```
@end_compatibility
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs
):
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.separable_conv1d"])
def separable_conv1d(
inputs,
filters,
kernel_size,
strides=1,
padding="valid",
data_format="channels_last",
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None,
):
"""Functional interface for the depthwise separable 1D convolution layer.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output. It then optionally applies an
activation function to produce the final output.
Args:
inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution
kernel.
pointwise_initializer: An initializer for the pointwise convolution
kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.SeparableConv1D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.separable_conv1d(x, filters=3, kernel_size=3)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.SeparableConv1D(filters=3, kernel_size=3)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.separable_conv1d` is deprecated and "
"will be removed in a future version. "
"Please Use `tf.keras.layers.SeparableConv1D` instead.",
stacklevel=2,
)
layer = SeparableConv1D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.separable_conv2d"])
def separable_conv2d(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer=None,
pointwise_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None,
):
"""Functional interface for the depthwise separable 2D convolution layer.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output. It then optionally applies an
activation function to produce the final output.
Args:
inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value
for all spatial dimensions. Specifying any `stride` value != 1 is
incompatible with specifying any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution
kernel.
pointwise_initializer: An initializer for the pointwise convolution
kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.SeparableConv2D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.separable_conv2d(x, filters=3, kernel_size=3)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.SeparableConv2D(filters=3, kernel_size=3)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.separable_conv2d` is deprecated and "
"will be removed in a future version. "
"Please Use `tf.keras.layers.SeparableConv2D` instead.",
stacklevel=2,
)
layer = SeparableConv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.Conv2DTranspose"])
class Conv2DTranspose(keras_layers.Conv2DTranspose, base.Layer):
"""Transposed 2D convolution layer (sometimes called 2D Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
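A brief shape round trip using the equivalent TF2 layers (shapes are
illustrative):
```python
images = tf.ones((1, 8, 8, 3))
down = tf.keras.layers.Conv2D(16, 3, strides=2, padding="same")(images)
up = tf.keras.layers.Conv2DTranspose(3, 3, strides=2, padding="same")(down)
print(down.shape, up.shape)  # (1, 4, 4, 16) (1, 8, 8, 3)
```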
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value
for all spatial dimensions.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.Conv2DTranspose`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
conv = tf.compat.v1.layers.Conv2DTranspose(filters=3, kernel_size=3)
```
After:
```python
conv = tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3)
```
@end_compatibility
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format="channels_last",
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs
):
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.conv2d_transpose"])
def conv2d_transpose(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format="channels_last",
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None,
):
"""Functional interface for transposed 2D convolution layer.
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
Args:
inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 2 positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 2 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value
for all spatial dimensions.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
activation: Activation function. Set it to `None` to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If `None`, the
default initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.Conv2DTranspose`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.conv2d_transpose(x, filters=3, kernel_size=3)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.conv2d_transpose` is deprecated and "
"will be removed in a future version. "
"Please Use `tf.keras.layers.Conv2DTranspose` instead.",
stacklevel=2,
)
layer = Conv2DTranspose(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.Conv3DTranspose"])
class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer):
"""Transposed 3D convolution layer (sometimes called 3D Deconvolution).
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for all spatial
dimensions.
strides: An integer or tuple/list of 3 integers, specifying the strides
of the convolution along the depth, height and width.
Can be a single integer to specify the same value for all spatial
dimensions.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
activation: Activation function. Set it to `None` to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If `None`, the
default initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.Conv3DTranspose`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
conv = tf.compat.v1.layers.Conv3DTranspose(filters=3, kernel_size=3)
```
After:
```python
conv = tf.keras.layers.Conv3DTranspose(filters=3, kernel_size=3)
```
@end_compatibility
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1, 1),
padding="valid",
data_format="channels_last",
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs
):
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.conv3d_transpose"])
def conv3d_transpose(
inputs,
filters,
kernel_size,
strides=(1, 1, 1),
padding="valid",
data_format="channels_last",
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.compat.v1.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None,
):
"""Functional interface for transposed 3D convolution layer.
Args:
inputs: Input tensor.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of 3 positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of 3 positive integers specifying the strides
of the convolution. Can be a single integer to specify the same value
for all spatial dimensions.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy API that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`.
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.Conv3DTranspose`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.conv3d_transpose(x, filters=3, kernel_size=3)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.Conv3DTranspose(filters=3, kernel_size=3)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.conv3d_transpose` is deprecated and "
"will be removed in a future version. "
"Please Use `tf.keras.layers.Conv3DTranspose` instead.",
stacklevel=2,
)
layer = Conv3DTranspose(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name,
)
return layer(inputs)
# Aliases
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Deconvolution2D = Deconv2D = Conv2DTranspose
Convolution3DTranspose = Deconvolution3D = Deconv3D = Conv3DTranspose
convolution1d = conv1d
convolution2d = conv2d
convolution3d = conv3d
separable_convolution2d = separable_conv2d
convolution2d_transpose = deconvolution2d = deconv2d = conv2d_transpose
convolution3d_transpose = deconvolution3d = deconv3d = conv3d_transpose
| tf-keras/tf_keras/legacy_tf_layers/convolutional.py/0 | {
"file_path": "tf-keras/tf_keras/legacy_tf_layers/convolutional.py",
"repo_id": "tf-keras",
"token_count": 30954
} | 249 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Accuracy metrics."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.dtensor import utils as dtensor_utils
from tf_keras.metrics import base_metric
from tf_keras.utils import metrics_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.metrics.Accuracy")
class Accuracy(base_metric.MeanMetricWrapper):
"""Calculates how often predictions equal labels.
This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `accuracy`: an idempotent
operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Accuracy()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
>>> m.result().numpy()
0.75
>>> m.reset_state()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
... sample_weight=[1, 1, 0, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.Accuracy()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="accuracy", dtype=None):
super().__init__(accuracy, name, dtype=dtype)
@keras_export("keras.metrics.BinaryAccuracy")
class BinaryAccuracy(base_metric.MeanMetricWrapper):
"""Calculates how often predictions match binary labels.
This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `binary accuracy`: an idempotent
operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
threshold: (Optional) Float representing the threshold for deciding
whether prediction values are 1 or 0.
Standalone usage:
>>> m = tf.keras.metrics.BinaryAccuracy()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
>>> m.result().numpy()
0.75
>>> m.reset_state()
>>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
... sample_weight=[1, 0, 0, 1])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.BinaryAccuracy()])
```
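The `threshold` argument moves the decision boundary; for instance, a
prediction of 0.6 counts as class 1 under the default threshold of 0.5 but
as class 0 here:
>>> m = tf.keras.metrics.BinaryAccuracy(threshold=0.7)
>>> m.update_state([[1], [0]], [[0.6], [0.4]])
>>> m.result().numpy()
0.5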
"""
@dtensor_utils.inject_mesh
def __init__(self, name="binary_accuracy", dtype=None, threshold=0.5):
super().__init__(
metrics_utils.binary_matches, name, dtype=dtype, threshold=threshold
)
@keras_export("keras.metrics.CategoricalAccuracy")
class CategoricalAccuracy(base_metric.MeanMetricWrapper):
"""Calculates how often predictions match one-hot labels.
You can provide logits of classes as `y_pred`, since the argmax of
logits and the argmax of probabilities are the same.
This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `categorical accuracy`: an idempotent
operation that simply divides `total` by `count`.
`y_true` should be passed in as a one-hot vector of the same width as
`y_pred`, rather than as an integer label. If necessary, use `tf.one_hot`
to expand `y_true` into this form.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.CategoricalAccuracy()
>>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
... [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_state()
>>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
... [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalAccuracy()])
```
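If your labels are integers, `tf.one_hot` expands them into the one-hot
vectors this metric expects:
>>> tf.one_hot([2, 1], depth=3).numpy()
array([[0., 0., 1.],
       [0., 1., 0.]], dtype=float32)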
"""
@dtensor_utils.inject_mesh
def __init__(self, name="categorical_accuracy", dtype=None):
super().__init__(
lambda y_true, y_pred: metrics_utils.sparse_categorical_matches(
tf.math.argmax(y_true, axis=-1), y_pred
),
name,
dtype=dtype,
)
@keras_export("keras.metrics.SparseCategoricalAccuracy")
class SparseCategoricalAccuracy(base_metric.MeanMetricWrapper):
"""Calculates how often predictions match integer labels.
```python
acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)))
```
You can provide logits of classes as `y_pred`, since the argmax of
logits and the argmax of probabilities are the same.
This metric creates two local variables, `total` and `count` that are used
to compute the frequency with which `y_pred` matches `y_true`. This
frequency is ultimately returned as `sparse categorical accuracy`: an
idempotent operation that simply divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SparseCategoricalAccuracy()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_state()
>>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="sparse_categorical_accuracy", dtype=None):
super().__init__(
metrics_utils.sparse_categorical_matches, name, dtype=dtype
)
_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING = """Accumulates metric statistics.
For sparse categorical metrics, the shapes of `y_true` and `y_pred` are
different.
Args:
y_true: Ground truth label values. shape = `[batch_size, d0, .. dN-1]` or
shape = `[batch_size, d0, .. dN-1, 1]`.
y_pred: The predicted probability values. shape = `[batch_size, d0, .. dN]`.
sample_weight: Optional `sample_weight` acts as a
coefficient for the metric. If a scalar is provided, then the metric is
simply scaled by the given value. If `sample_weight` is a tensor of size
`[batch_size]`, then the metric for each sample of the batch is rescaled
by the corresponding element in the `sample_weight` vector. If the shape
of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
to this shape), then each metric element of `y_pred` is scaled by the
corresponding value of `sample_weight`. (Note on `dN-1`: all metric
functions reduce by 1 dimension, usually the last axis (-1)).
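
For example (an illustrative sketch using `SparseCategoricalAccuracy`; the
same shapes apply to the other sparse categorical metrics):

```python
m = tf.keras.metrics.SparseCategoricalAccuracy()
# y_true: shape [batch_size, 1]; y_pred: shape [batch_size, num_classes].
# Each sample's match is rescaled by its entry in `sample_weight`.
m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0.0]],
               sample_weight=[0.7, 0.3])
```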
Returns:
Update op.
"""
SparseCategoricalAccuracy.update_state.__doc__ = (
_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)
@keras_export("keras.metrics.TopKCategoricalAccuracy")
class TopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
"""Computes how often targets are in the top `K` predictions.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_state()
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
super().__init__(
lambda yt, yp, k: metrics_utils.sparse_top_k_categorical_matches(
tf.math.argmax(yt, axis=-1), yp, k
),
name,
dtype=dtype,
k=k,
)
@keras_export("keras.metrics.SparseTopKCategoricalAccuracy")
class SparseTopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
"""Computes how often integer targets are in the top `K` predictions.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_state()
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
```
"""
@dtensor_utils.inject_mesh
def __init__(
self, k=5, name="sparse_top_k_categorical_accuracy", dtype=None
):
super().__init__(
metrics_utils.sparse_top_k_categorical_matches,
name,
dtype=dtype,
k=k,
)
SparseTopKCategoricalAccuracy.update_state.__doc__ = (
_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)
def accuracy(y_true, y_pred):
    """Computes elementwise matches between labels and predictions.

    Returns a float tensor with 1.0 where `y_true` equals `y_pred` and 0.0
    elsewhere; `y_pred` is cast to the dtype of `y_true` if they differ.
    """
[
y_pred,
y_true,
], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_pred, y_true]
)
y_true.shape.assert_is_compatible_with(y_pred.shape)
if y_true.dtype != y_pred.dtype:
y_pred = tf.cast(y_pred, y_true.dtype)
return tf.cast(tf.equal(y_true, y_pred), backend.floatx())
@keras_export("keras.metrics.binary_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def binary_accuracy(y_true, y_pred, threshold=0.5):
"""Calculates how often predictions match binary labels.
Standalone usage:
>>> y_true = [[1], [1], [0], [0]]
>>> y_pred = [[1], [1], [0], [0]]
>>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
>>> assert m.shape == (4,)
>>> m.numpy()
array([1., 1., 1., 1.], dtype=float32)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
threshold: (Optional) Float representing the threshold for deciding
whether prediction values are 1 or 0.
Returns:
Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
"""
    # Note: calls metrics_utils.binary_matches with mean reduction. This
    # maintains public-facing binary_accuracy behavior and separates it from
    # the vital behavior of the binary_matches method needed in backend
    # dependencies.
return tf.reduce_mean(
metrics_utils.binary_matches(y_true, y_pred, threshold), axis=-1
)
@keras_export("keras.metrics.categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def categorical_accuracy(y_true, y_pred):
"""Calculates how often predictions match one-hot labels.
Standalone usage:
>>> y_true = [[0, 0, 1], [0, 1, 0]]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)
>>> assert m.shape == (2,)
>>> m.numpy()
array([0., 1.], dtype=float32)
    You can provide logits of classes as `y_pred`, since argmax of
    logits and probabilities are the same.
Args:
y_true: One-hot ground truth values.
y_pred: The prediction values.
Returns:
Categorical accuracy values.
"""
    # Note: wraps metrics_utils.categorical_matches. This separates
    # public-facing categorical_accuracy behavior from the vital behavior of
    # the categorical_matches method needed in backend dependencies.
return metrics_utils.sparse_categorical_matches(
tf.math.argmax(y_true, axis=-1), y_pred
)
@keras_export("keras.metrics.sparse_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_categorical_accuracy(y_true, y_pred):
"""Calculates how often predictions match integer labels.
Standalone usage:
>>> y_true = [2, 1]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
>>> assert m.shape == (2,)
>>> m.numpy()
array([0., 1.], dtype=float32)
    You can provide logits of classes as `y_pred`, since argmax of
    logits and probabilities are the same.
Args:
y_true: Integer ground truth values.
y_pred: The prediction values.
Returns:
Sparse categorical accuracy values.
"""
    # Note: wraps the metrics_utils.sparse_categorical_matches method and
    # checks for squeezing to align with expected public-facing behavior.
    # This separates public-facing sparse_categorical_accuracy behavior from
    # the vital behavior of the sparse_categorical_matches method needed in
    # backend dependencies.
matches = metrics_utils.sparse_categorical_matches(y_true, y_pred)
# if shape is (num_samples, 1) squeeze
if matches.shape.ndims > 1 and matches.shape[-1] == 1:
matches = tf.squeeze(matches, [-1])
return matches
@keras_export("keras.metrics.top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def top_k_categorical_accuracy(y_true, y_pred, k=5):
"""Computes how often targets are in the top `K` predictions.
Standalone usage:
>>> y_true = [[0, 0, 1], [0, 1, 0]]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
>>> assert m.shape == (2,)
>>> m.numpy()
array([1., 1.], dtype=float32)
Args:
y_true: The ground truth values.
y_pred: The prediction values.
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
Returns:
Top K categorical accuracy value.
"""
    # Note: wraps metrics_utils.top_k_categorical_matches. This separates
    # public-facing top_k_categorical_accuracy behavior from the vital
    # behavior of the top_k_categorical_matches method needed in backend
    # dependencies.
return metrics_utils.sparse_top_k_categorical_matches(
tf.math.argmax(y_true, axis=-1), y_pred, k
)
@keras_export("keras.metrics.sparse_top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
"""Computes how often integer targets are in the top `K` predictions.
Standalone usage:
>>> y_true = [2, 1]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(
... y_true, y_pred, k=3)
>>> assert m.shape == (2,)
>>> m.numpy()
array([1., 1.], dtype=float32)
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
Returns:
Sparse top K categorical accuracy value.
"""
    # Note: wraps metrics_utils.sparse_top_k_categorical_matches. This
    # separates public-facing sparse_top_k_categorical_accuracy behavior from
    # the vital behavior of the sparse_top_k_categorical_matches method
    # needed in backend dependencies.
return metrics_utils.sparse_top_k_categorical_matches(y_true, y_pred, k)
| tf-keras/tf_keras/metrics/accuracy_metrics.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/accuracy_metrics.py",
"repo_id": "tf-keras",
"token_count": 7108
} | 250 |
# Copyright 2023 The TF-Keras Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for Python-based metrics"""
import types
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
from tf_keras.metrics import base_metric
@keras_export("keras.metrics.experimental.PyMetric", v1=[])
class PyMetric(base_metric.Metric):
"""Metric which runs in Python, compiled outside of the TensorFlow graph.
Args:
name: (Optional) string name of the PyMetric instance.
dtype: (Optional) data type of the PyMetric result.
      **kwargs: Additional layer keyword arguments.
Usage of `PyMetric` is generally identical to `keras.metrics.Metric`.
It can be used in isolation, or in tandem with the `compile()` API. For more
information about the usage of `PyMetric`, see `keras.metrics.Metric`.
Unlike regular metrics, `PyMetric` instances are outside-compiled
with respect to the TensorFlow graph during training or evaluation.
They have access to the same
    inputs as a standard in-graph metric, but they run in a Python interpreter
on the host CPU. Any data stored in a `PyMetric` is located on the main
memory of the host CPU, and any TensorFlow ops used in a PyMetric are
run eagerly on the host CPU.
As a result, `PyMetric` instances are generally not as performant
as in-graph metrics, and should only be used in cases where computing
the metric inside of the TensorFlow graph is either impossible
or prohibitively expensive.
**Note:** Due to the use of `tf.py_function`, PyMetrics
are incompatible with XLA and therefore TPUs.
Methods to be implemented by subclasses:
* `update_state()`: Handles updates to internal state variables
    * `result()`: Computes and returns a scalar value for the metric from the
      state variables.
    * `reset_state()`: Resets all of the metric state variables.
    This subclass implementation is similar to that of `keras.metrics.Metric`,
    with a few notable differences:
* Inputs to `update_state()` in a `PyMetric` are eager tensors, and both
`update_state()` and `result()` run outside of the TensorFlow graph,
executing any TensorFlow ops eagerly.
* `reset_state()` is also called at initialization time to initialize the
Python state of the metric.
* `result()` can only return a single scalar. It does not support returning
a dictionary of results like `keras.metrics.Metric`.
Example subclass implementation using sklearn's Jaccard Score:
```python
from sklearn.metrics import jaccard_score
import tensorflow as tf
class JaccardScore(tf.keras.metrics.experimental.PyMetric):
def __init__(self, name='jaccard_score', **kwargs):
super().__init__(name=name, **kwargs)
def update_state(self, y_true, y_pred, sample_weight=None):
self.jaccard_sum += jaccard_score(y_pred, y_true, average="macro")
self.count += 1
def reset_state(self):
self.jaccard_sum = 0.
self.count = 0.
def result(self):
return self.jaccard_sum / self.count
```
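
    A subclass defined as above can then be used with the `compile()` API (an
    illustrative sketch; the optimizer and loss are placeholders, not
    requirements):

    ```python
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[JaccardScore()])
    ```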
"""
def __init__(self, name=None, dtype=None, **kwargs):
super().__init__(name=name, dtype=dtype, **kwargs)
self.reset_state()
def __new__(cls, *args, **kwargs):
obj = super(base_metric.Metric, cls).__new__(cls)
# Wrap the update_state function in a py_function and scope it to /cpu:0
obj_update_state = obj.update_state
def update_state_on_cpu(y_true, y_pred, sample_weight=None):
with tf.device("/cpu:0"):
return obj_update_state(y_true, y_pred, sample_weight)
obj.update_state_on_cpu = update_state_on_cpu
def update_state_fn(self, y_true, y_pred, sample_weight=None):
eager_inputs = [y_true, y_pred]
if sample_weight is not None:
eager_inputs.append(sample_weight)
return tf.py_function(
func=self.update_state_on_cpu, inp=eager_inputs, Tout=[]
)
obj.update_state = types.MethodType(update_state_fn, obj)
# Wrap the result function in a py_function and scope it to /cpu:0
obj_result = obj.result
def result_on_host_cpu():
with tf.device("/cpu:0"):
return obj_result()
obj.result_on_host_cpu = result_on_host_cpu
def result_fn(self):
return tf.py_function(
self.result_on_host_cpu, inp=[], Tout=obj.dtype
)
obj.result = types.MethodType(result_fn, obj)
return obj
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates statistics for the metric.
**Note:** This function is executed outside of the TensorFlow graph
on the CPU host.
This means:
a) Inputs are eager tensors.
b) Any TensorFlow ops run in this method are run eagerly.
c) Any Tensors created are allocated to the CPU's main memory.
Args:
y_true: Target output
y_pred: Predicted output
sample_weight: (Optional) weights for the individual samples in
`y_true` and `y_pred`
"""
raise NotImplementedError("Subclasses should implement `update_state`")
def merge_state(self, metrics):
"""Merges the state from one or more metrics.
`PyMetric` instances that intend to support merging state must override
this method, as the default implementation
in `keras.metrics.Metric` does not apply to `PyMetric`.
"""
raise NotImplementedError("Subclasses should implement `merge_state`")
def reset_state(self):
"""Resets all of the metric state variables.
This function is called between epochs when a metric is evaluated during
training. It's also called when the metric is initialized.
"""
raise NotImplementedError("Subclasses should implement `reset_state`")
def result(self):
"""Computes and returns the scalar metric value.
**Note:** This function is executed outside of the TensorFlow graph
on the CPU host. This means any TensorFlow ops run in this method
are run eagerly.
Result computation is an idempotent operation that simply calculates the
metric value using the state variables.
Returns:
A Python scalar.
"""
raise NotImplementedError("Subclasses should implement `result`")
| tf-keras/tf_keras/metrics/py_metric.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/py_metric.py",
"repo_id": "tf-keras",
"token_count": 2624
} | 251 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
import contextlib
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer_utils
from tf_keras.mixed_precision import device_compatibility_check
from tf_keras.mixed_precision import loss_scale_optimizer
from tf_keras.saving import serialization_lib
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.mixed_precision.Policy", v1=[])
class Policy:
"""A dtype policy for a TF-Keras layer.
A dtype policy determines a layer's computation and variable dtypes. Each
layer has a policy. Policies can be passed to the `dtype` argument of layer
constructors, or a global policy can be set with
`tf.keras.mixed_precision.set_global_policy`.
Args:
      name: The policy name, which determines the compute and variable dtypes.
        Can be any dtype name, such as `'float32'` or `'float64'`, which
        causes both the compute and variable dtypes to be that dtype. Can
        also be the string `'mixed_float16'` or `'mixed_bfloat16'`, which
        causes the compute dtype to be float16 or bfloat16 and the variable
        dtype to be float32.
Typically you only need to interact with dtype policies when using mixed
precision, which is the use of float16 or bfloat16 for computations and
float32 for variables. This is why the term `mixed_precision` appears in the
API name. Mixed precision can be enabled by passing `'mixed_float16'` or
`'mixed_bfloat16'` to `tf.keras.mixed_precision.set_global_policy`. See [the
mixed precision
guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
information on how to use mixed precision.
>>> tf.keras.mixed_precision.set_global_policy('mixed_float16')
>>> layer1 = tf.keras.layers.Dense(10)
>>> layer1.dtype_policy # `layer1` will automatically use mixed precision
<Policy "mixed_float16">
>>> # Can optionally override layer to use float32
>>> # instead of mixed precision.
>>> layer2 = tf.keras.layers.Dense(10, dtype='float32')
>>> layer2.dtype_policy
<Policy "float32">
>>> # Set policy back to initial float32 for future examples.
>>> tf.keras.mixed_precision.set_global_policy('float32')
In the example above, passing `dtype='float32'` to the layer is equivalent
to passing `dtype=tf.keras.mixed_precision.Policy('float32')`. In general,
passing a dtype policy name to a layer is equivalent to passing the
corresponding policy, so it is never necessary to explicitly construct a
`Policy` object.
Note: `Model.compile` will automatically wrap an optimizer with a
`tf.keras.mixed_precision.LossScaleOptimizer` if you use the
`'mixed_float16'` policy. If you use a custom training loop instead of
calling `Model.compile`, you should explicitly use a
`tf.keras.mixed_precision.LossScaleOptimizer` to avoid numeric underflow
with float16.
### How a layer uses its policy's compute dtype
A layer casts its inputs to its compute dtype. This causes the layer's
computations and output to also be in the compute dtype. For example:
>>> x = tf.ones((4, 4, 4, 4), dtype='float64')
>>> # `layer`'s policy defaults to float32.
>>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
>>> layer.compute_dtype # Equivalent to layer.dtype_policy.compute_dtype
'float32'
>>> # `layer` casts its inputs to its compute dtype and does computations in
>>> # that dtype.
>>> y = layer(x)
>>> y.dtype
tf.float32
Note that the base `tf.keras.layers.Layer` class inserts the casts. If
subclassing your own layer, you do not have to insert any casts.
Currently, only tensors in the first argument to the layer's `call` method
are casted (although this will likely be changed in a future minor release).
For example:
>>> class MyLayer(tf.keras.layers.Layer):
... # Bug! `b` will not be casted.
... def call(self, a, b):
... return a + 1., b + 1.
>>> a = tf.constant(1., dtype="float32")
>>> b = tf.constant(1., dtype="float32")
>>> layer = MyLayer(dtype="float64")
>>> x, y = layer(a, b)
>>> x.dtype
tf.float64
>>> y.dtype
tf.float32
If writing your own layer with multiple inputs, you should either explicitly
cast other tensors to `self.compute_dtype` in `call` or accept all tensors
in the first argument as a list.
The casting only occurs in TensorFlow 2. If
`tf.compat.v1.disable_v2_behavior()` has been called, you can enable the
casting behavior with
`tf.compat.v1.keras.layers.enable_v2_dtype_behavior()`.
### How a layer uses its policy's variable dtype
The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
is the layer's policy's variable dtype.
If a layer's compute and variable dtypes differ, `add_weight` will wrap
floating-point variables with a special wrapper called an
`AutoCastVariable`. `AutoCastVariable` is identical to the original
variable except it casts itself to the layer's compute dtype when used
within `Layer.call`. This means if you are writing a layer, you do not have
to explicitly cast the variables to the layer's compute dtype. For example:
>>> class SimpleDense(tf.keras.layers.Layer):
...
... def build(self, input_shape):
... # With mixed precision, self.kernel is a float32 AutoCastVariable
... self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
...
... def call(self, inputs):
... # With mixed precision, self.kernel will be casted to float16
... return tf.linalg.matmul(inputs, self.kernel)
...
>>> layer = SimpleDense(dtype='mixed_float16')
>>> y = layer(tf.ones((10, 10)))
>>> y.dtype
tf.float16
>>> layer.kernel.dtype
tf.float32
A layer author can prevent a variable from being wrapped with an
`AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`,
which is useful if the float32 value of the variable must be accessed within
the layer.
### How to write a layer that supports mixed precision and float64.
For the most part, layers will automatically support mixed precision and
float64 without any additional work, due to the fact the base layer
automatically casts inputs, creates variables of the correct type, and in
the case of mixed precision, wraps variables with `AutoCastVariables`.
The primary case where you need extra work to support mixed precision or
float64 is when you create a new tensor, such as with `tf.ones` or
    `tf.random.normal`. In such cases, you must create the tensor of the
    correct dtype. For example, if you call `tf.random.normal`, you must pass
    the compute dtype, which is the dtype the inputs have been cast to:
>>> class AddRandom(tf.keras.layers.Layer):
...
... def call(self, inputs):
... # We must pass `dtype=inputs.dtype`, otherwise a TypeError may
... # occur when adding `inputs` to `rand`.
... rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype)
... return inputs + rand
>>> layer = AddRandom(dtype='mixed_float16')
>>> y = layer(x)
>>> y.dtype
tf.float16
If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a
`TypeError` would have occurred. This is because the `tf.random.normal`'s
dtype defaults to `"float32"`, but the input dtype is float16. You cannot
add a float32 tensor with a float16 tensor.
"""
def __init__(self, name):
if isinstance(name, tf.DType):
raise TypeError(
"'name' must be a string, not a DType. "
f"Instead, pass DType.name. Received: name={name.name}"
)
elif not isinstance(name, str):
raise TypeError(f"'name' must be a string, but got: {name}")
self._name = name
self._compute_dtype, self._variable_dtype = self._parse_name(name)
if name in ("mixed_float16", "mixed_bfloat16"):
device_compatibility_check.log_device_compatibility_check(name)
def _parse_name(self, name):
"""Parses a Policy name into a compute and variable dtype.
Args:
          name: The name of the policy.
Returns:
The (compute_dtype, variable_dtype) pair.
"""
if name.endswith("_float32_vars"):
error_msg = (
"Policies ending in '_float32_vars' have been removed "
"from TensorFlow."
)
if name in ("infer_float32_vars", "infer_with_float32_vars"):
error_msg += (
" Please use the 'mixed_float16' or 'mixed_bfloat16' "
"policy instead."
)
elif name == "float16_with_float32_vars":
error_msg += " Please use the 'mixed_float16' policy instead."
elif name == "bfloat16_with_float32_vars":
error_msg += " Please use the 'mixed_bfloat16' policy instead."
error_msg += f" Got policy name: '{name}'"
raise ValueError(error_msg)
if name == "mixed_float16":
return "float16", "float32"
elif name == "mixed_bfloat16":
return "bfloat16", "float32"
elif name == "_infer":
# The "_infer" policy exists only for compatibility with TF 1, where
# "_infer" is the default. The behavior matches the behavior of TF
# 1's behavior before policies were introduced. With "_infer", the
# computation and variable dtype are inferred from the first input
# the first time the layer is called. Once the layer is called for
# the first time, the layer's policy will change to the dtype of the
# first input, and it will no longer have the "_infer" policy.
#
# The infer policy should be considered an implementation detail and
# may be removed in the future.
return None, None
try:
dtype = tf.as_dtype(name).name
except TypeError:
raise ValueError(
f"Cannot convert value {name} to a mixed precision Policy. "
"Valid policies include 'mixed_float16', 'mixed_bfloat16', "
"and the name of any dtype such as 'float32'."
)
return dtype, dtype
@property
def variable_dtype(self):
"""The variable dtype of this policy.
This is the dtype layers will create their variables in, unless a layer
explicitly chooses a different dtype. If this is different than
`Policy.compute_dtype`, Layers will cast variables to the compute dtype
to avoid type errors.
Variable regularizers are run in the variable dtype, not the compute
dtype.
Returns:
The variable dtype of this policy, as a string.
"""
return self._variable_dtype
@property
def compute_dtype(self):
"""The compute dtype of this policy.
This is the dtype layers will do their computations in. Typically layers
output tensors with the compute dtype as well.
Note that even if the compute dtype is float16 or bfloat16, hardware
devices may not do individual adds, multiplies, and other fundamental
operations in float16 or bfloat16, but instead may do some of them in
float32 for numeric stability. The compute dtype is the dtype of the
inputs and outputs of the TensorFlow ops that the layer executes.
Internally, many TensorFlow ops will do certain internal calculations in
float32 or some other device-internal intermediate format with higher
precision than float16/bfloat16, to increase numeric stability.
For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
float16 compute dtype, will pass float16 inputs to `tf.linalg.matmul`.
        But, `tf.linalg.matmul` will use float32 intermediate math. The
        performance benefit of float16 is still apparent, due to increased
        memory bandwidth and the fact that modern GPUs have specialized
        hardware for computing matmuls on float16 inputs while still keeping
        intermediate computations in float32.
Returns:
The compute dtype of this policy, as a string.
"""
return self._compute_dtype
@property
def name(self):
"""Returns the name of this policy."""
return self._name
def __repr__(self):
return f'<Policy "{self._name}">'
def get_config(self):
return {"name": self.name}
@classmethod
def from_config(cls, config, custom_objects=None):
del custom_objects
if "loss_scale" in config:
config = config.copy()
# Policy.get_config in TensorFlow 2.3 and below had a loss_scale. We
# silently drop it.
del config["loss_scale"]
return cls(**config)
# The current global policy in effect. If None, it means the current value of
# floatx should be used as the policy if the V2 dtype behavior is enabled,
# or "_infer" otherwise.
# TODO(reedwm): Make this thread local?
_global_policy = None
@keras_export("keras.mixed_precision.global_policy", v1=[])
def global_policy():
"""Returns the global dtype policy.
The global policy is the default `tf.keras.mixed_precision.Policy` used for
layers, if no policy is passed to the layer constructor. If no policy has
been set with `keras.mixed_precision.set_global_policy`, this will return a
policy constructed from `tf.keras.backend.floatx()` (floatx defaults to
float32).
>>> tf.keras.mixed_precision.global_policy()
<Policy "float32">
>>> tf.keras.layers.Dense(10).dtype_policy # Defaults to the global policy
<Policy "float32">
If TensorFlow 2 behavior has been disabled with
`tf.compat.v1.disable_v2_behavior()`, this will instead return a special
"_infer" policy which infers the dtype from the dtype of the first input the
first time the layer is called. This behavior matches the behavior that
existed in TensorFlow 1.
See `tf.keras.mixed_precision.Policy` for more information on policies.
Returns:
The global Policy.
"""
if _global_policy is None:
if base_layer_utils.v2_dtype_behavior_enabled():
return Policy(backend.floatx())
else:
return Policy("_infer")
return _global_policy
def _check_if_mixed_precision_graph_rewrite_is_enabled(policy):
if tf.__internal__.train.is_mixed_precision_graph_rewrite_enabled():
raise ValueError(
'The global dtype policy cannot be set to "{policy.name}", because '
"the mixed precision graph rewrite has already been enabled.\n"
"At most, one of the following can be called:\n\n"
" 1. tf.compat.v1.train.enable_mixed_precision_graph_rewrite() "
"(You called this first)\n"
" 2. tf.keras.mixed_precision.set_global_policy() with a mixed "
"precision policy (You called this second)\n\n"
"You called both functions, which is an error, because both "
"functions enable you to use mixed precision. If in doubt which "
"function to use, use the second, as it supports Eager execution "
"and is more customizable.".format(policy=policy)
)
@keras_export("keras.mixed_precision.set_global_policy", v1=[])
def set_global_policy(policy):
"""Sets the global dtype policy.
The global policy is the default `tf.keras.mixed_precision.Policy` used for
layers, if no policy is passed to the layer constructor.
>>> tf.keras.mixed_precision.set_global_policy('mixed_float16')
>>> tf.keras.mixed_precision.global_policy()
<Policy "mixed_float16">
>>> tf.keras.layers.Dense(10).dtype_policy
<Policy "mixed_float16">
>>> # Global policy is not used if a policy
>>> # is directly passed to constructor
>>> tf.keras.layers.Dense(10, dtype='float64').dtype_policy
<Policy "float64">
>>> tf.keras.mixed_precision.set_global_policy('float32')
If no global policy is set, layers will instead default to a Policy
constructed from `tf.keras.backend.floatx()`.
To use mixed precision, the global policy should be set to `'mixed_float16'`
or `'mixed_bfloat16'`, so that every layer uses a 16-bit compute dtype and
float32 variable dtype by default.
Only floating point policies can be set as the global policy, such as
`'float32'` and `'mixed_float16'`. Non-floating point policies such as
`'int32'` and `'complex64'` cannot be set as the global policy because most
layers do not support such policies.
See `tf.keras.mixed_precision.Policy` for more information.
Args:
policy: A Policy, or a string that will be converted to a Policy. Can also
be None, in which case the global policy will be constructed from
`tf.keras.backend.floatx()`
"""
global _global_policy
if not base_layer_utils.v2_dtype_behavior_enabled():
raise ValueError(
"The global policy can only be set in TensorFlow 2 or if "
"V2 dtype behavior has been set. To enable V2 dtype "
"behavior, call "
'"tf.compat.v1.keras.layers.enable_v2_dtype_behavior()"'
)
if policy is not None and not isinstance(policy, Policy):
policy = Policy(policy)
is_mixed_policy = (
policy is not None and policy.compute_dtype != policy.variable_dtype
)
if is_mixed_policy:
_check_if_mixed_precision_graph_rewrite_is_enabled(policy)
if (
policy is not None
and policy.compute_dtype is not None
and not tf.as_dtype(policy.compute_dtype).is_floating
):
raise ValueError(
"set_global_policy can only be used to set the global "
'policy to floating-point policies, such as "float32" and '
f'"mixed_float16", but got policy: {policy.name}'
)
_global_policy = policy
tf.__internal__.train.set_using_mixed_precision_policy(is_mixed_policy)
# TODO(reedwm): Make this thread local
@contextlib.contextmanager
def policy_scope(policy):
"""A context manager that sets the global Policy under it.
Args:
      policy: A Policy, or a string that will be converted to a Policy.
Yields:
Nothing.
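
    Example (a minimal sketch):

    ```python
    with policy_scope('mixed_float16'):
        # Layers constructed here default to the 'mixed_float16' policy.
        layer = tf.keras.layers.Dense(10)
    ```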
"""
old_policy = _global_policy
try:
set_global_policy(policy)
yield
finally:
set_global_policy(old_policy)
def get_policy(identifier):
    """Returns a `Policy` instance for the given identifier.

    The identifier may be a `Policy` (returned as-is), a serialized policy
    config dict, a policy or dtype name string, or a falsy value (in which
    case the global policy is returned).
    """
if isinstance(identifier, Policy):
dtype_policy = identifier
elif isinstance(identifier, dict):
dtype_policy = deserialize(identifier)
elif isinstance(identifier, str) and identifier in (
"mixed_float16",
"mixed_bfloat16",
):
# The isinstance check is required since np.dtype raises an error if
# compared to a non-dtype string.
dtype_policy = Policy(identifier)
elif identifier:
dtype_policy = Policy(tf.as_dtype(identifier).name)
else:
dtype_policy = global_policy()
if (
dtype_policy.name == "mixed_float16"
and not loss_scale_optimizer.strategy_supports_loss_scaling()
):
        # Although it is only loss scaling that is unsupported with certain
        # strategies, we disallow the 'mixed_float16' policy as a whole with
        # unsupported strategies to avoid confusion, because 'mixed_float16'
        # requires loss scaling for numeric stability.
strategy = tf.distribute.get_strategy()
raise ValueError(
"Mixed precision is not supported with the "
f"tf.distribute.Strategy: {strategy.__class__.__name__}. "
"Either stop using mixed precision by removing the use of "
f"the {dtype_policy.name} policy or "
"use a different Strategy, e.g. a MirroredStrategy."
)
return dtype_policy
def _is_convertible_to_dtype(dtype):
try:
tf.as_dtype(dtype)
return True
except TypeError:
return False
def _policy_equivalent_to_dtype(policy):
"""Returns True if the Policy is equivalent to a single dtype.
A policy is equivalent to a single dtype if the policy's compute and
variable dtypes are the same and the policy's type is Policy and not a
subclass of Policy.
The "_infer" policy is considered equivalent to a single dtype.
Args:
policy: A Policy.
Returns:
True, if the policy is equivalent to a single dtype.
"""
# We use type() instead of isinstance because a subclass of Policy is never
# equivalent to a dtype.
return type(policy) == Policy and ( # noqa: E721
policy.name == "_infer" or _is_convertible_to_dtype(policy.name)
)
def serialize(policy):
    """Serializes a `Policy` to a dtype-name string or a config dict."""
if _policy_equivalent_to_dtype(policy):
# We return either None or the policy name for compatibility with older
# versions of TF-Keras. If the policy name is returned, it is a dtype
# string such as 'float32'.
return None if policy.name == "_infer" else policy.name
return serialization_lib.serialize_keras_object(policy)
def deserialize(config, custom_objects=None):
    """Recreates a `Policy` from a dtype string, config dict, or `None`."""
if isinstance(config, str) and _is_convertible_to_dtype(config):
return Policy(config)
if config is None:
return Policy("_infer")
# PolicyV1 was an old version of Policy that was removed. Deserializing it
# turns it into a (non-V1) Policy.
module_objects = {"Policy": Policy, "PolicyV1": Policy}
return serialization_lib.deserialize_keras_object(
config,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name="dtype policy",
)
| tf-keras/tf_keras/mixed_precision/policy.py/0 | {
"file_path": "tf-keras/tf_keras/mixed_precision/policy.py",
"repo_id": "tf-keras",
"token_count": 8424
} | 252 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sharpness Aware Minimization implementation."""
import copy
import tensorflow.compat.v2 as tf
from tf_keras.engine import data_adapter
from tf_keras.layers import deserialize as deserialize_layer
from tf_keras.models import Model
from tf_keras.saving.object_registration import register_keras_serializable
from tf_keras.saving.serialization_lib import serialize_keras_object
# isort: off
from tensorflow.python.util.tf_export import keras_export
@register_keras_serializable()
@keras_export("keras.models.experimental.SharpnessAwareMinimization", v1=[])
class SharpnessAwareMinimization(Model):
"""Sharpness aware minimization (SAM) training flow.
    Sharpness-aware minimization (SAM) is a technique that improves model
    generalization and provides robustness to label noise. Mini-batch
    splitting has been shown to improve SAM's performance, so users can
    control how mini-batches are split via the `num_batch_splits` argument.
Args:
model: `tf.keras.Model` instance. The inner model that does the
forward-backward pass.
rho: float. The gradients scaling factor. Defaults to `0.05`.
        num_batch_splits: int. The number of mini-batches to split each data
            batch into. If None, batches are not split into sub-batches.
            Defaults to `None`.
name: string. The name of the SAM model. Defaults to `None`.
Reference:
[Pierre Foret et al., 2020](https://arxiv.org/abs/2010.01412)
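
    Example usage (a minimal sketch; the inner model, optimizer, loss, and
    data shown here are illustrative placeholders):

    ```python
    inner_model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    sam_model = SharpnessAwareMinimization(inner_model, rho=0.05)
    sam_model.compile(optimizer="sgd", loss="mse")
    sam_model.fit(x_train, y_train)
    ```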
"""
def __init__(self, model, rho=0.05, num_batch_splits=None, name=None):
super().__init__(name=name)
self.model = model
self.rho = rho
self.num_batch_splits = num_batch_splits
def train_step(self, data):
"""The logic of one SAM training step.
Args:
data: A nested structure of `Tensor`s. It should be of structure
(x, y, sample_weight) or (x, y).
Returns:
A dict mapping metric names to running average values.
"""
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
if self.num_batch_splits is not None:
x_split = tf.split(x, self.num_batch_splits)
y_split = tf.split(y, self.num_batch_splits)
else:
x_split = [x]
y_split = [y]
gradients_all_batches = []
pred_all_batches = []
for x_batch, y_batch in zip(x_split, y_split):
epsilon_w_cache = []
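            # First forward-backward pass: compute gradients at the current
            # weights to derive the perturbation `epsilon_w` that locally
            # increases the loss the most.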
with tf.GradientTape() as tape:
pred = self.model(x_batch)
loss = self.compiled_loss(y_batch, pred)
pred_all_batches.append(pred)
trainable_variables = self.model.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
gradients_order2_norm = self._gradients_order2_norm(gradients)
scale = self.rho / (gradients_order2_norm + 1e-12)
for gradient, variable in zip(gradients, trainable_variables):
epsilon_w = gradient * scale
self._distributed_apply_epsilon_w(
variable, epsilon_w, tf.distribute.get_strategy()
)
epsilon_w_cache.append(epsilon_w)
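            # Second forward-backward pass: compute gradients at the
            # perturbed weights `w + epsilon_w`; these are the gradients SAM
            # accumulates and applies.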
with tf.GradientTape() as tape:
pred = self(x_batch)
loss = self.compiled_loss(y_batch, pred)
gradients = tape.gradient(loss, trainable_variables)
if len(gradients_all_batches) == 0:
for gradient in gradients:
gradients_all_batches.append([gradient])
else:
for gradient, gradient_all_batches in zip(
gradients, gradients_all_batches
):
gradient_all_batches.append(gradient)
for variable, epsilon_w in zip(
trainable_variables, epsilon_w_cache
):
# Restore the variable to its original value before
# `apply_gradients()`.
self._distributed_apply_epsilon_w(
variable, -epsilon_w, tf.distribute.get_strategy()
)
gradients = []
for gradient_all_batches in gradients_all_batches:
gradients.append(tf.reduce_sum(gradient_all_batches, axis=0))
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
pred = tf.concat(pred_all_batches, axis=0)
self.compiled_metrics.update_state(y, pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
def call(self, inputs):
"""Forward pass of SAM.
SAM delegates the forward pass call to the wrapped model.
Args:
inputs: Tensor. The model inputs.
Returns:
A Tensor, the outputs of the wrapped model for given `inputs`.
"""
return self.model(inputs)
def get_config(self):
config = super().get_config()
config.update(
{
"model": serialize_keras_object(self.model),
"rho": self.rho,
}
)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
# Avoid mutating the input dict.
config = copy.deepcopy(config)
model = deserialize_layer(
config.pop("model"), custom_objects=custom_objects
)
config["model"] = model
return super().from_config(config, custom_objects)
def _distributed_apply_epsilon_w(self, var, epsilon_w, strategy):
# Helper function to apply epsilon_w on model variables.
if isinstance(
tf.distribute.get_strategy(),
(
tf.distribute.experimental.ParameterServerStrategy,
tf.distribute.experimental.CentralStorageStrategy,
),
):
# Under PSS and CSS, the AggregatingVariable has to be kept in sync.
def distribute_apply(strategy, var, epsilon_w):
strategy.extended.update(
var,
lambda x, y: x.assign_add(y),
args=(epsilon_w,),
group=False,
)
tf.__internal__.distribute.interim.maybe_merge_call(
distribute_apply, tf.distribute.get_strategy(), var, epsilon_w
)
else:
var.assign_add(epsilon_w)
    def _gradients_order2_norm(self, gradients):
        # Global L2 (order-2) norm across all non-None gradients.
norm = tf.norm(
tf.stack([tf.norm(grad) for grad in gradients if grad is not None])
)
return norm
| tf-keras/tf_keras/models/sharpness_aware_minimization.py/0 | {
"file_path": "tf-keras/tf_keras/models/sharpness_aware_minimization.py",
"repo_id": "tf-keras",
"token_count": 3206
} | 253 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad optimizer implementation."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend_config
from tf_keras.optimizers.legacy import optimizer_v2
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.optimizers.legacy.Adagrad",
v1=["keras.optimizers.Adagrad", "keras.optimizers.legacy.Adagrad"],
)
class Adagrad(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adagrad algorithm.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
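
    Schematically, the per-parameter update applied at each step is (with
    `lr` the learning rate and `epsilon` the stability constant):

    ```python
    accumulator += grad ** 2
    var -= lr * grad / (sqrt(accumulator) + epsilon)
    ```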
Args:
learning_rate: Initial value for the learning rate:
either a floating point value,
or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
Note that `Adagrad` tends to benefit from higher initial learning rate
values compared to other optimizers.
To match the exact form in the original paper, use 1.0.
Defaults to `0.001`.
initial_accumulator_value: Floating point value.
Starting value for the accumulators (per-parameter momentum values).
Must be non-negative.
epsilon: Small floating point value used to maintain numerical stability.
name: Optional name prefix for the operations created when applying
gradients. Defaults to `"Adagrad"`.
**kwargs: keyword arguments. Allowed arguments are `clipvalue`,
`clipnorm`, `global_clipnorm`.
If `clipvalue` (float) is set, the gradient of each weight
is clipped to be no higher than this value.
If `clipnorm` (float) is set, the gradient of each weight
is individually clipped so that its norm is no higher than this value.
            If `global_clipnorm` (float) is set, the gradient of all weights
            is clipped so that their global norm is no higher than this value.
Reference:
- [Duchi et al., 2011](
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
"""
_HAS_AGGREGATE_GRAD = True
def __init__(
self,
learning_rate=0.001,
initial_accumulator_value=0.1,
epsilon=1e-7,
name="Adagrad",
**kwargs
):
if initial_accumulator_value < 0.0:
raise ValueError(
"initial_accumulator_value must be non-negative: %s"
% initial_accumulator_value
)
if epsilon is None:
epsilon = backend_config.epsilon()
super().__init__(name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("decay", self._initial_decay)
self._initial_accumulator_value = initial_accumulator_value
self.epsilon = epsilon or backend_config.epsilon()
def _create_slots(self, var_list):
for var in var_list:
dtype = var.dtype.base_dtype
init = tf.compat.v1.constant_initializer(
self._initial_accumulator_value, dtype=dtype
)
self.add_slot(var, "accumulator", init)
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)].update(
dict(
epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
neg_lr_t=-apply_state[(var_device, var_dtype)]["lr_t"],
zero=tf.zeros((), dtype=tf.int64),
)
)
def set_weights(self, weights):
params = self.weights
# Override set_weights for backward compatibility of TF-Keras V1
# optimizer since it does not include iteration at head of the weight
# list. Set iteration to 0.
if len(params) == len(weights) + 1:
weights = [np.array(0)] + weights
super().set_weights(weights)
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Args:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional
Python objects used to create this optimizer, such as a function
used for a hyperparameter.
Returns:
An optimizer instance.
"""
if "initial_accumulator_value" not in config:
config["initial_accumulator_value"] = 0.1
if "lr" in config:
config["learning_rate"] = config.pop("lr")
return cls(**config)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
acc = self.get_slot(var, "accumulator")
return tf.raw_ops.ResourceApplyAdagradV2(
var=var.handle,
accum=acc.handle,
lr=coefficients["lr_t"],
epsilon=coefficients["epsilon"],
grad=grad,
use_locking=self._use_locking,
)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
acc = self.get_slot(var, "accumulator")
return tf.raw_ops.ResourceSparseApplyAdagradV2(
var=var.handle,
accum=acc.handle,
lr=coefficients["lr_t"],
epsilon=coefficients["epsilon"],
grad=grad,
indices=indices,
use_locking=self._use_locking,
)
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
"learning_rate"
),
"decay": self._initial_decay,
"initial_accumulator_value": self._initial_accumulator_value,
"epsilon": self.epsilon,
}
)
return config
| tf-keras/tf_keras/optimizers/legacy/adagrad.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/adagrad.py",
"repo_id": "tf-keras",
"token_count": 3015
} | 254 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
import functools
import tensorflow.compat.v2 as tf
from tf_keras.optimizers.schedules import learning_rate_schedule
# isort: off
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.exponential_decay"])
def exponential_decay(
learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None,
):
"""Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate *
decay_rate ^ (global_step / decay_steps)
```
If the argument `staircase` is `True`, then `global_step / decay_steps` is
an integer division and the decayed learning rate follows a staircase
function.
Example: decay every 100000 steps with a base of 0.96:
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.compat.v1.train.exponential_decay(starter_learning_rate,
global_step,
100000, 0.96, staircase=True)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
number. The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must
be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The decay rate.
      staircase: Boolean. If `True`, decay the learning rate at discrete
        intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.ExponentialDecay(
learning_rate, decay_steps, decay_rate, staircase=staircase, name=name
)
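    # In graph mode, evaluate the schedule at `global_step` now; under eager
    # execution, return a callable so the value is recomputed on each call.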
if not tf.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
@tf_export(v1=["train.piecewise_constant_decay", "train.piecewise_constant"])
def piecewise_constant(x, boundaries, values, name=None):
"""Piecewise constant from boundaries and interval values.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
global_step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate = tf.compat.v1.train.piecewise_constant(
global_step, boundaries, values)
# Later, whenever we perform an optimization step, we increment global_step.
```
Args:
x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
`float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as `x`.
values: A list of `Tensor`s or `float`s or `int`s that specifies the
values for the intervals defined by `boundaries`. It should have one
more element than `boundaries`, and all elements should have the same
type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Returns:
A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
`values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
    and `values[-1]` when `x > boundaries[-1]`.
Raises:
ValueError: if types of `x` and `boundaries` do not match, or types of all
`values` do not match or
the number of elements in the lists does not match.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
@end_compatibility
"""
boundaries = tf.nest.map_structure(
tf.convert_to_tensor, tf.nest.flatten(boundaries)
)
values = tf.nest.map_structure(
tf.convert_to_tensor, tf.nest.flatten(values)
)
x_recomp = tf.convert_to_tensor(x)
# Avoid explicit conversion to x's dtype. This could result in faulty
# comparisons, for example if floats are converted to integers.
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We can promote int32 boundaries to int64 without loss of
# precision. This covers the most common case where the user passes
# in boundaries as an array of Python integers.
if (
b.dtype.base_dtype == tf.int32
and x_recomp.dtype.base_dtype == tf.int64
):
b = tf.cast(b, x_recomp.dtype.base_dtype)
boundaries[i] = b
else:
raise ValueError(
f"`boundaries` ({b.dtype.base_dtype}) must have the same "
f"dtype as x ({x_recomp.dtype.base_dtype})."
)
for v in values[1:]:
if v.dtype.base_dtype != values[0].dtype.base_dtype:
raise ValueError(
"`values` must have elements all with the same dtype "
f"({values[0].dtype.base_dtype} vs {v.dtype.base_dtype})."
)
decayed_lr = learning_rate_schedule.PiecewiseConstantDecay(
boundaries, values, name=name
)
if not tf.executing_eagerly():
decayed_lr = decayed_lr(x)
else:
decayed_lr = functools.partial(decayed_lr, x)
return decayed_lr
@tf_export(v1=["train.polynomial_decay"])
def polynomial_decay(
learning_rate,
global_step,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None,
):
"""Applies a polynomial decay to the learning rate.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This function applies a polynomial decay function to a provided initial
`learning_rate` to reach an `end_learning_rate` in the given `decay_steps`.
It requires a `global_step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training
step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
  If `cycle` is True then a multiple of `decay_steps` is used, the first one
  that is bigger than `global_step`.
```python
decay_steps = decay_steps * ceil(global_step / decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5):
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate = tf.compat.v1.train.polynomial_decay(starter_learning_rate,
global_step,
decay_steps, end_learning_rate,
power=0.5)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
number. The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must
be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a Python number. The
power of the polynomial. Defaults to `1.0`.
cycle: A boolean, whether it should cycle beyond decay_steps. Defaults to
`False`.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.PolynomialDecay(
learning_rate,
decay_steps,
end_learning_rate=end_learning_rate,
power=power,
cycle=cycle,
name=name,
)
if not tf.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
@tf_export(v1=["train.natural_exp_decay"])
def natural_exp_decay(
learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None,
):
"""Applies natural exponential decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay
    function to a provided initial learning rate. It requires a `global_step`
value to compute the decayed learning rate. You can just pass a TensorFlow
variable that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
                                             decay_steps)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * \
                             floor(global_step / decay_steps))
```
Example: decay exponentially with a base of 0.96:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 5
k = 0.5
learning_rate = tf.compat.v1.train.natural_exp_decay(learning_rate,
global_step,
decay_steps, k)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
number. The initial learning rate.
global_step: A Python number. Global step to use for the decay
computation. Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'ExponentialTimeDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
@end_compatibility
"""
natural_exp_rate = tf.exp(tf.negative(decay_rate))
decayed_lr = learning_rate_schedule.ExponentialDecay(
learning_rate,
decay_steps,
natural_exp_rate,
staircase=staircase,
name=name,
)
if not tf.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
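# Note on the construction above: natural exponential decay is expressed via
# `ExponentialDecay` with base `exp(-decay_rate)`, using the identity
# exp(-decay_rate * global_step / decay_steps)
#     == exp(-decay_rate) ** (global_step / decay_steps).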
@tf_export(v1=["train.inverse_time_decay"])
def inverse_time_decay(
learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None,
):
"""Applies inverse time decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an inverse decay function
    to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate / (1 + decay_rate * global_step /
                                             decay_steps)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate / (1 + decay_rate * \
                             floor(global_step / decay_steps))
```
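    For example, with `learning_rate=0.1`, `decay_rate=0.5` and
    `decay_steps=1.0`, the continuous (non-staircase) decayed rate at
    `global_step=3` is `0.1 / (1 + 0.5 * 3 / 1.0) = 0.04`.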
Example: decay 1/t with a rate of 0.5:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate = tf.compat.v1.train.inverse_time_decay(learning_rate,
global_step,
decay_steps, decay_rate)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
number. The initial learning rate.
global_step: A Python number. Global step to use for the decay
computation. Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps, decay_rate, staircase=staircase, name=name
)
if not tf.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
@tf_export(v1=["train.cosine_decay"])
def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):
"""Applies cosine decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a cosine decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * global_step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
decayed_learning_rate = learning_rate * decayed
```
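    For example, with `alpha=0.0` the multiplier at
    `global_step = decay_steps / 2` is `0.5 * (1 + cos(pi / 2)) = 0.5`, so
    the learning rate is exactly halved at the midpoint of the decay.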
Example usage:
```python
decay_steps = 1000
lr_decayed = cosine_decay(learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum
learning rate value as a fraction of learning_rate.
name: String. Optional name of the operation. Defaults to 'CosineDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
References:
Stochastic Gradient Descent with Warm Restarts:
[Loshchilov et al., 2017]
      (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx)
([pdf](https://openreview.net/pdf?id=Skq89Scxx))
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.CosineDecay(
learning_rate, decay_steps, alpha=alpha, name=name
)
if not tf.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
@tf_export(v1=["train.cosine_decay_restarts"])
def cosine_decay_restarts(
learning_rate,
global_step,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name=None,
):
"""Applies cosine decay with restarts to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a cosine decay function with
restarts to a provided initial learning rate. It requires a `global_step`
value to compute the decayed learning rate. You can just pass a TensorFlow
variable that you increment at each training step.
The function returns the decayed learning rate while taking into account
possible warm restarts. The learning rate multiplier first decays
from 1 to `alpha` for `first_decay_steps` steps. Then, a warm restart is
performed. Each new warm restart runs for `t_mul` times more steps and with
`m_mul` times smaller initial learning rate.
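    For example, with `first_decay_steps=1000` and the default `t_mul=2.0`,
    successive periods span 1000, 2000, and 4000 steps. With the default
    `m_mul=1.0` each restart returns to the full initial learning rate,
    whereas `m_mul=0.5` would start the second period at half of it and the
    third at a quarter.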
Example usage:
```python
first_decay_steps = 1000
lr_decayed = cosine_decay_restarts(learning_rate, global_step,
first_decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation.
first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
number. Number of steps to decay over.
    t_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used
      to derive the number of iterations in the i-th period.
    m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
      Used to derive the initial learning rate of the i-th period.
alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum
learning rate value as a fraction of the learning_rate.
name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
References:
Stochastic Gradient Descent with Warm Restarts:
[Loshchilov et al., 2017]
      (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx)
([pdf](https://openreview.net/pdf?id=Skq89Scxx))
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.CosineDecayRestarts(
learning_rate,
first_decay_steps,
t_mul=t_mul,
m_mul=m_mul,
alpha=alpha,
name=name,
)
if not tf.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
@tf_export(v1=["train.linear_cosine_decay"])
def linear_cosine_decay(
learning_rate,
global_step,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None,
):
"""Applies linear cosine decay to the learning rate.
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a linear cosine decay
function to a provided initial learning rate. It requires a `global_step`
value to compute the decayed learning rate. You can just pass a TensorFlow
variable that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
    linear_decay = (decay_steps - global_step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * global_step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
decayed_learning_rate = learning_rate * decayed
```
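    With the defaults `num_periods=0.5`, `alpha=0.0` and `beta=0.001`, the
    multiplier starts at `(0.0 + 1.0) * 1.0 + 0.001 = 1.001` at
    `global_step=0` and ends at `beta = 0.001` at `global_step=decay_steps`,
    where both `linear_decay` and `cosine_decay` reach zero.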
Example usage:
```python
decay_steps = 1000
lr_decayed = linear_cosine_decay(learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
num_periods: Number of periods in the cosine part of the decay. See
computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
References:
Neural Optimizer Search with Reinforcement Learning:
[Bello et al., 2017](http://proceedings.mlr.press/v70/bello17a.html)
([pdf](http://proceedings.mlr.press/v70/bello17a/bello17a.pdf))
Stochastic Gradient Descent with Warm Restarts:
[Loshchilov et al., 2017]
      (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx)
([pdf](https://openreview.net/pdf?id=Skq89Scxx))
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.LinearCosineDecay(
learning_rate,
decay_steps,
num_periods=num_periods,
alpha=alpha,
beta=beta,
name=name,
)
if not tf.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
@tf_export(v1=["train.noisy_linear_cosine_decay"])
def noisy_linear_cosine_decay(
learning_rate,
global_step,
decay_steps,
initial_variance=1.0,
variance_decay=0.55,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None,
):
"""Applies noisy linear cosine decay to the learning rate.
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a noisy linear
cosine decay function to a provided initial learning rate.
It requires a `global_step` value to compute the decayed learning rate.
You can just pass a TensorFlow variable that you increment at each
training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
    linear_decay = (decay_steps - global_step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * global_step / decay_steps))
decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
decayed_learning_rate = learning_rate * decayed
```
    where `eps_t` is 0-centered Gaussian noise with variance
    `initial_variance / (1 + global_step) ** variance_decay`.
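    For example, with the defaults `initial_variance=1.0` and
    `variance_decay=0.55`, the noise standard deviation is 1.0 at
    `global_step=0` and shrinks to `(1.0 / 1000 ** 0.55) ** 0.5` (about 0.15)
    by `global_step=999`.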
Example usage:
```python
decay_steps = 1000
lr_decayed = noisy_linear_cosine_decay(
learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
initial_variance: initial variance for the noise. See computation above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay. See
computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
References:
Neural Optimizer Search with Reinforcement Learning:
[Bello et al., 2017](http://proceedings.mlr.press/v70/bello17a.html)
([pdf](http://proceedings.mlr.press/v70/bello17a/bello17a.pdf))
Stochastic Gradient Descent with Warm Restarts:
[Loshchilov et al., 2017]
      (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx)
([pdf](https://openreview.net/pdf?id=Skq89Scxx))
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for
changing the learning rate value across different invocations of optimizer
functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.NoisyLinearCosineDecay(
learning_rate,
decay_steps,
initial_variance=initial_variance,
variance_decay=variance_decay,
num_periods=num_periods,
alpha=alpha,
beta=beta,
name=name,
)
if not tf.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
| tf-keras/tf_keras/optimizers/legacy_learning_rate_decay.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy_learning_rate_decay.py",
"repo_id": "tf-keras",
"token_count": 11364
} | 255 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer utilities."""
import tensorflow.compat.v2 as tf
# isort: off
from tensorflow.python.platform import tf_logging as logging
def all_reduce_sum_gradients(grads_and_vars):
"""Returns all-reduced gradients aggregated via summation.
Args:
grads_and_vars: List of (gradient, variable) pairs.
Returns:
List of (gradient, variable) pairs where gradients have been all-reduced.
"""
grads_and_vars = list(grads_and_vars)
filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
if filtered_grads_and_vars:
if tf.__internal__.distribute.strategy_supports_no_merge_call():
grads = [pair[0] for pair in filtered_grads_and_vars]
reduced = tf.distribute.get_replica_context().all_reduce(
tf.distribute.ReduceOp.SUM, grads
)
else:
# TODO(b/183257003): Remove this branch
reduced = tf.distribute.get_replica_context().merge_call(
_all_reduce_sum_fn, args=(filtered_grads_and_vars,)
)
else:
reduced = []
# Copy 'reduced' but add None gradients back in
reduced_with_nones = []
reduced_pos = 0
for g, v in grads_and_vars:
if g is None:
reduced_with_nones.append((None, v))
else:
reduced_with_nones.append((reduced[reduced_pos], v))
reduced_pos += 1
assert reduced_pos == len(reduced), "Failed to add all gradients"
return reduced_with_nones
def filter_empty_gradients(grads_and_vars):
"""Filter out `(grad, var)` pairs that have a gradient equal to `None`."""
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
        variable = [v.name for _, v in grads_and_vars]
raise ValueError(
f"No gradients provided for any variable: {variable}. "
f"Provided `grads_and_vars` is {grads_and_vars}."
)
if vars_with_empty_grads:
logging.warning(
"Gradients do not exist for variables %s when minimizing the "
"loss. If you're using `model.compile()`, did you forget to "
"provide a `loss` argument?",
([v.name for v in vars_with_empty_grads]),
)
return filtered
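# Illustrative behavior of `filter_empty_gradients`: given
# `[(g0, v0), (None, v1)]` it returns `((g0, v0),)` and logs a warning that
# names `v1`; if every gradient is `None`, it raises a `ValueError` instead.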
def make_gradient_clipnorm_fn(clipnorm):
"""Creates a gradient transformation function for clipping by norm."""
if clipnorm is None:
return lambda grads_and_vars: grads_and_vars
def gradient_clipnorm_fn(grads_and_vars):
if isinstance(
tf.distribute.get_strategy(),
(
tf.distribute.experimental.CentralStorageStrategy,
tf.compat.v1.distribute.experimental.CentralStorageStrategy,
),
):
raise ValueError(
"`clipnorm` is not supported with `CenteralStorageStrategy`. "
f"The strategy used is {tf.distribute.get_strategy()}."
)
clipped_grads_and_vars = [
(tf.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars
]
return clipped_grads_and_vars
return gradient_clipnorm_fn
def make_global_gradient_clipnorm_fn(clipnorm):
"""Creates a gradient transformation function for clipping by norm."""
if clipnorm is None:
return lambda grads_and_vars: grads_and_vars
def gradient_clipnorm_fn(grads_and_vars):
if isinstance(
tf.distribute.get_strategy(),
(
tf.distribute.experimental.CentralStorageStrategy,
tf.compat.v1.distribute.experimental.CentralStorageStrategy,
),
):
raise ValueError(
"`global_clipnorm` is not supported with "
"`CenteralStorageStrategy`. "
f"The strategy used is {tf.distribute.get_strategy()}."
)
grads, variables = zip(*grads_and_vars)
clipped_grads, _ = tf.clip_by_global_norm(grads, clipnorm)
clipped_grads_and_vars = list(zip(clipped_grads, variables))
return clipped_grads_and_vars
return gradient_clipnorm_fn
def make_gradient_clipvalue_fn(clipvalue):
"""Creates a gradient transformation function for clipping by value."""
if clipvalue is None:
return lambda grads_and_vars: grads_and_vars
def gradient_clipvalue_fn(grads_and_vars):
if isinstance(
tf.distribute.get_strategy(),
(
tf.distribute.experimental.CentralStorageStrategy,
tf.compat.v1.distribute.experimental.CentralStorageStrategy,
),
):
raise ValueError(
"`clipvalue` is not supported with `CenteralStorageStrategy`. "
f"The strategy used is {tf.distribute.get_strategy()}."
)
clipped_grads_and_vars = [
(tf.clip_by_value(g, -clipvalue, clipvalue), v)
for g, v in grads_and_vars
]
return clipped_grads_and_vars
return gradient_clipvalue_fn
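# Usage sketch for the three factories above (the names below are local
# variables, not API): each factory returns the identity transformation when
# its argument is None, so callers can apply them unconditionally.
#
#   clip_fn = make_global_gradient_clipnorm_fn(clipnorm=1.0)
#   grads_and_vars = clip_fn(grads_and_vars)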
def _all_reduce_sum_fn(distribution, grads_and_vars):
return distribution.extended.batch_reduce_to(
tf.distribute.ReduceOp.SUM, grads_and_vars
)
| tf-keras/tf_keras/optimizers/utils.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/utils.py",
"repo_id": "tf-keras",
"token_count": 2687
} | 256 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file is a copy of the TensorBoard ProjectorConfig proto.
// Keep this file in sync with the source proto definition at
// https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/projector/projector_config.proto
syntax = "proto3";
package third_party.py.tf_keras.protobuf;
message SpriteMetadata {
string image_path = 1;
// [width, height] of a single image in the sprite.
repeated uint32 single_image_dim = 2;
}
message EmbeddingInfo {
string tensor_name = 1;
string metadata_path = 2;
string bookmarks_path = 3;
// Shape of the 2D tensor [N x D]. If missing, it will be inferred from the
// model checkpoint.
repeated uint32 tensor_shape = 4;
SpriteMetadata sprite = 5;
// Path to the TSV file holding the tensor values. If missing, the tensor
// is assumed to be stored in the model checkpoint.
string tensor_path = 6;
}
message ProjectorConfig {
// Path to the checkpoint file. Use either this or model_checkpoint_dir.
string model_checkpoint_path = 1;
repeated EmbeddingInfo embeddings = 2;
// Path to the checkpoint directory. The directory will be scanned for the
// latest checkpoint file.
string model_checkpoint_dir = 3;
}
| tf-keras/tf_keras/protobuf/projector_config.proto/0 | {
"file_path": "tf-keras/tf_keras/protobuf/projector_config.proto",
"repo_id": "tf-keras",
"token_count": 518
} | 257 |
# TF-Keras SavedModel
For questions, feedback, and feature requests please file a bug/contact kathywu@
## TensorFlow Core SavedModel implementation
In TensorFlow 2.0, all saving and loading implementations revolve around the
object graph generated from a root trackable object, and all trackable objects
connected to it through attributes. Program building blocks such as variables,
assets, and tables, and high level objects like Optimizers and Layers all
subclass the trackable class. Other objects like TensorFlow functions and
concrete functions are also saved as nodes in the object graph. When loading a
SavedModel, the object graph is used to recreate the structure of the original
object.
Please see the links below for more details:
- [Saved Model Guide](https://www.tensorflow.org/guide/saved_model)
- [Checkpoint Guide](https://www.tensorflow.org/guide/checkpoint)
## TF-Keras SavedModel implementation
### Overview
Keras object serialization is built on top of the core serialization.
All attributes that impact model execution or inspection are saved to the
SavedModel to allow the model to be recreated. These attributes are divided into
three categories:
1. python properties (e.g., layer name, layer config)
2. objects (e.g. data structures like list of variables or layers)
3. functions (e.g. call function, loss functions)
Trackable objects and TensorFlow functions are represented as nodes in the
trackable object graph, and each node in the graph stores information about
their python properties.
Since many attributes in TF-Keras Layers/Models are not Trackable objects or
tf.functions, these attributes are wrapped as trackable objects/tf.functions at
serialization time. For example, `layer.variables` is implemented as a python
property that concatenates the trainable and non-trainable variable lists.
During
serialization, a new Trackable List object is created and saved to the
`variables` attribute. Another example is the call function. Most models do not
decorate their call function with `tf.function`, since TF-Keras will take care of
the graph/function management. When the model is saved, the call function is
wrapped in a `tf.function` and added to the `__call__` attribute.
### `keras_api` attribute
Many attributes are only relevant for revivability. Instead of attaching these
directly to the exported object, they are saved to a new `keras_api` trackable
object that is then attached to the exported object. This avoids cluttering the
exported object with objects/functions that are only used by the TF-Keras library.
For example, `__call__` and `call_and_return_conditional_losses` are functions
saved for all models. The `__call__` function is attached directly to the
exported object, while `call_and_return_conditional_losses` is attached to a
separate object. Say a user saves the model, then loads the SavedModel using the
core loader (`tf.saved_model.load`, which does not rely on the TF-Keras
library to revive the model).
The loaded object will have a structure that looks like:
```
loaded object -- __call__
-- keras_api -- __call__
-- call_and_return_conditional_losses
```
The two call functions may be accessed through:
- `loaded.__call__` or `loaded.keras_api.__call__`
- `loaded.keras_api.call_and_return_conditional_losses`.
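For example, a minimal sketch (the saved-model path and the input shape below
are assumptions for illustration, not part of the API):

```python
import tensorflow as tf

loaded = tf.saved_model.load("/tmp/my_keras_model")
inputs = tf.ones((1, 10))

# Attached directly to the exported object:
outputs = loaded.__call__(inputs)
# Attached to the separate `keras_api` object:
outputs, losses = loaded.keras_api.call_and_return_conditional_losses(inputs)
```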
### Saving details
Keras Layers use a helper abstract class and an attribute validator class to
define and standardize the serialization implementation:
- [`SerializationImpl`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tf_keras/saving/saved_model/base_serialization.py):
Ensures that layer python properties are saved as a serialized JSON string in
the metadata field, and gathers all attributes to save with the TF-Keras object.
Please see the docstrings in each of the abstract methods/properties to see what
is required.
- [`SerializedAttributes`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tf_keras/saving/saved_model/serialized_attributes.py):
Tracks all of the attributes that must be saved with a TF-Keras object. Objects and
functions may be specified to be "keras_only", meaning that they will only
appear in the `keras_api` attribute.
The base `Layer` serialization is defined in
[`layer_serialization.py`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tf_keras/saving/saved_model/layer_serialization.py).
See `LayerAttributes` and `LayerSerializationImpl`.
**Adding a new attribute to base Layer SavedModel**
1. Add a new attributes to `LayerAttributes`.
2. Modify `LayerSerializationImpl` internal methods:
   a. If adding a python property, add the key-value item to the dictionary
      returned by `_python_properties_internal`.
   b. If adding a new object/function, modify the dictionary returned by
      `_get_serialized_attributes_internal`.
**Adding custom serialization for a Layer subclass.**
1. Create a new attribute validator by copying `LayerAttributes`, and add any
new attributes to serialize.
2. Subclass `LayerSerializationImpl`
3. Implement `_python_properties_internal` and/or
`_get_serialized_attributes_internal` to return the new attributes.
Unless you are modifying the loader (see section below on loading), please keep
the `object_identifier` the same.
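A rough sketch of steps 2-3 (the method names come from the files linked
above; the attribute that holds the layer being serialized, assumed here to
be `self.obj`, is an implementation detail):

```python
from tf_keras.saving.legacy.saved_model import layer_serialization


class MyLayerSerializationImpl(layer_serialization.LayerSerializationImpl):
    """Serialization for a hypothetical custom layer."""

    def _python_properties_internal(self):
        metadata = super()._python_properties_internal()
        # Hypothetical extra python property to persist with the layer.
        metadata["my_extra_property"] = self.obj.my_extra_property
        return metadata
```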
These instructions also carry over for modifying
[Model](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tf_keras/saving/saved_model/model_serialization.py)
and
[Network](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tf_keras/saving/saved_model/network_serialization.py)
serialization.
### Loading details
TODO(kathywu): Will write this section when the loading code is moved into
\*_serialization.py files.
| tf-keras/tf_keras/saving/legacy/saved_model/README.md/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/README.md",
"repo_id": "tf-keras",
"token_count": 1605
} | 258 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras SavedModel serialization.
TODO (kathywu): Move to layer_serialization.py. Some model-specific logic should
go to model_serialization.py.
"""
import functools
import threading
import weakref
import tensorflow.compat.v1.logging as logging
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer_utils
from tf_keras.engine import input_spec
from tf_keras.mixed_precision import autocast_variable
from tf_keras.saving.legacy import saving_utils
from tf_keras.saving.legacy.saved_model import constants
from tf_keras.saving.legacy.saved_model import load as keras_load
from tf_keras.saving.legacy.saved_model import serialized_attributes
from tf_keras.saving.legacy.saved_model import utils
from tf_keras.utils import layer_utils
from tf_keras.utils import tf_contextlib
from tf_keras.utils import tf_utils
from tf_keras.utils import version_utils
from tf_keras.utils.generic_utils import LazyLoader
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
base_layer = LazyLoader("base_layer", globals(), "tf_keras.engine.base_layer")
metrics = LazyLoader("metrics", globals(), "tf_keras.metrics")
input_layer = LazyLoader(
"input_layer", globals(), "tf_keras.engine.input_layer"
)
training_lib = LazyLoader("training_lib", globals(), "tf_keras.engine.training")
sequential_lib = LazyLoader(
"sequential_lib", globals(), "tf_keras.engine.sequential"
)
def should_skip_serialization(layer):
"""Skip serializing extra objects and functions if layer inputs aren't
set."""
saved_model_input_spec_set = (
isinstance(layer, training_lib.Model)
and layer._saved_model_inputs_spec is not None
)
if not layer.built and not saved_model_input_spec_set:
logging.warning(
"Skipping full serialization of TF-Keras layer {}, because "
"it is not built.".format(layer)
)
return True
return False
def _filter_shards(variables):
return [var for var in variables if not hasattr(var, "_sharded_container")]
def wrap_layer_objects(layer, serialization_cache):
"""Returns extra trackable objects to attach to the serialized layer.
Args:
layer: TF-Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
A dictionary containing all checkpointable objects from a
SerializedAttributes object. See LayerAttributes and ModelAttributes for
entire list of objects
"""
# Wrap all regularization losses as tf.functions.
# First, generate list of all regularization losses in this layer and
# sublayers.
all_losses = layer._callable_losses[:]
for child_layer in utils.list_all_layers(layer):
all_losses.extend(child_layer._callable_losses)
# Next, wrap all loss functions as tf.functions. Use the serialization cache
# to store already-wrapped functions.
keras_loss_cache = serialization_cache.setdefault("keras_losses", {})
wrapped_loss_functions = []
for loss_fn in all_losses:
if loss_fn in keras_loss_cache:
wrapped_loss_functions.append(keras_loss_cache[loss_fn])
else:
wrapped_loss = _wrap_unconditional_loss(
loss_fn, len(keras_loss_cache)
)
keras_loss_cache[loss_fn] = wrapped_loss
wrapped_loss_functions.append(wrapped_loss)
wrapped_layer_losses = [
keras_loss_cache[fn] for fn in layer._callable_losses[:]
]
layer_metrics = tf.__internal__.tracking.wrap(
{m.name: m for m in layer._metrics}
)
# Avoid duplicate creation of shard Variables on loading.
# `layer.variables` will return the shard Variables rather than the
# ShardedVariables (b/224541446), but TF-Keras loading will create new
# ShardedVariables (and thus shard Variables) from TF-Keras metadata if
# needed. There's no need to also save the shard Variables here, so filter
# them out.
variables = _filter_shards(layer.variables)
trainable_variables = _filter_shards(layer.trainable_variables)
non_trainable_variables = _filter_shards(layer.non_trainable_variables)
return dict(
variables=tf.__internal__.tracking.wrap(variables),
trainable_variables=tf.__internal__.tracking.wrap(trainable_variables),
non_trainable_variables=tf.__internal__.tracking.wrap(
non_trainable_variables
),
layers=tf.__internal__.tracking.wrap(utils.list_all_layers(layer)),
metrics=tf.__internal__.tracking.wrap(layer.metrics),
regularization_losses=tf.__internal__.tracking.wrap(
wrapped_loss_functions
),
layer_regularization_losses=tf.__internal__.tracking.wrap(
wrapped_layer_losses
),
layer_metrics=layer_metrics,
)
def wrap_layer_functions(layer, serialization_cache):
"""Returns dict of wrapped layer call function and losses in tf.functions.
Args:
layer: TF-Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
A dictionary containing all keras tf.functions to serialize. See
LayerAttributes and ModelAttributes for the list of all attributes.
"""
# Since Sequential models may be modified in place using model.add() or
# model.pop(), don't use saved functions.
if isinstance(layer, keras_load.RevivedLayer) and not isinstance(
layer, sequential_lib.Sequential
):
return {
fn_name: getattr(layer.keras_api, fn_name, None)
for fn_name in serialized_attributes.LayerAttributes.all_functions
}
# Reset the losses of the layer and its children. The call function in each
# child layer is replaced with tf.functions.
original_fns = _replace_child_layer_functions(layer, serialization_cache)
original_losses = _reset_layer_losses(layer)
# Wrap all the layer call and activity regularizer functions.
# Use LayerCallCollection to ensure that all layer call functions (__call__,
# call with losses) are traced with the same inputs.
call_collection = LayerCallCollection(layer)
call_fn_with_losses = call_collection.add_function(
_wrap_call_and_conditional_losses(layer),
f"{layer.name}_layer_call_and_return_conditional_losses",
# If any of this layer's child layers use the training arg, the traced
# call functions of this layer will have a training keyword argument. If
# the original layer does not expect the training arg, then it will have
# to be removed (by setting `match_layer_training_arg`).
match_layer_training_arg=True,
)
call_fn = call_collection.add_function(
_extract_outputs_from_fn(layer, call_fn_with_losses),
f"{layer.name}_layer_call_fn",
# Since `call_fn` wraps call_fn_with_losses and not the original call
# function, `match_layer_training_arg` should be set to False.
match_layer_training_arg=False,
)
fns = {
"call_and_return_conditional_losses": call_fn_with_losses,
"__call__": call_fn,
}
if layer._activity_regularizer is not None:
fns["activity_regularizer_fn"] = _wrap_activity_regularizer(layer)
fns[
"call_and_return_all_conditional_losses"
] = call_collection.add_function(
_append_activity_regularizer_loss(
layer, call_fn_with_losses, fns["activity_regularizer_fn"]
),
f"{layer.name}_layer_call_and_return_all_conditional_losses",
match_layer_training_arg=False,
)
else:
fns["activity_regularizer_fn"] = None
fns["call_and_return_all_conditional_losses"] = call_fn_with_losses
# Manually trigger traces before restoring the overwritten functions. The
# functions are traced within the layer call context to ensure that layer
# functions (e.g. add_loss) behave as though running in graph mode.
with tracing_scope():
call_collection.trace_with_input_signature()
with base_layer_utils.call_context().enter(
layer, inputs=None, build_graph=True, training=None, saving=True
):
for fn in fns.values():
if fn is not None and not isinstance(fn, LayerCall):
fn.get_concrete_function()
# Restore overwritten functions and losses
_restore_child_layer_functions(original_fns)
_restore_layer_losses(original_losses)
return fns
def default_save_signature(layer):
original_losses = _reset_layer_losses(layer)
fn = saving_utils.trace_model_call(layer)
_restore_layer_losses(original_losses)
return fn
def _replace_child_layer_functions(layer, serialization_cache):
"""Replaces functions in the children layers with wrapped tf.functions.
This step allows functions from parent layers to reference the wrapped
functions from their children layers instead of retracing the ops.
This function also resets all losses stored in the layer. These are stored
in the returned dictionary. Use `_restore_child_layer_functions` to restore
the original attributes.
Args:
layer: TF-Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
Dictionary mapping layer objects -> original functions and losses:
{ Child layer 1: {
'losses': Original losses,
'call': Original call function
'_activity_regularizer': Original activity regularizer},
Child layer 2: ...
}
"""
original_fns = {}
def replace_layer_functions(child_layer, serialized_fns):
"""Replaces layer call and activity regularizer with wrapped
functions."""
original_fns[child_layer] = {
"call": child_layer.call,
"_activity_regularizer": child_layer._activity_regularizer,
}
with utils.no_automatic_dependency_tracking_scope(child_layer):
try:
child_layer._activity_regularizer = serialized_fns.get(
"activity_regularizer_fn"
)
except AttributeError:
# Some layers have an unsettable activity regularizer.
pass
child_layer.call = utils.use_wrapped_call(
child_layer,
serialized_fns["call_and_return_conditional_losses"],
child_layer._call_spec,
default_training_value=False,
)
def replace_metric_functions(child_layer, serialized_fns):
"""Replaces metric functions with wrapped functions."""
original_fns[child_layer] = {
"__call__": child_layer.__call__,
"result": child_layer.result,
"update_state": child_layer.update_state,
}
with utils.no_automatic_dependency_tracking_scope(child_layer):
child_layer.__call__ = serialized_fns["__call__"]
child_layer.result = serialized_fns["result"]
child_layer.update_state = serialized_fns["update_state"]
for child_layer in utils.list_all_layers(layer):
if isinstance(child_layer, input_layer.InputLayer):
continue
if child_layer not in serialization_cache[constants.KERAS_CACHE_KEY]:
serialized_functions = child_layer._trackable_saved_model_saver._get_serialized_attributes( # noqa: E501
serialization_cache
).functions
else:
serialized_functions = serialization_cache[
constants.KERAS_CACHE_KEY
][child_layer].functions
if not serialized_functions:
# This indicates either:
# - circular dependency, which means the current layer's functions
# should be wrapped first.
# - Child layer's inputs are not defined, so its functions have
# not been wrapped. In this case, no replacement is necessary so
# move on to the next child.
continue
if isinstance(child_layer, metrics.Metric):
replace_metric_functions(child_layer, serialized_functions)
else:
replace_layer_functions(child_layer, serialized_functions)
return original_fns
def _restore_child_layer_functions(original_fns):
"""Restores attributes replaced with `_replace_child_layer_functions`."""
for child_layer, fns in original_fns.items():
with utils.no_automatic_dependency_tracking_scope(child_layer):
for fn_name, fn in fns.items():
try:
setattr(child_layer, fn_name, fn)
except AttributeError:
# In the case of _activity_regularizer, setting the
# attribute may be disallowed.
pass
def _reset_layer_losses(parent_layer):
"""Resets losses of layer and its sublayers, and returns original losses."""
losses_dict = {}
for layer in utils.list_all_layers_and_sublayers(parent_layer):
losses_dict[layer] = {
"losses": layer._losses[:],
"eager_losses": layer._eager_losses[:],
}
with utils.no_automatic_dependency_tracking_scope(layer):
layer._losses = []
layer._eager_losses = []
return losses_dict
def _restore_layer_losses(losses_dict):
for layer in losses_dict:
with utils.no_automatic_dependency_tracking_scope(layer):
layer._losses = losses_dict[layer]["losses"]
layer._eager_losses = losses_dict[layer]["eager_losses"]
class LayerTracingContext(threading.local):
def __init__(self):
super().__init__()
self.enable_call_tracing = False
self.trace_queue = []
_thread_local_data = LayerTracingContext()
@tf_contextlib.contextmanager
def tracing_scope():
"""Enables tracing scope."""
# This enables the LayerCallCollection's tracing mechanism to trace all call
# functions in the collection.
previous_value = _thread_local_data.enable_call_tracing
previous_queue = _thread_local_data.trace_queue
try:
_thread_local_data.enable_call_tracing = True
_thread_local_data.trace_queue = []
yield
finally:
# Run traces from the queue.
while _thread_local_data.trace_queue:
fn, args, kwargs, training = _thread_local_data.trace_queue.pop(0)
if training is not None:
with backend.deprecated_internal_learning_phase_scope(training):
fn.get_concrete_function(*args, **kwargs)
else:
fn.get_concrete_function(*args, **kwargs)
_thread_local_data.trace_queue = previous_queue
_thread_local_data.enable_call_tracing = previous_value
def add_trace_to_queue(fn, args, kwargs, training=None):
if tracing_enabled():
_thread_local_data.trace_queue.append(
(fn, args[:], kwargs.copy(), training)
)
def tracing_enabled():
"""Whether to add extra traces to the queue."""
return _thread_local_data.enable_call_tracing
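# Usage sketch: `wrap_layer_functions` above drives this machinery with
#
#   with tracing_scope():
#       call_collection.trace_with_input_signature()
#
# Calls to `add_trace_to_queue` made while the scope is active are deferred
# and executed in the scope's `finally` block.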
class LayerCallCollection:
"""Groups wrapped layer call functions.
This is used to ensure that all layer call functions are traced with the
    same inputs:
- call
- call_and_return_conditional_losses
- call_and_return_all_conditional_losses
"""
def __init__(self, layer):
self.layer = layer
self.layer_call_method = _get_layer_call_method(layer)
self._expects_training_arg = utils.layer_uses_training_bool(layer)
self._call_spec = layer._call_spec
# Create new call spec if the layer itself does not accept a training
# arg, but one of its child layers does. When this layer's call
# functions are traced, they will be traced with an added `training`
# keyword argument.
if not self.layer._expects_training_arg and self._expects_training_arg:
arg_spec = utils.set_training_arg_spec(
self._call_spec.full_argspec, False
)
self._call_spec = layer_utils.CallFunctionSpec(arg_spec)
self._layer_inputs = self._get_layer_inputs(layer)
self._functions = weakref.WeakValueDictionary()
# Get the input argument name from the args.
if self._call_spec.arg_names:
self._input_arg_name = self._call_spec.arg_names[0]
else:
# Layer could be defined with only varargs, in which case use a
# default name.
self._input_arg_name = "inputs"
def _get_layer_inputs(self, layer):
"""Inspects layer object and returns the inferred input signature.
Args:
layer: Layer object.
Returns:
List of possibly nested TensorSpecs of the layer call function inputs
in the form of `(args, kwargs)`
"""
if (
isinstance(layer.call, tf.__internal__.function.Function)
and layer.call.input_signature is not None
):
return layer.call.input_signature, {}
elif isinstance(layer, training_lib.Model):
return saving_utils.model_call_inputs(layer)
elif (
layer.input_spec is not None
and layer._use_input_spec_as_call_signature
):
def to_tensor_spec_or_none(x):
spec = input_spec.to_tensor_spec(x, layer._compute_dtype)
# If the shape is too general (e.g. multiple dimensions are
# allowed), return None so that separate functions can be
# generated for each inferred input signature.
# TODO(b/134962016): currently partial signatures are not
# supported.
if spec.shape == tf.TensorShape(None):
return None, None
return spec
input_signature = [
tf.nest.map_structure(to_tensor_spec_or_none, layer.input_spec)
]
return input_signature, {}
else:
return None, None
def add_trace(self, *args, **kwargs):
"""Traces all functions with the same args and kwargs.
Args:
*args: Positional args passed to the original function.
**kwargs: Keyword args passed to the original function.
"""
args = list(args)
kwargs = kwargs.copy()
for fn in self._functions.values():
# TODO(kathywu): Replace arguments with broader shapes defined in
# the input signature.
if self._expects_training_arg:
def trace_with_training(value, fn=fn):
nonlocal args, kwargs
(args, kwargs,) = self._call_spec.set_arg_value(
"training", value, args, kwargs, inputs_in_args=True
)
add_trace_to_queue(fn, args, kwargs, value)
trace_with_training(True)
trace_with_training(False)
else:
add_trace_to_queue(fn, args, kwargs)
def training_arg_was_passed(self, args, kwargs):
return self._call_spec.arg_was_passed(
"training", args, kwargs, inputs_in_args=True
)
def get_training_arg_value(self, args, kwargs):
try:
return self._call_spec.get_arg_value(
"training", args, kwargs, inputs_in_args=True
)
except KeyError: # Training is not in args or kwargs.
return None
def get_input_arg_value(self, args, kwargs):
return self._call_spec.get_arg_value(
self._input_arg_name, args, kwargs, inputs_in_args=True
)
def _maybe_wrap_with_training_arg(self, call_fn, match_layer_training_arg):
"""Wraps call function with added training argument if necessary."""
if not self.layer._expects_training_arg and self._expects_training_arg:
# Add training arg to wrapper function.
def wrap_with_training_arg(*args, **kwargs):
if match_layer_training_arg:
# Remove the training value, since the original call_fn does
# not expect a training arg. Instead, the training value
# will be propagated using the call context created in
# LayerCall.
args = list(args)
kwargs = kwargs.copy()
(args, kwargs,) = self._call_spec.set_arg_value(
"training",
None,
args,
kwargs,
inputs_in_args=True,
pop_kwarg_if_none=True,
)
return call_fn(*args, **kwargs)
return tf.__internal__.decorator.make_decorator(
target=call_fn,
decorator_func=wrap_with_training_arg,
decorator_argspec=self._call_spec.full_argspec,
)
return call_fn
def add_function(self, call_fn, name, match_layer_training_arg):
"""Adds a layer call function to the collection.
Args:
call_fn: a python function
name: Name of call function
match_layer_training_arg: If True, removes the `training` from the
function arguments when calling `call_fn`.
Returns:
LayerCall (tf.function)
"""
fn = LayerCall(
self,
self._maybe_wrap_with_training_arg(
call_fn, match_layer_training_arg
),
name,
)
self._functions[name] = fn.wrapped_call
return fn
def trace_with_input_signature(self):
"""Trace with the layer/models inferred input signature if possible."""
if self._layer_inputs[0] is None:
return
args, kwargs = self._layer_inputs
if self._expects_training_arg:
args, kwargs = self._call_spec.set_arg_value(
"training", False, args, kwargs, inputs_in_args=True
)
if None not in tf.nest.flatten([args, kwargs]):
# Manually add traces for layers that have keyword arguments and
# have a fully defined input signature.
self.add_trace(*args, **kwargs)
def _filtered_inputs(inputs):
return list(filter(tf_utils.is_tensor_or_variable, tf.nest.flatten(inputs)))
def layer_call_wrapper(call_collection, method, name):
"""Ensures layer losses are kept the same, and runs method in call
context."""
# Create wrapper that deals with losses and call context.
def wrapper(*args, **kwargs):
"""Calls method within call context."""
layer = call_collection.layer
training = None
inputs = _filtered_inputs([args, kwargs])
if (args or kwargs) and call_collection.training_arg_was_passed(
args, kwargs
):
training = call_collection.get_training_arg_value(args, kwargs)
original_losses = _reset_layer_losses(layer)
with base_layer_utils.call_context().enter(
layer,
inputs=inputs,
build_graph=False,
training=training,
saving=True,
):
with autocast_variable.enable_auto_cast_variables(
layer._compute_dtype_object
):
ret = method(*args, **kwargs)
_restore_layer_losses(original_losses)
return ret
# Rename to `name`, since tf.function doesn't have a name argument. Without
# this, all functions returned by this method will be named "call", which
# would be a nightmare to debug.
fn = tf.__internal__.decorator.make_decorator(
target=method, decorator_func=wrapper
)
fn.__name__ = name
return fn
class LayerCall:
"""Function that triggers traces of other functions in the same
collection."""
def __init__(self, call_collection, call_fn, name):
"""Initializes a LayerCall object.
Args:
call_collection: a LayerCallCollection, which contains the other layer
call functions (e.g. call_with_conditional_losses, call). These
functions should be traced with the same arguments.
call_fn: A call function.
name: Name of the call function.
"""
self.call_collection = call_collection
self.wrapped_call = tf.function(
layer_call_wrapper(call_collection, call_fn, name)
)
def _maybe_trace(self, args, kwargs):
# Trigger traces of other call functions + extra training-arg traces.
if tracing_enabled():
self.call_collection.add_trace(*args, **kwargs)
def __call__(self, *args, **kwargs):
self._maybe_trace(args, kwargs)
return self.wrapped_call(*args, **kwargs)
def get_concrete_function(self, *args, **kwargs):
self._maybe_trace(args, kwargs)
return self.wrapped_call.get_concrete_function(*args, **kwargs)
def _wrap_call_and_conditional_losses(layer):
"""Wraps call function that returns a tuple of (outputs, losses).
The losses returned are conditional on the inputs passed to the call
    function. Unconditional losses (e.g. weight regularization) are wrapped
separately.
Args:
layer: a TF-Keras layer object
Returns:
python call function that returns outputs and conditional losses --
excludes activity regularizer
"""
# Create function that generates both outputs and losses
layer_call = _get_layer_call_method(layer)
def call_and_return_conditional_losses(*args, **kwargs):
"""Returns layer (call_output, conditional losses) tuple."""
call_output = layer_call(*args, **kwargs)
if version_utils.is_v1_layer_or_model(layer):
conditional_losses = layer.get_losses_for(
_filtered_inputs([args, kwargs])
)
else:
conditional_losses = [
l for l in layer.losses if not hasattr(l, "_unconditional_loss")
]
return call_output, conditional_losses
return _create_call_fn_decorator(layer, call_and_return_conditional_losses)
def _extract_outputs_from_fn(layer, call_and_return_conditional_losses):
"""Returns a function that returns only call function outputs."""
if isinstance(layer, keras_load.RevivedLayer):
return layer.keras_api.__call__
def call(inputs, *args, **kwargs):
return call_and_return_conditional_losses(inputs, *args, **kwargs)[0]
return _create_call_fn_decorator(layer, call)
def _append_activity_regularizer_loss(
layer, call_fn_with_losses, activity_regularizer_fn
):
"""Appends activity regularizer loss to losses returned by the wrapped
fn."""
def fn(inputs, *args, **kwargs):
outputs, losses = call_fn_with_losses(inputs, *args, **kwargs)
losses.append(activity_regularizer_fn(outputs))
return outputs, losses
return _create_call_fn_decorator(layer, fn)
def _create_call_fn_decorator(layer, wrapped_call):
call_fn = _get_layer_call_method(layer)
fn, arg_spec = utils.maybe_add_training_arg(
layer._call_spec,
wrapped_call,
layer._expects_training_arg,
default_training_value=False,
)
return tf.__internal__.decorator.make_decorator(
target=call_fn, decorator_func=fn, decorator_argspec=arg_spec
)
def _wrap_unconditional_loss(loss_fn, index):
"""Wraps callable/unconditional loss, returning a serializable function."""
# Extract original loss function from partial function
fn = loss_fn.args[0] if isinstance(loss_fn, functools.partial) else loss_fn
if isinstance(fn, tf.__internal__.function.Function):
return fn
else:
return tf.__internal__.function.Function(
fn, f"loss_fn_{index}", input_signature=[]
)
def _wrap_activity_regularizer(layer):
"""Wraps the activity regularizer."""
if isinstance(
layer._activity_regularizer, tf.__internal__.function.Function
):
return layer._activity_regularizer
return tf.__internal__.function.Function(
layer._activity_regularizer,
f"{layer.name}_activity_regularizer",
input_signature=[
tf.TensorSpec(None, layer._compute_dtype or backend.floatx())
],
)
def _get_layer_call_method(layer):
if isinstance(layer.call, (tf.__internal__.function.Function)):
return layer.call.python_function
return layer.call
| tf-keras/tf_keras/saving/legacy/saved_model/save_impl.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/save_impl.py",
"repo_id": "tf-keras",
"token_count": 12268
} | 259 |
# Description:
# Contains the TF-Keras testing infrastructure.
# Placeholder: load unaliased py_library
# Placeholder: load unaliased py_test
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = ["//tf_keras:friends"],
licenses = ["notice"],
)
py_library(
name = "test_combinations",
srcs = [
"test_combinations.py",
],
srcs_version = "PY3",
deps = [
":test_utils",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
],
)
py_library(
name = "test_utils",
srcs = [
"test_utils.py",
],
srcs_version = "PY3",
deps = [
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/engine:base_layer_utils",
"//tf_keras/layers",
"//tf_keras/models",
"//tf_keras/optimizers/legacy:optimizers",
"//tf_keras/utils:tf_contextlib",
"//tf_keras/utils:tf_inspect",
],
)
# TODO(mattdangerw): For now, TF-Keras will maintain its own doc checker.
# If TensorFlow exposes one, we could consider depending on that directly.
py_library(
name = "keras_doctest_lib",
srcs = ["keras_doctest_lib.py"],
srcs_version = "PY3",
deps = [
"//:expect_numpy_installed",
],
)
py_test(
name = "keras_doctest_lib_test",
srcs = ["keras_doctest_lib_test.py"],
python_version = "PY3",
tags = [
"noasan",
"nomsan",
"notsan",
],
deps = [
":keras_doctest_lib",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
],
)
tf_py_test(
name = "test_combinations_test",
size = "small",
srcs = ["test_combinations_test.py"],
python_version = "PY3",
tags = ["notsan"],
deps = [
":test_combinations",
":test_utils",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
],
)
| tf-keras/tf_keras/testing_infra/BUILD/0 | {
"file_path": "tf-keras/tf_keras/testing_infra/BUILD",
"repo_id": "tf-keras",
"token_count": 1065
} | 260 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TF-Keras."""
import os
import random
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras import utils
from tf_keras.layers.rnn import legacy_cells
from tf_keras.legacy_tf_layers import base as base_layer
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
class KerasIntegrationTest(test_combinations.TestCase):
def _save_and_reload_model(self, model):
self.temp_dir = self.get_temp_dir()
fpath = os.path.join(
self.temp_dir, f"test_model_{random.randint(0, 10000000.0)}"
)
if tf.executing_eagerly():
save_format = "tf"
else:
if (
not isinstance(model, keras.Sequential)
and not model._is_graph_network
):
return model # Not supported
save_format = "h5"
model.save(fpath, save_format=save_format)
model = keras.models.load_model(fpath)
return model
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
class VectorClassificationIntegrationTest(test_combinations.TestCase):
def test_vector_classification(self):
np.random.seed(1337)
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=100, test_samples=0, input_shape=(10,), num_classes=2
)
y_train = utils.to_categorical(y_train)
model = test_utils.get_model_from_layers(
[
keras.layers.Dense(16, activation="relu"),
keras.layers.Dropout(0.1),
keras.layers.Dense(y_train.shape[-1], activation="softmax"),
],
input_shape=x_train.shape[1:],
)
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.legacy.adam.Adam(0.005),
metrics=["acc"],
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(
x_train,
y_train,
epochs=10,
batch_size=10,
validation_data=(x_train, y_train),
verbose=2,
)
self.assertGreater(history.history["val_acc"][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
def test_vector_classification_shared_model(self):
# Test that Sequential models that feature internal updates
# and internal losses can be shared.
np.random.seed(1337)
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=100, test_samples=0, input_shape=(10,), num_classes=2
)
y_train = utils.to_categorical(y_train)
base_model = test_utils.get_model_from_layers(
[
keras.layers.Dense(
16,
activation="relu",
kernel_regularizer=keras.regularizers.l2(1e-5),
bias_regularizer=keras.regularizers.l2(1e-5),
),
keras.layers.BatchNormalization(),
],
input_shape=x_train.shape[1:],
)
x = keras.layers.Input(x_train.shape[1:])
y = base_model(x)
y = keras.layers.Dense(y_train.shape[-1], activation="softmax")(y)
model = keras.models.Model(x, y)
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.legacy.adam.Adam(0.005),
metrics=["acc"],
run_eagerly=test_utils.should_run_eagerly(),
)
self.assertLen(model.losses, 2)
if not tf.executing_eagerly():
self.assertLen(model.get_updates_for(x), 2)
history = model.fit(
x_train,
y_train,
epochs=10,
batch_size=10,
validation_data=(x_train, y_train),
verbose=2,
)
self.assertGreater(history.history["val_acc"][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
@test_combinations.run_all_keras_modes
class SequentialIntegrationTest(KerasIntegrationTest):
def test_sequential_save_and_pop(self):
# Test the following sequence of actions:
# - construct a Sequential model and train it
# - save it
# - load it
# - pop its last layer and add a new layer instead
# - continue training
np.random.seed(1337)
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=100, test_samples=0, input_shape=(10,), num_classes=2
)
y_train = utils.to_categorical(y_train)
model = keras.Sequential(
[
keras.layers.Dense(16, activation="relu"),
keras.layers.Dropout(0.1),
keras.layers.Dense(y_train.shape[-1], activation="softmax"),
]
)
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.legacy.adam.Adam(0.005),
metrics=["acc"],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
x_train,
y_train,
epochs=1,
batch_size=10,
validation_data=(x_train, y_train),
verbose=2,
)
model = self._save_and_reload_model(model)
model.pop()
model.add(keras.layers.Dense(y_train.shape[-1], activation="softmax"))
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.legacy.adam.Adam(0.005),
metrics=["acc"],
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(
x_train,
y_train,
epochs=10,
batch_size=10,
validation_data=(x_train, y_train),
verbose=2,
)
self.assertGreater(history.history["val_acc"][-1], 0.7)
model = self._save_and_reload_model(model)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
# See b/122473407
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TimeseriesClassificationIntegrationTest(test_combinations.TestCase):
@test_combinations.run_with_all_model_types
def test_timeseries_classification(self):
np.random.seed(1337)
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 10),
num_classes=2,
)
y_train = utils.to_categorical(y_train)
layers = [
keras.layers.LSTM(5, return_sequences=True),
keras.layers.GRU(y_train.shape[-1], activation="softmax"),
]
model = test_utils.get_model_from_layers(
layers, input_shape=x_train.shape[1:]
)
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.legacy.adam.Adam(0.005),
metrics=["acc"],
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(
x_train,
y_train,
epochs=15,
batch_size=10,
validation_data=(x_train, y_train),
verbose=2,
)
self.assertGreater(history.history["val_acc"][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
def test_timeseries_classification_sequential_tf_rnn(self):
np.random.seed(1337)
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 10),
num_classes=2,
)
y_train = utils.to_categorical(y_train)
with base_layer.keras_style_scope():
model = keras.models.Sequential()
model.add(
keras.layers.RNN(
legacy_cells.LSTMCell(5),
return_sequences=True,
input_shape=x_train.shape[1:],
)
)
model.add(
keras.layers.RNN(
legacy_cells.GRUCell(
y_train.shape[-1],
activation="softmax",
dtype=tf.float32,
)
)
)
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.legacy.adam.Adam(0.005),
metrics=["acc"],
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(
x_train,
y_train,
epochs=15,
batch_size=10,
validation_data=(x_train, y_train),
verbose=2,
)
self.assertGreater(history.history["val_acc"][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
class ImageClassificationIntegrationTest(test_combinations.TestCase):
def test_image_classification(self):
np.random.seed(1337)
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10, 10, 3),
num_classes=2,
)
y_train = utils.to_categorical(y_train)
layers = [
keras.layers.Conv2D(4, 3, padding="same", activation="relu"),
keras.layers.Conv2D(8, 3, padding="same"),
keras.layers.BatchNormalization(),
keras.layers.Conv2D(8, 3, padding="same"),
keras.layers.Flatten(),
keras.layers.Dense(y_train.shape[-1], activation="softmax"),
]
model = test_utils.get_model_from_layers(
layers, input_shape=x_train.shape[1:]
)
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.legacy.adam.Adam(0.005),
metrics=["acc"],
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(
x_train,
y_train,
epochs=10,
batch_size=10,
validation_data=(x_train, y_train),
verbose=2,
)
self.assertGreater(history.history["val_acc"][-1], 0.7)
_, val_acc = model.evaluate(x_train, y_train)
self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
predictions = model.predict(x_train)
self.assertEqual(predictions.shape, (x_train.shape[0], 2))
@test_combinations.run_all_keras_modes
class ActivationV2IntegrationTest(test_combinations.TestCase):
"""Tests activation function V2 in model exporting and loading.
This test is to verify in TF 2.x, when 'tf.nn.softmax' is used as an
activation function, its model exporting and loading work as expected.
Check b/123041942 for details.
"""
def test_serialization_v2_model(self):
np.random.seed(1337)
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=100, test_samples=0, input_shape=(10,), num_classes=2
)
y_train = utils.to_categorical(y_train)
model = keras.Sequential(
[
keras.layers.Flatten(input_shape=x_train.shape[1:]),
keras.layers.Dense(10, activation=tf.nn.relu),
# To mimic 'tf.nn.softmax' used in TF 2.x.
keras.layers.Dense(
y_train.shape[-1], activation=tf.math.softmax
),
]
)
# Check if 'softmax' is in model.get_config().
last_layer_activation = model.get_layer(index=2).get_config()[
"activation"
]
self.assertEqual(last_layer_activation, "softmax")
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.legacy.adam.Adam(0.005),
metrics=["accuracy"],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
x_train,
y_train,
epochs=2,
batch_size=10,
validation_data=(x_train, y_train),
verbose=2,
)
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
loaded_model = keras.models.load_model(output_path)
self.assertEqual(model.summary(), loaded_model.summary())
@test_combinations.run_with_all_model_types
@test_utils.run_v2_only
class TokenClassificationIntegrationTest(test_combinations.TestCase):
"""Tests a very simple token classification model.
The main purpose of this test is to verify that everything works as expected
when input sequences have variable length, and batches are padded only to
the maximum length of each batch. This is very common in NLP, and results in
the sequence dimension varying with each batch step for both the features
and the labels.
"""
def test_token_classification(self):
def densify(x, y):
return x.to_tensor(), y.to_tensor()
utils.set_random_seed(1337)
data = tf.ragged.stack(
[
np.random.randint(low=0, high=16, size=random.randint(4, 16))
for _ in range(100)
]
)
labels = tf.ragged.stack(
[np.random.randint(low=0, high=3, size=len(arr)) for arr in data]
)
features_dataset = tf.data.Dataset.from_tensor_slices(data)
labels_dataset = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((features_dataset, labels_dataset))
dataset = dataset.batch(batch_size=10)
dataset = dataset.map(densify) # Pads with 0 values by default
layers = [
keras.layers.Embedding(16, 4),
keras.layers.Conv1D(4, 5, padding="same", activation="relu"),
keras.layers.Conv1D(8, 5, padding="same"),
keras.layers.BatchNormalization(),
keras.layers.Conv1D(3, 5, padding="same", activation="softmax"),
]
model = test_utils.get_model_from_layers(layers, input_shape=(None,))
model.compile(
loss="sparse_categorical_crossentropy",
optimizer="adam",
metrics=["acc"],
)
history = model.fit(
dataset, epochs=10, validation_data=dataset, verbose=2
)
self.assertGreater(history.history["val_acc"][-1], 0.5)
_, val_acc = model.evaluate(dataset)
self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
predictions = model.predict(dataset)
self.assertIsInstance(predictions, tf.RaggedTensor)
self.assertEqual(predictions.shape[0], len(dataset) * 10)
self.assertEqual(predictions.shape[-1], 3)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/tests/integration_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/integration_test.py",
"repo_id": "tf-keras",
"token_count": 8246
} | 261 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow.compat.v2 as tf
from tf_keras.engine import training
from tf_keras.layers import core
from tf_keras.optimizers.legacy import adam
# isort: off
from tensorflow.compiler.tests import xla_test
from tensorflow.python.checkpoint import (
checkpoint as trackable_utils,
)
class NonLayerTrackable(tf.Module):
def __init__(self):
super().__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[]
)
class Subclassed(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super().__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class CheckpointingTests(xla_test.XLATestCase):
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
for training_continuation in range(3):
with self.test_scope():
model = Subclassed()
optimizer = adam.Adam(0.001)
root = tf.train.Checkpoint(optimizer=optimizer, model=model)
manager = tf.train.CheckpointManager(
root, checkpoint_directory, max_to_keep=2
)
root.restore(manager.latest_checkpoint)
for _ in range(num_training_steps):
input_value = tf.constant([[3.0]])
with tf.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
manager.save()
self.assertEqual(
(training_continuation + 1) * num_training_steps,
root.optimizer.iterations.numpy(),
)
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
tf.test.main()
| tf-keras/tf_keras/tests/tracking_util_xla_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/tracking_util_xla_test.py",
"repo_id": "tf-keras",
"token_count": 1236
} | 262 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used by convolution layers."""
import itertools
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
def convert_data_format(data_format, ndim):
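    """Maps a Keras `data_format` string and rank to the TF op format string.
    For example (following the branches below), `("channels_last", 4)` maps
    to `"NHWC"` and `("channels_first", 3)` maps to `"NCW"`.
    """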
if data_format == "channels_last":
if ndim == 3:
return "NWC"
elif ndim == 4:
return "NHWC"
elif ndim == 5:
return "NDHWC"
else:
raise ValueError(
f"Input rank not supported: {ndim}. "
"Expected values are [3, 4, 5]"
)
elif data_format == "channels_first":
if ndim == 3:
return "NCW"
elif ndim == 4:
return "NCHW"
elif ndim == 5:
return "NCDHW"
else:
raise ValueError(
f"Input rank not supported: {ndim}. "
"Expected values are [3, 4, 5]"
)
else:
raise ValueError(
f"Invalid data_format: {data_format}. "
'Expected values are ["channels_first", "channels_last"]'
)
def normalize_tuple(value, n, name, allow_zero=False):
"""Transforms non-negative/positive integer/integers into an integer tuple.
Args:
      value: The value to validate and convert. Can be an int, or any
        iterable of ints.
n: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
allow_zero: A ValueError will be raised if zero is received
and this param is False. Defaults to `False`.
Returns:
A tuple of n integers.
Raises:
      ValueError: If something other than an int or an iterable of ints is
        passed, or if any element is negative (or zero, when `allow_zero`
        is False).
"""
error_msg = (
f"The `{name}` argument must be a tuple of {n} "
f"integers. Received: {value}"
)
if isinstance(value, int):
value_tuple = (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError(error_msg)
if len(value_tuple) != n:
raise ValueError(error_msg)
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError):
error_msg += (
f"including element {single_value} of "
f"type {type(single_value)}"
)
raise ValueError(error_msg)
if allow_zero:
unqualified_values = {v for v in value_tuple if v < 0}
req_msg = ">= 0"
else:
unqualified_values = {v for v in value_tuple if v <= 0}
req_msg = "> 0"
if unqualified_values:
error_msg += (
f" including {unqualified_values}"
f" that does not satisfy the requirement `{req_msg}`."
)
raise ValueError(error_msg)
return value_tuple
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Args:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full", "causal"
stride: integer.
dilation: dilation rate, integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
assert padding in {"same", "valid", "full", "causal"}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding in ["same", "causal"]:
output_length = input_length
elif padding == "valid":
output_length = input_length - dilated_filter_size + 1
elif padding == "full":
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
def conv_input_length(output_length, filter_size, padding, stride):
"""Determines input length of a convolution given output length.
Args:
output_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The input length (integer).
"""
if output_length is None:
return None
assert padding in {"same", "valid", "full"}
if padding == "same":
pad = filter_size // 2
elif padding == "valid":
pad = 0
elif padding == "full":
pad = filter_size - 1
return (output_length - 1) * stride - 2 * pad + filter_size
def deconv_output_length(
input_length,
filter_size,
padding,
output_padding=None,
stride=0,
dilation=1,
):
"""Determines output length of a transposed convolution given input length.
Args:
input_length: Integer.
filter_size: Integer.
padding: one of `"same"`, `"valid"`, `"full"`.
output_padding: Integer, amount of padding along the output dimension.
Can be set to `None` in which case the output length is inferred.
stride: Integer.
dilation: Integer.
Returns:
The output length (integer).
"""
assert padding in {"same", "valid", "full"}
if input_length is None:
return None
# Get the dilated kernel size
filter_size = filter_size + (filter_size - 1) * (dilation - 1)
# Infer length if output padding is None, else compute the exact length
if output_padding is None:
if padding == "valid":
length = input_length * stride + max(filter_size - stride, 0)
elif padding == "full":
length = input_length * stride - (stride + filter_size - 2)
elif padding == "same":
length = input_length * stride
else:
if padding == "same":
pad = filter_size // 2
elif padding == "valid":
pad = 0
elif padding == "full":
pad = filter_size - 1
length = (
(input_length - 1) * stride + filter_size - 2 * pad + output_padding
)
return length
def normalize_data_format(value):
if value is None:
value = backend.image_data_format()
data_format = value.lower()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(
"The `data_format` argument must be one of "
f'"channels_first", "channels_last". Received: {value}'
)
return data_format
def normalize_padding(value):
if isinstance(value, (list, tuple)):
return value
padding = value.lower()
if padding not in {"valid", "same", "causal"}:
raise ValueError(
"The `padding` argument must be a list/tuple or one of "
'"valid", "same" (or "causal", only for `Conv1D). '
f"Received: {padding}"
)
return padding
def conv_kernel_mask(input_shape, kernel_shape, strides, padding):
"""Compute a mask representing the connectivity of a convolution operation.
Assume a convolution with given parameters is applied to an input having N
spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an
output with shape `(d_out1, ..., d_outN)`. This method returns a boolean
array of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True`
entries indicating pairs of input and output locations that are connected by
a weight.
Example:
>>> input_shape = (4,)
>>> kernel_shape = (2,)
>>> strides = (1,)
>>> padding = "valid"
>>> conv_kernel_mask(input_shape, kernel_shape, strides, padding)
array([[ True, False, False],
[ True, True, False],
[False, True, True],
[False, False, True]])
where rows and columns correspond to inputs and outputs respectively.
Args:
input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
input.
kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
receptive field.
strides: tuple of size N, strides along each spatial dimension.
padding: type of padding, string `"same"` or `"valid"`.
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
Returns:
A boolean 2N-D `np.ndarray` of shape
`(d_in1, ..., d_inN, d_out1, ..., d_outN)`, where `(d_out1, ..., d_outN)`
is the spatial shape of the output. `True` entries in the mask represent
pairs of input-output locations that are connected by a weight.
Raises:
ValueError: if `input_shape`, `kernel_shape` and `strides` don't have the
same number of dimensions.
NotImplementedError: if `padding` is not in {`"same"`, `"valid"`}.
"""
if padding not in {"same", "valid"}:
raise NotImplementedError(
f"Padding type {padding} not supported. "
'Only "valid" and "same" are implemented.'
)
in_dims = len(input_shape)
if isinstance(kernel_shape, int):
kernel_shape = (kernel_shape,) * in_dims
if isinstance(strides, int):
strides = (strides,) * in_dims
kernel_dims = len(kernel_shape)
stride_dims = len(strides)
if kernel_dims != in_dims or stride_dims != in_dims:
raise ValueError(
"Number of strides, input and kernel dimensions must all "
f"match. Received: stride_dims={stride_dims}, "
f"in_dims={in_dims}, kernel_dims={kernel_dims}"
)
output_shape = conv_output_shape(
input_shape, kernel_shape, strides, padding
)
mask_shape = input_shape + output_shape
mask = np.zeros(mask_shape, bool)
output_axes_ticks = [range(dim) for dim in output_shape]
for output_position in itertools.product(*output_axes_ticks):
input_axes_ticks = conv_connected_inputs(
input_shape, kernel_shape, output_position, strides, padding
)
for input_position in itertools.product(*input_axes_ticks):
mask[input_position + output_position] = True
return mask
def conv_kernel_idxs(
input_shape,
kernel_shape,
strides,
padding,
filters_in,
filters_out,
data_format,
):
"""Yields output-input tuples of indices in a CNN layer.
The generator iterates over all `(output_idx, input_idx)` tuples, where
`output_idx` is an integer index in a flattened tensor representing a single
output image of a convolutional layer that is connected (via the layer
weights) to the respective single input image at `input_idx`
Example:
>>> input_shape = (2, 2)
>>> kernel_shape = (2, 1)
>>> strides = (1, 1)
>>> padding = "valid"
>>> filters_in = 1
>>> filters_out = 1
>>> data_format = "channels_last"
>>> list(conv_kernel_idxs(input_shape, kernel_shape, strides, padding,
... filters_in, filters_out, data_format))
[(0, 0), (0, 2), (1, 1), (1, 3)]
Args:
input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
input.
kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
receptive field.
strides: tuple of size N, strides along each spatial dimension.
padding: type of padding, string `"same"` or `"valid"`.
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
      filters_in: `int`, number of filters in the input to the layer.
      filters_out: `int`, number of filters in the output of the layer.
data_format: string, "channels_first" or "channels_last".
Yields:
The next tuple `(output_idx, input_idx)`, where `output_idx` is an integer
index in a flattened tensor representing a single output image of a
convolutional layer that is connected (via the layer weights) to the
respective single input image at `input_idx`.
Raises:
ValueError: if `data_format` is neither `"channels_last"` nor
`"channels_first"`, or if number of strides, input, and kernel number
of dimensions do not match.
NotImplementedError: if `padding` is neither `"same"` nor `"valid"`.
"""
if padding not in ("same", "valid"):
raise NotImplementedError(
f"Padding type {padding} not supported. "
'Only "valid" and "same" are implemented.'
)
in_dims = len(input_shape)
if isinstance(kernel_shape, int):
kernel_shape = (kernel_shape,) * in_dims
if isinstance(strides, int):
strides = (strides,) * in_dims
kernel_dims = len(kernel_shape)
stride_dims = len(strides)
if kernel_dims != in_dims or stride_dims != in_dims:
raise ValueError(
"Number of strides, input and kernel dimensions must all "
f"match. Received: stride_dims={stride_dims}, "
f"in_dims={in_dims}, kernel_dims={kernel_dims}"
)
output_shape = conv_output_shape(
input_shape, kernel_shape, strides, padding
)
output_axes_ticks = [range(dim) for dim in output_shape]
if data_format == "channels_first":
concat_idxs = (
lambda spatial_idx, filter_idx: (filter_idx,) + spatial_idx
)
elif data_format == "channels_last":
concat_idxs = lambda spatial_idx, filter_idx: spatial_idx + (
filter_idx,
)
else:
raise ValueError(
f"Data format `{data_format}` not recognized."
'`data_format` must be "channels_first" or "channels_last".'
)
for output_position in itertools.product(*output_axes_ticks):
input_axes_ticks = conv_connected_inputs(
input_shape, kernel_shape, output_position, strides, padding
)
for input_position in itertools.product(*input_axes_ticks):
for f_in in range(filters_in):
for f_out in range(filters_out):
out_idx = np.ravel_multi_index(
multi_index=concat_idxs(output_position, f_out),
dims=concat_idxs(output_shape, filters_out),
)
in_idx = np.ravel_multi_index(
multi_index=concat_idxs(input_position, f_in),
dims=concat_idxs(input_shape, filters_in),
)
yield (out_idx, in_idx)
def conv_connected_inputs(
input_shape, kernel_shape, output_position, strides, padding
):
"""Return locations of the input connected to an output position.
Assume a convolution with given parameters is applied to an input having N
spatial dimensions with `input_shape = (d_in1, ..., d_inN)`. This method
returns N ranges specifying the input region that was convolved with the
kernel to produce the output at position
`output_position = (p_out1, ..., p_outN)`.
Example:
>>> input_shape = (4, 4)
>>> kernel_shape = (2, 1)
>>> output_position = (1, 1)
>>> strides = (1, 1)
>>> padding = "valid"
>>> conv_connected_inputs(input_shape, kernel_shape, output_position,
... strides, padding)
[range(1, 3), range(1, 2)]
Args:
input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
input.
kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
receptive field.
output_position: tuple of size N: `(p_out1, ..., p_outN)`, a single
position in the output of the convolution.
strides: tuple of size N, strides along each spatial dimension.
padding: type of padding, string `"same"` or `"valid"`.
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
Returns:
N ranges `[[p_in_left1, ..., p_in_right1], ...,
[p_in_leftN, ..., p_in_rightN]]` specifying the region in the
input connected to output_position.
"""
ranges = []
ndims = len(input_shape)
for d in range(ndims):
left_shift = int(kernel_shape[d] / 2)
right_shift = kernel_shape[d] - left_shift
center = output_position[d] * strides[d]
if padding == "valid":
center += left_shift
start = max(0, center - left_shift)
end = min(input_shape[d], center + right_shift)
ranges.append(range(start, end))
return ranges
def conv_output_shape(input_shape, kernel_shape, strides, padding):
"""Return the output shape of an N-D convolution.
Forces dimensions where input is empty (size 0) to remain empty.
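    Example (illustrative):
    >>> conv_output_shape((4, 4), (2, 2), strides=(1, 1), padding="valid")
    (3, 3)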
Args:
input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
input.
kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
receptive field.
strides: tuple of size N, strides along each spatial dimension.
padding: type of padding, string `"same"` or `"valid"`.
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
Returns:
tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output.
"""
dims = range(len(kernel_shape))
output_shape = [
conv_output_length(input_shape[d], kernel_shape[d], padding, strides[d])
for d in dims
]
output_shape = tuple(
[0 if input_shape[d] == 0 else output_shape[d] for d in dims]
)
return output_shape
def squeeze_batch_dims(inp, op, inner_rank):
"""Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.
Where `squeeze_batch` reshapes `inp` to shape
`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`
and `unsqueeze_batch` does the reverse reshape but on the output.
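    Example (illustrative; an identity `op` leaves values untouched, so only
    the reshape round-trip is exercised):
    >>> x = tf.zeros((2, 3, 4, 5))
    >>> squeeze_batch_dims(x, lambda t: t, inner_rank=2).shape
    TensorShape([2, 3, 4, 5])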
Args:
inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`
is length `inner_rank`.
      op: A callable that takes a single input tensor and returns a single
        output tensor.
inner_rank: A python integer.
Returns:
`unsqueeze_batch_op(squeeze_batch(inp))`.
"""
with tf.name_scope("squeeze_batch_dims"):
shape = inp.shape
inner_shape = shape[-inner_rank:]
if not inner_shape.is_fully_defined():
inner_shape = tf.shape(inp)[-inner_rank:]
batch_shape = shape[:-inner_rank]
if not batch_shape.is_fully_defined():
batch_shape = tf.shape(inp)[:-inner_rank]
if isinstance(inner_shape, tf.TensorShape):
inp_reshaped = tf.reshape(inp, [-1] + inner_shape.as_list())
else:
inp_reshaped = tf.reshape(
inp, tf.concat(([-1], inner_shape), axis=-1)
)
out_reshaped = op(inp_reshaped)
out_inner_shape = out_reshaped.shape[-inner_rank:]
if not out_inner_shape.is_fully_defined():
out_inner_shape = tf.shape(out_reshaped)[-inner_rank:]
out = tf.reshape(
out_reshaped, tf.concat((batch_shape, out_inner_shape), axis=-1)
)
out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
return out
| tf-keras/tf_keras/utils/conv_utils.py/0 | {
"file_path": "tf-keras/tf_keras/utils/conv_utils.py",
"repo_id": "tf-keras",
"token_count": 8460
} | 263 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to disk I/O."""
import os
import sys
import threading
from absl import logging
from tf_keras.utils import keras_logging
# isort: off
from tensorflow.python.util.tf_export import keras_export
INTERACTIVE_LOGGING = threading.local()
INTERACTIVE_LOGGING.enable = keras_logging.INTERACTIVE_LOGGING_DEFAULT
@keras_export("keras.utils.enable_interactive_logging")
def enable_interactive_logging():
"""Turn on interactive logging.
When interactive logging is enabled, TF-Keras displays logs via stdout.
This provides the best experience when using TF-Keras in an interactive
environment such as a shell or a notebook.
"""
INTERACTIVE_LOGGING.enable = True
@keras_export("keras.utils.disable_interactive_logging")
def disable_interactive_logging():
"""Turn off interactive logging.
When interactive logging is disabled, TF-Keras sends logs to `absl.logging`.
This is the best option when using TF-Keras in a non-interactive
way, such as running a training or inference job on a server.
"""
INTERACTIVE_LOGGING.enable = False
@keras_export("keras.utils.is_interactive_logging_enabled")
def is_interactive_logging_enabled():
"""Check if interactive logging is enabled.
To switch between writing logs to stdout and `absl.logging`, you may use
`keras.utils.enable_interactive_logging()` and
`keras.utils.disable_interactive_logging()`.
Returns:
Boolean (True if interactive logging is enabled and False otherwise).
"""
# Use `getattr` in case `INTERACTIVE_LOGGING`
# does not have the `enable` attribute.
return getattr(
INTERACTIVE_LOGGING, "enable", keras_logging.INTERACTIVE_LOGGING_DEFAULT
)
@logging.skip_log_prefix
def print_msg(message, line_break=True):
"""Print the message to absl logging or stdout."""
if is_interactive_logging_enabled():
if line_break:
sys.stdout.write(message + "\n")
else:
sys.stdout.write(message)
sys.stdout.flush()
else:
logging.info(message)
def path_to_string(path):
"""Convert `PathLike` objects to their string representation.
    If given a non-string `os.PathLike` object, converts it to its string
    representation. Any other object is returned unchanged, which allows
    e.g. passthrough of file objects through this function.
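    Example (illustrative; the rendered separator assumes a POSIX platform):
    >>> import pathlib
    >>> path_to_string(pathlib.Path("folder") / "file.txt")
    'folder/file.txt'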
Args:
path: `PathLike` object that represents a path
Returns:
      A string representation of `path` if it is an `os.PathLike` object;
      otherwise, `path` unchanged.
"""
if isinstance(path, os.PathLike):
return os.fspath(path)
return path
def ask_to_proceed_with_overwrite(filepath):
"""Produces a prompt asking about overwriting a file.
Args:
filepath: the path to the file to be overwritten.
Returns:
True if we can proceed with overwrite, False otherwise.
"""
overwrite = (
input(f"[WARNING] {filepath} already exists - overwrite? [y/n]")
.strip()
.lower()
)
while overwrite not in ("y", "n"):
overwrite = (
input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
)
if overwrite == "n":
return False
print_msg("[TIP] Next time specify overwrite=True!")
return True
| tf-keras/tf_keras/utils/io_utils.py/0 | {
"file_path": "tf-keras/tf_keras/utils/io_utils.py",
"repo_id": "tf-keras",
"token_count": 1360
} | 264 |
"""Utilities for collecting objects based on "is" comparison."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import weakref
# LINT.IfChange
class _ObjectIdentityWrapper:
"""Wraps an object, mapping __eq__ on wrapper to "is" on wrapped.
Since __eq__ is based on object identity, it's safe to also define __hash__
based on object ids. This lets us add unhashable types like trackable
_ListWrapper objects to object-identity collections.
"""
__slots__ = ["_wrapped", "__weakref__"]
def __init__(self, wrapped):
self._wrapped = wrapped
@property
def unwrapped(self):
return self._wrapped
def _assert_type(self, other):
if not isinstance(other, _ObjectIdentityWrapper):
raise TypeError(
"Cannot compare wrapped object with unwrapped object. "
"Expect the object to be `_ObjectIdentityWrapper`. "
f"Got: {other}"
)
def __lt__(self, other):
self._assert_type(other)
return id(self._wrapped) < id(other._wrapped)
def __gt__(self, other):
self._assert_type(other)
return id(self._wrapped) > id(other._wrapped)
def __eq__(self, other):
if other is None:
return False
self._assert_type(other)
return self._wrapped is other._wrapped
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# Wrapper id() is also fine for weakrefs. In fact, we rely on
# id(weakref.ref(a)) == id(weakref.ref(a)) and weakref.ref(a) is
# weakref.ref(a) in _WeakObjectIdentityWrapper.
return id(self._wrapped)
def __repr__(self):
return f"<{type(self).__name__} wrapping {self._wrapped!r}>"
class _WeakObjectIdentityWrapper(_ObjectIdentityWrapper):
__slots__ = ()
def __init__(self, wrapped):
super().__init__(weakref.ref(wrapped))
@property
def unwrapped(self):
return self._wrapped()
class Reference(_ObjectIdentityWrapper):
"""Reference that refers an object.
```python
x = [1]
y = [1]
x_ref1 = Reference(x)
x_ref2 = Reference(x)
y_ref2 = Reference(y)
print(x_ref1 == x_ref2)
==> True
print(x_ref1 == y)
==> False
```
"""
__slots__ = ()
# Disabling super class' unwrapped field.
unwrapped = property()
def deref(self):
"""Returns the referenced object.
```python
x_ref = Reference(x)
print(x is x_ref.deref())
==> True
```
"""
return self._wrapped
class ObjectIdentityDictionary(collections.abc.MutableMapping):
"""A mutable mapping data structure which compares using "is".
This is necessary because we have trackable objects (_ListWrapper) which
have behavior identical to built-in Python lists (including being unhashable
and comparing based on the equality of their contents by default).
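    Example (a minimal illustration):
    ```python
    x = [1]
    y = [1]  # Equal contents, but a distinct object.
    d = ObjectIdentityDictionary()
    d[x] = "for x"
    d[y] = "for y"  # Does not overwrite d[x]; keys compare with "is".
    assert len(d) == 2
    ```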
"""
__slots__ = ["_storage"]
def __init__(self):
self._storage = {}
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __getitem__(self, key):
return self._storage[self._wrap_key(key)]
def __setitem__(self, key, value):
self._storage[self._wrap_key(key)] = value
def __delitem__(self, key):
del self._storage[self._wrap_key(key)]
def __len__(self):
return len(self._storage)
def __iter__(self):
for key in self._storage:
yield key.unwrapped
def __repr__(self):
return f"ObjectIdentityDictionary({repr(self._storage)})"
class ObjectIdentityWeakKeyDictionary(ObjectIdentityDictionary):
"""Like weakref.WeakKeyDictionary, but compares objects with "is"."""
__slots__ = ["__weakref__"]
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len(list(self._storage))
def __iter__(self):
keys = self._storage.keys()
for key in keys:
unwrapped = key.unwrapped
if unwrapped is None:
del self[key]
else:
yield unwrapped
class ObjectIdentitySet(collections.abc.MutableSet):
"""Like the built-in set, but compares objects with "is"."""
__slots__ = ["_storage", "__weakref__"]
def __init__(self, *args):
self._storage = set(self._wrap_key(obj) for obj in list(*args))
@staticmethod
def _from_storage(storage):
result = ObjectIdentitySet()
result._storage = storage
return result
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __contains__(self, key):
return self._wrap_key(key) in self._storage
def discard(self, key):
self._storage.discard(self._wrap_key(key))
def add(self, key):
self._storage.add(self._wrap_key(key))
def update(self, items):
self._storage.update([self._wrap_key(item) for item in items])
def clear(self):
self._storage.clear()
def intersection(self, items):
return self._storage.intersection(
[self._wrap_key(item) for item in items]
)
def difference(self, items):
return ObjectIdentitySet._from_storage(
self._storage.difference([self._wrap_key(item) for item in items])
)
def __len__(self):
return len(self._storage)
def __iter__(self):
keys = list(self._storage)
for key in keys:
yield key.unwrapped
class ObjectIdentityWeakSet(ObjectIdentitySet):
"""Like weakref.WeakSet, but compares objects with "is"."""
__slots__ = ()
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len([_ for _ in self])
def __iter__(self):
keys = list(self._storage)
for key in keys:
unwrapped = key.unwrapped
if unwrapped is None:
self.discard(key)
else:
yield unwrapped
# LINT.ThenChange(//tensorflow/python/util/object_identity.py)
| tf-keras/tf_keras/utils/object_identity.py/0 | {
"file_path": "tf-keras/tf_keras/utils/object_identity.py",
"repo_id": "tf-keras",
"token_count": 2769
} | 265 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for traceback_utils."""
import tensorflow.compat.v2 as tf
from tf_keras import layers
from tf_keras.utils import traceback_utils
class TracebackUtilsTest(tf.test.TestCase):
def test_info_injection_basics(self):
def error_fn(arg_1, arg_2, keyword_arg_1=None, keyword_arg_2=None):
raise ValueError("Original message")
with self.assertRaises(ValueError) as e:
traceback_utils.inject_argument_info_in_traceback(
error_fn, "ObjName"
)(1, 2, keyword_arg_1=3, keyword_arg_2=4)
self.assertIn("Original message", str(e.exception))
self.assertIn(
"Exception encountered when calling ObjName", str(e.exception)
)
self.assertIn("Call arguments received", str(e.exception))
self.assertIn("arg_1=1", str(e.exception))
self.assertIn("arg_2=2", str(e.exception))
self.assertIn("keyword_arg_1=3", str(e.exception))
self.assertIn("keyword_arg_2=4", str(e.exception))
with self.assertRaises(ValueError) as e:
traceback_utils.inject_argument_info_in_traceback(error_fn)(
1, 2, keyword_arg_1=3, keyword_arg_2=4
)
self.assertIn(
"Exception encountered when calling error_fn", str(e.exception)
)
def test_info_injection_no_args(self):
def error_fn():
raise ValueError("Original message")
with self.assertRaises(ValueError) as e:
traceback_utils.inject_argument_info_in_traceback(error_fn)()
self.assertEqual(str(e.exception).count("Call arguments received"), 0)
def test_info_injection_unbindable(self):
def error_fn(arg_1, keyword_arg_1=1):
return arg_1 + keyword_arg_1
with self.assertRaises(TypeError) as e:
traceback_utils.inject_argument_info_in_traceback(error_fn)()
self.assertIn(
"missing 1 required positional argument", str(e.exception)
)
def test_info_injection_nested(self):
def inner_fn(arg_1):
raise ValueError("Original message")
def outer_fn(arg_1):
return inner_fn(arg_1)
with self.assertRaises(ValueError) as e:
traceback_utils.inject_argument_info_in_traceback(outer_fn)(1)
self.assertEqual(str(e.exception).count("Call arguments received"), 1)
def test_info_injection_tf_op_error(self):
def error_fn(arg_1, keyword_arg_1=1):
return arg_1 + keyword_arg_1 + tf.zeros((2, 3))
with self.assertRaises(tf.errors.InvalidArgumentError) as e:
traceback_utils.inject_argument_info_in_traceback(error_fn)(
tf.zeros((3, 3))
)
self.assertIn("Incompatible shapes", str(e.exception))
self.assertIn("Call arguments received", str(e.exception))
class LayerCallInfoInjectionTest(tf.test.TestCase):
def assert_info_injected(self, fn):
tf.debugging.enable_traceback_filtering()
try:
fn()
except Exception as e:
# Info should be injected exactly once.
self.assertEqual(str(e).count("Call arguments received"), 1)
def test_custom_layer_call_nested(self):
class InnerLayer(layers.Layer):
def call(self, inputs, training=False, mask=None):
return inputs + tf.zeros((3, 4))
class OuterLayer(layers.Layer):
def __init__(self):
super().__init__()
self.inner = InnerLayer()
def call(self, inputs, training=True):
return self.inner(inputs)
def fn():
layer = OuterLayer()
layer(tf.zeros((3, 5)), training=False)
self.assert_info_injected(fn)
def test_custom_layer_call_eager_dense_input(self):
class MyLayer(layers.Layer):
def call(self, inputs, training=False, mask=None):
return inputs + tf.zeros((3, 4))
def fn():
layer = MyLayer()
layer(tf.zeros((3, 5)), training=False)
self.assert_info_injected(fn)
def test_custom_layer_call_eager_sparse_input(self):
class MyLayer(layers.Layer):
def call(self, inputs, training=False, mask=None):
return inputs + tf.zeros((3, 4))
def fn():
layer = MyLayer()
layer(
tf.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[3, 5]
),
training=False,
)
self.assert_info_injected(fn)
def test_custom_layer_call_eager_ragged_input(self):
class MyLayer(layers.Layer):
def call(self, inputs, training=False, mask=None):
return inputs + tf.zeros((3, 4))
def fn():
layer = MyLayer()
layer(tf.ragged.constant([[0, 0, 0], [0, 0]]), training=False)
self.assert_info_injected(fn)
def test_custom_layer_call_symbolic(self):
class MyLayer(layers.Layer):
def call(self, inputs, training=False, mask=None):
return inputs + tf.zeros((3, 4))
def fn():
layer = MyLayer()
layer(layers.Input((3, 5)), training=False)
self.assert_info_injected(fn)
def test_custom_layer_call_unbindable(self):
class MyLayer(layers.Layer):
def __init__(self):
super().__init__()
self.input_spec = layers.InputSpec(shape=(3, 4))
def call(self, inputs, training=False, mask=None):
return inputs + tf.zeros((3, 4))
def fn():
layer = MyLayer()
layer(bad=True, arguments=True)
with self.assertRaisesRegex(
ValueError, "The first argument to `Layer.call` must always"
):
fn()
if __name__ == "__main__":
if tf.__internal__.tf2.enabled():
tf.test.main()
| tf-keras/tf_keras/utils/traceback_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/traceback_utils_test.py",
"repo_id": "tf-keras",
"token_count": 3006
} | 266 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from autokeras.engine import analyser
class TargetAnalyser(analyser.Analyser):
def __init__(self, name=None, **kwargs):
super().__init__(**kwargs)
self.name = name
class ClassificationAnalyser(TargetAnalyser):
def __init__(self, num_classes=None, multi_label=False, **kwargs):
super().__init__(**kwargs)
self.num_classes = num_classes
self.label_encoder = None
self.multi_label = multi_label
self.labels = set()
def update(self, data):
super().update(data)
if len(self.shape) > 2:
raise ValueError(
"Expect the target data for {name} to have shape "
"(batch_size, num_classes), "
"but got {shape}.".format(name=self.name, shape=self.shape)
)
if len(self.shape) > 1 and self.shape[1] > 1:
return
self.labels = self.labels.union(set(np.unique(data.numpy())))
def finalize(self):
# TODO: support raw string labels for multi-label.
self.labels = sorted(list(self.labels))
# Infer the num_classes if not specified.
if not self.num_classes:
if self.encoded:
# Single column with 0s and 1s.
if len(self.shape) == 1 or self.shape[1:] == [1]:
self.num_classes = 2
else:
self.num_classes = self.shape[1]
else:
self.num_classes = len(self.labels)
if self.num_classes < 2:
raise ValueError(
"Expect the target data for {name} to have "
"at least 2 classes, but got {num_classes}.".format(
name=self.name, num_classes=self.num_classes
)
)
# Check shape equals expected shape.
expected = self.get_expected_shape()
actual = self.shape[1:]
if len(actual) == 0:
actual = [1]
if self.encoded and actual != expected:
raise ValueError(
"Expect the target data for {name} to have "
"shape {expected}, but got {actual}.".format(
name=self.name, expected=expected, actual=self.shape[1:]
)
)
def get_expected_shape(self):
# Compute expected shape from num_classes.
if self.num_classes == 2 and not self.multi_label:
return [1]
return [self.num_classes]
@property
def encoded(self):
return self.encoded_for_sigmoid or self.encoded_for_softmax
@property
def encoded_for_sigmoid(self):
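        # The targets are already encoded for a sigmoid head when exactly
        # the two labels 0 and 1 were observed (a single binary column).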
if len(self.labels) != 2:
return False
return sorted(self.labels) == [0, 1]
@property
def encoded_for_softmax(self):
return len(self.shape) > 1 and self.shape[1] > 1
class RegressionAnalyser(TargetAnalyser):
def __init__(self, output_dim=None, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dim
def finalize(self):
if self.output_dim and (self.expected_dim() != self.output_dim):
raise ValueError(
"Expect the target data for {name} to have shape "
"(batch_size, {output_dim}), "
"but got {shape}.".format(
name=self.name, output_dim=self.output_dim, shape=self.shape
)
)
def expected_dim(self):
if len(self.shape) == 1:
return 1
return self.shape[1]
| autokeras/autokeras/analysers/output_analysers.py/0 | {
"file_path": "autokeras/autokeras/analysers/output_analysers.py",
"repo_id": "autokeras",
"token_count": 1879
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers.experimental import preprocessing
import autokeras as ak
from autokeras import keras_layers
from autokeras import test_utils
from autokeras.engine import tuner as tuner_module
from autokeras.tuners import greedy
def called_with_early_stopping(func):
callbacks = func.call_args_list[0][1]["callbacks"]
    return any(
        isinstance(callback, keras.callbacks.EarlyStopping)
        for callback in callbacks
    )
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_final_fit_with_specified_epochs(_, final_fit, super_search, tmp_path):
tuner = greedy.Greedy(
hypermodel=test_utils.build_graph(), directory=tmp_path
)
final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
tuner.search(x=None, epochs=10, validation_data=None)
assert final_fit.call_args_list[0][1]["epochs"] == 10
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_tuner_call_super_with_early_stopping(
_, final_fit, super_search, tmp_path
):
tuner = greedy.Greedy(
hypermodel=test_utils.build_graph(), directory=tmp_path
)
final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
tuner.search(x=None, epochs=10, validation_data=None)
assert called_with_early_stopping(super_search)
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch(
"autokeras.engine.tuner.AutoTuner.get_best_models",
return_value=[mock.Mock()],
)
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
@mock.patch("autokeras.pipeline.load_pipeline")
@mock.patch("keras_tuner.Oracle.get_best_trials", return_value=[mock.Mock()])
def test_no_final_fit_without_epochs_and_fov(
_, _1, _2, get_best_models, final_fit, super_search, tmp_path
):
tuner = greedy.Greedy(
hypermodel=test_utils.build_graph(), directory=tmp_path
)
tuner.search(x=None, epochs=None, validation_data=None)
final_fit.assert_not_called()
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch(
"autokeras.engine.tuner.AutoTuner._get_best_trial_epochs", return_value=2
)
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_final_fit_best_epochs_if_epoch_unspecified(
_, best_epochs, final_fit, super_search, tmp_path
):
tuner = greedy.Greedy(
hypermodel=test_utils.build_graph(), directory=tmp_path
)
final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
tuner.search(
x=mock.Mock(),
epochs=None,
validation_split=0.2,
validation_data=mock.Mock(),
)
assert final_fit.call_args_list[0][1]["epochs"] == 2
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch(
"autokeras.engine.tuner.AutoTuner._get_best_trial_epochs", return_value=2
)
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_super_with_1k_epochs_if_epoch_unspecified(
_, best_epochs, final_fit, super_search, tmp_path
):
tuner = greedy.Greedy(
hypermodel=test_utils.build_graph(), directory=tmp_path
)
final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
tuner.search(
x=mock.Mock(),
epochs=None,
validation_split=0.2,
validation_data=mock.Mock(),
)
assert super_search.call_args_list[0][1]["epochs"] == 1000
assert called_with_early_stopping(super_search)
@mock.patch("keras_tuner.engine.base_tuner.BaseTuner.search")
@mock.patch("autokeras.engine.tuner.AutoTuner.final_fit")
@mock.patch("autokeras.engine.tuner.AutoTuner._prepare_model_build")
def test_tuner_not_call_super_search_with_overwrite(
_, final_fit, super_search, tmp_path
):
tuner = greedy.Greedy(
hypermodel=test_utils.build_graph(), directory=tmp_path
)
final_fit.return_value = mock.Mock(), mock.Mock(), mock.Mock()
tuner.search(x=None, epochs=10, validation_data=None)
tuner.save()
super_search.reset_mock()
tuner = greedy.Greedy(
hypermodel=test_utils.build_graph(), directory=tmp_path
)
tuner.search(x=None, epochs=10, validation_data=None)
super_search.assert_not_called()
def test_tuner_does_not_crash_with_distribution_strategy(tmp_path):
tuner = greedy.Greedy(
hypermodel=test_utils.build_graph(),
directory=tmp_path,
distribution_strategy=tf.distribute.MirroredStrategy(),
)
tuner.hypermodel.build(tuner.oracle.hyperparameters)
def test_preprocessing_adapt_with_cat_to_int_and_norm():
x = np.array([["a", 5], ["b", 6]]).astype(str)
y = np.array([[1, 2], [3, 4]]).astype(str)
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(32)
model = keras.models.Sequential()
model.add(keras.Input(shape=(2,), dtype=tf.string))
model.add(keras_layers.MultiCategoryEncoding(["int", "none"]))
model.add(preprocessing.Normalization(axis=-1))
tuner_module.AutoTuner.adapt(model, dataset)
def test_preprocessing_adapt_with_text_vec():
class MockLayer(preprocessing.TextVectorization):
def adapt(self, *args, **kwargs):
super().adapt(*args, **kwargs)
self.is_called = True
x_train = test_utils.generate_text_data()
y_train = np.random.randint(0, 2, (100,))
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
layer1 = MockLayer(
max_tokens=5000, output_mode="int", output_sequence_length=40
)
model = keras.models.Sequential()
model.add(keras.Input(shape=(1,), dtype=tf.string))
model.add(layer1)
model.add(keras.layers.Embedding(50001, 10))
model.add(keras.layers.Dense(1))
tuner_module.AutoTuner.adapt(model, dataset)
assert layer1.is_called
def test_adapt_with_model_with_preprocessing_layer_only():
input_node = keras.Input(shape=(10,))
output_node = keras.layers.experimental.preprocessing.Normalization()(
input_node
)
model = keras.Model(input_node, output_node)
greedy.Greedy.adapt(
model,
tf.data.Dataset.from_tensor_slices(
(np.random.rand(100, 10), np.random.rand(100, 10))
).batch(32),
)
def test_build_block_in_blocks_with_same_name(tmp_path):
class Block1(ak.Block):
def build(self, hp, inputs):
hp.Boolean("a")
return keras.layers.Dense(3)(tf.nest.flatten(inputs)[0])
class Block2(ak.Block):
def build(self, hp, inputs):
hp.Boolean("b")
return Block1().build(hp, inputs)
inputs = ak.Input()
outputs = Block2()(inputs)
outputs = ak.RegressionHead()(outputs)
auto_model = ak.AutoModel(inputs, outputs, max_trials=5, directory=tmp_path)
auto_model.fit(np.random.rand(100, 5), np.random.rand(100, 1), epochs=1)
trials = [
trial for trial_id, trial in auto_model.tuner.oracle.trials.items()
]
for trial in trials:
assert len(trial.hyperparameters.values) == len(
trials[0].hyperparameters.values
)
| autokeras/autokeras/engine/tuner_test.py/0 | {
"file_path": "autokeras/autokeras/engine/tuner_test.py",
"repo_id": "autokeras",
"token_count": 3375
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from autokeras import analysers
from autokeras import keras_layers
from autokeras import preprocessors
from autokeras.engine import preprocessor
from autokeras.utils import data_utils
class LambdaPreprocessor(preprocessor.Preprocessor):
"""Build Preprocessor with a map function.
# Arguments
func: a callable function for the dataset to map.
"""
def __init__(self, func, **kwargs):
super().__init__(**kwargs)
self.func = func
def transform(self, dataset):
return dataset.map(self.func)
class AddOneDimension(LambdaPreprocessor):
"""Append one dimension of size one to the dataset shape."""
def __init__(self, **kwargs):
super().__init__(lambda x: tf.expand_dims(x, axis=-1), **kwargs)
class CastToInt32(preprocessor.Preprocessor):
"""Cast the dataset shape to tf.int32."""
def transform(self, dataset):
return dataset.map(lambda x: tf.cast(x, tf.int32))
class CastToString(preprocessor.Preprocessor):
"""Cast the dataset shape to tf.string."""
def transform(self, dataset):
return dataset.map(data_utils.cast_to_string)
class SlidingWindow(preprocessor.Preprocessor):
"""Apply sliding window to the dataset.
It groups the consecutive data items together. Therefore, it inserts one
    more dimension of size `lookback` into the dataset shape after the batch_size
    dimension. It also reduces the number of instances in the dataset by
(lookback - 1).
# Arguments
lookback: Int. The window size. The number of data items to group
together.
batch_size: Int. The batch size of the dataset.
"""
def __init__(self, lookback, batch_size, **kwargs):
super().__init__(**kwargs)
self.lookback = lookback
self.batch_size = batch_size
def transform(self, dataset):
dataset = dataset.unbatch()
dataset = dataset.window(self.lookback, shift=1, drop_remainder=True)
dataset = dataset.flat_map(
lambda x: x.batch(self.lookback, drop_remainder=True)
)
dataset = dataset.batch(self.batch_size)
return dataset
def get_config(self):
return {"lookback": self.lookback, "batch_size": self.batch_size}
class CategoricalToNumericalPreprocessor(preprocessor.Preprocessor):
"""Encode the categorical features to numerical features.
# Arguments
column_names: A list of strings specifying the names of the columns. The
length of the list should be equal to the number of columns of the
data. Defaults to None. If None, it will be obtained from the
header of the csv file or the pandas.DataFrame.
column_types: Dict. The keys are the column names. The values should
either be 'numerical' or 'categorical', indicating the type of that
column. Defaults to None. If not None, the column_names need to be
specified. If None, it will be inferred from the data.
"""
def __init__(self, column_names, column_types, **kwargs):
super().__init__(**kwargs)
self.column_names = column_names
self.column_types = column_types
encoding = []
for column_name in self.column_names:
column_type = self.column_types[column_name]
if column_type == analysers.CATEGORICAL:
# TODO: Search to use one-hot or int.
encoding.append(keras_layers.INT)
else:
encoding.append(keras_layers.NONE)
self.layer = keras_layers.MultiCategoryEncoding(encoding)
def fit(self, dataset):
self.layer.adapt(dataset)
def transform(self, dataset):
return dataset.map(self.layer)
def get_config(self):
vocab = []
for encoding_layer in self.layer.encoding_layers:
if encoding_layer is None:
vocab.append([])
else:
vocab.append(encoding_layer.get_vocabulary())
return {
"column_types": self.column_types,
"column_names": self.column_names,
"encoding_layer": preprocessors.serialize(self.layer),
"encoding_vocab": vocab,
}
@classmethod
def from_config(cls, config):
init_config = {
"column_types": config["column_types"],
"column_names": config["column_names"],
}
obj = cls(**init_config)
obj.layer = preprocessors.deserialize(config["encoding_layer"])
for encoding_layer, vocab in zip(
obj.layer.encoding_layers, config["encoding_vocab"]
):
if encoding_layer is not None:
encoding_layer.set_vocabulary(vocab)
return obj
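# Usage example (a minimal sketch, not part of the library; the column names,
# types, and `dataset` are illustrative):
#
#   prep = CategoricalToNumericalPreprocessor(
#       column_names=["color", "price"],
#       column_types={"color": "categorical", "price": "numerical"},
#   )
#   prep.fit(dataset)                  # adapts the encoding layer's vocabulary
#   dataset = prep.transform(dataset)  # maps categorical strings to integers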
| autokeras/autokeras/preprocessors/common.py/0 | {
"file_path": "autokeras/autokeras/preprocessors/common.py",
"repo_id": "autokeras",
"token_count": 2075
} | 2 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autokeras.prototype import base_block
from autokeras.prototype import graph_state
class Preprocessor(base_block.BaseBlock):
def _build_wrapper(self, hp, inputs, *args, **kwargs):
# Accept only Dataset.
# Return a Dataset.
# Register ConcretePreprocessor.
# How to register it to the right input node?
# How do we register the ConcretePreprocessor?
# Just get the return value of .build(). Register it to graph state
# together with the input and output dataset.
# What do we do when there are Preprocessors within Preprocessors?
        # We don't register all of them. Only register the outermost one.
# It is more convenient to just have this one preprocessor to do all the
# inside steps.
        # To judge whether the current one is the outermost one, we need to
        # use the "with" statement to create a scope when a HyperModel.build()
        # is called. Record a stack of the HyperModels whose .build() is
        # running. The lower a HyperModel sits in the stack, the more outer
        # it is.
concrete_preprocessor = super()._build_wrapper(
hp, inputs, *args, **kwargs
)
outputs = concrete_preprocessor.fit_transform(inputs)
state = graph_state.get_state()
if not any([isinstance(block, Preprocessor) for block in state.blocks]):
state.register_preprocessor(inputs, outputs, concrete_preprocessor)
return concrete_preprocessor
def build(self, hp, dataset):
# Should return a ConcretePreprocessor.
pass
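# A concrete subclass would return a ConcretePreprocessor from build(), along
# these lines (a minimal sketch; `Normalization` and `ConcreteNormalization`
# are illustrative names, not part of the prototype):
#
#   class Normalization(Preprocessor):
#       def build(self, hp, dataset):
#           # Tune or compute any state here, then wrap it in a concrete
#           # object that implements fit_transform().
#           return ConcreteNormalization()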
| autokeras/autokeras/prototype/preprocessor.py/0 | {
"file_path": "autokeras/autokeras/prototype/preprocessor.py",
"repo_id": "autokeras",
"token_count": 720
} | 3 |
import os
import pathlib
import shutil
import keras_autodoc
import tutobooks
PAGES = {
"image_classifier.md": [
"autokeras.ImageClassifier",
"autokeras.ImageClassifier.fit",
"autokeras.ImageClassifier.predict",
"autokeras.ImageClassifier.evaluate",
"autokeras.ImageClassifier.export_model",
],
"image_regressor.md": [
"autokeras.ImageRegressor",
"autokeras.ImageRegressor.fit",
"autokeras.ImageRegressor.predict",
"autokeras.ImageRegressor.evaluate",
"autokeras.ImageRegressor.export_model",
],
"text_classifier.md": [
"autokeras.TextClassifier",
"autokeras.TextClassifier.fit",
"autokeras.TextClassifier.predict",
"autokeras.TextClassifier.evaluate",
"autokeras.TextClassifier.export_model",
],
"text_regressor.md": [
"autokeras.TextRegressor",
"autokeras.TextRegressor.fit",
"autokeras.TextRegressor.predict",
"autokeras.TextRegressor.evaluate",
"autokeras.TextRegressor.export_model",
],
"structured_data_classifier.md": [
"autokeras.StructuredDataClassifier",
"autokeras.StructuredDataClassifier.fit",
"autokeras.StructuredDataClassifier.predict",
"autokeras.StructuredDataClassifier.evaluate",
"autokeras.StructuredDataClassifier.export_model",
],
"structured_data_regressor.md": [
"autokeras.StructuredDataRegressor",
"autokeras.StructuredDataRegressor.fit",
"autokeras.StructuredDataRegressor.predict",
"autokeras.StructuredDataRegressor.evaluate",
"autokeras.StructuredDataRegressor.export_model",
],
"auto_model.md": [
"autokeras.AutoModel",
"autokeras.AutoModel.fit",
"autokeras.AutoModel.predict",
"autokeras.AutoModel.evaluate",
"autokeras.AutoModel.export_model",
],
"base.md": [
"autokeras.Node",
"autokeras.Block",
"autokeras.Block.build",
"autokeras.Head",
],
"node.md": [
"autokeras.ImageInput",
"autokeras.Input",
"autokeras.StructuredDataInput",
"autokeras.TextInput",
],
"block.md": [
"autokeras.ConvBlock",
"autokeras.DenseBlock",
"autokeras.Embedding",
"autokeras.Merge",
"autokeras.ResNetBlock",
"autokeras.RNNBlock",
"autokeras.SpatialReduction",
"autokeras.TemporalReduction",
"autokeras.XceptionBlock",
"autokeras.ImageBlock",
"autokeras.StructuredDataBlock",
"autokeras.TextBlock",
"autokeras.ImageAugmentation",
"autokeras.Normalization",
"autokeras.TextToIntSequence",
"autokeras.TextToNgramVector",
"autokeras.CategoricalToNumerical",
"autokeras.ClassificationHead",
"autokeras.RegressionHead",
],
"utils.md": [
"autokeras.image_dataset_from_directory",
"autokeras.text_dataset_from_directory",
],
}
aliases_needed = [
"tensorflow.keras.callbacks.Callback",
"tensorflow.keras.losses.Loss",
"tensorflow.keras.metrics.Metric",
"tensorflow.data.Dataset",
]
ROOT = "http://autokeras.com/"
autokeras_dir = pathlib.Path(__file__).resolve().parents[1]
def py_to_nb_md(dest_dir):
dir_path = "py"
for file_path in os.listdir("py/"):
file_name = file_path
py_path = os.path.join(dir_path, file_path)
file_name_no_ext = os.path.splitext(file_name)[0]
ext = os.path.splitext(file_name)[1]
if ext != ".py":
continue
nb_path = os.path.join("ipynb", file_name_no_ext + ".ipynb")
md_path = os.path.join(dest_dir, "tutorial", file_name_no_ext + ".md")
tutobooks.py_to_md(py_path, nb_path, md_path, "templates/img")
github_repo_dir = "keras-team/autokeras/blob/master/docs/"
with open(md_path, "r") as md_file:
button_lines = [
":material-link: "
"[**View in Colab**](https://colab.research.google.com/github/"
+ github_repo_dir
+ "ipynb/"
+ file_name_no_ext
+ ".ipynb"
+ ") "
# + '<span class="k-dot">•</span>'
+ ":octicons-mark-github-16: "
"[**GitHub source**](https://github.com/"
+ github_repo_dir
+ "py/"
+ file_name_no_ext
+ ".py)",
"\n",
]
md_content = "".join(button_lines) + "\n" + md_file.read()
with open(md_path, "w") as md_file:
md_file.write(md_content)
def generate(dest_dir):
template_dir = autokeras_dir / "docs" / "templates"
doc_generator = keras_autodoc.DocumentationGenerator(
PAGES,
"https://github.com/keras-team/autokeras/blob/master",
template_dir,
autokeras_dir / "examples",
extra_aliases=aliases_needed,
)
doc_generator.generate(dest_dir)
readme = (autokeras_dir / "README.md").read_text()
index = (template_dir / "index.md").read_text()
index = index.replace("{{autogenerated}}", readme[readme.find("##") :])
(dest_dir / "index.md").write_text(index, encoding="utf-8")
shutil.copyfile(
autokeras_dir / ".github" / "CONTRIBUTING.md",
dest_dir / "contributing.md",
)
py_to_nb_md(dest_dir)
if __name__ == "__main__":
generate(autokeras_dir / "docs" / "sources")
| autokeras/docs/autogen.py/0 | {
"file_path": "autokeras/docs/autogen.py",
"repo_id": "autokeras",
"token_count": 2717
} | 4 |
import inspect
from inspect import isclass
from inspect import isfunction
from inspect import isroutine
from typing import List
from .utils import import_object
def get_classes(module, exclude: List[str] = None, return_strings: bool = True):
"""Get all the classes of a module.
# Arguments
module: The module to fetch the classes from. If it's a string, it
should be in the dotted format. `'keras.layers'` for example.
exclude: The names which will be excluded from the returned list. For
example, `get_classes('keras.layers', exclude=['Dense', 'Conv2D'])`.
return_strings: If False, the actual classes will be returned. Note that
if you use aliases when building your docs, you should use strings.
This is because the computed signature uses
`__name__` and `__module__` if you don't provide a string as input.
# Returns
A list of strings or a list of classes.
"""
return _get_all_module_element(module, exclude, return_strings, True)
def get_functions(
module, exclude: List[str] = None, return_strings: bool = True
):
"""Get all the functions of a module.
# Arguments
module: The module to fetch the functions from. If it's a string, it
should be in the dotted format. `'keras.backend'` for example.
exclude: The names which will be excluded from the returned list. For
example, `get_functions('keras.backend', exclude=['max'])`.
return_strings: If False, the actual functions will be returned. Note
that if you use aliases when building your docs, you should use
strings. This is because the computed signature uses `__name__` and
`__module__` if you don't provide a string as input.
# Returns
A list of strings or a list of functions.
"""
return _get_all_module_element(module, exclude, return_strings, False)
def get_methods(cls, exclude=None, return_strings=True):
"""Get all the method of a class.
# Arguments
cls: The class to fetch the methods from. If it's a
string, it should be in the dotted format. `'keras.layers.Dense'`
for example.
exclude: The names which will be excluded from the returned list. For
example, `get_methods('keras.Model', exclude=['save'])`.
return_strings: If False, the actual methods will be returned. Note that
if you use aliases when building your docs, you should use strings.
This is because the computed signature uses
`__name__` and `__module__` if you don't provide a string as input.
# Returns
A list of strings or a list of methods.
"""
if isinstance(cls, str):
cls_str = cls
cls = import_object(cls)
else:
cls_str = f"{cls.__module__}.{cls.__name__}"
exclude = exclude or []
methods = []
for _, method in inspect.getmembers(cls, predicate=isroutine):
if method.__name__[0] == "_" or method.__name__ in exclude:
continue
if return_strings:
methods.append(f"{cls_str}.{method.__name__}")
else:
methods.append(method)
return methods
def _get_all_module_element(module, exclude, return_strings, class_):
if isinstance(module, str):
module = import_object(module)
exclude = exclude or []
module_data = []
for name in dir(module):
module_member = getattr(module, name)
if not (isfunction(module_member) or isclass(module_member)):
continue
if name[0] == "_" or name in exclude:
continue
if module.__name__ not in module_member.__module__:
continue
if module_member in module_data:
continue
if class_ and not isclass(module_member):
continue
if not class_ and not isfunction(module_member):
continue
if return_strings:
module_data.append(f"{module.__name__}.{name}")
else:
module_data.append(module_member)
module_data.sort(key=id)
return module_data
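# Usage examples (a minimal sketch; the module and member names are
# illustrative):
#
#   get_classes("keras.layers", exclude=["Dense"])        # -> dotted strings
#   get_functions("keras.backend", return_strings=False)  # -> function objects
#   get_methods("keras.Model", exclude=["save"])          # -> dotted strings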
| autokeras/docs/keras_autodoc/gathering_members.py/0 | {
"file_path": "autokeras/docs/keras_autodoc/gathering_members.py",
"repo_id": "autokeras",
"token_count": 1610
} | 5 |
"""Keras tutobooks implementation.
A tutobook is a tutorial available simultaneously as a notebook,
as a Python script, and as a nicely rendered webpage.
Its source-of-truth (for manual edition and version control) is
its Python script form, but you can also create one by starting
from a notebook and converting it with the command `nb2py`.
Text cells are stored in markdown-formatted comment blocks.
the first line (starting with " * 3) may optionally contain a special
annotation, one of:
- invisible: do not render this block.
- shell: execute this block while prefixing each line with `!`.
The script form should start with a header with the following fields:
Title:
Author: (could be `Authors`: as well, and may contain markdown links)
Date created: (date in yyyy/mm/dd format)
Last modified: (date in yyyy/mm/dd format)
Description: (one-line text description)
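For example, a filled-in header might look like this (the values below are
illustrative):

```
Title: Training a small convnet on MNIST
Author: Jane Doe
Date created: 2020/04/21
Last modified: 2020/04/21
Description: A simple convnet trained on the MNIST dataset.
```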
## How to add a new code example to Keras.io
You would typically start from an existing notebook.
Save it to disk (let's say as `path_to_your_nb.ipynb`).
`cd` to the `keras-io/scripts/` directory.
Then run:
```
python tutobooks nb2py path_to_your_nb.ipynb ../examples/your_example.py
```
This will create the file `examples/your_example.py`. Open it,
fill in the headers, and generally edit it so that it looks nice.
NOTE THAT THE CONVERSION SCRIPT MAY MAKE MISTAKES IN ITS ATTEMPTS
TO SHORTEN LINES. MAKE SURE TO PROOFREAD THE GENERATED .py IN FULL.
Or alternatively, make sure to keep your lines reasonably-sized (<90 char)
to start with, so that the script won't have to shorten them.
You can then preview what it looks like when converted back again
to ipynb by running:
```
python tutobooks py2nb ../examples/your_example.py preview.ipynb
```
NOTE THAT THIS COMMAND WILL ERROR OUT IF ANY CELLS TAKES TOO LONG
TO EXECUTE. In that case, make your code lighter/faster.
Remember that examples are meant to demonstrate workflows, not
train state-of-the-art models. They should
stay very lightweight.
Open the generated `preview.ipynb` and make sure it looks like what
you expect. If not, keep editing `your_example.py` until it does.
Finally, submit a PR adding `examples/your_example.py`.
"""
import json
import os
import random
import shutil
import sys
from pathlib import Path
TIMEOUT = 60 * 60
MAX_LOC = 300
def nb_to_py(nb_path, py_path):
f = open(nb_path)
content = f.read()
f.close()
nb = json.loads(content)
py = '"""\n'
py += "Title: FILLME\n"
py += "Author: FILLME\n"
py += "Date created: FILLME\n"
py += "Last modified: FILLME\n"
py += "Description: FILLME\n"
py += '"""\n'
for cell in nb["cells"]:
if cell["cell_type"] == "code":
# Is it a shell cell?
if (
cell["source"]
and cell["source"][0]
and cell["source"][0][0] == "!"
):
# It's a shell cell
py += '"""shell\n'
py += "".join(cell["source"]) + "\n"
py += '"""\n\n'
else:
# It's a Python cell
py += "".join(cell["source"]) + "\n\n"
elif cell["cell_type"] == "markdown":
py += '"""\n'
py += "".join(cell["source"]) + "\n"
py += '"""\n\n'
# Save file
f = open(py_path, "w")
f.write(py)
f.close()
# Format file with Black
os.system("black " + py_path)
# Shorten lines
py = open(py_path).read()
try:
py = _shorten_lines(py)
finally:
f = open(py_path, "w")
f.write(py)
f.close()
def py_to_nb(py_path, nb_path, fill_outputs=True):
f = open(py_path)
py = f.read()
f.close()
# validate(py)
# header, _, py, tag = _get_next_script_element(py)
# attributes = _parse_header(header)
cells = []
loc = 0
# Write first header cell
# header_cell = {
# "cell_type": "markdown",
# "source": [
# "# " + attributes["title"] + "\n",
# "\n",
# "**" + attributes["auth_field"] + ":** " + attributes["author"] +"<br>\n",
# "**Date created:** " + attributes["date_created"] + "<br>\n",
# "**Last modified:** " + attributes["last_modified"] + "<br>\n",
# "**Description:** " + attributes["description"],
# ],
# "metadata": {"colab_type": "text"},
# }
# cells.append(header_cell)
while py:
e, cell_type, py, tag = _get_next_script_element(py)
lines = e.split("\n")
if all(line == "" for line in lines):
continue
if lines and not lines[0]:
lines = lines[1:]
source = [line + "\n" for line in lines]
# Drop last newline char
if source and not source[-1].strip():
source = source[:-1]
if tag == "shell":
source = ["!" + line for line in source]
cell_type = "code"
if tag != "invisible" and source:
cell = {"cell_type": cell_type, "source": source}
if cell_type == "code":
cell["outputs"] = []
cell["metadata"] = {"colab_type": "code"}
cell["execution_count"] = 0
loc += _count_locs(source)
else:
cell["metadata"] = {"colab_type": "text"}
cells.append(cell)
notebook = {}
for key in NB_BASE.keys():
notebook[key] = NB_BASE[key]
notebook["metadata"]["colab"]["name"] = str(py_path).split("/")[-1][:-3]
notebook["cells"] = cells
if loc > MAX_LOC:
raise ValueError(
"Found %d lines of code, but expected fewer than %d"
% (loc, MAX_LOC)
)
f = open(nb_path, "w")
f.write(json.dumps(notebook, indent=1, sort_keys=True))
f.close()
if fill_outputs:
print("Generating ipynb")
parent_dir = Path(nb_path).parent
current_files = os.listdir(parent_dir)
try:
os.system(
"jupyter nbconvert --to notebook --execute --debug "
+ str(nb_path)
+ " --inplace"
+ " --ExecutePreprocessor.timeout="
+ str(TIMEOUT)
)
finally:
new_files = os.listdir(parent_dir)
for fname in new_files:
if fname not in current_files:
fpath = parent_dir / fname
if os.path.isdir(fpath):
print("Removing created folder:", fname)
shutil.rmtree(fpath)
else:
print("Removing created file:", fname)
os.remove(fpath)
def nb_to_md(nb_path, md_path, img_dir, working_dir=None):
img_exts = ("png", "jpg", "jpeg")
# Assumes an already populated notebook.
assert str(md_path).endswith(".md")
current_dir = os.getcwd()
original_img_dir = str(img_dir)
if original_img_dir.endswith("/"):
original_img_dir = original_img_dir[:-1]
img_dir = os.path.abspath(img_dir)
nb_path = os.path.abspath(nb_path)
nb_fname = str(nb_path).split("/")[-1]
del_working_dir = False
if working_dir is None:
del_working_dir = True
working_dir = "tmp_" + str(random.randint(1e6, 1e7))
if not os.path.exists(working_dir):
os.makedirs(working_dir)
print("Using working_dir:", working_dir)
os.chdir(working_dir)
shutil.copyfile(nb_path, nb_fname)
md_name = str(md_path).split("/")[-1][:-3]
target_md = md_name + ".md"
img_dir = Path(img_dir) / md_name
if not os.path.exists(img_dir):
os.makedirs(img_dir)
os.system(
# "jupyter nbconvert --to markdown --execute --debug "
"jupyter nbconvert --to markdown "
+ nb_fname
+ " --output "
+ target_md
# + " --ExecutePreprocessor.timeout="
# + str(TIMEOUT)
)
tmp_img_dir = md_name + "_files"
if os.path.exists(tmp_img_dir):
for fname in os.listdir(tmp_img_dir):
if fname.endswith(img_exts):
src = Path(tmp_img_dir) / fname
target = Path(img_dir) / fname
print("copy", src, "to", target)
shutil.copyfile(src, target)
os.chdir(current_dir)
md_content = open(Path(working_dir) / (md_name + ".md")).read()
for ext in img_exts:
        md_content = md_content.replace(
            "![" + ext + "](" + md_name + "_files/",
            "![" + ext + "](" + original_img_dir + "/" + md_name + "/",
        )
md_content = _make_output_code_blocks(md_content)
open(md_path, "w").write(md_content)
if del_working_dir:
shutil.rmtree(working_dir)
def py_to_md(py_path, nb_path, md_path, img_dir, working_dir=None):
py_to_nb(py_path, nb_path, fill_outputs=False)
nb_to_md(nb_path, md_path, img_dir, working_dir=working_dir)
def validate(py):
"""Validate the format of a tutobook script.
Specifically:
- validate headers
- validate style with black
"""
lines = py.split("\n")
if not lines[0].startswith('"""'):
raise ValueError('Missing `"""`-fenced header at top of script.')
if not lines[1].startswith("Title: "):
raise ValueError("Missing `Title:` field.")
if not lines[2].startswith("Author: ") and not lines[2].startswith(
"Authors: "
):
raise ValueError("Missing `Author:` field.")
if not lines[3].startswith("Date created: "):
raise ValueError("Missing `Date created:` field.")
if not lines[4].startswith("Last modified: "):
raise ValueError("Missing `Last modified:` field.")
if not lines[5].startswith("Description: "):
raise ValueError("Missing `Description:` field.")
description = lines[5][len("Description: ") :]
if not description:
raise ValueError("Missing `Description:` field content.")
if not description[0] == description[0].upper():
raise ValueError("Description field content must be capitalized.")
if not description[-1] == ".":
raise ValueError("Description field content must end with a period.")
if len(description) > 100:
raise ValueError(
"Description field content must be less than 100 chars."
)
for i, line in enumerate(lines):
if line.startswith('"""') and line.endswith('"""') and len(line) > 3:
raise ValueError(
'Do not use single line `"""`-fenced comments. '
"Encountered at line %d" % (i,)
)
for i, line in enumerate(lines):
if line.endswith(" "):
raise ValueError(
"Found trailing space on line %d; line: `%s`" % (i, line)
)
# Validate style with black
fpath = "/tmp/" + str(random.randint(1e6, 1e7)) + ".py"
f = open(fpath, "w")
pre_formatting = "\n".join(lines)
f.write(pre_formatting)
f.close()
os.system("black " + fpath)
f = open(fpath)
formatted = f.read()
f.close()
os.remove(fpath)
if formatted != pre_formatting:
raise ValueError(
"You python file did not follow `black` conventions. "
"Run `black your_file.py` to autoformat it."
)
def _count_locs(lines):
loc = 0
string_open = False
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if not string_open:
if not line.startswith('"""'):
loc += 1
else:
if not line.endswith('"""'):
string_open = True
else:
if line.startswith('"""'):
string_open = False
return loc
def _shorten_lines(py):
max_len = 90
lines = []
for line in py.split("\n"):
if len(line) <= max_len:
lines.append(line)
continue
i = 0
while len(line) > max_len:
line = line.lstrip()
if " " not in line[1:]:
lines.append(line)
break
else:
short_line = line[:max_len]
line = line[max_len:]
if " " in short_line:
reversed_short_line = short_line[::-1]
index = reversed_short_line.find(" ") + 1
line = short_line[-index:] + line
short_line = short_line[:-index]
lines.append(short_line.lstrip())
i += 1
if i > 10:
                    raise RuntimeError(
                        "Failed to shorten line after 10 attempts: " + line
                    )
lines.append(line.lstrip())
return "\n".join(lines)
def _get_next_script_element(py):
lines = py.split("\n")
assert lines
elines = []
i = 0
tag = None
if lines[0].startswith('"""'):
assert len(lines) >= 2
etype = "markdown"
if len(lines[0]) > 3:
tag = lines[0][3:]
if tag not in ["shell", "invisible"]:
raise ValueError("Found unknown cell tag:", tag)
lines = lines[1:]
else:
etype = "code"
for i, line in enumerate(lines):
if line.startswith('"""'):
break
else:
elines.append(line)
if etype == "markdown":
py = "\n".join(lines[i + 1 :])
else:
py = "\n".join(lines[i:])
e = "\n".join(elines)
return e, etype, py, tag
def _parse_header(header):
lines = header.split("\n")
title = lines[0][len("Title: ") :]
author_line = lines[1]
if author_line.startswith("Authors"):
author = author_line[len("Authors: ") :]
auth_field = "Authors"
else:
author = author_line[len("Author: ") :]
auth_field = "Author"
date_created = lines[2][len("Date created: ") :]
last_modified = lines[3][len("Last modified: ") :]
description = lines[4][len("Description: ") :]
return {
"title": title,
"author": author,
"auth_field": auth_field,
"date_created": date_created,
"last_modified": last_modified,
"description": description,
}
def _make_output_code_blocks(md):
lines = md.split("\n")
output_lines = []
final_lines = []
is_inside_backticks = False
def is_output_line(line, prev_line, output_lines):
        if line.startswith("    ") and len(line) >= 5:
            if output_lines or (prev_line.strip() == "" and line.strip()):
return True
return False
def flush(output_lines, final_lines):
final_lines.append('<div class="k-default-codeblock">')
final_lines.append("```")
if len(output_lines) == 1:
line = output_lines[0]
final_lines.append(line[4:])
else:
for line in output_lines:
final_lines.append(line[4:])
final_lines.append("```")
final_lines.append("</div>")
for i, line in enumerate(lines):
if line.startswith("```"):
is_inside_backticks = not is_inside_backticks
final_lines.append(line)
continue
if is_inside_backticks:
final_lines.append(line)
continue
        if i > 0 and is_output_line(line, lines[i - 1], output_lines):
output_lines.append(line)
elif not line:
if output_lines:
if output_lines[-1]:
output_lines.append(line)
else:
final_lines.append(line)
else:
if output_lines:
flush(output_lines, final_lines)
output_lines = []
final_lines.append(line)
if output_lines:
flush(output_lines, final_lines)
return "\n".join(final_lines)
NB_BASE = {
"metadata": {
"colab": {
"collapsed_sections": [],
"name": "", # FILL ME
"private_outputs": False,
"provenance": [],
"toc_visible": True,
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3",
},
"language_info": {
"codemirror_mode": {"name": "ipython", "version": 3},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0",
},
},
"nbformat": 4,
"nbformat_minor": 0,
}
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd not in {"nb2py", "py2nb"}:
raise ValueError(
"Specify a command: either "
"`nb2py source_filename.ipynb target_filename.py` or "
"`py2nb source_filename.py target_file name.ipynb"
)
if len(sys.argv) < 4:
raise ValueError("Specify a source filename and a target filename")
source = sys.argv[2]
target = sys.argv[3]
if cmd == "py2nb":
if not source.endswith(".py"):
raise ValueError(
"The source filename should be a Python file. Got:", source
)
if not target.endswith(".ipynb"):
raise ValueError(
"The target filename should be a notebook file. Got:", target
)
py_to_nb(source, target)
if cmd == "nb2py":
if not source.endswith(".ipynb"):
raise ValueError(
"The source filename should be a notebook file. Got:", source
)
if not target.endswith(".py"):
raise ValueError(
"The target filename should be a Python file. Got:", target
)
nb_to_py(source, target)
| autokeras/docs/tutobooks.py/0 | {
"file_path": "autokeras/docs/tutobooks.py",
"repo_id": "autokeras",
"token_count": 8349
} | 6 |
# Keras API proposal "Request For Comment" (RFC) docs
This folder contains approved API proposals. To propose a new API to be considered for review, you can open a Pull Request in this repository to add a new RFC `.md` doc.
## Process
The process for writing and submitting design proposals is same as the [TensorFlow RFC process](https://github.com/tensorflow/community/blob/master/governance/TF-RFCs.md).
- Start from [this template](https://github.com/keras-team/governance/blob/master/rfcs/yyyymmdd-rfc-template.md).
- Fill in the content. Note that you will need to insert code examples.
- Provide enough context information for anyone to understand what's going on.
- Provide a solid argument for why the feature is needed.
- Include a code example of the **end-to-end workflow** you have in mind.
- Open a Pull Request in the [Keras API proposals folder in this repository](https://github.com/keras-team/governance/tree/master/rfcs).
- Send the Pull Request link to `[email protected]` with a subject that starts with `[API DESIGN REVIEW]` (all caps) so that we notice it.
- Wait for comments, and answer them as they come. Edit the proposal as necessary.
- The proposal will finally be approved or rejected during a meeting of the Keras SIG chairs. Once approved, you can send out Pull Requests to implement the API changes or ask others to write Pull Requests (targeting `tf.keras` and `keras-team/keras`).
Note that:
- Anyone is free to send out API proposals.
- Anyone is free to comment on API proposals or ask questions.
- Anyone is free to attend design review meetings as an observer.
- Participation in design review meetings is restricted to Keras SIG chairs.
- Design review meeting notes will be posted publicly after each meeting.
## Template
Use [this template](https://github.com/keras-team/governance/blob/master/rfcs/yyyymmdd-rfc-template.md) to draft an RFC.
| governance/rfcs/README.md/0 | {
"file_path": "governance/rfcs/README.md",
"repo_id": "governance",
"token_count": 520
} | 7 |
"""ResNetV2 models for Keras.
# Reference paper
- [Identity Mappings in Deep Residual Networks]
(https://arxiv.org/abs/1603.05027) (ECCV 2016)
# Reference implementations
- [TensorNets]
(https://github.com/taehoonlee/tensornets/blob/master/tensornets/resnets.py)
- [Torch ResNetV2]
(https://github.com/facebook/fb.resnet.torch/blob/master/models/preresnet.lua)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .resnet_common import ResNet50V2
from .resnet_common import ResNet101V2
from .resnet_common import ResNet152V2
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
data_format: data format of the image tensor.
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
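# Usage example (a minimal sketch; the input array is illustrative):
#
#   import numpy as np
#   x = np.random.uniform(0, 255, (1, 224, 224, 3))
#   x = preprocess_input(x)  # 'tf' mode scales pixels to the [-1, 1] range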
| keras-applications/keras_applications/resnet_v2.py/0 | {
"file_path": "keras-applications/keras_applications/resnet_v2.py",
"repo_id": "keras-applications",
"token_count": 373
} | 8 |
{% extends "base.html" %}
{% block content %}
<h1 id="404-page-not-found">404</h1>
<p><strong>Page not found</strong></p>
{% endblock %}
| keras-contrib/contrib_docs/theme/404.html/0 | {
"file_path": "keras-contrib/contrib_docs/theme/404.html",
"repo_id": "keras-contrib",
"token_count": 62
} | 9 |
"""ResNet v1, v2, and segmentation models for Keras.
# Reference
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
- [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027)
Reference material for extended functionality:
- [ResNeXt](https://arxiv.org/abs/1611.05431) for Tiny ImageNet support.
- [Dilated Residual Networks](https://arxiv.org/pdf/1705.09914) for segmentation support
- [Deep Residual Learning for Instrument Segmentation in
Robotic Surgery](https://arxiv.org/abs/1703.08580)
for segmentation support.
Implementation Adapted from: github.com/raghakot/keras-resnet
""" # pylint: disable=E501
from __future__ import division
import six
from keras.models import Model
from keras.layers import Input
from keras.layers import Activation
from keras.layers import Reshape
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import Dropout
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
from keras_applications.imagenet_utils import _obtain_input_shape
def _bn_relu(x, bn_name=None, relu_name=None):
"""Helper to build a BN -> relu block
"""
norm = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name)(x)
return Activation("relu", name=relu_name)(norm)
def _conv_bn_relu(**conv_params):
"""Helper to build a conv -> BN -> relu residual unit activation function.
This is the original ResNet v1 scheme in https://arxiv.org/abs/1512.03385
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
x = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
dilation_rate=dilation_rate,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
name=conv_name)(x)
return _bn_relu(x, bn_name=bn_name, relu_name=relu_name)
return f
def _bn_relu_conv(**conv_params):
"""Helper to build a BN -> relu -> conv residual unit with full pre-activation
function. This is the ResNet v2 scheme proposed in
http://arxiv.org/pdf/1603.05027v2.pdf
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
activation = _bn_relu(x, bn_name=bn_name, relu_name=relu_name)
return Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
dilation_rate=dilation_rate,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
name=conv_name)(activation)
return f
def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None):
"""Adds a shortcut between input and residual block and merges them with "sum"
"""
# Expand channels of shortcut to match residual.
# Stride appropriately to match residual (width, height)
# Should be int if network architecture is correctly configured.
input_shape = K.int_shape(input_feature)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]
shortcut = input_feature
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
print('reshaping via a convolution...')
if conv_name_base is not None:
conv_name_base = conv_name_base + '1'
shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001),
name=conv_name_base)(input_feature)
if bn_name_base is not None:
bn_name_base = bn_name_base + '1'
shortcut = BatchNormalization(axis=CHANNEL_AXIS,
name=bn_name_base)(shortcut)
return add([shortcut, residual])
def _residual_block(block_function, filters, blocks, stage,
transition_strides=None, transition_dilation_rates=None,
dilation_rates=None, is_first_layer=False, dropout=None,
residual_unit=_bn_relu_conv):
"""Builds a residual block with repeating bottleneck blocks.
stage: integer, current stage label, used for generating layer names
blocks: number of blocks 'a','b'..., current block label, used for generating
layer names
transition_strides: a list of tuples for the strides of each transition
transition_dilation_rates: a list of tuples for the dilation rate of each
transition
"""
if transition_dilation_rates is None:
transition_dilation_rates = [(1, 1)] * blocks
if transition_strides is None:
transition_strides = [(1, 1)] * blocks
if dilation_rates is None:
dilation_rates = [1] * blocks
def f(x):
for i in range(blocks):
is_first_block = is_first_layer and i == 0
x = block_function(filters=filters, stage=stage, block=i,
transition_strides=transition_strides[i],
dilation_rate=dilation_rates[i],
is_first_block_of_first_layer=is_first_block,
dropout=dropout,
residual_unit=residual_unit)(x)
return x
return f
def _block_name_base(stage, block):
"""Get the convolution name base and batch normalization name base defined by
stage and block.
    If there are fewer than 27 blocks, they will be labeled 'a', 'b', 'c', ...
    to match the paper and Keras; beyond that, they will simply be numbered.
"""
if block < 27:
block = '%c' % (block + 97) # 97 is the ascii number for lowercase 'a'
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
return conv_name_base, bn_name_base
def basic_block(filters, stage, block, transition_strides=(1, 1),
dilation_rate=(1, 1), is_first_block_of_first_layer=False, dropout=None,
residual_unit=_bn_relu_conv):
"""Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
"""
def f(input_features):
conv_name_base, bn_name_base = _block_name_base(stage, block)
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
x = Conv2D(filters=filters, kernel_size=(3, 3),
strides=transition_strides,
dilation_rate=dilation_rate,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4),
name=conv_name_base + '2a')(input_features)
else:
x = residual_unit(filters=filters, kernel_size=(3, 3),
strides=transition_strides,
dilation_rate=dilation_rate,
conv_name_base=conv_name_base + '2a',
bn_name_base=bn_name_base + '2a')(input_features)
if dropout is not None:
x = Dropout(dropout)(x)
x = residual_unit(filters=filters, kernel_size=(3, 3),
conv_name_base=conv_name_base + '2b',
bn_name_base=bn_name_base + '2b')(x)
return _shortcut(input_features, x)
return f
def bottleneck(filters, stage, block, transition_strides=(1, 1),
dilation_rate=(1, 1), is_first_block_of_first_layer=False, dropout=None,
residual_unit=_bn_relu_conv):
"""Bottleneck architecture for > 34 layer resnet.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
Returns:
A final conv layer of filters * 4
"""
def f(input_feature):
conv_name_base, bn_name_base = _block_name_base(stage, block)
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
x = Conv2D(filters=filters, kernel_size=(1, 1),
strides=transition_strides,
dilation_rate=dilation_rate,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4),
name=conv_name_base + '2a')(input_feature)
else:
x = residual_unit(filters=filters, kernel_size=(1, 1),
strides=transition_strides,
dilation_rate=dilation_rate,
conv_name_base=conv_name_base + '2a',
bn_name_base=bn_name_base + '2a')(input_feature)
if dropout is not None:
x = Dropout(dropout)(x)
x = residual_unit(filters=filters, kernel_size=(3, 3),
conv_name_base=conv_name_base + '2b',
bn_name_base=bn_name_base + '2b')(x)
if dropout is not None:
x = Dropout(dropout)(x)
x = residual_unit(filters=filters * 4, kernel_size=(1, 1),
conv_name_base=conv_name_base + '2c',
bn_name_base=bn_name_base + '2c')(x)
return _shortcut(input_feature, x)
return f
def _handle_dim_ordering():
global ROW_AXIS
global COL_AXIS
global CHANNEL_AXIS
if K.image_data_format() == 'channels_last':
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = 3
else:
CHANNEL_AXIS = 1
ROW_AXIS = 2
COL_AXIS = 3
def _string_to_function(identifier):
if isinstance(identifier, six.string_types):
res = globals().get(identifier)
if not res:
raise ValueError('Invalid {}'.format(identifier))
return res
return identifier
def ResNet(input_shape=None, classes=10, block='bottleneck', residual_unit='v2',
repetitions=None, initial_filters=64, activation='softmax', include_top=True,
input_tensor=None, dropout=None, transition_dilation_rate=(1, 1),
initial_strides=(2, 2), initial_kernel_size=(7, 7), initial_pooling='max',
final_pooling=None, top='classification'):
"""Builds a custom ResNet like architecture. Defaults to ResNet50 v2.
Args:
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` dim ordering)
or `(3, 224, 224)` (with `channels_first` dim ordering).
It should have exactly 3 dimensions,
and width and height should be no smaller than 8.
E.g. `(224, 224, 3)` would be one valid value.
classes: The number of outputs at final softmax layer
block: The block function to use. This is either `'basic'` or `'bottleneck'`.
The original paper used `basic` for layers < 50.
repetitions: Number of repetitions of various block units.
At each block unit, the number of filters are doubled and the input size
is halved. Default of None implies the ResNet50v2 values of [3, 4, 6, 3].
residual_unit: the basic residual unit, 'v1' for conv bn relu, 'v2' for bn relu
conv. See [Identity Mappings in
Deep Residual Networks](https://arxiv.org/abs/1603.05027)
for details.
dropout: None for no dropout, otherwise rate of dropout from 0 to 1.
            Based on the [Wide Residual Networks](https://arxiv.org/pdf/1605.07146) paper.
transition_dilation_rate: Dilation rate for transition layers. For semantic
segmentation of images use a dilation rate of (2, 2).
initial_strides: Stride of the very first residual unit and MaxPooling2D call,
with default (2, 2), set to (1, 1) for small images like cifar.
initial_kernel_size: kernel size of the very first convolution, (7, 7) for
imagenet and (3, 3) for small image datasets like tiny imagenet and cifar.
See [ResNeXt](https://arxiv.org/abs/1611.05431) paper for details.
initial_pooling: Determine if there will be an initial pooling layer,
'max' for imagenet and None for small image datasets.
See [ResNeXt](https://arxiv.org/abs/1611.05431) paper for details.
final_pooling: Optional pooling mode for feature extraction at the final
model layer when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
top: Defines final layers to evaluate based on a specific problem type. Options
are 'classification' for ImageNet style problems, 'segmentation' for
problems like the Pascal VOC dataset, and None to exclude these layers
entirely.
Returns:
The keras `Model`.
"""
if activation not in ['softmax', 'sigmoid', None]:
raise ValueError('activation must be one of "softmax", "sigmoid", or None')
if activation == 'sigmoid' and classes != 1:
raise ValueError('sigmoid activation can only be used when classes = 1')
if repetitions is None:
repetitions = [3, 4, 6, 3]
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=32,
min_size=8,
data_format=K.image_data_format(),
require_flatten=include_top)
_handle_dim_ordering()
if len(input_shape) != 3:
raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
if block == 'basic':
block_fn = basic_block
elif block == 'bottleneck':
block_fn = bottleneck
elif isinstance(block, six.string_types):
block_fn = _string_to_function(block)
else:
block_fn = block
if residual_unit == 'v2':
residual_unit = _bn_relu_conv
elif residual_unit == 'v1':
residual_unit = _conv_bn_relu
elif isinstance(residual_unit, six.string_types):
residual_unit = _string_to_function(residual_unit)
else:
residual_unit = residual_unit
# Permute dimension order if necessary
if K.image_data_format() == 'channels_first':
input_shape = (input_shape[1], input_shape[2], input_shape[0])
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=32,
min_size=8,
data_format=K.image_data_format(),
require_flatten=include_top)
img_input = Input(shape=input_shape, tensor=input_tensor)
x = _conv_bn_relu(filters=initial_filters, kernel_size=initial_kernel_size,
strides=initial_strides)(img_input)
if initial_pooling == 'max':
x = MaxPooling2D(pool_size=(3, 3), strides=initial_strides, padding="same")(x)
block = x
filters = initial_filters
for i, r in enumerate(repetitions):
transition_dilation_rates = [transition_dilation_rate] * r
transition_strides = [(1, 1)] * r
if transition_dilation_rate == (1, 1):
transition_strides[0] = (2, 2)
block = _residual_block(block_fn, filters=filters,
stage=i, blocks=r,
is_first_layer=(i == 0),
dropout=dropout,
transition_dilation_rates=transition_dilation_rates,
transition_strides=transition_strides,
residual_unit=residual_unit)(block)
filters *= 2
# Last activation
x = _bn_relu(block)
# Classifier block
    if include_top and top == 'classification':
x = GlobalAveragePooling2D()(x)
x = Dense(units=classes, activation=activation,
kernel_initializer="he_normal")(x)
    elif include_top and top == 'segmentation':
x = Conv2D(classes, (1, 1), activation='linear', padding='same')(x)
if K.image_data_format() == 'channels_first':
channel, row, col = input_shape
else:
row, col, channel = input_shape
x = Reshape((row * col, classes))(x)
x = Activation(activation)(x)
x = Reshape((row, col, classes))(x)
elif final_pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif final_pooling == 'max':
x = GlobalMaxPooling2D()(x)
model = Model(inputs=img_input, outputs=x)
return model
def ResNet18(input_shape, classes):
"""ResNet with 18 layers and v2 residual units
"""
return ResNet(input_shape, classes, basic_block, repetitions=[2, 2, 2, 2])
def ResNet34(input_shape, classes):
"""ResNet with 34 layers and v2 residual units
"""
return ResNet(input_shape, classes, basic_block, repetitions=[3, 4, 6, 3])
def ResNet50(input_shape, classes):
"""ResNet with 50 layers and v2 residual units
"""
return ResNet(input_shape, classes, bottleneck, repetitions=[3, 4, 6, 3])
def ResNet101(input_shape, classes):
"""ResNet with 101 layers and v2 residual units
"""
return ResNet(input_shape, classes, bottleneck, repetitions=[3, 4, 23, 3])
def ResNet152(input_shape, classes):
"""ResNet with 152 layers and v2 residual units
"""
return ResNet(input_shape, classes, bottleneck, repetitions=[3, 8, 36, 3])
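# Usage example (a minimal sketch; the input shape and class count are
# illustrative):
#
#   model = ResNet18(input_shape=(32, 32, 3), classes=10)
#   model.compile(optimizer='sgd', loss='categorical_crossentropy',
#                 metrics=['accuracy'])
#   model.summary()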
| keras-contrib/keras_contrib/applications/resnet.py/0 | {
"file_path": "keras-contrib/keras_contrib/applications/resnet.py",
"repo_id": "keras-contrib",
"token_count": 8914
} | 10 |
from __future__ import print_function
import numpy
from keras.utils.data_utils import get_file
from zipfile import ZipFile
from collections import Counter
from keras.preprocessing.sequence import pad_sequences
def load_data(path='conll2000.zip', min_freq=2):
path = get_file(path,
origin='https://raw.githubusercontent.com/nltk'
'/nltk_data/gh-pages/packages/corpora/conll2000.zip')
print(path)
archive = ZipFile(path, 'r')
train = _parse_data(archive.open('conll2000/train.txt'))
test = _parse_data(archive.open('conll2000/test.txt'))
archive.close()
word_counts = Counter(row[0].lower() for sample in train for row in sample)
vocab = ['<pad>', '<unk>']
vocab += [w for w, f in iter(word_counts.items()) if f >= min_freq]
# in alphabetic order
pos_tags = sorted(list(set(row[1] for sample in train + test for row in sample)))
# in alphabetic order
chunk_tags = sorted(list(set(row[2] for sample in train + test for row in sample)))
train = _process_data(train, vocab, pos_tags, chunk_tags)
test = _process_data(test, vocab, pos_tags, chunk_tags)
return train, test, (vocab, pos_tags, chunk_tags)
def _parse_data(fh):
string = fh.read()
data = []
for sample in string.decode().strip().split('\n\n'):
data.append([row.split() for row in sample.split('\n')])
fh.close()
return data
def _process_data(data, vocab, pos_tags, chunk_tags, maxlen=None, onehot=False):
if maxlen is None:
maxlen = max(len(s) for s in data)
word2idx = dict((w, i) for i, w in enumerate(vocab))
# set to <unk> (index 1) if not in vocab
x = [[word2idx.get(w[0].lower(), 1) for w in s] for s in data]
y_pos = [[pos_tags.index(w[1]) for w in s] for s in data]
y_chunk = [[chunk_tags.index(w[2]) for w in s] for s in data]
x = pad_sequences(x, maxlen) # left padding
    # left padded with -1. Indeed, any integer works as it will be masked
y_pos = pad_sequences(y_pos, maxlen, value=-1)
y_chunk = pad_sequences(y_chunk, maxlen, value=-1)
if onehot:
        y_pos = numpy.eye(len(pos_tags), dtype='float32')[y_pos]
        y_chunk = numpy.eye(len(chunk_tags), dtype='float32')[y_chunk]
else:
y_pos = numpy.expand_dims(y_pos, 2)
y_chunk = numpy.expand_dims(y_chunk, 2)
return x, y_pos, y_chunk
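# Usage example (a minimal sketch of the return structure):
#
#   train, test, (vocab, pos_tags, chunk_tags) = load_data()
#   x_train, y_pos_train, y_chunk_train = train
#   # x_train holds padded word indices; the y arrays hold tag indices of
#   # shape (samples, maxlen, 1), left-padded with -1 so they can be masked.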
| keras-contrib/keras_contrib/datasets/conll2000.py/0 | {
"file_path": "keras-contrib/keras_contrib/datasets/conll2000.py",
"repo_id": "keras-contrib",
"token_count": 1006
} | 11 |
import pytest
import numpy as np
from keras_contrib import callbacks
from keras.models import Sequential
from keras.layers import Dense
from numpy.testing import assert_allclose
def build_model():
model = Sequential([
Dense(2, activation='relu', input_shape=(2,)),
Dense(1, activation='sigmoid')
])
return model
def cycle(i):
return np.floor(1 + i / (2 * 2000))
def x(i):
return np.abs(i / 2000. - 2 * cycle(i) + 1)
def test_cyclic_lr_triangular_1():
X = np.random.rand(4000, 2)
y = np.random.rand(4000).reshape(-1, 1)
clr = callbacks.CyclicLR()
model = build_model()
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=['accuracy']
)
model.fit(X, y, batch_size=1, epochs=1, verbose=0, callbacks=[clr])
r = np.concatenate([
np.linspace(0.001, 0.006, num=2001)[1:],
np.linspace(0.006, 0.001, num=2001)[1:]
])
assert_allclose(clr.history['lr'], r)
def test_cyclic_lr_triangular_2():
X = np.random.rand(4000, 2)
y = np.random.rand(4000).reshape(-1, 1)
clr = callbacks.CyclicLR(mode='triangular2')
model = build_model()
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=['accuracy']
)
model.fit(X, y, batch_size=1, epochs=2, verbose=0, callbacks=[clr])
r = np.concatenate([
np.linspace(0.001, 0.006, num=2001)[1:],
np.linspace(0.006, 0.001, num=2001)[1:],
np.linspace(0.001, 0.0035, num=2001)[1:],
np.linspace(0.0035, 0.001, num=2001)[1:],
])
assert_allclose(clr.history['lr'], r)
def test_cyclic_lr_exp_range():
X = np.random.rand(4000, 2)
y = np.random.rand(4000).reshape(-1, 1)
clr = callbacks.CyclicLR(mode='exp_range', gamma=0.9996)
model = build_model()
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=['accuracy']
)
model.fit(X, y, batch_size=1, epochs=2, verbose=0, callbacks=[clr])
exp_range = []
def scale_fn(i):
return 0.001 + (0.006 - 0.001) * np.maximum(0, (1 - x(i))) * (0.9996 ** i)
for i in range(8000):
exp_range.append(scale_fn(i + 1))
assert_allclose(clr.history['lr'], np.array(exp_range))
def test_cyclic_lr_custom_fn_test():
X = np.random.rand(4000, 2)
y = np.random.rand(4000).reshape(-1, 1)
def clr_fn(x):
return 1 / (5 ** (x * 0.0001))
clr = callbacks.CyclicLR(scale_fn=clr_fn, scale_mode='iterations')
model = build_model()
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=['accuracy']
)
model.fit(X, y, batch_size=1, epochs=2, verbose=0, callbacks=[clr])
custom_range = []
def scale_fn(i):
c = 0.006 - 0.001
return 0.001 + c * np.maximum(0, (1 - x(i))) * 1 / (5 ** (i * 0.0001))
for i in range(8000):
custom_range.append(scale_fn(i + 1))
assert_allclose(clr.history['lr'], np.array(custom_range))
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/callbacks/cyclical_learning_rate_test.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/callbacks/cyclical_learning_rate_test.py",
"repo_id": "keras-contrib",
"token_count": 1447
} | 12 |
import pytest
import numpy as np
import os
from numpy.testing import assert_allclose
from keras.layers import Embedding
from keras.models import Sequential
from keras.models import load_model
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_accuracy
from keras_contrib.metrics import crf_marginal_accuracy
from keras_contrib.metrics import crf_viterbi_accuracy
from keras_contrib.layers import CRF
from keras_contrib.utils.test_utils import is_tf_keras
nb_samples, timesteps, embedding_dim, output_dim = 2, 10, 4, 5
embedding_num = 12
MODEL_PERSISTENCE_PATH = './test_saving_crf_model.h5'
@pytest.mark.xfail(is_tf_keras,
reason='TODO: fix it. Using K.tf which is bad.',
strict=True)
def test_CRF():
# data
x = np.random.randint(1, embedding_num, nb_samples * timesteps)
x = x.reshape((nb_samples, timesteps))
x[0, -4:] = 0 # right padding
x[1, :5] = 0 # left padding
y = np.random.randint(0, output_dim, nb_samples * timesteps)
y = y.reshape((nb_samples, timesteps))
y_onehot = np.eye(output_dim)[y]
y = np.expand_dims(y, 2) # .astype('float32')
# test with no masking, onehot, fix length
model = Sequential()
model.add(Embedding(embedding_num, embedding_dim, input_length=timesteps))
crf = CRF(output_dim)
model.add(crf)
model.compile(optimizer='rmsprop', loss=crf_loss)
model.fit(x, y_onehot, epochs=1, batch_size=10)
model.save(MODEL_PERSISTENCE_PATH)
load_model(MODEL_PERSISTENCE_PATH,
custom_objects={'CRF': CRF,
'crf_loss': crf_loss,
'crf_viterbi_accuracy': crf_viterbi_accuracy})
# test with masking, sparse target, dynamic length;
# test crf_viterbi_accuracy, crf_marginal_accuracy
model = Sequential()
model.add(Embedding(embedding_num, embedding_dim, mask_zero=True))
crf = CRF(output_dim, sparse_target=True)
model.add(crf)
model.compile(optimizer='rmsprop', loss=crf_loss,
metrics=[crf_viterbi_accuracy, crf_marginal_accuracy])
model.fit(x, y, epochs=1, batch_size=10)
# check mask
y_pred = model.predict(x).argmax(-1)
assert (y_pred[0, -4:] == 0).all() # right padding
assert (y_pred[1, :5] == 0).all() # left padding
# test viterbi_acc
_, v_acc, _ = model.evaluate(x, y)
np_acc = (y_pred[x > 0] == y[:, :, 0][x > 0]).astype('float32').mean()
print(v_acc, np_acc)
assert np.abs(v_acc - np_acc) < 1e-4
# test config
model.get_config()
# test marginal learn mode, fix length
model = Sequential()
model.add(Embedding(embedding_num, embedding_dim, input_length=timesteps,
mask_zero=True))
crf = CRF(output_dim, learn_mode='marginal', unroll=True)
model.add(crf)
model.compile(optimizer='rmsprop', loss=crf_loss)
model.fit(x, y_onehot, epochs=1, batch_size=10)
# check mask (marginal output)
y_pred = model.predict(x)
assert_allclose(y_pred[0, -4:], 1. / output_dim, atol=1e-6)
assert_allclose(y_pred[1, :5], 1. / output_dim, atol=1e-6)
# test marginal learn mode, but with Viterbi test_mode
model = Sequential()
model.add(Embedding(embedding_num, embedding_dim, input_length=timesteps,
mask_zero=True))
crf = CRF(output_dim, learn_mode='marginal', test_mode='viterbi')
model.add(crf)
model.compile(optimizer='rmsprop', loss=crf_loss, metrics=[crf_accuracy])
model.fit(x, y_onehot, epochs=1, batch_size=10)
y_pred = model.predict(x)
# check y_pred is onehot vector (output from 'viterbi' test mode)
assert_allclose(np.eye(output_dim)[y_pred.argmax(-1)], y_pred, atol=1e-6)
try:
os.remove(MODEL_PERSISTENCE_PATH)
except OSError:
pass
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/layers/test_crf.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/layers/test_crf.py",
"repo_id": "keras-contrib",
"token_count": 1757
} | 13 |
# Benchmark the performance of torch custom training loop
This directory contains benchmarks to compare the performance of a Keras model
and an equivalent Torch model while using the same Torch custom training loop.
The purpose of the benchmark is to understand the performance difference resulting from
the choice of modeling API (Keras or Torch).
To run the benchmark, use the command below, changing the module to your target benchmark:
```shell
python3 -m benchmarks.torch_ctl_benchmark.conv_model_benchmark
``` | keras-core/benchmarks/torch_ctl_benchmark/README.md/0 | {
"file_path": "keras-core/benchmarks/torch_ctl_benchmark/README.md",
"repo_id": "keras-core",
"token_count": 112
} | 14 |
"""
Title: GPT text generation from scratch with KerasNLP
Author: [Jesse Chan](https://github.com/jessechancy)
Date created: 2022/07/25
Last modified: 2022/07/25
Description: Using KerasNLP to train a mini-GPT model for text generation.
Accelerator: GPU
"""
"""
## Introduction
In this example, we will use KerasNLP to build a scaled-down Generative
Pre-trained Transformer (GPT) model. GPT is a Transformer-based model that allows you to generate
sophisticated text from a prompt.
We will train the model on the [simplebooks-92](https://arxiv.org/abs/1911.12391) corpus,
which is a dataset made from several novels. It is a good dataset for this example since
it has a small vocabulary and high word frequency, which is beneficial when training a
model with few parameters.
This example combines concepts from
[Text generation with a miniature GPT](https://keras.io/examples/generative/text_generation_with_miniature_gpt/)
with KerasNLP abstractions. We will demonstrate how KerasNLP tokenization, layers and
metrics simplify the training
process, and then show how to generate output text using the KerasNLP sampling utilities.
Note: If you are running this example on a Colab,
make sure to enable GPU runtime for faster training.
This example requires KerasNLP. You can install it via the following command:
`pip install keras-nlp`
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "jax"
import keras_nlp
import keras_core as keras
import tensorflow.data as tf_data
import tensorflow.strings as tf_strings
"""
## Settings & hyperparameters
"""
# Data
BATCH_SIZE = 64
SEQ_LEN = 128
MIN_TRAINING_SEQ_LEN = 450
# Model
EMBED_DIM = 256
FEED_FORWARD_DIM = 256
NUM_HEADS = 3
NUM_LAYERS = 2
VOCAB_SIZE = 5000 # Limits parameters in model.
# Training
EPOCHS = 6
# Inference
NUM_TOKENS_TO_GENERATE = 80
"""
## Load the data
Now, let's download the dataset! The SimpleBooks dataset consists of 1,573 Gutenberg books, and has
one of the smallest vocabulary-size-to-word-level-token ratios. It has a vocabulary size of ~98k,
a third of WikiText-103's, with around the same number of tokens (~100M). This makes it easy to fit a small model.
"""
keras.utils.get_file(
origin="https://dldata-public.s3.us-east-2.amazonaws.com/simplebooks.zip",
extract=True,
)
dir = os.path.expanduser("~/.keras/datasets/simplebooks/")
# Load simplebooks-92 train set and filter out short lines.
raw_train_ds = (
tf_data.TextLineDataset(dir + "simplebooks-92-raw/train.txt")
.filter(lambda x: tf_strings.length(x) > MIN_TRAINING_SEQ_LEN)
.batch(BATCH_SIZE)
.shuffle(buffer_size=256)
)
# Load simplebooks-92 validation set and filter out short lines.
raw_val_ds = (
tf_data.TextLineDataset(dir + "simplebooks-92-raw/valid.txt")
.filter(lambda x: tf_strings.length(x) > MIN_TRAINING_SEQ_LEN)
.batch(BATCH_SIZE)
)
"""
## Train the tokenizer
We train the tokenizer from the training dataset for a vocabulary size of `VOCAB_SIZE`,
which is a tuned hyperparameter. We want to limit the vocabulary as much as possible, since,
as we will see later on,
vocabulary size has a large effect on the number of model parameters. We also don't want to include
*too few* vocabulary terms, or there would be too many out-of-vocabulary (OOV) sub-words. In
addition, three tokens are reserved in the vocabulary:
- `"[PAD]"` for padding sequences to `SEQ_LEN`. This token has index 0 in both
`reserved_tokens` and `vocab`, since `WordPieceTokenizer` (and other layers) consider
`0`/`vocab[0]` as the default padding.
- `"[UNK]"` for OOV sub-words, which should match the default `oov_token="[UNK]"` in
`WordPieceTokenizer`.
- `"[BOS]"` stands for beginning of sentence, but here technically it is a token
representing the beginning of each line of training data.
"""
# Train tokenizer vocabulary
vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(
raw_train_ds,
vocabulary_size=VOCAB_SIZE,
lowercase=True,
reserved_tokens=["[PAD]", "[UNK]", "[BOS]"],
)
"""
## Load tokenizer
We use the vocabulary data to initialize
`keras_nlp.tokenizers.WordPieceTokenizer`. WordPieceTokenizer is an efficient
implementation of the WordPiece algorithm used by BERT and other models. It will strip,
lower-case and do other irreversible preprocessing operations.
"""
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=vocab,
sequence_length=SEQ_LEN,
lowercase=True,
)
"""
## Tokenize data
We preprocess the dataset by tokenizing and splitting it into `features` and `labels`.
"""
# packer adds a start token
start_packer = keras_nlp.layers.StartEndPacker(
sequence_length=SEQ_LEN,
start_value=tokenizer.token_to_id("[BOS]"),
)
def preprocess(inputs):
outputs = tokenizer(inputs)
features = start_packer(outputs)
labels = outputs
return features, labels
# Tokenize and split into train and label sequences.
train_ds = raw_train_ds.map(
preprocess, num_parallel_calls=tf_data.AUTOTUNE
).prefetch(tf_data.AUTOTUNE)
val_ds = raw_val_ds.map(
preprocess, num_parallel_calls=tf_data.AUTOTUNE
).prefetch(tf_data.AUTOTUNE)
"""
## Build the model
We create our scaled down GPT model with the following layers:
- One `keras_nlp.layers.TokenAndPositionEmbedding` layer, which combines the embedding
for the token and its position.
- Multiple `keras_nlp.layers.TransformerDecoder` layers, with the default causal masking.
The layer has no cross-attention when run with decoder sequence only.
- One final dense linear layer
"""
inputs = keras.layers.Input(shape=(None,), dtype="int32")
# Embedding.
embedding_layer = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=VOCAB_SIZE,
sequence_length=SEQ_LEN,
embedding_dim=EMBED_DIM,
mask_zero=True,
)
x = embedding_layer(inputs)
# Transformer decoders.
for _ in range(NUM_LAYERS):
decoder_layer = keras_nlp.layers.TransformerDecoder(
num_heads=NUM_HEADS,
intermediate_dim=FEED_FORWARD_DIM,
)
x = decoder_layer(x) # Giving one argument only skips cross-attention.
# Output.
outputs = keras.layers.Dense(VOCAB_SIZE)(x)
model = keras.Model(inputs=inputs, outputs=outputs)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
perplexity = keras_nlp.metrics.Perplexity(from_logits=True, mask_token_id=0)
model.compile(optimizer="adam", loss=loss_fn, metrics=[perplexity])
"""
Let's take a look at our model summary - a large majority of the
parameters are in the `token_and_position_embedding` and the output `dense` layer!
This means that the vocabulary size (`VOCAB_SIZE`) has a large effect on the size of the model,
while the number of Transformer decoder layers (`NUM_LAYERS`) doesn't affect it as much.
"""
model.summary()
"""
## Training
Now that we have our model, let's train it with the `fit()` method.
"""
model.fit(train_ds, validation_data=val_ds, verbose=2, epochs=EPOCHS)
"""
## Inference
With our trained model, we can test it out to gauge its performance. To do this
we can seed our model with an input sequence starting with the `"[BOS]"` token,
and progressively sample the model by making predictions for each subsequent
token in a loop.
To start, let's build a prompt with the same shape as our model inputs, containing
only the `"[BOS]"` token.
"""
# The "packer" layers adds the [BOS] token for us.
prompt_tokens = start_packer(tokenizer([""]))
prompt_tokens
"""
We will use the `keras_nlp.samplers` module for inference, which requires a
callback function wrapping the model we just trained. This wrapper calls
the model and returns the logit predictions for the current token we are
generating.
Note: There are two pieces of more advanced functionality available when
defining your callback. The first is the ability to take in a `cache` of states
computed in previous generation steps, which can be used to speed up generation.
The second is the ability to output the final dense "hidden state" of each
generated token. This is used by `keras_nlp.samplers.ContrastiveSampler`, which
avoids repetition by penalizing repeated hidden states. Both are optional, and
we will ignore them for now.
"""
def next(prompt, cache, index):
logits = model(prompt)[:, index - 1, :]
# Ignore hidden states for now; only needed for contrastive search.
hidden_states = None
return logits, hidden_states, cache
"""
Creating the wrapper function is the most complex part of using these functions. Now that
it's done, let's test out the different utilities, starting with greedy search.
"""
"""
### Greedy search
We greedily pick the most probable token at each timestep. In other words, we get the
argmax of the model output.
"""
sampler = keras_nlp.samplers.GreedySampler()
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1, # Start sampling immediately after the [BOS] token.
)
txt = tokenizer.detokenize(output_tokens)
print(f"Greedy search generated text: \n{txt}\n")
"""
As you can see, greedy search starts out making some sense, but quickly starts repeating
itself. This is a common problem with text generation that can be fixed by some of the
probabilistic text generation utilities shown later on!
"""
"""
### Beam search
At a high level, beam search keeps track of the `num_beams` most probable sequences at
each timestep, and predicts the best next token from all sequences. It is an improvement
over greedy search since it explores more possibilities; however, it is less efficient than
greedy search since it has to compute and store multiple candidate sequences.
**Note:** beam search with `num_beams=1` is identical to greedy search.
"""
sampler = keras_nlp.samplers.BeamSampler(num_beams=10)
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Beam search generated text: \n{txt}\n")
"""
Similar to greedy search, beam search quickly starts repeating itself, since it is still
a deterministic method.
"""
"""
### Random search
Random search is our first probabilistic method. At each time step, it samples the next
token using the softmax probabilities provided by the model.
"""
sampler = keras_nlp.samplers.RandomSampler()
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Random search generated text: \n{txt}\n")
"""
Voilà, no repetitions! However, with random search, we may see some nonsensical words
appearing since any word in the vocabulary has a chance of appearing with this sampling
method. This is fixed by our next search utility, top-k search.
"""
"""
### Top-K search
Similar to random search, we sample the next token from the probability distribution
provided by the model. The only difference is that here, we select the top `k` most
probable tokens and redistribute the probability mass over them before sampling. This way,
we won't sample from low-probability tokens, and hence will get fewer
nonsensical words!
"""
sampler = keras_nlp.samplers.TopKSampler(k=10)
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Top-K search generated text: \n{txt}\n")
"""
### Top-P search
Even with the top-k search, there is something to improve upon. With top-k search, the
number `k` is fixed, which means it selects the same number of tokens for any probability
distribution. Consider two scenarios: one where the probability mass is concentrated over
2 words, and another where it is evenly distributed across 10. Should
we choose `k=2` or `k=10`? No single value of `k` fits both cases.
This is where top-p search comes in! Instead of choosing a `k`, we choose a probability
`p` that we want the probabilities of the top tokens to sum up to. This way, we can
dynamically adjust the `k` based on the probability distribution. By setting `p=0.9`, if
90% of the probability mass is concentrated on the top 2 tokens, sampling is restricted to
just those 2 tokens. If instead the 90% is distributed over the top 10 tokens, sampling is
similarly restricted to those 10 tokens.
"""
sampler = keras_nlp.samplers.TopPSampler(p=0.5)
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Top-P search generated text: \n{txt}\n")
"""
### Using callbacks for text generation
We can also wrap the utilities in a callback, which allows you to print out a prediction
sequence for every epoch of the model! Here is an example of a callback for top-k search:
"""
class TopKTextGenerator(keras.callbacks.Callback):
"""A callback to generate text from a trained model using top-k."""
def __init__(self, k):
self.sampler = keras_nlp.samplers.TopKSampler(k)
def on_epoch_end(self, epoch, logs=None):
output_tokens = self.sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Top-K search generated text: \n{txt}\n")
text_generation_callback = TopKTextGenerator(k=10)
# Dummy training loop to demonstrate callback.
model.fit(
train_ds.take(1), verbose=2, epochs=2, callbacks=[text_generation_callback]
)
"""
## Conclusion
To recap, in this example, we use KerasNLP layers to train a sub-word vocabulary,
tokenize training data, create a miniature GPT model, and perform inference with the
text generation library.
If you would like to understand how Transformers work, or learn more about training the
full GPT model, here are some further readings:
- Attention Is All You Need [Vaswani et al., 2017](https://arxiv.org/abs/1706.03762)
- GPT-3 Paper [Brown et al., 2020](https://arxiv.org/abs/2005.14165)
"""
| keras-core/examples/keras_io/generative/text_generation_gpt.py/0 | {
"file_path": "keras-core/examples/keras_io/generative/text_generation_gpt.py",
"repo_id": "keras-core",
"token_count": 4429
} | 15 |
"""
Title: Denoising Diffusion Implicit Models
Author: [András Béres](https://www.linkedin.com/in/andras-beres-789190210)
Date created: 2022/06/24
Last modified: 2022/06/24
Description: Generating images of flowers with denoising diffusion implicit models.
Accelerator: GPU
"""
"""
## Introduction
### What are diffusion models?
Recently, [denoising diffusion models](https://arxiv.org/abs/2006.11239), including
[score-based generative models](https://arxiv.org/abs/1907.05600), gained popularity as a
powerful class of generative models that can [rival](https://arxiv.org/abs/2105.05233)
even [generative adversarial networks (GANs)](https://arxiv.org/abs/1406.2661) in image
synthesis quality. They tend to generate more diverse samples, while being stable to
train and easy to scale. Recent large diffusion models, such as
[DALL-E 2](https://openai.com/dall-e-2/) and [Imagen](https://imagen.research.google/),
have shown incredible text-to-image generation capability. One of their drawbacks,
however, is that they are slower to sample from, because they require multiple forward passes
to generate an image.
Diffusion refers to the process of turning a structured signal (an image) into noise
step-by-step. By simulating diffusion, we can generate noisy images from our training
images, and can train a neural network to try to denoise them. Using the trained network
we can simulate the opposite of diffusion, reverse diffusion, which is the process of an
image emerging from noise.

One-sentence summary: **diffusion models are trained to denoise noisy images, and can
generate images by iteratively denoising pure noise.**
### Goal of this example
This code example intends to be a minimal but feature-complete (with a generation quality
metric) implementation of diffusion models, with modest compute requirements and
reasonable performance. My implementation choices and hyperparameter tuning were done
with these goals in mind.
Since currently the literature of diffusion models is
[mathematically quite complex](https://arxiv.org/abs/2206.00364)
with multiple theoretical frameworks
([score matching](https://arxiv.org/abs/1907.05600),
[differential equations](https://arxiv.org/abs/2011.13456),
[Markov chains](https://arxiv.org/abs/2006.11239)) and sometimes even
[conflicting notations (see Appendix C.2)](https://arxiv.org/abs/2010.02502),
it can be daunting trying to understand
them. My view of these models in this example will be that they learn to separate a
noisy image into its image and Gaussian noise components.
In this example I made an effort to break down all long mathematical expressions into
digestible pieces and gave all variables explanatory names. I also included numerous
links to relevant literature to help interested readers dive deeper into the topic, in
the hope that this code example will become a good starting point for practitioners
learning about diffusion models.
In the following sections, we will implement a continuous time version of
[Denoising Diffusion Implicit Models (DDIMs)](https://arxiv.org/abs/2010.02502)
with deterministic sampling.
"""
"""
## Setup
"""
import math
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
import keras_core as keras
from keras_core import layers
from keras_core import ops
"""
## Hyperparameters
"""
# data
dataset_name = "oxford_flowers102"
dataset_repetitions = 5
num_epochs = 1 # train for at least 50 epochs for good results
image_size = 64
# KID = Kernel Inception Distance, see related section
kid_image_size = 75
kid_diffusion_steps = 5
plot_diffusion_steps = 20
# sampling
min_signal_rate = 0.02
max_signal_rate = 0.95
# architecture
embedding_dims = 32
embedding_max_frequency = 1000.0
widths = [32, 64, 96, 128]
block_depth = 2
# optimization
batch_size = 64
ema = 0.999
learning_rate = 1e-3
weight_decay = 1e-4
"""
## Data pipeline
We will use the
[Oxford Flowers 102](https://www.tensorflow.org/datasets/catalog/oxford_flowers102)
dataset for
generating images of flowers, which is a diverse natural dataset containing around 8,000
images. Unfortunately the official splits are imbalanced, as most of the images are
contained in the test split. We create new splits (80% train, 20% validation) using the
[Tensorflow Datasets slicing API](https://www.tensorflow.org/datasets/splits). We apply
center crops as preprocessing, and repeat the dataset multiple times (reason given in the
next section).
"""
def preprocess_image(data):
# center crop image
height = ops.shape(data["image"])[0]
width = ops.shape(data["image"])[1]
crop_size = ops.minimum(height, width)
image = tf.image.crop_to_bounding_box(
data["image"],
(height - crop_size) // 2,
(width - crop_size) // 2,
crop_size,
crop_size,
)
# resize and clip
# for image downsampling it is important to turn on antialiasing
image = tf.image.resize(
image, size=[image_size, image_size], antialias=True
)
return ops.clip(image / 255.0, 0.0, 1.0)
def prepare_dataset(split):
# the validation dataset is shuffled as well, because data order matters
# for the KID estimation
return (
tfds.load(dataset_name, split=split, shuffle_files=True)
.map(preprocess_image, num_parallel_calls=tf.data.AUTOTUNE)
.cache()
.repeat(dataset_repetitions)
.shuffle(10 * batch_size)
.batch(batch_size, drop_remainder=True)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
# load dataset
train_dataset = prepare_dataset("train[:80%]+validation[:80%]+test[:80%]")
val_dataset = prepare_dataset("train[80%:]+validation[80%:]+test[80%:]")
"""
## Kernel inception distance
[Kernel Inception Distance (KID)](https://arxiv.org/abs/1801.01401) is an image quality
metric which was proposed as a replacement for the popular
[Frechet Inception Distance (FID)](https://arxiv.org/abs/1706.08500).
I prefer KID to FID because it is simpler to
implement, can be estimated per-batch, and is computationally lighter. More details
[here](https://keras.io/examples/generative/gan_ada/#kernel-inception-distance).
In this example, the images are evaluated at the minimal possible resolution of the
Inception network (75x75 instead of 299x299), and the metric is only measured on the
validation set for computational efficiency. We also limit the number of sampling steps
at evaluation to 5 for the same reason.
Since the dataset is relatively small, we go over the train and validation splits
multiple times per epoch: the KID estimation is noisy and compute-intensive, so
we want to evaluate it only after many iterations, but then average it over many batches.
"""
@keras.saving.register_keras_serializable()
class KID(keras.metrics.Metric):
def __init__(self, name, **kwargs):
super().__init__(name=name, **kwargs)
# KID is estimated per batch and is averaged across batches
self.kid_tracker = keras.metrics.Mean(name="kid_tracker")
# a pretrained InceptionV3 is used without its classification layer
# transform the pixel values to the 0-255 range, then use the same
# preprocessing as during pretraining
self.encoder = keras.Sequential(
[
keras.Input(shape=(image_size, image_size, 3)),
layers.Rescaling(255.0),
layers.Resizing(height=kid_image_size, width=kid_image_size),
layers.Lambda(keras.applications.inception_v3.preprocess_input),
keras.applications.InceptionV3(
include_top=False,
input_shape=(kid_image_size, kid_image_size, 3),
weights="imagenet",
),
layers.GlobalAveragePooling2D(),
],
name="inception_encoder",
)
def polynomial_kernel(self, features_1, features_2):
feature_dimensions = ops.cast(ops.shape(features_1)[1], dtype="float32")
return (
features_1 @ ops.transpose(features_2) / feature_dimensions + 1.0
) ** 3.0
def update_state(self, real_images, generated_images, sample_weight=None):
real_features = self.encoder(real_images, training=False)
generated_features = self.encoder(generated_images, training=False)
# compute polynomial kernels using the two sets of features
kernel_real = self.polynomial_kernel(real_features, real_features)
kernel_generated = self.polynomial_kernel(
generated_features, generated_features
)
kernel_cross = self.polynomial_kernel(real_features, generated_features)
# estimate the squared maximum mean discrepancy using the average kernel values
batch_size = real_features.shape[0]
batch_size_f = ops.cast(batch_size, dtype="float32")
mean_kernel_real = ops.sum(
kernel_real * (1.0 - ops.eye(batch_size))
) / (batch_size_f * (batch_size_f - 1.0))
mean_kernel_generated = ops.sum(
kernel_generated * (1.0 - ops.eye(batch_size))
) / (batch_size_f * (batch_size_f - 1.0))
mean_kernel_cross = ops.mean(kernel_cross)
kid = mean_kernel_real + mean_kernel_generated - 2.0 * mean_kernel_cross
# update the average KID estimate
self.kid_tracker.update_state(kid)
def result(self):
return self.kid_tracker.result()
def reset_state(self):
self.kid_tracker.reset_state()
"""
## Network architecture
Here we specify the architecture of the neural network that we will use for denoising. We
build a [U-Net](https://arxiv.org/abs/1505.04597) with identical input and output
dimensions. U-Net is a popular semantic segmentation architecture, whose main idea is
that it progressively downsamples and then upsamples its input image, and adds skip
connections between layers having the same resolution. These help with gradient flow and
avoid introducing a representation bottleneck, unlike usual
[autoencoders](https://www.deeplearningbook.org/contents/autoencoders.html). Based on
this, one can view
[diffusion models as denoising autoencoders](https://benanne.github.io/2022/01/31/diffusion.html)
without a bottleneck.
The network takes two inputs, the noisy images and the variances of their noise
components. The latter is required since denoising a signal requires different operations
at different levels of noise. We transform the noise variances using sinusoidal
embeddings, similarly to positional encodings used both in
[transformers](https://arxiv.org/abs/1706.03762) and
[NeRF](https://arxiv.org/abs/2003.08934). This helps the network to be
[highly sensitive](https://arxiv.org/abs/2006.10739) to the noise level, which is
crucial for good performance. We implement sinusoidal embeddings using a
[Lambda layer](https://keras.io/api/layers/core_layers/lambda/).
Some other considerations:
* We build the network using the
[Keras Functional API](https://keras.io/guides/functional_api/), and use
[closures](https://twitter.com/fchollet/status/1441927912836321280) to build blocks of
layers in a consistent style.
* [Diffusion models](https://arxiv.org/abs/2006.11239) embed the index of the timestep of
the diffusion process instead of the noise variance, while
[score-based models (Table 1)](https://arxiv.org/abs/2206.00364)
usually use some function of the noise level. I
prefer the latter so that we can change the sampling schedule at inference time, without
retraining the network.
* [Diffusion models](https://arxiv.org/abs/2006.11239) input the embedding to each
convolution block separately. We only input it at the start of the network for
simplicity, which in my experience barely decreases performance, because the skip and
residual connections help the information propagate through the network properly.
* In the literature it is common to use
[attention layers](https://keras.io/api/layers/attention_layers/multi_head_attention/)
at lower resolutions for better global coherence. I omitted it for simplicity.
* We disable the learnable center and scale parameters of the batch normalization layers,
since the following convolution layers make them redundant.
* We initialize the last convolution's kernel to all zeros as a good practice, making the
network predict only zeros after initialization, which is the mean of its targets. This
will improve behaviour at the start of training and make the mean squared error loss
start at exactly 1.
"""
@keras.saving.register_keras_serializable()
def sinusoidal_embedding(x):
embedding_min_frequency = 1.0
frequencies = ops.exp(
ops.linspace(
ops.log(embedding_min_frequency),
ops.log(embedding_max_frequency),
embedding_dims // 2,
)
)
angular_speeds = ops.cast(2.0 * math.pi * frequencies, "float32")
embeddings = ops.concatenate(
[ops.sin(angular_speeds * x), ops.cos(angular_speeds * x)], axis=3
)
return embeddings
def ResidualBlock(width):
def apply(x):
input_width = x.shape[3]
if input_width == width:
residual = x
else:
residual = layers.Conv2D(width, kernel_size=1)(x)
x = layers.BatchNormalization(center=False, scale=False)(x)
x = layers.Conv2D(
width, kernel_size=3, padding="same", activation="swish"
)(x)
x = layers.Conv2D(width, kernel_size=3, padding="same")(x)
x = layers.Add()([x, residual])
return x
return apply
def DownBlock(width, block_depth):
def apply(x):
x, skips = x
for _ in range(block_depth):
x = ResidualBlock(width)(x)
skips.append(x)
x = layers.AveragePooling2D(pool_size=2)(x)
return x
return apply
def UpBlock(width, block_depth):
def apply(x):
x, skips = x
x = layers.UpSampling2D(size=2, interpolation="bilinear")(x)
for _ in range(block_depth):
x = layers.Concatenate()([x, skips.pop()])
x = ResidualBlock(width)(x)
return x
return apply
def get_network(image_size, widths, block_depth):
noisy_images = keras.Input(shape=(image_size, image_size, 3))
noise_variances = keras.Input(shape=(1, 1, 1))
e = layers.Lambda(sinusoidal_embedding, output_shape=(1, 1, 32))(
noise_variances
)
e = layers.UpSampling2D(size=image_size, interpolation="nearest")(e)
x = layers.Conv2D(widths[0], kernel_size=1)(noisy_images)
x = layers.Concatenate()([x, e])
skips = []
for width in widths[:-1]:
x = DownBlock(width, block_depth)([x, skips])
for _ in range(block_depth):
x = ResidualBlock(widths[-1])(x)
for width in reversed(widths[:-1]):
x = UpBlock(width, block_depth)([x, skips])
x = layers.Conv2D(3, kernel_size=1, kernel_initializer="zeros")(x)
return keras.Model([noisy_images, noise_variances], x, name="residual_unet")
"""
This showcases the power of the Functional API. Note how we built a relatively complex
U-Net with skip connections, residual blocks, multiple inputs, and sinusoidal embeddings
in 80 lines of code!
"""
"""
## Diffusion model
### Diffusion schedule
Let us say that a diffusion process starts at time = 0 and ends at time = 1. This
variable will be called diffusion time, and can be either discrete (common in diffusion
models) or continuous (common in score-based models). I choose the latter, so that the
number of sampling steps can be changed at inference time.
We need a function that tells us, at each point in the diffusion process, the noise
level and signal level of the noisy image corresponding to the current diffusion time.
This will be called the diffusion schedule (see `diffusion_schedule()`).
This schedule outputs two quantities: the `noise_rate` and the `signal_rate`
(corresponding to sqrt(1 - alpha) and sqrt(alpha) in the DDIM paper, respectively). We
generate the noisy image by weighting the random noise and the training image by their
corresponding rates and adding them together.
Since the (standard normal) random noises and the (normalized) images both have zero mean
and unit variance, the noise rate and signal rate can be interpreted as the standard
deviation of their components in the noisy image, while the squares of their rates can be
interpreted as their variance (or their power in the signal processing sense). The rates
will always be set so that their squared sum is 1, meaning that the noisy images will
always have unit variance, just like their unscaled components.
We will use a simplified, continuous version of the
[cosine schedule (Section 3.2)](https://arxiv.org/abs/2102.09672),
that is quite commonly used in the literature.
This schedule is symmetric, slow towards the start and end of the diffusion process, and
it also has a nice geometric interpretation, using the
[trigonometric properties of the unit circle](https://en.wikipedia.org/wiki/Unit_circle#/media/File:Circle-trig6.svg):

### Training process
The training procedure (see `train_step()` and `denoise()`) of denoising diffusion models
is the following: we sample random diffusion times uniformly, and mix the training images
with random Gaussian noises at rates corresponding to the diffusion times. Then, we train
the model to separate the noisy image to its two components.
Usually, the neural network is trained to predict the unscaled noise component, from
which the predicted image component can be calculated using the signal and noise rates.
Pixelwise
[mean squared error](https://keras.io/api/losses/regression_losses/#mean_squared_error-function) should
be used theoretically, however I recommend using
[mean absolute error](https://keras.io/api/losses/regression_losses/#mean_absolute_error-function)
instead (similarly to
[this](https://github.com/lucidrains/denoising-diffusion-pytorch/blob/master/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py#L371)
implementation), which produces better results on this dataset.
### Sampling (reverse diffusion)
When sampling (see `reverse_diffusion()`), at each step we take the previous estimate of
the noisy image and separate it into image and noise using our network. Then we recombine
these components using the signal and noise rate of the following step.
Though a similar view is shown in
[Equation 12 of DDIMs](https://arxiv.org/abs/2010.02502), I believe the above explanation
of the sampling equation is not widely known.
This example only implements the deterministic sampling procedure from DDIM, which
corresponds to *eta = 0* in the paper. One can also use stochastic sampling (in which
case the model becomes a
[Denoising Diffusion Probabilistic Model (DDPM)](https://arxiv.org/abs/2006.11239)),
where a part of the predicted noise is
replaced with the same or larger amount of random noise
([see Equation 16 and below](https://arxiv.org/abs/2010.02502)).
Stochastic sampling can be used without retraining the network (since both models are
trained the same way), and it can improve sample quality, though it usually requires
more sampling steps.
"""
@keras.saving.register_keras_serializable()
class DiffusionModel(keras.Model):
def __init__(self, image_size, widths, block_depth):
super().__init__()
self.normalizer = layers.Normalization()
self.network = get_network(image_size, widths, block_depth)
self.ema_network = keras.models.clone_model(self.network)
def compile(self, **kwargs):
super().compile(**kwargs)
self.noise_loss_tracker = keras.metrics.Mean(name="n_loss")
self.image_loss_tracker = keras.metrics.Mean(name="i_loss")
self.kid = KID(name="kid")
@property
def metrics(self):
return [self.noise_loss_tracker, self.image_loss_tracker, self.kid]
def denormalize(self, images):
# convert the pixel values back to 0-1 range
images = self.normalizer.mean + images * self.normalizer.variance**0.5
return ops.clip(images, 0.0, 1.0)
def diffusion_schedule(self, diffusion_times):
# diffusion times -> angles
start_angle = ops.cast(ops.arccos(max_signal_rate), "float32")
end_angle = ops.cast(ops.arccos(min_signal_rate), "float32")
diffusion_angles = start_angle + diffusion_times * (
end_angle - start_angle
)
# angles -> signal and noise rates
signal_rates = ops.cos(diffusion_angles)
noise_rates = ops.sin(diffusion_angles)
# note that their squared sum is always: sin^2(x) + cos^2(x) = 1
return noise_rates, signal_rates
def denoise(self, noisy_images, noise_rates, signal_rates, training):
# the exponential moving average weights are used at evaluation
if training:
network = self.network
else:
network = self.ema_network
# predict noise component and calculate the image component using it
pred_noises = network(
[noisy_images, noise_rates**2], training=training
)
pred_images = (noisy_images - noise_rates * pred_noises) / signal_rates
return pred_noises, pred_images
def reverse_diffusion(self, initial_noise, diffusion_steps):
# reverse diffusion = sampling
num_images = initial_noise.shape[0]
step_size = 1.0 / diffusion_steps
# important line:
# at the first sampling step, the "noisy image" is pure noise
# but its signal rate is assumed to be nonzero (min_signal_rate)
next_noisy_images = initial_noise
for step in range(diffusion_steps):
noisy_images = next_noisy_images
# separate the current noisy image to its components
diffusion_times = ops.ones((num_images, 1, 1, 1)) - step * step_size
noise_rates, signal_rates = self.diffusion_schedule(diffusion_times)
pred_noises, pred_images = self.denoise(
noisy_images, noise_rates, signal_rates, training=False
)
# network used in eval mode
# remix the predicted components using the next signal and noise rates
next_diffusion_times = diffusion_times - step_size
next_noise_rates, next_signal_rates = self.diffusion_schedule(
next_diffusion_times
)
next_noisy_images = (
next_signal_rates * pred_images + next_noise_rates * pred_noises
)
# this new noisy image will be used in the next step
return pred_images
def generate(self, num_images, diffusion_steps):
# noise -> images -> denormalized images
initial_noise = keras.random.normal(
shape=(num_images, image_size, image_size, 3)
)
generated_images = self.reverse_diffusion(
initial_noise, diffusion_steps
)
generated_images = self.denormalize(generated_images)
return generated_images
def train_step(self, images):
# normalize images to have standard deviation of 1, like the noises
images = self.normalizer(images, training=True)
noises = keras.random.normal(
shape=(batch_size, image_size, image_size, 3)
)
# sample uniform random diffusion times
diffusion_times = keras.random.uniform(
shape=(batch_size, 1, 1, 1), minval=0.0, maxval=1.0
)
noise_rates, signal_rates = self.diffusion_schedule(diffusion_times)
# mix the images with noises accordingly
noisy_images = signal_rates * images + noise_rates * noises
with tf.GradientTape() as tape:
# train the network to separate noisy images to their components
pred_noises, pred_images = self.denoise(
noisy_images, noise_rates, signal_rates, training=True
)
noise_loss = self.loss(noises, pred_noises) # used for training
image_loss = self.loss(images, pred_images) # only used as metric
gradients = tape.gradient(noise_loss, self.network.trainable_weights)
self.optimizer.apply_gradients(
zip(gradients, self.network.trainable_weights)
)
self.noise_loss_tracker.update_state(noise_loss)
self.image_loss_tracker.update_state(image_loss)
# track the exponential moving averages of weights
for weight, ema_weight in zip(
self.network.weights, self.ema_network.weights
):
ema_weight.assign(ema * ema_weight + (1 - ema) * weight)
# KID is not measured during the training phase for computational efficiency
return {m.name: m.result() for m in self.metrics[:-1]}
def test_step(self, images):
# normalize images to have standard deviation of 1, like the noises
images = self.normalizer(images, training=False)
noises = keras.random.normal(
shape=(batch_size, image_size, image_size, 3)
)
# sample uniform random diffusion times
diffusion_times = keras.random.uniform(
shape=(batch_size, 1, 1, 1), minval=0.0, maxval=1.0
)
noise_rates, signal_rates = self.diffusion_schedule(diffusion_times)
# mix the images with noises accordingly
noisy_images = signal_rates * images + noise_rates * noises
# use the network to separate noisy images to their components
pred_noises, pred_images = self.denoise(
noisy_images, noise_rates, signal_rates, training=False
)
noise_loss = self.loss(noises, pred_noises)
image_loss = self.loss(images, pred_images)
self.image_loss_tracker.update_state(image_loss)
self.noise_loss_tracker.update_state(noise_loss)
# measure KID between real and generated images
# this is computationally demanding, kid_diffusion_steps has to be small
images = self.denormalize(images)
generated_images = self.generate(
num_images=batch_size, diffusion_steps=kid_diffusion_steps
)
self.kid.update_state(images, generated_images)
return {m.name: m.result() for m in self.metrics}
def plot_images(self, epoch=None, logs=None, num_rows=3, num_cols=6):
# plot random generated images for visual evaluation of generation quality
generated_images = self.generate(
num_images=num_rows * num_cols,
diffusion_steps=plot_diffusion_steps,
)
plt.figure(figsize=(num_cols * 2.0, num_rows * 2.0))
for row in range(num_rows):
for col in range(num_cols):
index = row * num_cols + col
plt.subplot(num_rows, num_cols, index + 1)
plt.imshow(generated_images[index])
plt.axis("off")
plt.tight_layout()
plt.show()
plt.close()
"""
## Training
"""
# create and compile the model
model = DiffusionModel(image_size, widths, block_depth)
# below tensorflow 2.9:
# pip install tensorflow_addons
# import tensorflow_addons as tfa
# optimizer=tfa.optimizers.AdamW
model.compile(
optimizer=keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
),
loss=keras.losses.mean_absolute_error,
)
# pixelwise mean absolute error is used as loss
# save the best model based on the validation KID metric
checkpoint_path = "checkpoints/diffusion_model.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
save_weights_only=True,
monitor="val_kid",
mode="min",
save_best_only=True,
)
# calculate mean and variance of training dataset for normalization
model.normalizer.adapt(train_dataset)
# run training and plot generated images periodically
model.fit(
train_dataset,
epochs=num_epochs,
validation_data=val_dataset,
callbacks=[
keras.callbacks.LambdaCallback(on_epoch_end=model.plot_images),
checkpoint_callback,
],
)
"""
## Inference
"""
# load the best model and generate images
model.load_weights(checkpoint_path)
model.plot_images()
"""
## Results
By running the training for at least 50 epochs (takes 2 hours on a T4 GPU and 30 minutes
on an A100 GPU), one can get high quality image generations using this code example.
The evolution of a batch of images over an 80-epoch training run (color artifacts are due to
GIF compression):

Images generated using between 1 and 20 sampling steps from the same initial noise:

Interpolation (spherical) between initial noise samples:

Deterministic sampling process (noisy images on top, predicted images on bottom, 40
steps):

Stochastic sampling process (noisy images on top, predicted images on bottom, 80 steps):

Trained model and demo available on HuggingFace:
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/denoising-diffusion-implicit-models) | [](https://huggingface.co/spaces/keras-io/denoising-diffusion-implicit-models) |
"""
"""
## Lessons learned
During preparation for this code example I have run numerous experiments using
[this repository](https://github.com/beresandras/clear-diffusion-keras).
In this section I list
the lessons learned and my recommendations in my subjective order of importance.
### Algorithmic tips
* **min. and max. signal rates**: I found the min. signal rate to be an important
hyperparameter. Setting it too low will make the generated images oversaturated, while
setting it too high will make them undersaturated. I recommend tuning it carefully. Also,
setting it to 0 will lead to a division by zero error. The max. signal rate can be set to
1, but I found that setting it lower slightly improves generation quality.
* **loss function**: While large models tend to use mean squared error (MSE) loss, I
recommend using mean absolute error (MAE) on this dataset. In my experience MSE loss
generates more diverse samples (it also seems to hallucinate more
[Section 3](https://arxiv.org/abs/2111.05826)), while MAE loss leads to smoother images.
I recommend trying both.
* **weight decay**: I did occasionally run into diverged trainings when scaling up the
model, and found that weight decay helps in avoiding instabilities at a low performance
cost. This is why I use
[AdamW](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/experimental/AdamW)
instead of [Adam](https://keras.io/api/optimizers/adam/) in this example.
* **exponential moving average of weights**: This helps to reduce the variance of the KID
metric, and helps in averaging out short-term changes during training.
* **image augmentations**: Though I did not use image augmentations in this example, in
my experience adding horizontal flips to the training increases generation performance,
while random crops do not. Since we use a supervised denoising loss, overfitting can be
an issue, so image augmentations might be important on small datasets. One should also be
careful not to use
[leaky augmentations](https://keras.io/examples/generative/gan_ada/#invertible-data-augmentation),
which can be done following
[this method (end of Section 5)](https://arxiv.org/abs/2206.00364) for instance.
* **data normalization**: In the literature the pixel values of images are usually
converted to the -1 to 1 range. For theoretical correctness, I normalize the images to
have zero mean and unit variance instead, exactly like the random noises.
* **noise level input**: I chose to input the noise variance to the network, as it is
symmetrical under our sampling schedule. One could also input the noise rate (similar
performance), the signal rate (lower performance), or even the
[log-signal-to-noise ratio (Appendix B.1)](https://arxiv.org/abs/2107.00630)
(did not try, as its range is highly
dependent on the min. and max. signal rates, and would require adjusting the min.
embedding frequency accordingly).
* **gradient clipping**: Using global gradient clipping with a value of 1 can help with
training stability for large models, but decreased performance significantly in my
experience.
* **residual connection downscaling**: For
[deeper models (Appendix B)](https://arxiv.org/abs/2205.11487), scaling the residual
connections with 1/sqrt(2) can be helpful, but did not help in my case.
* **learning rate**: For me, [Adam optimizer's](https://keras.io/api/optimizers/adam/)
default learning rate of 1e-3 worked very well, but lower learning rates are more common
in the [literature (Tables 11-13)](https://arxiv.org/abs/2105.05233).
### Architectural tips
* **sinusoidal embedding**: Using sinusoidal embeddings on the noise level input of the
network is crucial for good performance. I recommend setting the min. embedding frequency
to the reciprocal of the range of this input, and since we use the noise variance in this
example, it can be left always at 1. The max. embedding frequency controls the smallest
change in the noise variance that the network will be sensitive to, and the embedding
dimensions set the number of frequency components in the embedding. In my experience the
performance is not too sensitive to these values.
* **skip connections**: Using skip connections in the network architecture is absolutely
critical, without them the model will fail to learn to denoise at a good performance.
* **residual connections**: In my experience residual connections also significantly
improve performance, but this might be due to the fact that we only input the noise
level embeddings to the first layer of the network instead of to all of them.
* **normalization**: When scaling up the model, I did occasionally encounter diverged
trainings, using normalization layers helped to mitigate this issue. In the literature it
is common to use
[GroupNormalization](https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization)
(with 8 groups for example) or
[LayerNormalization](https://keras.io/api/layers/normalization_layers/layer_normalization/)
in the network, I however chose to use
[BatchNormalization](https://keras.io/api/layers/normalization_layers/batch_normalization/),
as it gave similar benefits in my experiments but was computationally lighter.
* **activations**: The choice of activation functions had a larger effect on generation
quality than I expected. In my experiments using non-monotonic activation functions
outperformed monotonic ones (such as
[ReLU](https://www.tensorflow.org/api_docs/python/tf/keras/activations/relu)), with
[Swish](https://www.tensorflow.org/api_docs/python/tf/keras/activations/swish) performing
the best (this is also what [Imagen uses, page 41](https://arxiv.org/abs/2205.11487)).
* **attention**: As mentioned earlier, it is common in the literature to use
[attention layers](https://keras.io/api/layers/attention_layers/multi_head_attention/) at low
resolutions for better global coherence. I omitted them for simplicity.
* **upsampling**:
[Bilinear and nearest neighbour upsampling](https://keras.io/api/layers/reshaping_layers/up_sampling2d/)
in the network performed similarly, however I did not try
[transposed convolutions](https://keras.io/api/layers/convolution_layers/convolution2d_transpose/).
For a similar list about GANs check out
[this Keras tutorial](https://keras.io/examples/generative/gan_ada/#gan-tips-and-tricks).
"""
"""
## What to try next?
If you would like to dive deeper into the topic, I recommend checking out
[this repository](https://github.com/beresandras/clear-diffusion-keras) that I created in
preparation for this code example, which implements a wider range of features in a
similar style, such as:
* stochastic sampling
* second-order sampling based on the
[differential equation view of DDIMs (Equation 13)](https://arxiv.org/abs/2010.02502)
* more diffusion schedules
* more network output types: predicting image or
[velocity (Appendix D)](https://arxiv.org/abs/2202.00512) instead of noise
* more datasets
"""
"""
## Related works
* [Score-based generative modeling](https://yang-song.github.io/blog/2021/score/)
(blogpost)
* [What are diffusion models?](https://lilianweng.github.io/posts/2021-07-11-diffusion-models/)
(blogpost)
* [Annotated diffusion model](https://huggingface.co/blog/annotated-diffusion) (blogpost)
* [CVPR 2022 tutorial on diffusion models](https://cvpr2022-tutorial-diffusion-models.github.io/)
(slides available)
* [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364):
attempts unifying diffusion methods under a common framework
* High-level video overviews: [1](https://www.youtube.com/watch?v=yTAMrHVG1ew),
[2](https://www.youtube.com/watch?v=344w5h24-h8)
* Detailed technical videos: [1](https://www.youtube.com/watch?v=fbLgFrlTnGU),
[2](https://www.youtube.com/watch?v=W-O7AZNzbzQ)
* Score-based generative models: [NCSN](https://arxiv.org/abs/1907.05600),
[NCSN+](https://arxiv.org/abs/2006.09011), [NCSN++](https://arxiv.org/abs/2011.13456)
* Denoising diffusion models: [DDPM](https://arxiv.org/abs/2006.11239),
[DDIM](https://arxiv.org/abs/2010.02502), [DDPM+](https://arxiv.org/abs/2102.09672),
[DDPM++](https://arxiv.org/abs/2105.05233)
* Large diffusion models: [GLIDE](https://arxiv.org/abs/2112.10741),
[DALL-E 2](https://arxiv.org/abs/2204.06125/), [Imagen](https://arxiv.org/abs/2205.11487)
"""
| keras-core/examples/keras_io/tensorflow/generative/ddim.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/generative/ddim.py",
"repo_id": "keras-core",
"token_count": 12890
} | 16 |
"""
Title: Using pre-trained word embeddings
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/05/05
Last modified: 2020/05/05
Description: Text classification on the Newsgroup20 dataset using pre-trained GloVe word embeddings.
Accelerator: GPU
"""
"""
## Setup
"""
import numpy as np
import tensorflow.data as tf_data
import keras_core as keras
"""
## Introduction
In this example, we show how to train a text classification model that uses pre-trained
word embeddings.
We'll work with the Newsgroup20 dataset, a set of 20,000 message board messages
belonging to 20 different topic categories.
For the pre-trained word embeddings, we'll use
[GloVe embeddings](http://nlp.stanford.edu/projects/glove/).
"""
"""
## Download the Newsgroup20 data
"""
data_path = keras.utils.get_file(
"news20.tar.gz",
"http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.tar.gz",
untar=True,
)
"""
## Let's take a look at the data
"""
import os
import pathlib
data_dir = pathlib.Path(data_path).parent / "20_newsgroup"
dirnames = os.listdir(data_dir)
print("Number of directories:", len(dirnames))
print("Directory names:", dirnames)
fnames = os.listdir(data_dir / "comp.graphics")
print("Number of files in comp.graphics:", len(fnames))
print("Some example filenames:", fnames[:5])
"""
Here's an example of what one file contains:
"""
print(open(data_dir / "comp.graphics" / "38987").read())
"""
As you can see, there are header lines that leak the file's category, either
explicitly (the first line is literally the category name), or implicitly, e.g. via the
`Organization` field. Let's get rid of the headers:
"""
samples = []
labels = []
class_names = []
class_index = 0
for dirname in sorted(os.listdir(data_dir)):
class_names.append(dirname)
dirpath = data_dir / dirname
fnames = os.listdir(dirpath)
print("Processing %s, %d files found" % (dirname, len(fnames)))
for fname in fnames:
fpath = dirpath / fname
f = open(fpath, encoding="latin-1")
content = f.read()
lines = content.split("\n")
lines = lines[10:]
content = "\n".join(lines)
samples.append(content)
labels.append(class_index)
class_index += 1
print("Classes:", class_names)
print("Number of samples:", len(samples))
"""
There's actually one category that doesn't have the expected number of files, but the
difference is small enough that the problem remains a balanced classification problem.
"""
"""
## Shuffle and split the data into training & validation sets
"""
# Shuffle the data
seed = 1337
rng = np.random.RandomState(seed)
rng.shuffle(samples)
rng = np.random.RandomState(seed)
rng.shuffle(labels)
# Extract a training & validation split
validation_split = 0.2
num_validation_samples = int(validation_split * len(samples))
train_samples = samples[:-num_validation_samples]
val_samples = samples[-num_validation_samples:]
train_labels = labels[:-num_validation_samples]
val_labels = labels[-num_validation_samples:]
"""
## Create a vocabulary index
Let's use the `TextVectorization` to index the vocabulary found in the dataset.
Later, we'll use the same layer instance to vectorize the samples.
Our layer will only consider the top 20,000 words, and will truncate or pad sequences to
be exactly 200 tokens long.
"""
from keras_core.layers import TextVectorization
vectorizer = TextVectorization(max_tokens=20000, output_sequence_length=200)
text_ds = tf_data.Dataset.from_tensor_slices(train_samples).batch(128)
vectorizer.adapt(text_ds)
"""
You can retrieve the computed vocabulary used via `vectorizer.get_vocabulary()`. Let's
print the top 5 words:
"""
vectorizer.get_vocabulary()[:5]
"""
Let's vectorize a test sentence:
"""
output = vectorizer([["the cat sat on the mat"]])
output.numpy()[0, :6]
"""
As you can see, "the" gets represented as "2". Why not 0, given that "the" was the first
word in the vocabulary? That's because index 0 is reserved for padding and index 1 is
reserved for "out of vocabulary" tokens.
Here's a dict mapping words to their indices:
"""
voc = vectorizer.get_vocabulary()
word_index = dict(zip(voc, range(len(voc))))
"""
As you can see, we obtain the same encoding as above for our test sentence:
"""
test = ["the", "cat", "sat", "on", "the", "mat"]
[word_index[w] for w in test]
"""
## Load pre-trained word embeddings
"""
"""
Let's download pre-trained GloVe embeddings (an 822MB zip file).
You'll need to run the following commands:
```
!wget http://nlp.stanford.edu/data/glove.6B.zip
!unzip -q glove.6B.zip
```
"""
"""
The archive contains text-encoded vectors of various sizes: 50-dimensional,
100-dimensional, 200-dimensional, 300-dimensional. We'll use the 100D ones.
Let's make a dict mapping words (strings) to their NumPy vector representation:
"""
path_to_glove_file = os.path.join(
os.path.expanduser("~"), ".keras/datasets/glove.6B.100d.txt"
)
embeddings_index = {}
with open(path_to_glove_file) as f:
for line in f:
word, coefs = line.split(maxsplit=1)
coefs = np.fromstring(coefs, "f", sep=" ")
embeddings_index[word] = coefs
print("Found %s word vectors." % len(embeddings_index))
"""
Now, let's prepare a corresponding embedding matrix that we can use in a Keras
`Embedding` layer. It's a simple NumPy matrix where the entry at index `i` is the pre-trained
vector for the word of index `i` in our `vectorizer`'s vocabulary.
"""
num_tokens = len(voc) + 2
embedding_dim = 100
hits = 0
misses = 0
# Prepare embedding matrix
embedding_matrix = np.zeros((num_tokens, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# Words not found in embedding index will be all-zeros.
# This includes the representation for "padding" and "OOV"
embedding_matrix[i] = embedding_vector
hits += 1
else:
misses += 1
print("Converted %d words (%d misses)" % (hits, misses))
"""
Next, we load the pre-trained word embeddings matrix into an `Embedding` layer.
Note that we set `trainable=False` so as to keep the embeddings fixed (we don't want to
update them during training).
"""
from keras_core.layers import Embedding
embedding_layer = Embedding(
num_tokens,
embedding_dim,
trainable=False,
)
embedding_layer.build((1,))
embedding_layer.set_weights([embedding_matrix])
"""
## Build the model
A simple 1D convnet with global max pooling and a classifier at the end.
"""
from keras_core import layers
int_sequences_input = keras.Input(shape=(None,), dtype="int64")
embedded_sequences = embedding_layer(int_sequences_input)
x = layers.Conv1D(128, 5, activation="relu")(embedded_sequences)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(128, 5, activation="relu")(x)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(128, 5, activation="relu")(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dropout(0.5)(x)
preds = layers.Dense(len(class_names), activation="softmax")(x)
model = keras.Model(int_sequences_input, preds)
model.summary()
"""
## Train the model
First, convert our list-of-strings data to NumPy arrays of integer indices. The arrays
are right-padded.
"""
x_train = vectorizer(np.array([[s] for s in train_samples])).numpy()
x_val = vectorizer(np.array([[s] for s in val_samples])).numpy()
y_train = np.array(train_labels)
y_val = np.array(val_labels)
"""
We use categorical crossentropy as our loss since we're doing softmax classification.
Specifically, we use the `sparse_categorical_crossentropy` variant since our labels
are integers.
"""
model.compile(
loss="sparse_categorical_crossentropy", optimizer="rmsprop", metrics=["acc"]
)
model.fit(
x_train, y_train, batch_size=128, epochs=20, validation_data=(x_val, y_val)
)
"""
## Export an end-to-end model
Now, we may want to export a `Model` object that takes as input a string of arbitrary
length, rather than a sequence of indices. It would make the model much more portable,
since you wouldn't have to worry about the input preprocessing pipeline.
Our `vectorizer` is actually a Keras layer, so it's simple:
"""
string_input = keras.Input(shape=(1,), dtype="string")
x = vectorizer(string_input)
preds = model(x)
end_to_end_model = keras.Model(string_input, preds)
probabilities = end_to_end_model(
keras.ops.convert_to_tensor(
[["this message is about computer graphics and 3D modeling"]]
)
)
print(class_names[np.argmax(probabilities[0])])
| keras-core/examples/keras_io/tensorflow/nlp/pretrained_word_embeddings.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/nlp/pretrained_word_embeddings.py",
"repo_id": "keras-core",
"token_count": 2985
} | 17 |
"""
Title: Model interpretability with Integrated Gradients
Author: [A_K_Nain](https://twitter.com/A_K_Nain)
Date created: 2020/06/02
Last modified: 2020/06/02
Description: How to obtain integrated gradients for a classification model.
Accelerator: NONE
"""
"""
## Integrated Gradients
[Integrated Gradients](https://arxiv.org/abs/1703.01365) is a technique for
attributing a classification model's prediction to its input features. It is
a model interpretability technique: you can use it to visualize the relationship
between input features and model predictions.
Integrated Gradients is a variation on computing
the gradient of the prediction output with regard to features of the input.
To compute integrated gradients, we need to perform the following steps:
1. Identify the input and the output. In our case, the input is an image and the
output is the last layer of our model (dense layer with softmax activation).
2. Compute which features are important to a neural network
when making a prediction on a particular data point. To identify these features, we
need to choose a baseline input. A baseline input can be a black image (all pixel
values set to zero) or random noise. The shape of the baseline input needs to be
the same as our input image, e.g. (299, 299, 3).
3. Interpolate between the baseline and the input image over a given number of steps.
The number of steps controls the quality of the integral approximation of the
gradients for a given input image, and it is a hyperparameter. The authors recommend
using anywhere between 20 and 1000 steps.
4. Preprocess these interpolated images and do a forward pass.
5. Get the gradients for these interpolated images.
6. Approximate the integral of the gradients using the trapezoidal rule.
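In code, steps 3-6 amount to the following (a minimal NumPy sketch, where
`grad_fn(x)` is assumed to return the gradient of the top predicted class
with respect to `x`):
```
import numpy as np

def integrated_gradients_sketch(image, baseline, grad_fn, num_steps=50):
    # Interpolate between the baseline and the input image.
    alphas = np.linspace(0.0, 1.0, num_steps + 1)
    interpolated = [baseline + a * (image - baseline) for a in alphas]
    # Gradient of the top predicted class at each interpolation step.
    grads = np.stack([grad_fn(x) for x in interpolated])
    # Trapezoidal rule: average adjacent gradients, then average over steps.
    avg_grads = np.mean((grads[:-1] + grads[1:]) / 2.0, axis=0)
    return (image - baseline) * avg_grads
```
The full implementation below follows the same structure, with preprocessing
and batching added.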
To read in-depth about integrated gradients and why this method works,
consider reading this excellent
[article](https://distill.pub/2020/attribution-baselines/).
**References:**
- Integrated Gradients original [paper](https://arxiv.org/abs/1703.01365)
- [Original implementation](https://github.com/ankurtaly/Integrated-Gradients)
"""
"""
## Setup
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from IPython.display import Image, display
import tensorflow as tf
import keras_core as keras
from keras_core import layers
from keras_core.applications import xception
keras.config.disable_traceback_filtering()
# Size of the input image
img_size = (299, 299, 3)
# Load Xception model with imagenet weights
model = xception.Xception(weights="imagenet")
# The local path to our target image
img_path = keras.utils.get_file(
"elephant.jpg", "https://i.imgur.com/Bvro0YD.png"
)
display(Image(img_path))
"""
## Integrated Gradients algorithm
"""
def get_img_array(img_path, size=(299, 299)):
# `img` is a PIL image of size 299x299
img = keras.utils.load_img(img_path, target_size=size)
# `array` is a float32 Numpy array of shape (299, 299, 3)
array = keras.utils.img_to_array(img)
# We add a dimension to transform our array into a "batch"
# of size (1, 299, 299, 3)
array = np.expand_dims(array, axis=0)
return array
def get_gradients(img_input, top_pred_idx):
"""Computes the gradients of outputs w.r.t input image.
Args:
img_input: 4D image tensor
top_pred_idx: Predicted label for the input image
Returns:
Gradients of the predictions w.r.t img_input
"""
images = tf.cast(img_input, tf.float32)
with tf.GradientTape() as tape:
tape.watch(images)
preds = model(images)
top_class = preds[:, top_pred_idx]
grads = tape.gradient(top_class, images)
return grads
def get_integrated_gradients(
img_input, top_pred_idx, baseline=None, num_steps=50
):
"""Computes Integrated Gradients for a predicted label.
Args:
img_input (ndarray): Original image
top_pred_idx: Predicted label for the input image
baseline (ndarray): The baseline image to start with for interpolation
num_steps: Number of interpolation steps between the baseline
and the input used in the computation of integrated gradients. These
steps alone determine the integral approximation error. By default,
num_steps is set to 50.
Returns:
Integrated gradients w.r.t input image
"""
# If baseline is not provided, start with a black image
# having same size as the input image.
if baseline is None:
baseline = np.zeros(img_size).astype(np.float32)
else:
baseline = baseline.astype(np.float32)
# 1. Do interpolation.
img_input = img_input.astype(np.float32)
interpolated_image = [
baseline + (step / num_steps) * (img_input - baseline)
for step in range(num_steps + 1)
]
interpolated_image = np.array(interpolated_image).astype(np.float32)
# 2. Preprocess the interpolated images
interpolated_image = xception.preprocess_input(interpolated_image)
# 3. Get the gradients
grads = []
for i, img in enumerate(interpolated_image):
img = tf.expand_dims(img, axis=0)
grad = get_gradients(img, top_pred_idx=top_pred_idx)
grads.append(grad[0])
grads = tf.convert_to_tensor(grads, dtype=tf.float32)
# 4. Approximate the integral using the trapezoidal rule
grads = (grads[:-1] + grads[1:]) / 2.0
avg_grads = tf.reduce_mean(grads, axis=0)
# 5. Calculate integrated gradients and return
integrated_grads = (img_input - baseline) * avg_grads
return integrated_grads
def random_baseline_integrated_gradients(
img_input, top_pred_idx, num_steps=50, num_runs=2
):
"""Generates a number of random baseline images.
Args:
img_input (ndarray): 3D image
top_pred_idx: Predicted label for the input image
num_steps: Number of interpolation steps between the baseline
and the input used in the computation of integrated gradients. These
steps alone determine the integral approximation error. By default,
num_steps is set to 50.
num_runs: number of baseline images to generate
Returns:
Averaged integrated gradients for `num_runs` baseline images
"""
# 1. List to keep track of Integrated Gradients (IG) for all the images
integrated_grads = []
# 2. Get the integrated gradients for all the baselines
for run in range(num_runs):
baseline = np.random.random(img_size) * 255
igrads = get_integrated_gradients(
img_input=img_input,
top_pred_idx=top_pred_idx,
baseline=baseline,
num_steps=num_steps,
)
integrated_grads.append(igrads)
# 3. Return the average integrated gradients for the image
integrated_grads = tf.convert_to_tensor(integrated_grads)
return tf.reduce_mean(integrated_grads, axis=0)
"""
## Helper class for visualizing gradients and integrated gradients
"""
class GradVisualizer:
"""Plot gradients of the outputs w.r.t an input image."""
def __init__(self, positive_channel=None, negative_channel=None):
if positive_channel is None:
self.positive_channel = [0, 255, 0]
else:
self.positive_channel = positive_channel
if negative_channel is None:
self.negative_channel = [255, 0, 0]
else:
self.negative_channel = negative_channel
def apply_polarity(self, attributions, polarity):
if polarity == "positive":
return np.clip(attributions, 0, 1)
else:
return np.clip(attributions, -1, 0)
def apply_linear_transformation(
self,
attributions,
clip_above_percentile=99.9,
clip_below_percentile=70.0,
lower_end=0.2,
):
# 1. Get the thresholds
m = self.get_thresholded_attributions(
attributions, percentage=100 - clip_above_percentile
)
e = self.get_thresholded_attributions(
attributions, percentage=100 - clip_below_percentile
)
# 2. Transform the attributions by a linear function f(x) = a*x + b such that
# f(m) = 1.0 and f(e) = lower_end
transformed_attributions = (1 - lower_end) * (
np.abs(attributions) - e
) / (m - e) + lower_end
# 3. Make sure that the sign of transformed attributions is the same as original attributions
transformed_attributions *= np.sign(attributions)
# 4. Only keep values that are bigger than the lower_end
transformed_attributions *= transformed_attributions >= lower_end
# 5. Clip values and return
transformed_attributions = np.clip(transformed_attributions, 0.0, 1.0)
return transformed_attributions
def get_thresholded_attributions(self, attributions, percentage):
if percentage == 100.0:
return np.min(attributions)
# 1. Flatten the attributions
flatten_attr = attributions.flatten()
# 2. Get the sum of the attributions
total = np.sum(flatten_attr)
# 3. Sort the attributions from largest to smallest.
sorted_attributions = np.sort(np.abs(flatten_attr))[::-1]
# 4. Calculate the percentage of the total sum that each attribution
# and the values above it contribute.
cum_sum = 100.0 * np.cumsum(sorted_attributions) / total
# 5. Threshold the attributions by the percentage
indices_to_consider = np.where(cum_sum >= percentage)[0][0]
# 6. Select the desired attributions and return
attributions = sorted_attributions[indices_to_consider]
return attributions
def binarize(self, attributions, threshold=0.001):
return attributions > threshold
def morphological_cleanup_fn(self, attributions, structure=np.ones((4, 4))):
closed = ndimage.grey_closing(attributions, structure=structure)
opened = ndimage.grey_opening(closed, structure=structure)
return opened
def draw_outlines(
self,
attributions,
percentage=90,
connected_component_structure=np.ones((3, 3)),
):
# 1. Binarize the attributions.
attributions = self.binarize(attributions)
# 2. Fill the gaps
attributions = ndimage.binary_fill_holes(attributions)
# 3. Compute connected components
connected_components, num_comp = ndimage.label(
attributions, structure=connected_component_structure
)
# 4. Sum up the attributions for each component
total = np.sum(attributions[connected_components > 0])
component_sums = []
for comp in range(1, num_comp + 1):
mask = connected_components == comp
component_sum = np.sum(attributions[mask])
component_sums.append((component_sum, mask))
# 5. Compute the percentage of top components to keep
sorted_sums_and_masks = sorted(
component_sums, key=lambda x: x[0], reverse=True
)
sorted_sums = list(zip(*sorted_sums_and_masks))[0]
cumulative_sorted_sums = np.cumsum(sorted_sums)
cutoff_threshold = percentage * total / 100
cutoff_idx = np.where(cumulative_sorted_sums >= cutoff_threshold)[0][0]
if cutoff_idx > 2:
cutoff_idx = 2
# 6. Set the values for the kept components
border_mask = np.zeros_like(attributions)
for i in range(cutoff_idx + 1):
border_mask[sorted_sums_and_masks[i][1]] = 1
# 7. Make the mask hollow and show only the border
eroded_mask = ndimage.binary_erosion(border_mask, iterations=1)
border_mask[eroded_mask] = 0
# 8. Return the outlined mask
return border_mask
def process_grads(
self,
image,
attributions,
polarity="positive",
clip_above_percentile=99.9,
clip_below_percentile=0,
morphological_cleanup=False,
structure=np.ones((3, 3)),
outlines=False,
outlines_component_percentage=90,
overlay=True,
):
if polarity not in ["positive", "negative"]:
raise ValueError(
f""" Allowed polarity values: 'positive' or 'negative'
but provided {polarity}"""
)
if clip_above_percentile < 0 or clip_above_percentile > 100:
raise ValueError("clip_above_percentile must be in [0, 100]")
if clip_below_percentile < 0 or clip_below_percentile > 100:
raise ValueError("clip_below_percentile must be in [0, 100]")
# 1. Apply polarity
if polarity == "positive":
attributions = self.apply_polarity(attributions, polarity=polarity)
channel = self.positive_channel
else:
attributions = self.apply_polarity(attributions, polarity=polarity)
attributions = np.abs(attributions)
channel = self.negative_channel
# 2. Take average over the channels
attributions = np.average(attributions, axis=2)
# 3. Apply linear transformation to the attributions
attributions = self.apply_linear_transformation(
attributions,
clip_above_percentile=clip_above_percentile,
clip_below_percentile=clip_below_percentile,
lower_end=0.0,
)
# 4. Cleanup
if morphological_cleanup:
attributions = self.morphological_cleanup_fn(
attributions, structure=structure
)
# 5. Draw the outlines
if outlines:
attributions = self.draw_outlines(
attributions, percentage=outlines_component_percentage
)
# 6. Expand the channel axis and convert to RGB
attributions = np.expand_dims(attributions, 2) * channel
# 7.Superimpose on the original image
if overlay:
attributions = np.clip((attributions * 0.8 + image), 0, 255)
return attributions
def visualize(
self,
image,
gradients,
integrated_gradients,
polarity="positive",
clip_above_percentile=99.9,
clip_below_percentile=0,
morphological_cleanup=False,
structure=np.ones((3, 3)),
outlines=False,
outlines_component_percentage=90,
overlay=True,
figsize=(15, 8),
):
# 1. Make two copies of the original image
img1 = np.copy(image)
img2 = np.copy(image)
# 2. Process the normal gradients
grads_attr = self.process_grads(
image=img1,
attributions=gradients,
polarity=polarity,
clip_above_percentile=clip_above_percentile,
clip_below_percentile=clip_below_percentile,
morphological_cleanup=morphological_cleanup,
structure=structure,
outlines=outlines,
outlines_component_percentage=outlines_component_percentage,
overlay=overlay,
)
# 3. Process the integrated gradients
igrads_attr = self.process_grads(
image=img2,
attributions=integrated_gradients,
polarity=polarity,
clip_above_percentile=clip_above_percentile,
clip_below_percentile=clip_below_percentile,
morphological_cleanup=morphological_cleanup,
structure=structure,
outlines=outlines,
outlines_component_percentage=outlines_component_percentage,
overlay=overlay,
)
_, ax = plt.subplots(1, 3, figsize=figsize)
ax[0].imshow(image)
ax[1].imshow(grads_attr.astype(np.uint8))
ax[2].imshow(igrads_attr.astype(np.uint8))
ax[0].set_title("Input")
ax[1].set_title("Normal gradients")
ax[2].set_title("Integrated gradients")
plt.show()
"""
## Let's test-drive it
"""
# 1. Convert the image to numpy array
img = get_img_array(img_path)
# 2. Keep a copy of the original image
orig_img = np.copy(img[0]).astype(np.uint8)
# 3. Preprocess the image
img_processed = tf.cast(xception.preprocess_input(img), dtype=tf.float32)
# 4. Get model predictions
preds = model.predict(img_processed)
top_pred_idx = tf.argmax(preds[0])
print("Predicted:", top_pred_idx, xception.decode_predictions(preds, top=1)[0])
# 5. Get the gradients of the last layer for the predicted label
grads = get_gradients(img_processed, top_pred_idx=top_pred_idx)
# 6. Get the integrated gradients
igrads = random_baseline_integrated_gradients(
np.copy(orig_img), top_pred_idx=top_pred_idx, num_steps=50, num_runs=2
)
# 7. Process the gradients and plot
vis = GradVisualizer()
vis.visualize(
image=orig_img,
gradients=grads[0].numpy(),
integrated_gradients=igrads.numpy(),
clip_above_percentile=99,
clip_below_percentile=0,
)
vis.visualize(
image=orig_img,
gradients=grads[0].numpy(),
integrated_gradients=igrads.numpy(),
clip_above_percentile=95,
clip_below_percentile=28,
morphological_cleanup=True,
outlines=True,
)
| keras-core/examples/keras_io/tensorflow/vision/integrated_gradients.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/vision/integrated_gradients.py",
"repo_id": "keras-core",
"token_count": 6900
} | 18 |
"""
Title: Timeseries classification with a Transformer model
Author: [Theodoros Ntakouris](https://github.com/ntakouris)
Date created: 2021/06/25
Last modified: 2021/08/05
Description: This notebook demonstrates how to do timeseries classification using a Transformer model.
Accelerator: GPU
"""
"""
## Introduction
This is the Transformer architecture from
[Attention Is All You Need](https://arxiv.org/abs/1706.03762),
applied to timeseries instead of natural language.
This example requires TensorFlow 2.4 or higher.
## Load the dataset
We are going to use the same dataset and preprocessing as the
[TimeSeries Classification from Scratch](https://keras.io/examples/timeseries/timeseries_classification_from_scratch)
example.
"""
import numpy as np
def readucr(filename):
data = np.loadtxt(filename, delimiter="\t")
y = data[:, 0]
x = data[:, 1:]
return x, y.astype(int)
root_url = "https://raw.githubusercontent.com/hfawaz/cd-diagram/master/FordA/"
x_train, y_train = readucr(root_url + "FordA_TRAIN.tsv")
x_test, y_test = readucr(root_url + "FordA_TEST.tsv")
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
n_classes = len(np.unique(y_train))
idx = np.random.permutation(len(x_train))
x_train = x_train[idx]
y_train = y_train[idx]
y_train[y_train == -1] = 0
y_test[y_test == -1] = 0
"""
## Build the model
Our model processes a tensor of shape `(batch size, sequence length, features)`,
where `sequence length` is the number of time steps and `features` is the number of
features per time step of each input timeseries.
You can replace your classification RNN layers with this one: the
inputs are fully compatible!
"""
import keras_core as keras
from keras_core import layers
"""
We include residual connections, layer normalization, and dropout.
The resulting layer can be stacked multiple times.
The projection layers are implemented through `keras.layers.Conv1D`.
"""
def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
# Attention and Normalization
x = layers.MultiHeadAttention(
key_dim=head_size, num_heads=num_heads, dropout=dropout
)(inputs, inputs)
x = layers.Dropout(dropout)(x)
x = layers.LayerNormalization(epsilon=1e-6)(x)
res = x + inputs
# Feed Forward Part
x = layers.Conv1D(filters=ff_dim, kernel_size=1, activation="relu")(res)
x = layers.Dropout(dropout)(x)
x = layers.Conv1D(filters=inputs.shape[-1], kernel_size=1)(x)
x = layers.LayerNormalization(epsilon=1e-6)(x)
return x + res
"""
The main part of our model is now complete. We can stack multiple
`transformer_encoder` blocks and then add the final Multi-Layer Perceptron
classification head. Apart from a stack of `Dense`
layers, we need to reduce the output tensor of the `TransformerEncoder` part of
our model down to a vector of features for each data point in the current
batch. A common way to achieve this is to use a pooling layer. For
this example, a `GlobalAveragePooling1D` layer is sufficient.
"""
def build_model(
input_shape,
head_size,
num_heads,
ff_dim,
num_transformer_blocks,
mlp_units,
dropout=0,
mlp_dropout=0,
):
inputs = keras.Input(shape=input_shape)
x = inputs
for _ in range(num_transformer_blocks):
x = transformer_encoder(x, head_size, num_heads, ff_dim, dropout)
x = layers.GlobalAveragePooling1D(data_format="channels_first")(x)
for dim in mlp_units:
x = layers.Dense(dim, activation="relu")(x)
x = layers.Dropout(mlp_dropout)(x)
outputs = layers.Dense(n_classes, activation="softmax")(x)
return keras.Model(inputs, outputs)
"""
## Train and evaluate
"""
input_shape = x_train.shape[1:]
model = build_model(
input_shape,
head_size=256,
num_heads=4,
ff_dim=4,
num_transformer_blocks=4,
mlp_units=[128],
mlp_dropout=0.4,
dropout=0.25,
)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.Adam(learning_rate=1e-4),
metrics=["sparse_categorical_accuracy"],
)
model.summary()
callbacks = [
keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)
]
model.fit(
x_train,
y_train,
validation_split=0.2,
epochs=2,
batch_size=64,
callbacks=callbacks,
)
model.evaluate(x_test, y_test, verbose=1)
"""
## Conclusions
In about 110-120 epochs (25s each on Colab), the model reaches a training
accuracy of ~0.95, validation accuracy of ~0.84 and a testing
accuracy of ~0.85, without hyperparameter tuning. And that is for a model
with less than 100k parameters. Of course, parameter count and accuracy could be
improved by a hyperparameter search and a more sophisticated learning rate
schedule, or a different optimizer.
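For instance, one simple option (a sketch) is to reduce the learning rate when
the validation loss plateaus:
```
callbacks = [
    keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True),
    keras.callbacks.ReduceLROnPlateau(
        monitor="val_loss", factor=0.5, patience=5, min_lr=1e-6
    ),
]
```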
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/timeseries_transformer_classification)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/timeseries_transformer_classification).
"""
| keras-core/examples/keras_io/timeseries/timeseries_classification_transformer.py/0 | {
"file_path": "keras-core/examples/keras_io/timeseries/timeseries_classification_transformer.py",
"repo_id": "keras-core",
"token_count": 1823
} | 19 |
"""
Title: Image segmentation with a U-Net-like architecture
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/03/20
Last modified: 2020/04/20
Description: Image segmentation model trained from scratch on the Oxford Pets dataset.
Accelerator: GPU
"""
"""
## Download the data
"""
"""shell
curl -O https://thor.robots.ox.ac.uk/datasets/pets/images.tar.gz
curl -O https://thor.robots.ox.ac.uk/datasets/pets/annotations.tar.gz
tar -xf images.tar.gz
tar -xf annotations.tar.gz
"""
"""
## Prepare paths of input images and target segmentation masks
"""
import os
input_dir = "images/"
target_dir = "annotations/trimaps/"
img_size = (160, 160)
num_classes = 3
batch_size = 32
input_img_paths = sorted(
[
os.path.join(input_dir, fname)
for fname in os.listdir(input_dir)
if fname.endswith(".jpg")
]
)
target_img_paths = sorted(
[
os.path.join(target_dir, fname)
for fname in os.listdir(target_dir)
if fname.endswith(".png") and not fname.startswith(".")
]
)
print("Number of samples:", len(input_img_paths))
for input_path, target_path in zip(input_img_paths[:10], target_img_paths[:10]):
print(input_path, "|", target_path)
"""
## What does one input image and corresponding segmentation mask look like?
"""
from IPython.display import Image, display
from keras_core.utils import load_img
from PIL import ImageOps
# Display input image at index 9
display(Image(filename=input_img_paths[9]))
# Display auto-contrast version of corresponding target (per-pixel categories)
img = ImageOps.autocontrast(load_img(target_img_paths[9]))
display(img)
"""
## Prepare dataset to load & vectorize batches of data
"""
import keras_core as keras
import numpy as np
from tensorflow import data as tf_data
from tensorflow import image as tf_image
from tensorflow import io as tf_io
def get_dataset(
batch_size,
img_size,
input_img_paths,
target_img_paths,
max_dataset_len=None,
):
"""Returns a TF Dataset."""
def load_img_masks(input_img_path, target_img_path):
input_img = tf_io.read_file(input_img_path)
input_img = tf_io.decode_png(input_img, channels=3)
input_img = tf_image.resize(input_img, img_size)
input_img = tf_image.convert_image_dtype(input_img, "float32")
target_img = tf_io.read_file(target_img_path)
target_img = tf_io.decode_png(target_img, channels=1)
target_img = tf_image.resize(target_img, img_size, method="nearest")
target_img = tf_image.convert_image_dtype(target_img, "uint8")
# Ground truth labels are 1, 2, 3. Subtract one to make them 0, 1, 2:
target_img -= 1
return input_img, target_img
# For faster debugging, limit the size of data
if max_dataset_len:
input_img_paths = input_img_paths[:max_dataset_len]
target_img_paths = target_img_paths[:max_dataset_len]
dataset = tf_data.Dataset.from_tensor_slices(
(input_img_paths, target_img_paths)
)
dataset = dataset.map(load_img_masks, num_parallel_calls=tf_data.AUTOTUNE)
return dataset.batch(batch_size)
"""
## Prepare U-Net Xception-style model
"""
from keras_core import layers
def get_model(img_size, num_classes):
inputs = keras.Input(shape=img_size + (3,))
### [First half of the network: downsampling inputs] ###
# Entry block
x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
previous_block_activation = x # Set aside residual
# Blocks 1, 2, 3 are identical apart from the feature depth.
for filters in [64, 128, 256]:
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
# Project residual
residual = layers.Conv2D(filters, 1, strides=2, padding="same")(
previous_block_activation
)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
### [Second half of the network: upsampling inputs] ###
for filters in [256, 128, 64, 32]:
x = layers.Activation("relu")(x)
x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.UpSampling2D(2)(x)
# Project residual
residual = layers.UpSampling2D(2)(previous_block_activation)
residual = layers.Conv2D(filters, 1, padding="same")(residual)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
# Add a per-pixel classification layer
outputs = layers.Conv2D(
num_classes, 3, activation="softmax", padding="same"
)(x)
# Define the model
model = keras.Model(inputs, outputs)
return model
# Build model
model = get_model(img_size, num_classes)
model.summary()
"""
## Set aside a validation split
"""
import random
# Split our img paths into a training and a validation set
val_samples = 1000
random.Random(1337).shuffle(input_img_paths)
random.Random(1337).shuffle(target_img_paths)
train_input_img_paths = input_img_paths[:-val_samples]
train_target_img_paths = target_img_paths[:-val_samples]
val_input_img_paths = input_img_paths[-val_samples:]
val_target_img_paths = target_img_paths[-val_samples:]
# Instantiate dataset for each split
# Limit input files in `max_dataset_len` for faster epoch training time.
# Remove the `max_dataset_len` arg when running with full dataset.
train_dataset = get_dataset(
batch_size,
img_size,
train_input_img_paths,
train_target_img_paths,
max_dataset_len=1000,
)
valid_dataset = get_dataset(
batch_size, img_size, val_input_img_paths, val_target_img_paths
)
"""
## Train the model
"""
# Configure the model for training.
# We use the "sparse" version of categorical_crossentropy
# because our target data is integers.
model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy")
callbacks = [
keras.callbacks.ModelCheckpoint(
"oxford_segmentation.keras", save_best_only=True
)
]
# Train the model, doing validation at the end of each epoch.
epochs = 15
model.fit(
train_dataset,
epochs=epochs,
validation_data=valid_dataset,
callbacks=callbacks,
)
"""
## Visualize predictions
"""
# Generate predictions for all images in the validation set
val_dataset = get_dataset(
batch_size, img_size, val_input_img_paths, val_target_img_paths
)
val_preds = model.predict(val_dataset)
def display_mask(i):
"""Quick utility to display a model's prediction."""
mask = np.argmax(val_preds[i], axis=-1)
mask = np.expand_dims(mask, axis=-1)
img = ImageOps.autocontrast(keras.utils.array_to_img(mask))
display(img)
# Display results for validation image #10
i = 10
# Display input image
display(Image(filename=val_input_img_paths[i]))
# Display ground-truth target mask
img = ImageOps.autocontrast(load_img(val_target_img_paths[i]))
display(img)
# Display mask predicted by our model
display_mask(i) # Note that the model only sees inputs at 160x160.
| keras-core/examples/keras_io/vision/oxford_pets_image_segmentation.py/0 | {
"file_path": "keras-core/examples/keras_io/vision/oxford_pets_image_segmentation.py",
"repo_id": "keras-core",
"token_count": 3059
} | 20 |
from keras_core import backend
from keras_core import layers
from keras_core.api_export import keras_core_export
from keras_core.applications import imagenet_utils
from keras_core.models import Functional
from keras_core.ops import operation_utils
from keras_core.utils import file_utils
WEIGHTS_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/"
"xception/xception_weights_tf_dim_ordering_tf_kernels.h5"
)
WEIGHTS_PATH_NO_TOP = (
"https://storage.googleapis.com/tensorflow/keras-applications/"
"xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
@keras_core_export(
[
"keras_core.applications.xception.Xception",
"keras_core.applications.Xception",
]
)
def Xception(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the Xception architecture.
Reference:
- [Xception: Deep Learning with Depthwise Separable Convolutions](
https://arxiv.org/abs/1610.02357) (CVPR 2017)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The default input image size for this model is 299x299.
Note: each Keras Application expects a specific kind of input preprocessing.
For Xception, call `keras_core.applications.xception.preprocess_input`
on your inputs before passing them to the model.
`xception.preprocess_input` will scale input pixels between -1 and 1.
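A minimal usage sketch (illustrative; `weights="imagenet"` downloads the
pretrained weights on first use):
```
import numpy as np
from keras_core.applications.xception import Xception, preprocess_input

model = Xception(weights="imagenet")
images = np.random.uniform(0, 255, size=(1, 299, 299, 3))
preds = model(preprocess_input(images))  # class probabilities, shape (1, 1000)
```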
Args:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is `False` (otherwise the input shape
has to be `(299, 299, 3)`).
It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer. When loading pretrained weights, `classifier_activation` can
only be `None` or `"softmax"`.
Returns:
A model instance.
"""
if not (weights in {"imagenet", None} or file_utils.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), 'imagenet' "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded."
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
"If using `weights='imagenet'` with `include_top=True`, "
"`classes` should be 1000. "
f"Received classes={classes}"
)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=299,
min_size=71,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
x = layers.Conv2D(
32, (3, 3), strides=(2, 2), use_bias=False, name="block1_conv1"
)(img_input)
x = layers.BatchNormalization(axis=channel_axis, name="block1_conv1_bn")(x)
x = layers.Activation("relu", name="block1_conv1_act")(x)
x = layers.Conv2D(64, (3, 3), use_bias=False, name="block1_conv2")(x)
x = layers.BatchNormalization(axis=channel_axis, name="block1_conv2_bn")(x)
x = layers.Activation("relu", name="block1_conv2_act")(x)
residual = layers.Conv2D(
128, (1, 1), strides=(2, 2), padding="same", use_bias=False
)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.SeparableConv2D(
128, (3, 3), padding="same", use_bias=False, name="block2_sepconv1"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block2_sepconv1_bn")(
x
)
x = layers.Activation("relu", name="block2_sepconv2_act")(x)
x = layers.SeparableConv2D(
128, (3, 3), padding="same", use_bias=False, name="block2_sepconv2"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block2_sepconv2_bn")(
x
)
x = layers.MaxPooling2D(
(3, 3), strides=(2, 2), padding="same", name="block2_pool"
)(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
256, (1, 1), strides=(2, 2), padding="same", use_bias=False
)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation("relu", name="block3_sepconv1_act")(x)
x = layers.SeparableConv2D(
256, (3, 3), padding="same", use_bias=False, name="block3_sepconv1"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block3_sepconv1_bn")(
x
)
x = layers.Activation("relu", name="block3_sepconv2_act")(x)
x = layers.SeparableConv2D(
256, (3, 3), padding="same", use_bias=False, name="block3_sepconv2"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block3_sepconv2_bn")(
x
)
x = layers.MaxPooling2D(
(3, 3), strides=(2, 2), padding="same", name="block3_pool"
)(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
728, (1, 1), strides=(2, 2), padding="same", use_bias=False
)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation("relu", name="block4_sepconv1_act")(x)
x = layers.SeparableConv2D(
728, (3, 3), padding="same", use_bias=False, name="block4_sepconv1"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block4_sepconv1_bn")(
x
)
x = layers.Activation("relu", name="block4_sepconv2_act")(x)
x = layers.SeparableConv2D(
728, (3, 3), padding="same", use_bias=False, name="block4_sepconv2"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block4_sepconv2_bn")(
x
)
x = layers.MaxPooling2D(
(3, 3), strides=(2, 2), padding="same", name="block4_pool"
)(x)
x = layers.add([x, residual])
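# Middle flow: 8 identical residual blocks (block5 through block12).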
for i in range(8):
residual = x
prefix = "block" + str(i + 5)
x = layers.Activation("relu", name=prefix + "_sepconv1_act")(x)
x = layers.SeparableConv2D(
728,
(3, 3),
padding="same",
use_bias=False,
name=prefix + "_sepconv1",
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + "_sepconv1_bn"
)(x)
x = layers.Activation("relu", name=prefix + "_sepconv2_act")(x)
x = layers.SeparableConv2D(
728,
(3, 3),
padding="same",
use_bias=False,
name=prefix + "_sepconv2",
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + "_sepconv2_bn"
)(x)
x = layers.Activation("relu", name=prefix + "_sepconv3_act")(x)
x = layers.SeparableConv2D(
728,
(3, 3),
padding="same",
use_bias=False,
name=prefix + "_sepconv3",
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + "_sepconv3_bn"
)(x)
x = layers.add([x, residual])
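# Exit flow: blocks 13 and 14.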
residual = layers.Conv2D(
1024, (1, 1), strides=(2, 2), padding="same", use_bias=False
)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation("relu", name="block13_sepconv1_act")(x)
x = layers.SeparableConv2D(
728, (3, 3), padding="same", use_bias=False, name="block13_sepconv1"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="block13_sepconv1_bn"
)(x)
x = layers.Activation("relu", name="block13_sepconv2_act")(x)
x = layers.SeparableConv2D(
1024, (3, 3), padding="same", use_bias=False, name="block13_sepconv2"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="block13_sepconv2_bn"
)(x)
x = layers.MaxPooling2D(
(3, 3), strides=(2, 2), padding="same", name="block13_pool"
)(x)
x = layers.add([x, residual])
x = layers.SeparableConv2D(
1536, (3, 3), padding="same", use_bias=False, name="block14_sepconv1"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="block14_sepconv1_bn"
)(x)
x = layers.Activation("relu", name="block14_sepconv1_act")(x)
x = layers.SeparableConv2D(
2048, (3, 3), padding="same", use_bias=False, name="block14_sepconv2"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="block14_sepconv2_bn"
)(x)
x = layers.Activation("relu", name="block14_sepconv2_act")(x)
if include_top:
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes, activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = operation_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Functional(inputs, x, name="xception")
# Load weights.
if weights == "imagenet":
if include_top:
weights_path = file_utils.get_file(
"xception_weights_tf_dim_ordering_tf_kernels.h5",
WEIGHTS_PATH,
cache_subdir="models",
file_hash="0a58e3b7378bc2990ea3b43d5981f1f6",
)
else:
weights_path = file_utils.get_file(
"xception_weights_tf_dim_ordering_tf_kernels_notop.h5",
WEIGHTS_PATH_NO_TOP,
cache_subdir="models",
file_hash="b0042744bf5b25fce3cb969f33bebb97",
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_core_export("keras_core.applications.xception.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="tf"
)
@keras_core_export("keras_core.applications.xception.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| keras-core/keras_core/applications/xception.py/0 | {
"file_path": "keras-core/keras_core/applications/xception.py",
"repo_id": "keras-core",
"token_count": 5791
} | 21 |
import json
import os
from keras_core.api_export import keras_core_export
# The type of float to use throughout a session.
_FLOATX = "float32"
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = "channels_last"
# Default backend: TensorFlow.
_BACKEND = "tensorflow"
@keras_core_export(["keras_core.config.floatx", "keras_core.backend.floatx"])
def floatx():
"""Return the default float type, as a string.
E.g. `'float16'`, `'float32'`, `'float64'`.
Returns:
String, the current default float type.
Example:
>>> keras_core.config.floatx()
'float32'
"""
return _FLOATX
@keras_core_export(
["keras_core.config.set_floatx", "keras_core.backend.set_floatx"]
)
def set_floatx(value):
"""Set the default float dtype.
Note: It is not recommended to set this to `"float16"` for training,
as this will likely cause numeric stability issues.
Instead, consider using mixed precision, which leverages
a mix of `float16` and `float32`. It can be configured by calling
`keras_core.mixed_precision.set_dtype_policy('mixed_float16')`.
Args:
value: String; `'float16'`, `'float32'`, or `'float64'`.
Examples:
>>> keras_core.config.floatx()
'float32'
>>> keras_core.config.set_floatx('float64')
>>> keras_core.config.floatx()
'float64'
>>> # Set it back to float32
>>> keras_core.config.set_floatx('float32')
Raises:
ValueError: In case of invalid value.
"""
global _FLOATX
accepted_dtypes = {"float16", "float32", "float64"}
if value not in accepted_dtypes:
raise ValueError(
f"Unknown `floatx` value: {value}. "
f"Expected one of {accepted_dtypes}"
)
_FLOATX = str(value)
@keras_core_export(["keras_core.config.epsilon", "keras_core.backend.epsilon"])
def epsilon():
"""Return the value of the fuzz factor used in numeric expressions.
Returns:
A float.
Example:
>>> keras_core.config.epsilon()
1e-07
"""
return _EPSILON
@keras_core_export(
["keras_core.config.set_epsilon", "keras_core.backend.set_epsilon"]
)
def set_epsilon(value):
"""Set the value of the fuzz factor used in numeric expressions.
Args:
value: float. New value of epsilon.
Examples:
>>> keras_core.config.epsilon()
1e-07
>>> keras_core.config.set_epsilon(1e-5)
>>> keras_core.config.epsilon()
1e-05
>>> # Set it back to the default value.
>>> keras_core.config.set_epsilon(1e-7)
"""
global _EPSILON
_EPSILON = value
@keras_core_export(
[
"keras_core.config.image_data_format",
"keras_core.backend.image_data_format",
]
)
def image_data_format():
"""Return the default image data format convention.
Returns:
A string, either `'channels_first'` or `'channels_last'`.
Example:
>>> keras_core.config.image_data_format()
'channels_last'
"""
return _IMAGE_DATA_FORMAT
@keras_core_export(
[
"keras_core.config.set_image_data_format",
"keras_core.backend.set_image_data_format",
]
)
def set_image_data_format(data_format):
"""Set the value of the image data format convention.
Args:
data_format: string. `'channels_first'` or `'channels_last'`.
Examples:
>>> keras_core.config.image_data_format()
'channels_last'
>>> keras_core.config.set_image_data_format('channels_first')
>>> keras_core.config.image_data_format()
'channels_first'
>>> # Set it back to `'channels_last'`
>>> keras_core.config.set_image_data_format('channels_last')
"""
global _IMAGE_DATA_FORMAT
data_format = str(data_format).lower()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(
"The `data_format` argument must be one of "
"{'channels_first', 'channels_last'}. "
f"Received: data_format={data_format}"
)
_IMAGE_DATA_FORMAT = data_format
def standardize_data_format(data_format):
if data_format is None:
return image_data_format()
data_format = str(data_format).lower()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(
"The `data_format` argument must be one of "
"{'channels_first', 'channels_last'}. "
f"Received: data_format={data_format}"
)
return data_format
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if "KERAS_HOME" in os.environ:
_KERAS_DIR = os.environ.get("KERAS_HOME")
else:
_keras_base_dir = os.path.expanduser("~")
if not os.access(_keras_base_dir, os.W_OK):
_keras_base_dir = "/tmp"
_KERAS_DIR = os.path.join(_keras_base_dir, ".keras")
def keras_home():
# Private accessor for the keras home location.
return _KERAS_DIR
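# For reference, a freshly written `keras.json` looks like the following
# (a sketch; the values mirror the defaults defined above):
#
# {
#     "floatx": "float32",
#     "epsilon": 1e-07,
#     "backend": "tensorflow",
#     "image_data_format": "channels_last"
# }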
# Attempt to read Keras config file.
_config_path = os.path.expanduser(os.path.join(_KERAS_DIR, "keras.json"))
if os.path.exists(_config_path):
try:
with open(_config_path) as f:
_config = json.load(f)
except ValueError:
_config = {}
_floatx = _config.get("floatx", floatx())
assert _floatx in {"float16", "float32", "float64"}
_epsilon = _config.get("epsilon", epsilon())
assert isinstance(_epsilon, float)
_backend = _config.get("backend", _BACKEND)
_image_data_format = _config.get("image_data_format", image_data_format())
assert _image_data_format in {"channels_last", "channels_first"}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
_BACKEND = _backend
# Save config file, if possible.
if not os.path.exists(_KERAS_DIR):
try:
os.makedirs(_KERAS_DIR)
except OSError:
# Ignore permission-denied errors and potential race conditions
# in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
"floatx": floatx(),
"epsilon": epsilon(),
"backend": _BACKEND,
"image_data_format": image_data_format(),
}
try:
with open(_config_path, "w") as f:
f.write(json.dumps(_config, indent=4))
except IOError:
# Ignore permission-denied errors.
pass
# Set backend based on KERAS_BACKEND flag, if applicable.
if "KERAS_BACKEND" in os.environ:
_backend = os.environ["KERAS_BACKEND"]
if _backend:
_BACKEND = _backend
if _BACKEND != "tensorflow":
# If we are not running on the tensorflow backend, we should stop tensorflow
# from using all available GPU memory. See
# https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
@keras_core_export(
[
"keras_core.config.backend",
"keras_core.backend.backend",
]
)
def backend():
"""Publicly accessible method for determining the current backend.
Returns:
String, the name of the backend Keras is currently using. One of
`"tensorflow"`, `"torch"`, or `"jax"`.
Example:
>>> keras.config.backend()
'tensorflow'
"""
return _BACKEND
| keras-core/keras_core/backend/config.py/0 | {
"file_path": "keras-core/keras_core/backend/config.py",
"repo_id": "keras-core",
"token_count": 3058
} | 22 |
class NumpyLayer:
pass
| keras-core/keras_core/backend/numpy/layer.py/0 | {
"file_path": "keras-core/keras_core/backend/numpy/layer.py",
"repo_id": "keras-core",
"token_count": 11
} | 23 |
import tensorflow as tf
from keras_core import backend
from keras_core.optimizers import base_optimizer
class TFOptimizer(base_optimizer.BaseOptimizer):
"""A class for Tensorflow specific optimizer logic.
The major behavior change for this class is for tf.distribute.
It will override methods from base Keras core Optimizer,
which provide distribute specific functionality, e.g. variable
creation, loss reduction, etc.
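Example (an illustrative sketch; with the TensorFlow backend active, the
built-in optimizers inherit from this class, so they pick up this behavior
automatically under a `tf.distribute` strategy):
```
import tensorflow as tf
import keras_core as keras

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = keras.Sequential([keras.layers.Dense(1)])
    # Optimizer variables are created inside the strategy scope.
    model.compile(optimizer="adam", loss="mse")
```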
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._distribution_strategy = tf.distribute.get_strategy()
def add_variable_from_reference(self, reference_variable, name=None):
if isinstance(reference_variable, backend.Variable):
colocate_var = reference_variable.value
else:
colocate_var = reference_variable
with self._distribution_strategy.extended.colocate_vars_with(
colocate_var
):
return super().add_variable_from_reference(
reference_variable, name=name
)
def _var_key(self, variable):
if isinstance(variable, backend.Variable):
variable = variable.value # Convert to tf.Variable
if hasattr(variable, "_distributed_container"):
variable = variable._distributed_container()
elif (
isinstance(variable, tf.__internal__.CompositeTensor)
and hasattr(variable, "handle")
and hasattr(variable.handle, "_distributed_container")
):
# For ResourceVariables, the _distributed_container attribute
# is added to their handle tensors.
variable = variable.handle._distributed_container()
return variable._unique_id
def _apply_weight_decay(self, variables):
if self.weight_decay is None:
return
def distributed_apply_weight_decay(distribution, variables, **kwargs):
def weight_decay_fn(variable):
if self._use_weight_decay(variable):
lr = tf.cast(self.learning_rate, variable.dtype)
wd = tf.cast(self.weight_decay, variable.dtype)
variable.assign(variable - variable * wd * lr)
for variable in variables:
distribution.extended.update(
variable, weight_decay_fn, group=False
)
tf.__internal__.distribute.interim.maybe_merge_call(
distributed_apply_weight_decay,
self._distribution_strategy,
variables,
)
def _internal_apply_gradients(self, grads_and_vars):
tf.__internal__.distribute.interim.maybe_merge_call(
self._distributed_apply_gradients_fn,
self._distribution_strategy,
grads_and_vars,
)
def _distributed_apply_gradients_fn(
self, distribution, grads_and_vars, **kwargs
):
"""`apply_gradients` using a `DistributionStrategy`."""
def apply_grad_to_update_var(var, grad):
learning_rate = self._get_current_learning_rate()
grad = tf.convert_to_tensor(grad)
return self.update_step(grad, var, learning_rate)
for grad, var in grads_and_vars:
distribution.extended.update(
var, apply_grad_to_update_var, args=(grad,), group=False
)
if self.use_ema:
_, var_list = zip(*grads_and_vars)
self._update_model_variables_moving_average(var_list)
if self.ema_overwrite_frequency:
# Only when self.ema_overwrite_frequency is not None, we
# overwrite the model variables.
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
tf.cond(
tf.cast(should_overwrite_model_vars, tf.bool),
true_fn=lambda: self._overwrite_model_variables_with_average_value( # noqa: E501
var_list
),
false_fn=lambda: None,
)
self.iterations.assign(self.iterations + 1)
def _overwrite_model_variables_with_average_value(self, var_list):
"""Overwrite model variables with their moving average values.
This function overwrites variables on each device.
Args:
var_list: list of model variables.
"""
strategy = self._distribution_strategy
# Override model variable by the stored average value on all devices.
for var, average_var in zip(
var_list, self._model_variables_moving_average
):
strategy.extended.update(
var, lambda a, b: a.assign(b), args=(average_var,)
)
| keras-core/keras_core/backend/tensorflow/optimizer.py/0 | {
"file_path": "keras-core/keras_core/backend/tensorflow/optimizer.py",
"repo_id": "keras-core",
"token_count": 2190
} | 24 |
import numpy as np
from keras_core import backend
from keras_core import initializers
from keras_core import testing
from keras_core import utils
class InitializersTest(testing.TestCase):
def test_random_normal(self):
utils.set_random_seed(1337)
shape = (25, 20)
mean = 0.0
stddev = 1.0
seed = 1234
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=seed
)
values = initializer(shape=shape)
self.assertEqual(initializer.mean, mean)
self.assertEqual(initializer.stddev, stddev)
self.assertEqual(initializer.seed, seed)
self.assertEqual(values.shape, shape)
self.assertAllClose(
np.std(backend.convert_to_numpy(values)), stddev, atol=1e-1
)
self.run_class_serialization_test(initializer)
# Test that a fixed seed yields the same results each call.
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=1337
)
values = initializer(shape=shape)
next_values = initializer(shape=shape)
self.assertAllClose(values, next_values)
# Test that a SeedGenerator yields different results each call.
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=backend.random.SeedGenerator(1337)
)
values = initializer(shape=shape)
next_values = initializer(shape=shape)
self.assertNotAllClose(values, next_values)
# Test serialization with SeedGenerator
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=backend.random.SeedGenerator(1337)
)
values = initializer(shape=shape)
# Test that unseeded generator gets different results after cloning
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=None
)
values = initializer(shape=shape)
cloned_initializer = initializers.RandomNormal.from_config(
initializer.get_config()
)
new_values = cloned_initializer(shape=shape)
self.assertNotAllClose(values, new_values)
# Test that seeded generator gets same results after cloning
initializer = initializers.RandomNormal(
mean=mean, stddev=stddev, seed=1337
)
values = initializer(shape=shape)
cloned_initializer = initializers.RandomNormal.from_config(
initializer.get_config()
)
new_values = cloned_initializer(shape=shape)
self.assertAllClose(values, new_values)
def test_random_uniform(self):
shape = (5, 5)
minval = -1.0
maxval = 1.0
seed = 1234
initializer = initializers.RandomUniform(
minval=minval, maxval=maxval, seed=seed
)
values = initializer(shape=shape)
self.assertEqual(initializer.minval, minval)
self.assertEqual(initializer.maxval, maxval)
self.assertEqual(initializer.seed, seed)
self.assertEqual(values.shape, shape)
values = backend.convert_to_numpy(values)
self.assertGreaterEqual(np.min(values), minval)
self.assertLess(np.max(values), maxval)
self.run_class_serialization_test(initializer)
def test_variance_scaling(self):
utils.set_random_seed(1337)
shape = (25, 20)
scale = 2.0
seed = 1234
initializer = initializers.VarianceScaling(
scale=scale, seed=seed, mode="fan_in"
)
values = initializer(shape=shape)
self.assertEqual(initializer.scale, scale)
self.assertEqual(initializer.seed, seed)
self.assertEqual(values.shape, shape)
self.assertAllClose(
np.std(backend.convert_to_numpy(values)),
np.sqrt(scale / 25),
atol=1e-1,
)
self.run_class_serialization_test(initializer)
initializer = initializers.VarianceScaling(
scale=scale, seed=seed, mode="fan_out"
)
values = initializer(shape=shape)
self.assertEqual(initializer.scale, scale)
self.assertEqual(initializer.seed, seed)
self.assertEqual(values.shape, shape)
self.assertAllClose(
np.std(backend.convert_to_numpy(values)),
np.sqrt(scale / 20),
atol=1e-1,
)
self.run_class_serialization_test(initializer)
def test_orthogonal_initializer(self):
shape = (5, 5)
gain = 2.0
seed = 1234
initializer = initializers.OrthogonalInitializer(gain=gain, seed=seed)
values = initializer(shape=shape)
self.assertEqual(initializer.seed, seed)
self.assertEqual(initializer.gain, gain)
self.assertEqual(values.shape, shape)
array = backend.convert_to_numpy(values)
# Making sure that the columns have gain * unit norm value
for column in array.T:
self.assertAlmostEqual(np.linalg.norm(column), gain * 1.0)
# Making sure that each column is orthonormal to the other column
for i in range(array.shape[-1]):
for j in range(i + 1, array.shape[-1]):
self.assertAlmostEqual(
np.dot(array[..., i], array[..., j]), 0.0
)
self.run_class_serialization_test(initializer)
def test_get_method(self):
obj = initializers.get("glorot_normal")
self.assertIsInstance(obj, initializers.GlorotNormal)
obj = initializers.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
initializers.get("typo")
| keras-core/keras_core/initializers/random_initializers_test.py/0 | {
"file_path": "keras-core/keras_core/initializers/random_initializers_test.py",
"repo_id": "keras-core",
"token_count": 2571
} | 25 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.attention.attention import Attention
@keras_core_export("keras_core.layers.AdditiveAttention")
class AdditiveAttention(Attention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
3. An optional `key` tensor of shape `(batch_size, Tv, dim)`. If none
supplied, `value` will be used as `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)` as a non-linear sum
`scores = reduce_sum(tanh(query + key), axis=-1)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
Call Args:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
return_attention_scores: bool, if `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=True,
dropout=0.0,
**kwargs,
):
super().__init__(use_scale=use_scale, dropout=dropout, **kwargs)
def build(self, input_shape):
self._validate_inputs(input_shape)
dim = input_shape[0][-1]
self.scale = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=[dim],
initializer="glorot_uniform",
dtype=self.dtype,
trainable=True,
)
self.built = True
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
scale = self.scale if self.use_scale else 1.0
return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)
def get_config(self):
base_config = super().get_config()
del base_config["score_mode"]
return base_config
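# Illustrative usage sketch (added for clarity, not part of the original
# module). Shapes follow the docstring above; `np` and `layers` imports
# are assumed to be available in the caller's environment.
#
# query = np.random.rand(2, 4, 8).astype("float32")  # (batch_size, Tq, dim)
# value = np.random.rand(2, 6, 8).astype("float32")  # (batch_size, Tv, dim)
# attention = layers.AdditiveAttention()
# output, scores = attention(
#     [query, value], return_attention_scores=True
# )
# `output` has shape (2, 4, 8) and `scores` has shape (2, 4, 6).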
| keras-core/keras_core/layers/attention/additive_attention.py/0 | {
"file_path": "keras-core/keras_core/layers/attention/additive_attention.py",
"repo_id": "keras-core",
"token_count": 1875
} | 26 |
from keras_core.api_export import keras_core_export
from keras_core.layers.convolutional.base_conv_transpose import (
BaseConvTranspose,
)
@keras_core_export(
[
"keras_core.layers.Conv3DTranspose",
"keras_core.layers.Convolution3DTranspose",
]
)
class Conv3DTranspose(BaseConvTranspose):
"""3D transposed convolution layer.
The need for transposed convolutions generally arise from the desire to use
a transformation going in the opposite direction of a normal convolution,
i.e., from something that has the shape of the output of some convolution
to something that has the shape of its input while maintaining a
connectivity pattern that is compatible with said convolution.
Args:
filters: int, the dimension of the output space (the number of filters
in the transposed convolution).
kernel_size: int or tuple/list of 3 integers, specifying the size of the
transposed convolution window.
strides: int or tuple/list of 3 integers, specifying the stride length
of the transposed convolution. `strides > 1` is incompatible with
`dilation_rate > 1`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
dilation_rate: int or tuple/list of 3 integers, specifying the dilation
rate to use for dilated transposed convolution.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
kernel_initializer: Initializer for the convolution kernel. If `None`,
the default initializer (`"glorot_uniform"`) will be used.
bias_initializer: Initializer for the bias vector. If `None`, the
default initializer (`"zeros"`) will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape). Constraints
are not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
Input shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, new_spatial_dim1, new_spatial_dim2, new_spatial_dim3,
filters)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, filters, new_spatial_dim1, new_spatial_dim2,
new_spatial_dim3)`
Returns:
A 5D tensor representing `activation(conv3d_transpose(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides > 1` and `dilation_rate > 1`.
References:
- [A guide to convolution arithmetic for deep learning](
https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional Networks](
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
Examples:
>>> x = np.random.rand(4, 10, 8, 12, 128)
>>> y = keras_core.layers.Conv3DTranspose(32, 2, 2, activation='relu')(x)
>>> print(y.shape)
(4, 20, 16, 24, 32)
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs
):
super().__init__(
rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs
)
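# Output-size arithmetic for the docstring example above (illustrative,
# `"valid"` padding, dilation 1): new_dim = (dim - 1) * stride + kernel_size,
# e.g. (10 - 1) * 2 + 2 = 20, matching the (4, 20, 16, 24, 32) output shape.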
| keras-core/keras_core/layers/convolutional/conv3d_transpose.py/0 | {
"file_path": "keras-core/keras_core/layers/convolutional/conv3d_transpose.py",
"repo_id": "keras-core",
"token_count": 2430
} | 27 |
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.Identity")
class Identity(Layer):
"""Identity layer.
This layer should be used as a placeholder when no operation is to be
performed. The layer just returns its `inputs` argument as output.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def call(self, inputs):
return inputs
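# Minimal usage sketch (illustrative; assumes `from keras_core import ops`):
#
# x = ops.ones((2, 3))
# y = Identity()(x)
# `y` equals `x` unchanged; the layer is a pure pass-through placeholder.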
| keras-core/keras_core/layers/core/identity.py/0 | {
"file_path": "keras-core/keras_core/layers/core/identity.py",
"repo_id": "keras-core",
"token_count": 179
} | 28 |
from keras_core import backend
from keras_core import ops
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.layers.layer import Layer
class Merge(Layer):
"""Generic merge layer for elementwise merge functions.
Used to implement `Sum`, `Average`, etc.
Args:
**kwargs: standard layer keyword arguments.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def _merge_function(self, inputs):
raise NotImplementedError
def _compute_elemwise_op_output_shape(self, shape1, shape2):
"""Computes the shape of the resultant of an elementwise operation.
Args:
shape1: Tuple or None. Shape of the first tensor
shape2: Tuple or None. Shape of the second tensor
Returns:
Expected output shape when an element-wise operation is
carried out on 2 tensors with shapes shape1 and shape2.
tuple or None.
Raises:
ValueError: If shape1 and shape2 are not compatible for
element-wise operations.
"""
if None in [shape1, shape2]:
return None
elif len(shape1) < len(shape2):
return self._compute_elemwise_op_output_shape(shape2, shape1)
elif not shape2:
return shape1
output_shape = list(shape1[: -len(shape2)])
for i, j in zip(shape1[-len(shape2) :], shape2):
if i is None or j is None:
output_shape.append(None)
elif i == 1:
output_shape.append(j)
elif j == 1:
output_shape.append(i)
else:
if i != j:
raise ValueError(
"Inputs have incompatible shapes. "
f"Received shapes {shape1} and {shape2}"
)
output_shape.append(i)
return tuple(output_shape)
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape[0], (tuple, list)):
raise ValueError(
"A merge layer should be called on a list of inputs. "
f"Received: input_shape={input_shape} (not a list of shapes)"
)
if len(input_shape) < 1:
raise ValueError(
"A merge layer should be called "
"on a list of at least 1 input. "
f"Received {len(input_shape)} inputs. "
f"Full input_shape received: {input_shape}"
)
batch_sizes = {s[0] for s in input_shape if s} - {None}
if len(batch_sizes) > 1:
raise ValueError(
"Cannot merge tensors with different batch sizes. "
f"Received tensors with shapes {input_shape}"
)
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(
output_shape, shape
)
# If the inputs have different ranks, we have to reshape them
# to make them broadcastable.
if None not in input_shape and len(set(map(len, input_shape))) == 1:
self._reshape_required = False
else:
self._reshape_required = True
self.built = True
def call(self, inputs):
if not isinstance(inputs, (list, tuple)):
raise ValueError(
"A merge layer should be called on a list of inputs. "
f"Received: inputs={inputs} (not a list of tensors)"
)
if self._reshape_required:
reshaped_inputs = []
input_ndims = list(map(ops.ndim, inputs))
if None not in input_ndims:
# If ranks of all inputs are available,
# we simply expand each of them at axis=1
# until all of them have the same rank.
max_ndim = max(input_ndims)
for x in inputs:
x_ndim = ops.ndim(x)
for _ in range(max_ndim - x_ndim):
x = ops.expand_dims(x, axis=1)
reshaped_inputs.append(x)
return self._merge_function(reshaped_inputs)
else:
# Transpose all inputs so that batch size is the last dimension.
# (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... ,
# batch_size)
transposed = False
for x in inputs:
x_ndim = ops.ndim(x)
if x_ndim is None:
x_shape = ops.shape(x)
batch_size = x_shape[0]
new_shape = backend.concatenate(
[x_shape[1:], ops.expand_dims(batch_size, axis=-1)]
)
x_transposed = ops.reshape(
x,
ops.stack(
[batch_size, ops.prod(x_shape[1:])],
axis=0,
),
)
x_transposed = ops.transpose(x_transposed, perm=(1, 0))
x_transposed = ops.reshape(x_transposed, new_shape)
reshaped_inputs.append(x_transposed)
transposed = True
elif x_ndim > 1:
dims = list(range(1, x_ndim)) + [0]
reshaped_inputs.append(ops.transpose(x, perm=dims))
transposed = True
else:
# We don't transpose inputs if they are 1D vectors or
# scalars.
reshaped_inputs.append(x)
y = self._merge_function(reshaped_inputs)
y_ndim = ops.ndim(y)
if transposed:
# If inputs have been transposed, we have to transpose the
# output too.
if y_ndim is None:
y_shape = ops.shape(y)
y_ndim = ops.shape(y_shape)[0]
batch_size = y_shape[y_ndim - 1]
new_shape = ops.concatenate(
[
ops.expand_dims(batch_size, axis=-1),
y_shape[: y_ndim - 1],
]
)
y = ops.reshape(y, (-1, batch_size))
y = ops.transpose(y, perm=(1, 0))
y = ops.reshape(y, new_shape)
elif y_ndim > 1:
dims = [y_ndim - 1] + list(range(y_ndim - 1))
y = ops.transpose(y, perm=dims)
return y
else:
return self._merge_function(inputs)
def compute_output_shape(self, input_shape):
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(
output_shape, shape
)
batch_sizes = {s[0] for s in input_shape if s is not None} - {None}
if len(batch_sizes) == 1:
output_shape = (list(batch_sizes)[0],) + output_shape
else:
output_shape = (None,) + output_shape
return output_shape
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape([x.shape for x in inputs])
output_sparse = all(x.sparse for x in inputs)
return KerasTensor(
output_shape, dtype=self.compute_dtype, sparse=output_sparse
)
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, (tuple, list)):
raise ValueError(f"`mask` should be a list. Received: mask={mask}")
if not isinstance(inputs, (tuple, list)):
raise ValueError(
f"`inputs` should be a list. Received: inputs={inputs}"
)
if len(mask) != len(inputs):
raise ValueError(
"The lists `inputs` and `mask` should have the same length. "
f"Received: inputs={inputs} of length {len(inputs)}, and "
f"mask={mask} of length {len(mask)}"
)
if all(m is None for m in mask):
return None
masks = [ops.expand_dims(m, axis=0) for m in mask if m is not None]
return ops.all(ops.concatenate(masks, axis=0), axis=0, keepdims=False)
def get_config(self):
return super().get_config()
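# Worked example of the broadcasting rule implemented by
# `_compute_elemwise_op_output_shape` (illustrative; uses the concrete
# `Add` subclass, since `Merge` itself is abstract, and assumes `np` and
# `keras_core` are imported):
#
# a = np.ones((2, 1, 5))
# b = np.ones((2, 4, 5))
# out = keras_core.layers.Add()([a, b])
# Size-1 dimensions broadcast against the other input, so `out` has shape
# (2, 4, 5); incompatible pairs such as (2, 3, 5) and (2, 4, 5) raise a
# ValueError instead.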
| keras-core/keras_core/layers/merging/base_merge.py/0 | {
"file_path": "keras-core/keras_core/layers/merging/base_merge.py",
"repo_id": "keras-core",
"token_count": 5059
} | 29 |
import numpy as np
import pytest
from keras_core import initializers
from keras_core import layers
from keras_core import testing
class SpectralNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basic_spectralnorm(self):
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Dense(2)},
input_data=np.random.uniform(size=(10, 3, 4)),
expected_output_shape=(10, 3, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Embedding(10, 4)},
input_data=np.random.randint(10, size=(10,)),
expected_output_shape=(10, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
)
def test_invalid_power_iterations(self):
with self.assertRaisesRegex(
ValueError, "`power_iterations` should be greater than zero."
):
layers.SpectralNormalization(layers.Dense(2), power_iterations=0)
def test_invalid_layer(self):
layer = layers.SpectralNormalization(layers.ReLU())
inputs = np.ones(shape=(4, 2))
with self.assertRaisesRegex(
ValueError, "object has no attribute 'kernel' nor 'embeddings'"
):
layer(inputs)
def test_apply_layer(self):
images = np.ones((1, 2, 2, 1))
sn_wrapper = layers.SpectralNormalization(
layers.Conv2D(
1, (2, 2), kernel_initializer=initializers.Constant(value=1)
),
power_iterations=8,
)
result = sn_wrapper(images, training=False)
result_train = sn_wrapper(images, training=True)
expected_output = np.array([[[[4.0]]]], dtype=np.float32)
self.assertAllClose(result, expected_output)
# The max eigenvalue of a 2x2 matrix of ones is 2
self.assertAllClose(result_train, expected_output / 2)
| keras-core/keras_core/layers/normalization/spectral_normalization_test.py/0 | {
"file_path": "keras-core/keras_core/layers/normalization/spectral_normalization_test.py",
"repo_id": "keras-core",
"token_count": 1104
} | 30 |
import numpy as np
import pytest
import tensorflow as tf
from keras_core import backend
from keras_core import layers
from keras_core import testing
class HashedCrossingTest(testing.TestCase):
def test_basics(self):
self.run_layer_test(
layers.HashedCrossing,
init_kwargs={
"num_bins": 3,
"output_mode": "int",
},
input_data=(np.array([1, 2]), np.array([4, 5])),
expected_output_shape=(2,),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
# Incomplete op support on tensorflow.
run_mixed_precision_check=False,
)
self.run_layer_test(
layers.HashedCrossing,
init_kwargs={"num_bins": 4, "output_mode": "one_hot"},
input_data=(np.array([1, 2]), np.array([4, 5])),
expected_output_shape=(2, 4),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
# Incomplete op support on tensorflow.
run_mixed_precision_check=False,
)
def test_correctness(self):
layer = layers.HashedCrossing(num_bins=5)
feat1 = np.array(["A", "B", "A", "B", "A"])
feat2 = np.array([101, 101, 101, 102, 102])
output = layer((feat1, feat2))
self.assertAllClose(tf.constant([1, 4, 1, 1, 3]), output)
layer = layers.HashedCrossing(num_bins=5, output_mode="one_hot")
feat1 = np.array(["A", "B", "A", "B", "A"])
feat2 = np.array([101, 101, 101, 102, 102])
output = layer((feat1, feat2))
self.assertAllClose(
np.array(
[
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
]
),
output,
)
def test_tf_data_compatibility(self):
layer = layers.HashedCrossing(num_bins=5)
feat1 = np.array(["A", "B", "A", "B", "A"])
feat2 = np.array([101, 101, 101, 102, 102])
ds = (
tf.data.Dataset.from_tensor_slices((feat1, feat2))
.batch(5)
.map(lambda x1, x2: layer((x1, x2)))
)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(np.array([1, 4, 1, 1, 3]), output)
def test_unsupported_shape_input_fails(self):
with self.assertRaisesRegex(ValueError, "inputs should have shape"):
layers.HashedCrossing(num_bins=10)(
(np.array([[[1.0]]]), np.array([[[1.0]]]))
)
@pytest.mark.xfail
def test_cross_output_dtype(self):
input_1, input_2 = np.array([1]), np.array([1])
layer = layers.HashedCrossing(num_bins=2)
output_dtype = backend.standardize_dtype(
layer((input_1, input_2)).dtype
)
self.assertEqual(output_dtype, "int64")
layer = layers.HashedCrossing(num_bins=2, dtype="int32")
output_dtype = backend.standardize_dtype(
layer((input_1, input_2)).dtype
)
self.assertEqual(output_dtype, "int32")
layer = layers.HashedCrossing(num_bins=2, output_mode="one_hot")
output_dtype = backend.standardize_dtype(
layer((input_1, input_2)).dtype
)
self.assertEqual(output_dtype, "float32")
layer = layers.HashedCrossing(
num_bins=2, output_mode="one_hot", dtype="float64"
)
output_dtype = backend.standardize_dtype(
layer((input_1, input_2)).dtype
)
self.assertEqual(output_dtype, "float64")
def test_non_list_input_fails(self):
with self.assertRaisesRegex(ValueError, "should be called on a list"):
layers.HashedCrossing(num_bins=10)(np.array(1))
def test_single_input_fails(self):
with self.assertRaisesRegex(ValueError, "at least two inputs"):
layers.HashedCrossing(num_bins=10)([np.array(1)])
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="Need sparse tensor support.",
)
def test_sparse_input_fails(self):
with self.assertRaisesRegex(
ValueError, "inputs should be dense tensors"
):
sparse_in = tf.sparse.from_dense(np.array([1]))
layers.HashedCrossing(num_bins=10)((sparse_in, sparse_in))
def test_float_input_fails(self):
with self.assertRaisesRegex(
ValueError, "should have an integer or string"
):
layers.HashedCrossing(num_bins=10)(
(np.array([1.0]), np.array([1.0]))
)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="Need string tensor support.",
)
def test_tf_string(self):
layer = layers.HashedCrossing(num_bins=10)
feat1 = tf.constant("A")
feat2 = tf.constant(101)
outputs = layer((feat1, feat2))
self.assertAllClose(outputs, 1)
layer = layers.HashedCrossing(num_bins=5, output_mode="one_hot")
feat1 = tf.constant(["A", "B", "A", "B", "A"])
feat2 = tf.constant([101, 101, 101, 102, 102])
self.assertAllClose(
tf.constant(
[
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
]
),
layer((feat1, feat2)),
)
layer = layers.HashedCrossing(num_bins=5)
feat1 = tf.constant(["A", "B", "A", "B", "A"])
feat2 = tf.constant([101, 101, 101, 102, 102])
self.assertAllClose(tf.constant([1, 4, 1, 1, 3]), layer((feat1, feat2)))
layer = layers.HashedCrossing(
num_bins=5, output_mode="one_hot", sparse=True
)
cloned_layer = layers.HashedCrossing.from_config(layer.get_config())
feat1 = tf.constant([["A"], ["B"], ["A"], ["B"], ["A"]])
feat2 = tf.constant([[101], [101], [101], [102], [102]])
original_outputs = layer((feat1, feat2))
cloned_outputs = cloned_layer((feat1, feat2))
self.assertAllClose(
tf.sparse.to_dense(cloned_outputs),
tf.sparse.to_dense(original_outputs),
)
| keras-core/keras_core/layers/preprocessing/hashed_crossing_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/hashed_crossing_test.py",
"repo_id": "keras-core",
"token_count": 3626
} | 31 |
import unittest.mock
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras_core import backend
from keras_core import layers
from keras_core import testing
from keras_core import utils
class MockedRandomFlip(layers.RandomFlip):
def call(self, inputs, training=True):
unbatched = len(inputs.shape) == 3
batch_size = 1 if unbatched else self.backend.shape(inputs)[0]
mocked_value = self.backend.numpy.full(
(batch_size, 1, 1, 1), 0.1, dtype="float32"
)
with unittest.mock.patch.object(
self.backend.random,
"uniform",
return_value=mocked_value,
):
out = super().call(inputs, training=training)
return out
class RandomFlipTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("random_flip_horizontal", "horizontal"),
("random_flip_vertical", "vertical"),
("random_flip_both", "horizontal_and_vertical"),
)
def test_random_flip(self, mode):
run_training_check = False if backend.backend() == "numpy" else True
self.run_layer_test(
layers.RandomFlip,
init_kwargs={
"mode": mode,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
supports_masking=False,
run_training_check=run_training_check,
)
def test_random_flip_horizontal(self):
run_training_check = False if backend.backend() == "numpy" else True
utils.set_random_seed(0)
# Test 3D input: shape (1*2*3)
self.run_layer_test(
MockedRandomFlip,
init_kwargs={
"mode": "horizontal",
"seed": 42,
},
input_data=np.asarray([[[2, 3, 4], [5, 6, 7]]]),
expected_output=backend.convert_to_tensor([[[5, 6, 7], [2, 3, 4]]]),
supports_masking=False,
run_training_check=run_training_check,
)
# Test 4D input: shape (2*1*2*3)
self.run_layer_test(
MockedRandomFlip,
init_kwargs={
"mode": "horizontal",
"seed": 42,
},
input_data=np.asarray(
[
[[[2, 3, 4], [5, 6, 7]]],
[[[2, 3, 4], [5, 6, 7]]],
]
),
expected_output=backend.convert_to_tensor(
[
[[[5, 6, 7], [2, 3, 4]]],
[[[5, 6, 7], [2, 3, 4]]],
]
),
supports_masking=False,
run_training_check=run_training_check,
)
def test_random_flip_vertical(self):
run_training_check = False if backend.backend() == "numpy" else True
utils.set_random_seed(0)
# Test 3D input: shape (2*1*3)
self.run_layer_test(
MockedRandomFlip,
init_kwargs={
"mode": "vertical",
"seed": 42,
},
input_data=np.asarray([[[2, 3, 4]], [[5, 6, 7]]]),
expected_output=backend.convert_to_tensor(
[[[5, 6, 7]], [[2, 3, 4]]]
),
supports_masking=False,
run_training_check=run_training_check,
)
# Test 4D input: shape (2*2*1*3)
self.run_layer_test(
MockedRandomFlip,
init_kwargs={
"mode": "vertical",
"seed": 42,
},
input_data=np.asarray(
[
[
[[2, 3, 4]],
[[5, 6, 7]],
],
[
[[2, 3, 4]],
[[5, 6, 7]],
],
]
),
expected_output=backend.convert_to_tensor(
[
[[[5, 6, 7]], [[2, 3, 4]]],
[[[5, 6, 7]], [[2, 3, 4]]],
]
),
supports_masking=False,
run_training_check=run_training_check,
)
def test_tf_data_compatibility(self):
# Test 3D input: shape (2, 1, 3)
layer = layers.RandomFlip("vertical", seed=42)
input_data = np.array([[[2, 3, 4]], [[5, 6, 7]]])
expected_output = np.array([[[5, 6, 7]], [[2, 3, 4]]])
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, expected_output)
# Test 4D input: shape (2, 2, 1, 3)
layer = layers.RandomFlip("vertical", seed=42)
input_data = np.array(
[
[
[[2, 3, 4]],
[[5, 6, 7]],
],
[
[[2, 3, 4]],
[[5, 6, 7]],
],
]
)
expected_output = np.array(
[
[[[5, 6, 7]], [[2, 3, 4]]],
[[[5, 6, 7]], [[2, 3, 4]]],
]
)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, expected_output)
| keras-core/keras_core/layers/preprocessing/random_flip_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/random_flip_test.py",
"repo_id": "keras-core",
"token_count": 3143
} | 32 |
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
from keras_core.utils import argument_validation
@keras_core_export("keras_core.layers.Cropping3D")
class Cropping3D(Layer):
"""Cropping layer for 3D data (e.g. spatial or spatio-temporal).
Examples:
>>> input_shape = (2, 28, 28, 10, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = keras_core.layers.Cropping3D(cropping=(2, 4, 2))(x)
>>> y.shape
(2, 24, 20, 6, 3)
Args:
cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping is applied to depth, height,
and width.
- If tuple of 3 ints: interpreted as three different symmetric
cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints: interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`.
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if it exists). Defaults to
`"channels_last"`.
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_cropped_axis, second_cropped_axis,
third_cropped_axis, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
"""
def __init__(
self, cropping=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs
):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = (
(cropping, cropping),
(cropping, cropping),
(cropping, cropping),
)
elif hasattr(cropping, "__len__"):
if len(cropping) != 3:
raise ValueError(
f"`cropping` should have 3 elements. Received: {cropping}."
)
dim1_cropping = argument_validation.standardize_tuple(
cropping[0], 2, "1st entry of cropping", allow_zero=True
)
dim2_cropping = argument_validation.standardize_tuple(
cropping[1], 2, "2nd entry of cropping", allow_zero=True
)
dim3_cropping = argument_validation.standardize_tuple(
cropping[2], 2, "3rd entry of cropping", allow_zero=True
)
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
"`cropping` should be either an int, a tuple of 3 ints "
"(symmetric_dim1_crop, symmetric_dim2_crop, "
"symmetric_dim3_crop), "
"or a tuple of 3 tuples of 2 ints "
"((left_dim1_crop, right_dim1_crop),"
" (left_dim2_crop, right_dim2_crop),"
" (left_dim3_crop, right_dim2_crop)). "
f"Received: {cropping}."
)
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
if self.data_format == "channels_first":
spatial_dims = list(input_shape[2:5])
else:
spatial_dims = list(input_shape[1:4])
for index in range(0, 3):
if spatial_dims[index] is None:
continue
spatial_dims[index] -= sum(self.cropping[index])
if spatial_dims[index] <= 0:
raise ValueError(
"Values in `cropping` argument should be greater than the "
"corresponding spatial dimension of the input. Received: "
f"input_shape={input_shape}, cropping={self.cropping}"
)
if self.data_format == "channels_first":
return (input_shape[0], input_shape[1], *spatial_dims)
else:
return (input_shape[0], *spatial_dims, input_shape[4])
def call(self, inputs):
if self.data_format == "channels_first":
spatial_dims = list(inputs.shape[2:5])
else:
spatial_dims = list(inputs.shape[1:4])
for index in range(0, 3):
if spatial_dims[index] is None:
continue
spatial_dims[index] -= sum(self.cropping[index])
if spatial_dims[index] <= 0:
raise ValueError(
"Values in `cropping` argument should be greater than the "
"corresponding spatial dimension of the input. Received: "
f"inputs.shape={inputs.shape}, cropping={self.cropping}"
)
if self.data_format == "channels_first":
if (
self.cropping[0][1]
== self.cropping[1][1]
== self.cropping[2][1]
== 0
):
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] :,
]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] :,
]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
]
elif self.cropping[0][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
]
elif self.cropping[1][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
]
elif self.cropping[2][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
]
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
]
else:
if (
self.cropping[0][1]
== self.cropping[1][1]
== self.cropping[2][1]
== 0
):
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] :,
:,
]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
:,
]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] :,
:,
]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
:,
]
elif self.cropping[0][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
:,
]
elif self.cropping[1][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
:,
]
elif self.cropping[2][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
:,
]
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
:,
]
def get_config(self):
config = {"cropping": self.cropping, "data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
| keras-core/keras_core/layers/reshaping/cropping3d.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/cropping3d.py",
"repo_id": "keras-core",
"token_count": 6492
} | 33 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
from keras_core.utils import argument_validation
@keras_core_export("keras_core.layers.ZeroPadding1D")
class ZeroPadding1D(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Examples:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> x
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = keras_core.layers.ZeroPadding1D(padding=2)(x)
>>> y
[[[ 0 0 0]
[ 0 0 0]
[ 0 1 2]
[ 3 4 5]
[ 0 0 0]
[ 0 0 0]]
[[ 0 0 0]
[ 0 0 0]
[ 6 7 8]
[ 9 10 11]
[ 0 0 0]
[ 0 0 0]]]
Args:
padding: Int, or tuple of int (length 2).
- If int: how many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of 2 ints: how many zeros to add at the beginning and the
end of the padding dimension (`(left_pad, right_pad)`).
Input shape:
3D tensor with shape `(batch_size, axis_to_pad, features)`
Output shape:
3D tensor with shape `(batch_size, padded_axis, features)`
"""
def __init__(self, padding=1, **kwargs):
super().__init__(**kwargs)
self.padding = argument_validation.standardize_tuple(
padding, 2, "padding", allow_zero=True
)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
if output_shape[1] is not None:
output_shape[1] += self.padding[0] + self.padding[1]
return tuple(output_shape)
def call(self, inputs):
all_dims_padding = ((0, 0), self.padding, (0, 0))
return ops.pad(inputs, all_dims_padding)
def get_config(self):
config = {"padding": self.padding}
base_config = super().get_config()
return {**base_config, **config}
| keras-core/keras_core/layers/reshaping/zero_padding1d.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/zero_padding1d.py",
"repo_id": "keras-core",
"token_count": 973
} | 34 |
import numpy as np
from keras_core import backend
from keras_core import initializers
from keras_core import testing
from keras_core.layers.rnn.conv_lstm import ConvLSTM
from keras_core.layers.rnn.conv_lstm import ConvLSTMCell
class ConvLSTMCellTest(testing.TestCase):
def test_correctness(self):
x = np.arange(150).reshape((2, 5, 5, 3)).astype("float32") / 10
s1 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 10
s2 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 10
layer = ConvLSTMCell(
rank=2,
filters=4,
kernel_size=3,
padding="same",
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
)
output = layer(x, [s1, s2])
checksum_0 = np.sum(backend.convert_to_numpy(output[0]))
self.assertAllClose(checksum_0, 188.89502)
checksum_1 = np.sum(backend.convert_to_numpy(output[1][0]))
self.assertAllClose(checksum_1, 188.89502)
checksum_2 = np.sum(backend.convert_to_numpy(output[1][1]))
self.assertAllClose(checksum_2, 2170.444)
class ConvLSTMTest(testing.TestCase):
def test_correctness(self):
x = np.arange(450).reshape((2, 3, 5, 5, 3)).astype("float32") / 100
s1 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 100
s2 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 100
layer = ConvLSTM(
rank=2,
filters=4,
kernel_size=3,
padding="same",
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
)
output = layer(x, initial_state=[s1, s2])
output = backend.convert_to_numpy(output)
self.assertAllClose(np.sum(output), 119.812454)
| keras-core/keras_core/layers/rnn/conv_lstm_test.py/0 | {
"file_path": "keras-core/keras_core/layers/rnn/conv_lstm_test.py",
"repo_id": "keras-core",
"token_count": 911
} | 35 |
"""Legacy Keras 1/2 backend functions."""
import itertools
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.utils.module_utils import tensorflow as tf
py_any = any
py_all = all
@keras_core_export("keras_core._legacy.backend.abs")
def abs(x):
"""DEPRECATED."""
return tf.abs(x)
@keras_core_export("keras_core._legacy.backend.all")
def all(x, axis=None, keepdims=False):
"""DEPRECATED."""
x = tf.cast(x, tf.bool)
return tf.reduce_all(x, axis, keepdims)
@keras_core_export("keras_core._legacy.backend.any")
def any(x, axis=None, keepdims=False):
"""DEPRECATED."""
x = tf.cast(x, tf.bool)
return tf.reduce_any(x, axis, keepdims)
@keras_core_export("keras_core._legacy.backend.argmax")
def argmax(x, axis=-1):
"""DEPRECATED."""
return tf.argmax(x, axis)
@keras_core_export("keras_core._legacy.backend.argmin")
def argmin(x, axis=-1):
"""DEPRECATED."""
return tf.argmin(x, axis)
@keras_core_export("keras_core._legacy.backend.arange")
def arange(start, stop=None, step=1, dtype="int32"):
"""DEPRECATED."""
if stop is None and start < 0:
start = 0
result = tf.range(start, limit=stop, delta=step, name="arange")
if dtype != "int32":
result = tf.cast(result, dtype)
return result
@keras_core_export("keras_core._legacy.backend.batch_dot")
def batch_dot(x, y, axes=None):
"""DEPRECATED."""
x_shape = x.shape
y_shape = y.shape
x_ndim = len(x_shape)
y_ndim = len(y_shape)
if x_ndim < 2 or y_ndim < 2:
raise ValueError(
"Cannot do batch_dot on inputs "
"with rank < 2. "
"Received inputs with tf.shapes "
+ str(x_shape)
+ " and "
+ str(y_shape)
+ "."
)
x_batch_size = x_shape[0]
y_batch_size = y_shape[0]
if x_batch_size is not None and y_batch_size is not None:
if x_batch_size != y_batch_size:
raise ValueError(
"Cannot do batch_dot on inputs "
"with different batch sizes. "
"Received inputs with tf.shapes "
+ str(x_shape)
+ " and "
+ str(y_shape)
+ "."
)
if isinstance(axes, int):
axes = [axes, axes]
if axes is None:
if y_ndim == 2:
axes = [x_ndim - 1, y_ndim - 1]
else:
axes = [x_ndim - 1, y_ndim - 2]
if py_any(isinstance(a, (list, tuple)) for a in axes):
raise ValueError(
"Multiple target dimensions are not supported. "
+ "Expected: None, int, (int, int), "
+ "Provided: "
+ str(axes)
)
# if tuple, convert to list.
axes = list(axes)
# convert negative indices.
if axes[0] < 0:
axes[0] += x_ndim
if axes[1] < 0:
axes[1] += y_ndim
# sanity checks
if 0 in axes:
raise ValueError(
"Cannot perform batch_dot over axis 0. "
"If your inputs are not batched, "
"add a dummy batch dimension to your "
"inputs using K.expand_dims(x, 0)"
)
a0, a1 = axes
d1 = x_shape[a0]
d2 = y_shape[a1]
if d1 is not None and d2 is not None and d1 != d2:
raise ValueError(
"Cannot do batch_dot on inputs with tf.shapes "
+ str(x_shape)
+ " and "
+ str(y_shape)
+ " with axes="
+ str(axes)
+ ". x.shape[%d] != y.shape[%d] (%d != %d)."
% (axes[0], axes[1], d1, d2)
)
# backup ndims. Need them later.
orig_x_ndim = x_ndim
orig_y_ndim = y_ndim
# if rank is 2, expand to 3.
if x_ndim == 2:
x = tf.expand_dims(x, 1)
a0 += 1
x_ndim += 1
if y_ndim == 2:
y = tf.expand_dims(y, 2)
y_ndim += 1
# bring x's dimension to be reduced to last axis.
if a0 != x_ndim - 1:
pattern = list(range(x_ndim))
for i in range(a0, x_ndim - 1):
pattern[i] = pattern[i + 1]
pattern[-1] = a0
x = tf.transpose(x, pattern)
# bring y's dimension to be reduced to axis 1.
if a1 != 1:
pattern = list(range(y_ndim))
for i in range(a1, 1, -1):
pattern[i] = pattern[i - 1]
pattern[1] = a1
y = tf.transpose(y, pattern)
# normalize both inputs to rank 3.
if x_ndim > 3:
# squash middle dimensions of x.
x_shape = tf.shape(x)
x_mid_dims = x_shape[1:-1]
x_squashed_shape = tf.stack([x_shape[0], -1, x_shape[-1]])
x = tf.reshape(x, x_squashed_shape)
x_squashed = True
else:
x_squashed = False
if y_ndim > 3:
# squash trailing dimensions of y.
y_shape = tf.shape(y)
y_trail_dims = y_shape[2:]
y_squashed_shape = tf.stack([y_shape[0], y_shape[1], -1])
y = tf.reshape(y, y_squashed_shape)
y_squashed = True
else:
y_squashed = False
result = tf.matmul(x, y)
# if inputs were squashed, we have to reshape the matmul output.
output_shape = tf.shape(result)
do_reshape = False
if x_squashed:
output_shape = tf.concat(
[output_shape[:1], x_mid_dims, output_shape[-1:]], 0
)
do_reshape = True
if y_squashed:
output_shape = tf.concat([output_shape[:-1], y_trail_dims], 0)
do_reshape = True
if do_reshape:
result = tf.reshape(result, output_shape)
# if the inputs were originally rank 2, we remove the added 1 dim.
if orig_x_ndim == 2:
result = tf.squeeze(result, 1)
elif orig_y_ndim == 2:
result = tf.squeeze(result, -1)
return result
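# Worked shape example (illustrative, mirroring the classic Keras 2
# documentation semantics for this function):
#
# x = tf.ones((32, 20, 1))
# y = tf.ones((32, 30, 20))
# out = batch_dot(x, y, axes=(1, 2))
# Axis 1 of `x` (length 20) is contracted against axis 2 of `y`
# (length 20), leaving an output of shape (32, 1, 30).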
@keras_core_export("keras_core._legacy.backend.batch_flatten")
def batch_flatten(x):
"""DEPRECATED."""
x = tf.reshape(x, tf.stack([-1, prod(tf.shape(x)[1:])]))
return x
@keras_core_export("keras_core._legacy.backend.batch_get_value")
def batch_get_value(tensors):
"""DEPRECATED."""
return [x.numpy() for x in tensors]
@keras_core_export("keras_core._legacy.backend.batch_set_value")
def batch_set_value(tuples):
"""DEPRECATED."""
if tf.executing_eagerly() or tf.inside_function():
for x, value in tuples:
value = np.asarray(value, dtype=x.dtype.name)
x.assign(value)
@keras_core_export("keras_core._legacy.backend.batch_normalization")
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""DEPRECATED."""
return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
@keras_core_export("keras_core._legacy.backend.bias_add")
def bias_add(x, bias, data_format=None):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
bias_shape = bias.shape
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
f"Unexpected bias dimensions {len(bias_shape)}. "
f"Expected it to be 1 or {ndim(x) - 1} dimensions"
)
if len(bias_shape) == 1:
if data_format == "channels_first":
return tf.nn.bias_add(x, bias, data_format="NCHW")
return tf.nn.bias_add(x, bias, data_format="NHWC")
if ndim(x) in (3, 4, 5):
if data_format == "channels_first":
bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]
return x + reshape(bias, bias_reshape_axis)
return x + reshape(bias, (1,) + bias_shape)
return tf.nn.bias_add(x, bias)
@keras_core_export("keras_core._legacy.backend.binary_crossentropy")
def binary_crossentropy(target, output, from_logits=False):
"""DEPRECATED."""
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
if from_logits:
return tf.nn.sigmoid_cross_entropy_with_logits(
labels=target, logits=output
)
epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype)
output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)
# Compute cross entropy from probabilities.
bce = target * tf.math.log(output + backend.epsilon())
bce += (1 - target) * tf.math.log(1 - output + backend.epsilon())
return -bce
@keras_core_export("keras_core._legacy.backend.binary_focal_crossentropy")
def binary_focal_crossentropy(
target,
output,
apply_class_balancing=False,
alpha=0.25,
gamma=2.0,
from_logits=False,
):
"""DEPRECATED."""
sigmoidal = tf.sigmoid(output) if from_logits else output
p_t = target * sigmoidal + (1 - target) * (1 - sigmoidal)
# Calculate focal factor
focal_factor = tf.pow(1.0 - p_t, gamma)
# Binary crossentropy
bce = binary_crossentropy(
target=target,
output=output,
from_logits=from_logits,
)
focal_bce = focal_factor * bce
if apply_class_balancing:
weight = target * alpha + (1 - target) * (1 - alpha)
focal_bce = weight * focal_bce
return focal_bce
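# Numeric intuition for the focal factor above (illustrative): with the
# default `gamma=2.0`, a well-classified example with `p_t = 0.9` is scaled
# by (1 - 0.9) ** 2 = 0.01, while a hard example with `p_t = 0.1` is scaled
# by (1 - 0.1) ** 2 = 0.81, focusing the loss on hard examples.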
@keras_core_export("keras_core._legacy.backend.cast")
def cast(x, dtype):
"""DEPRECATED."""
return tf.cast(x, dtype)
@keras_core_export("keras_core._legacy.backend.cast_to_floatx")
def cast_to_floatx(x):
"""DEPRECATED."""
if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)):
return tf.cast(x, dtype=backend.floatx())
return np.asarray(x, dtype=backend.floatx())
@keras_core_export("keras_core._legacy.backend.categorical_crossentropy")
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""DEPRECATED."""
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
target.shape.assert_is_compatible_with(output.shape)
if from_logits:
return tf.nn.softmax_cross_entropy_with_logits(
labels=target, logits=output, axis=axis
)
# Adjust the predictions so that the probability of
# each class for every sample adds up to 1
# This is needed to ensure that the cross entropy is
# computed correctly.
output = output / tf.reduce_sum(output, axis, True)
# Compute cross entropy from probabilities.
epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype)
output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)
return -tf.reduce_sum(target * tf.math.log(output), axis)
@keras_core_export("keras_core._legacy.backend.categorical_focal_crossentropy")
def categorical_focal_crossentropy(
target,
output,
alpha=0.25,
gamma=2.0,
from_logits=False,
axis=-1,
):
"""DEPRECATED."""
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
target.shape.assert_is_compatible_with(output.shape)
if from_logits:
output = tf.nn.softmax(output, axis=axis)
# Adjust the predictions so that the probability of
# each class for every sample adds up to 1
# This is needed to ensure that the cross entropy is
# computed correctly.
output = output / tf.reduce_sum(output, axis=axis, keepdims=True)
epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype)
output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)
# Calculate cross entropy
cce = -target * tf.math.log(output)
# Calculate factors
modulating_factor = tf.pow(1.0 - output, gamma)
weighting_factor = tf.multiply(modulating_factor, alpha)
# Apply weighting factor
focal_cce = tf.multiply(weighting_factor, cce)
focal_cce = tf.reduce_sum(focal_cce, axis=axis)
return focal_cce
@keras_core_export("keras_core._legacy.backend.clip")
def clip(x, min_value, max_value):
"""DEPRECATED."""
if isinstance(min_value, (int, float)) and isinstance(
max_value, (int, float)
):
if max_value < min_value:
max_value = min_value
if min_value is None:
min_value = -np.inf
if max_value is None:
max_value = np.inf
return tf.clip_by_value(x, min_value, max_value)
@keras_core_export("keras_core._legacy.backend.concatenate")
def concatenate(tensors, axis=-1):
"""DEPRECATED."""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return tf.compat.v1.sparse_concat(axis, tensors)
elif py_all(isinstance(x, tf.RaggedTensor) for x in tensors):
return tf.concat(tensors, axis)
else:
return tf.concat([to_dense(x) for x in tensors], axis)
@keras_core_export("keras_core._legacy.backend.constant")
def constant(value, dtype=None, shape=None, name=None):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
return tf.constant(value, dtype=dtype, shape=shape, name=name)
def _preprocess_conv1d_input(x, data_format):
tf_data_format = "NWC" # to pass TF Conv2dNative operations
if data_format == "channels_first":
tf_data_format = "NCW"
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
tf_data_format = "NHWC"
if data_format == "channels_first":
if force_transpose:
x = tf.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = "NCHW"
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
tf_data_format = "NDHWC"
if data_format == "channels_first":
tf_data_format = "NCDHW"
return x, tf_data_format
def _preprocess_padding(padding):
if padding == "same":
padding = "SAME"
elif padding == "valid":
padding = "VALID"
else:
raise ValueError(f"Invalid padding: {padding}")
return padding
@keras_core_export("keras_core._legacy.backend.conv1d")
def conv1d(
x, kernel, strides=1, padding="valid", data_format=None, dilation_rate=1
):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
kernel_shape = kernel.shape.as_list()
if padding == "causal":
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = "valid"
padding = _preprocess_padding(padding)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
x = tf.compat.v1.nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format,
)
if data_format == "channels_first" and tf_data_format == "NWC":
x = tf.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
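# Causal-padding arithmetic (illustrative): with `kernel_size=3` and
# `dilation_rate=2`, left_pad = 2 * (3 - 1) = 4 zeros are prepended, so the
# output at timestep t never depends on inputs later than t.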
@keras_core_export("keras_core._legacy.backend.conv2d")
def conv2d(
x,
kernel,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
x = tf.compat.v1.nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format,
)
if data_format == "channels_first" and tf_data_format == "NHWC":
x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_core_export("keras_core._legacy.backend.conv2d_transpose")
def conv2d_transpose(
x,
kernel,
output_shape,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
# `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
if data_format == "channels_first" and dilation_rate != (1, 1):
force_transpose = True
else:
force_transpose = False
x, tf_data_format = _preprocess_conv2d_input(
x, data_format, force_transpose
)
if data_format == "channels_first" and tf_data_format == "NHWC":
output_shape = (
output_shape[0],
output_shape[2],
output_shape[3],
output_shape[1],
)
if output_shape[0] is None:
output_shape = (tf.shape(x)[0],) + tuple(output_shape[1:])
if isinstance(output_shape, (tuple, list)):
output_shape = tf.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == "NHWC":
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
if dilation_rate == (1, 1):
x = tf.compat.v1.nn.conv2d_transpose(
x,
kernel,
output_shape,
strides,
padding=padding,
data_format=tf_data_format,
)
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError(
"Expected the 2 dimensions of the `dilation_rate` argument "
"to be equal to each other. "
f"Received: dilation_rate={dilation_rate}"
)
x = tf.nn.atrous_conv2d_transpose(
x, kernel, output_shape, rate=dilation_rate[0], padding=padding
)
if data_format == "channels_first" and tf_data_format == "NHWC":
x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
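# Call sketch (illustrative; shapes are assumptions for the example). For
# NHWC input `x` of shape (1, 7, 7, 64) and a kernel of shape
# (3, 3, 32, 64) (height, width, out_channels, in_channels), upsampling by
# 2 requires an explicit output shape:
#
# out = conv2d_transpose(
#     x, kernel, output_shape=(1, 14, 14, 32), strides=(2, 2),
#     padding="same",
# )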
@keras_core_export("keras_core._legacy.backend.conv3d")
def conv3d(
x,
kernel,
strides=(1, 1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1, 1),
):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = tf.compat.v1.nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format,
)
if data_format == "channels_first" and tf_data_format == "NDHWC":
x = tf.transpose(x, (0, 4, 1, 2, 3))
return x
@keras_core_export("keras_core._legacy.backend.cos")
def cos(x):
"""DEPRECATED."""
return tf.cos(x)
@keras_core_export("keras_core._legacy.backend.count_params")
def count_params(x):
"""DEPRECATED."""
return np.prod(x.shape.as_list())
@keras_core_export("keras_core._legacy.backend.ctc_batch_cost")
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""DEPRECATED."""
label_length = tf.cast(tf.squeeze(label_length, axis=-1), tf.int32)
input_length = tf.cast(tf.squeeze(input_length, axis=-1), tf.int32)
sparse_labels = tf.cast(
ctc_label_dense_to_sparse(y_true, label_length), tf.int32
)
y_pred = tf.math.log(
tf.transpose(y_pred, perm=[1, 0, 2]) + backend.epsilon()
)
return tf.expand_dims(
tf.compat.v1.nn.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length
),
1,
)
@keras_core_export("keras_core._legacy.backend.ctc_label_dense_to_sparse")
def ctc_label_dense_to_sparse(labels, label_lengths):
"""DEPRECATED."""
label_shape = tf.shape(labels)
num_batches_tns = tf.stack([label_shape[0]])
max_num_labels_tns = tf.stack([label_shape[1]])
def range_less_than(old_input, current_input):
return tf.expand_dims(tf.range(tf.shape(old_input)[1]), 0) < tf.fill(
max_num_labels_tns, current_input
)
init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
dense_mask = tf.compat.v1.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1
)
dense_mask = dense_mask[:, 0, :]
label_array = tf.reshape(
tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape
)
label_ind = tf.compat.v1.boolean_mask(label_array, dense_mask)
batch_array = tf.transpose(
tf.reshape(
tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
reverse(label_shape, 0),
)
)
batch_ind = tf.compat.v1.boolean_mask(batch_array, dense_mask)
indices = tf.transpose(
tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1])
)
vals_sparse = tf.compat.v1.gather_nd(labels, indices)
return tf.SparseTensor(
tf.cast(indices, tf.int64), vals_sparse, tf.cast(label_shape, tf.int64)
)
@keras_core_export("keras_core._legacy.backend.ctc_decode")
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""DEPRECATED."""
input_shape = tf.shape(y_pred)
num_samples, num_steps = input_shape[0], input_shape[1]
y_pred = tf.math.log(
tf.transpose(y_pred, perm=[1, 0, 2]) + backend.epsilon()
)
input_length = tf.cast(input_length, tf.int32)
if greedy:
(decoded, log_prob) = tf.nn.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length
)
else:
(decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths,
)
decoded_dense = []
for st in decoded:
st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps))
decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1))
return (decoded_dense, log_prob)
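# Call sketch (illustrative; shapes are assumptions for the example):
#
# y_pred = tf.nn.softmax(tf.random.uniform((2, 10, 5)), axis=-1)
# input_length = tf.constant([10, 10])  # valid timesteps per sample
# decoded, log_prob = ctc_decode(y_pred, input_length, greedy=True)
# `decoded` is a list holding one dense tensor of label indices per path,
# padded with -1; `log_prob` holds the per-sample decoding scores.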
@keras_core_export("keras_core._legacy.backend.cumsum")
def cumsum(x, axis=0):
"""DEPRECATED."""
return tf.cumsum(x, axis=axis)
@keras_core_export("keras_core._legacy.backend.cumprod")
def cumprod(x, axis=0):
"""DEPRECATED."""
return tf.math.cumprod(x, axis=axis)
@keras_core_export("keras_core._legacy.backend.depthwise_conv2d")
def depthwise_conv2d(
x,
depthwise_kernel,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == "NHWC":
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = tf.nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
dilations=dilation_rate,
data_format=tf_data_format,
)
if data_format == "channels_first" and tf_data_format == "NHWC":
x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_core_export("keras_core._legacy.backend.dot")
def dot(x, y):
"""DEPRECATED."""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(x.shape, tf.unstack(tf.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(y.shape, tf.unstack(tf.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = tf.reshape(x, [-1, x_shape[-1]])
yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return tf.reshape(
tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:]
)
if is_sparse(x):
out = tf.sparse.sparse_dense_matmul(x, y)
else:
out = tf.matmul(x, y)
return out
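# Shape sketch: for ranks above 2 this mimics `np.dot`, contracting the
# last axis of `x` with the second-to-last axis of `y` (unlike plain
# `tf.matmul`). `_dot_shape_example` is a hypothetical helper for
# illustration only.
def _dot_shape_example():
    x = tf.ones((2, 3, 4))
    y = tf.ones((7, 4, 5))
    out = dot(x, y)
    assert tuple(out.shape) == (2, 3, 7, 5)  # np.dot-style contraction
    return out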
@keras_core_export("keras_core._legacy.backend.dropout")
def dropout(x, level, noise_shape=None, seed=None):
"""DEPRECATED."""
if seed is None:
seed = np.random.randint(10e6)
return tf.nn.dropout(x, rate=level, noise_shape=noise_shape, seed=seed)
@keras_core_export("keras_core._legacy.backend.dtype")
def dtype(x):
"""DEPRECATED."""
return x.dtype.base_dtype.name
@keras_core_export("keras_core._legacy.backend.elu")
def elu(x, alpha=1.0):
"""DEPRECATED."""
res = tf.nn.elu(x)
if alpha == 1:
return res
else:
return tf.where(x > 0, res, alpha * res)
@keras_core_export("keras_core._legacy.backend.equal")
def equal(x, y):
"""DEPRECATED."""
return tf.equal(x, y)
@keras_core_export("keras_core._legacy.backend.eval")
def eval(x):
"""DEPRECATED."""
return get_value(to_dense(x))
@keras_core_export("keras_core._legacy.backend.exp")
def exp(x):
"""DEPRECATED."""
return tf.exp(x)
@keras_core_export("keras_core._legacy.backend.expand_dims")
def expand_dims(x, axis=-1):
"""DEPRECATED."""
return tf.expand_dims(x, axis)
@keras_core_export("keras_core._legacy.backend.eye")
def eye(size, dtype=None, name=None):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
tf_dtype = tf.as_dtype(dtype)
return variable(tf.eye(size, dtype=tf_dtype), dtype, name)
@keras_core_export("keras_core._legacy.backend.flatten")
def flatten(x):
"""DEPRECATED."""
return tf.reshape(x, [-1])
@keras_core_export("keras_core._legacy.backend.foldl")
def foldl(fn, elems, initializer=None, name=None):
"""DEPRECATED."""
return tf.compat.v1.foldl(fn, elems, initializer=initializer, name=name)
@keras_core_export("keras_core._legacy.backend.foldr")
def foldr(fn, elems, initializer=None, name=None):
"""DEPRECATED."""
return tf.compat.v1.foldr(fn, elems, initializer=initializer, name=name)
@keras_core_export("keras_core._legacy.backend.gather")
def gather(reference, indices):
"""DEPRECATED."""
return tf.compat.v1.gather(reference, indices)
@keras_core_export("keras_core._legacy.backend.get_value")
def get_value(x):
"""DEPRECATED."""
if not tf.is_tensor(x):
return x
if tf.executing_eagerly() or isinstance(x, tf.__internal__.EagerTensor):
return x.numpy()
if not getattr(x, "_in_graph_mode", True):
# This is a variable which was created in an eager context, but is being
# evaluated from a Graph.
with tf.__internal__.eager_context.eager_mode():
return x.numpy()
with tf.init_scope():
return x.numpy()
@keras_core_export("keras_core._legacy.backend.gradients")
def gradients(loss, variables):
"""DEPRECATED."""
return tf.compat.v1.gradients(
loss, variables, colocate_gradients_with_ops=True
)
@keras_core_export("keras_core._legacy.backend.greater")
def greater(x, y):
"""DEPRECATED."""
return tf.greater(x, y)
@keras_core_export("keras_core._legacy.backend.greater_equal")
def greater_equal(x, y):
"""DEPRECATED."""
return tf.greater_equal(x, y)
@keras_core_export("keras_core._legacy.backend.hard_sigmoid")
def hard_sigmoid(x):
"""DEPRECATED."""
point_two = tf.convert_to_tensor(0.2, dtype=x.dtype)
point_five = tf.convert_to_tensor(0.5, dtype=x.dtype)
x = tf.multiply(x, point_two)
x = tf.add(x, point_five)
x = tf.clip_by_value(x, 0.0, 1.0)
return x
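# Equivalent closed form: hard_sigmoid(x) = clip(0.2 * x + 0.5, 0, 1),
# a piecewise-linear approximation of the logistic sigmoid that matches
# it at x = 0 and saturates outside [-2.5, 2.5].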
@keras_core_export("keras_core._legacy.backend.in_top_k")
def in_top_k(predictions, targets, k):
"""DEPRECATED."""
return tf.compat.v1.math.in_top_k(predictions, targets, k)
@keras_core_export("keras_core._legacy.backend.int_shape")
def int_shape(x):
"""DEPRECATED."""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@keras_core_export("keras_core._legacy.backend.is_sparse")
def is_sparse(tensor):
"""DEPRECATED."""
spec = getattr(tensor, "_type_spec", None)
if spec is not None:
return isinstance(spec, tf.SparseTensorSpec)
return isinstance(tensor, tf.SparseTensor)
@keras_core_export("keras_core._legacy.backend.l2_normalize")
def l2_normalize(x, axis=None):
"""DEPRECATED."""
return tf.linalg.l2_normalize(x, axis=axis)
@keras_core_export("keras_core._legacy.backend.less")
def less(x, y):
"""DEPRECATED."""
return tf.less(x, y)
@keras_core_export("keras_core._legacy.backend.less_equal")
def less_equal(x, y):
"""DEPRECATED."""
return tf.less_equal(x, y)
@keras_core_export("keras_core._legacy.backend.log")
def log(x):
"""DEPRECATED."""
return tf.math.log(x)
@keras_core_export("keras_core._legacy.backend.map_fn")
def map_fn(fn, elems, name=None, dtype=None):
"""DEPRECATED."""
return tf.compat.v1.map_fn(fn, elems, name=name, dtype=dtype)
@keras_core_export("keras_core._legacy.backend.max")
def max(x, axis=None, keepdims=False):
"""DEPRECATED."""
return tf.reduce_max(x, axis, keepdims)
@keras_core_export("keras_core._legacy.backend.maximum")
def maximum(x, y):
"""DEPRECATED."""
return tf.maximum(x, y)
@keras_core_export("keras_core._legacy.backend.mean")
def mean(x, axis=None, keepdims=False):
"""DEPRECATED."""
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, backend.floatx())
return tf.reduce_mean(x, axis, keepdims)
@keras_core_export("keras_core._legacy.backend.min")
def min(x, axis=None, keepdims=False):
"""DEPRECATED."""
return tf.reduce_min(x, axis, keepdims)
@keras_core_export("keras_core._legacy.backend.minimum")
def minimum(x, y):
"""DEPRECATED."""
return tf.minimum(x, y)
@keras_core_export("keras_core._legacy.backend.moving_average_update")
def moving_average_update(x, value, momentum):
"""DEPRECATED."""
momentum = tf.cast(momentum, x.dtype)
value = tf.cast(value, x.dtype)
return x.assign_sub((x - value) * (1 - momentum))
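# The single `assign_sub` above is algebraically the standard EMA step:
#   x_new = momentum * x + (1 - momentum) * value
# since x - (x - value) * (1 - momentum) expands to exactly that.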
@keras_core_export("keras_core._legacy.backend.name_scope")
def name_scope(name):
"""DEPRECATED."""
return tf.name_scope(name)
@keras_core_export("keras_core._legacy.backend.ndim")
def ndim(x):
"""DEPRECATED."""
return x.shape.rank
@keras_core_export("keras_core._legacy.backend.not_equal")
def not_equal(x, y):
"""DEPRECATED."""
return tf.not_equal(x, y)
@keras_core_export("keras_core._legacy.backend.one_hot")
def one_hot(indices, num_classes):
"""DEPRECATED."""
return tf.one_hot(indices, depth=num_classes, axis=-1)
@keras_core_export("keras_core._legacy.backend.ones")
def ones(shape, dtype=None, name=None):
"""DEPRECATED."""
with tf.init_scope():
if dtype is None:
dtype = backend.floatx()
tf_dtype = tf.as_dtype(dtype)
v = tf.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
return v
@keras_core_export("keras_core._legacy.backend.ones_like")
def ones_like(x, dtype=None, name=None):
"""DEPRECATED."""
return tf.ones_like(x, dtype=dtype, name=name)
@keras_core_export("keras_core._legacy.backend.permute_dimensions")
def permute_dimensions(x, pattern):
"""DEPRECATED."""
return tf.transpose(x, perm=pattern)
@keras_core_export("keras_core._legacy.backend.pool2d")
def pool2d(
x,
pool_size,
strides=(1, 1),
padding="valid",
data_format=None,
pool_mode="max",
):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
if len(pool_size) != 2:
raise ValueError("`pool_size` must be a tuple of 2 integers.")
if len(strides) != 2:
raise ValueError("`strides` must be a tuple of 2 integers.")
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == "NHWC":
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == "max":
x = tf.compat.v1.nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format
)
elif pool_mode == "avg":
x = tf.compat.v1.nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format
)
else:
raise ValueError("Invalid pooling mode: " + str(pool_mode))
if data_format == "channels_first" and tf_data_format == "NHWC":
x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_core_export("keras_core._legacy.backend.pool3d")
def pool3d(
x,
pool_size,
strides=(1, 1, 1),
padding="valid",
data_format=None,
pool_mode="max",
):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == "NDHWC":
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == "max":
x = tf.nn.max_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format
)
elif pool_mode == "avg":
x = tf.nn.avg_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format
)
else:
raise ValueError("Invalid pooling mode: " + str(pool_mode))
if data_format == "channels_first" and tf_data_format == "NDHWC":
x = tf.transpose(x, (0, 4, 1, 2, 3))
return x
@keras_core_export("keras_core._legacy.backend.pow")
def pow(x, a):
"""DEPRECATED."""
return tf.pow(x, a)
@keras_core_export("keras_core._legacy.backend.prod")
def prod(x, axis=None, keepdims=False):
"""DEPRECATED."""
return tf.reduce_prod(x, axis, keepdims)
@keras_core_export("keras_core._legacy.backend.random_bernoulli")
def random_bernoulli(shape, p=0.0, dtype=None, seed=None):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
if seed is None:
seed = np.random.randint(10e6)
return tf.where(
tf.random.uniform(shape, dtype=dtype, seed=seed) <= p,
tf.ones(shape, dtype=dtype),
tf.zeros(shape, dtype=dtype),
)
@keras_core_export("keras_core._legacy.backend.random_normal")
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
if seed is None:
seed = np.random.randint(10e6)
return tf.random.normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed
)
@keras_core_export("keras_core._legacy.backend.random_normal_variable")
def random_normal_variable(
shape, mean, scale, dtype=None, name=None, seed=None
):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
tf_dtype = tf.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = tf.compat.v1.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed
)(shape)
return variable(value, dtype=dtype, name=name)
@keras_core_export("keras_core._legacy.backend.random_uniform")
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
if seed is None:
seed = np.random.randint(10e6)
return tf.random.uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed
)
@keras_core_export("keras_core._legacy.backend.random_uniform_variable")
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
tf_dtype = tf.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = tf.compat.v1.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed
)(shape)
return variable(value, dtype=dtype, name=name)
@keras_core_export("keras_core._legacy.backend.reshape")
def reshape(x, shape):
"""DEPRECATED."""
return tf.reshape(x, shape)
@keras_core_export("keras_core._legacy.backend.relu")
def relu(x, alpha=0.0, max_value=None, threshold=0.0):
"""DEPRECATED."""
# While x can be a tensor or variable, we also see cases where
# numpy arrays, lists, tuples are passed as well.
    # Lists and tuples do not have a 'dtype' attribute.
dtype = getattr(x, "dtype", backend.floatx())
if alpha != 0.0:
if max_value is None and threshold == 0:
return tf.nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = tf.nn.relu(-x + threshold)
else:
negative_part = tf.nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
x = x * tf.cast(tf.greater(x, threshold), dtype=dtype)
elif max_value == 6:
        # if there is no threshold, we can use the native tf.nn.relu6 op
        # for performance
x = tf.nn.relu6(x)
clip_max = False
else:
x = tf.nn.relu(x)
if clip_max:
max_value = tf.convert_to_tensor(max_value, dtype=x.dtype)
zero = tf.convert_to_tensor(0, dtype=x.dtype)
x = tf.clip_by_value(x, zero, max_value)
if alpha != 0.0:
alpha = tf.convert_to_tensor(alpha, dtype=x.dtype)
x -= alpha * negative_part
return x
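# Illustrative sketch of the three knobs; `_relu_variants_example` is a
# hypothetical helper, not part of the public API.
def _relu_variants_example():
    x = tf.constant([-2.0, -0.5, 0.5, 4.0, 8.0])
    leaky = relu(x, alpha=0.1)       # 0.1 * x for negative inputs
    capped = relu(x, max_value=6.0)  # min(max(x, 0), 6), i.e. ReLU6
    gated = relu(x, threshold=1.0)   # x where x > 1, else 0
    return leaky, capped, gated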
@keras_core_export("keras_core._legacy.backend.repeat")
def repeat(x, n):
"""DEPRECATED."""
assert ndim(x) == 2
x = tf.expand_dims(x, 1)
pattern = tf.stack([1, n, 1])
return tf.tile(x, pattern)
@keras_core_export("keras_core._legacy.backend.repeat_elements")
def repeat_elements(x, rep, axis):
"""DEPRECATED."""
x_shape = x.shape.as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = tf.split(value=x, num_or_size_splits=x_shape[axis], axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = tf.shape(x)
x_rep = tf.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.shape) + 1)
reps[auxiliary_axis] = rep
x_rep = tf.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
reps = tf.constant(reps, dtype="int32")
x_shape *= reps
x_rep = tf.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.shape.as_list()
x_rep.set_shape(x_shape)
return x_rep
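# Behavior sketch mirroring `np.repeat`; `_repeat_elements_example` is a
# hypothetical helper for illustration only.
def _repeat_elements_example():
    x = tf.constant([[1, 2], [3, 4]])
    out = repeat_elements(x, rep=2, axis=1)
    # out == [[1, 1, 2, 2], [3, 3, 4, 4]]
    return out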
@keras_core_export("keras_core._legacy.backend.resize_images")
def resize_images(
x, height_factor, width_factor, data_format, interpolation="nearest"
):
"""DEPRECATED."""
if data_format == "channels_first":
rows, cols = 2, 3
elif data_format == "channels_last":
rows, cols = 1, 2
else:
raise ValueError(f"Invalid `data_format` argument: {data_format}")
new_shape = x.shape[rows : cols + 1]
if new_shape.is_fully_defined():
new_shape = tf.constant(new_shape.as_list(), dtype="int32")
else:
new_shape = tf.shape(x)[rows : cols + 1]
new_shape *= tf.constant(
np.array([height_factor, width_factor], dtype="int32")
)
if data_format == "channels_first":
x = permute_dimensions(x, [0, 2, 3, 1])
interpolations = {
"area": tf.image.ResizeMethod.AREA,
"bicubic": tf.image.ResizeMethod.BICUBIC,
"bilinear": tf.image.ResizeMethod.BILINEAR,
"gaussian": tf.image.ResizeMethod.GAUSSIAN,
"lanczos3": tf.image.ResizeMethod.LANCZOS3,
"lanczos5": tf.image.ResizeMethod.LANCZOS5,
"mitchellcubic": tf.image.ResizeMethod.MITCHELLCUBIC,
"nearest": tf.image.ResizeMethod.NEAREST_NEIGHBOR,
}
    interpolations_list = '"' + '", "'.join(interpolations.keys()) + '"'
if interpolation in interpolations:
x = tf.image.resize(x, new_shape, method=interpolations[interpolation])
else:
raise ValueError(
"`interpolation` argument should be one of: "
            f'{interpolations_list}. Received: "{interpolation}".'
)
if data_format == "channels_first":
x = permute_dimensions(x, [0, 3, 1, 2])
return x
@keras_core_export("keras_core._legacy.backend.resize_volumes")
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""DEPRECATED."""
if data_format == "channels_first":
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == "channels_last":
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError(f"Invalid data_format: {data_format}")
@keras_core_export("keras_core._legacy.backend.reverse")
def reverse(x, axes):
"""DEPRECATED."""
if isinstance(axes, int):
axes = [axes]
return tf.reverse(x, axes)
@keras_core_export("keras_core._legacy.backend.rnn")
def rnn(
step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False,
return_all_outputs=True,
):
"""DEPRECATED."""
if not tf.__internal__.tf2.enabled():
return_all_outputs = True # Not supported in TF1.
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return tf.transpose(input_t, axes)
if not time_major:
inputs = tf.nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = tf.nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = tf.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.shape.with_rank_at_least(3)
if mask is not None:
if mask.dtype != tf.bool:
mask = tf.cast(mask, tf.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
# That's what the tile call does, it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
if tf.nest.is_nested(mask_t):
raise ValueError(
f"mask_t is expected to be tensor, but got {mask_t}"
)
if tf.nest.is_nested(input_t):
raise ValueError(
f"input_t is expected to be tensor, but got {input_t}"
)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = tf.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return tf.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError("Unrolling requires a fixed number of timesteps.")
states = tuple(initial_states)
successive_states = []
successive_outputs = []
        # Process the input tensors. The input tensor needs to be split on
        # the time_step dim, and reversed if go_backwards is True. In the
        # case of nested input, the input is flattened and then transformed
        # individually. The result is a tuple of lists, where each item in
        # the tuple is a list of tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = tf.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if tf.nest.is_nested(inputs):
processed_input = tf.nest.map_structure(
_process_single_input_t, inputs
)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return tf.nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = tf.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(
inp, tuple(states) + tuple(constants)
)
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = tf.where(tiled_mask_t, output, prev_output)
flat_states = tf.nest.flatten(states)
flat_new_states = tf.nest.flatten(new_states)
tiled_mask_t = tuple(
_expand_mask(mask_t, s) for s in flat_states
)
flat_final_states = tuple(
tf.where(m, s, ps)
for m, s, ps in zip(
tiled_mask_t, flat_new_states, flat_states
)
)
states = tf.nest.pack_sequence_as(states, flat_final_states)
if return_all_outputs:
successive_outputs.append(output)
successive_states.append(states)
else:
successive_outputs = [output]
successive_states = [states]
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = tf.stack(successive_outputs)
if zero_output_for_mask:
last_output = tf.where(
_expand_mask(mask_list[-1], last_output),
last_output,
zeros_like(last_output),
)
outputs = tf.where(
_expand_mask(mask, outputs, fixed_dim=2),
outputs,
zeros_like(outputs),
)
else: # mask is None
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(
inp, tuple(states) + tuple(constants)
)
if return_all_outputs:
successive_outputs.append(output)
successive_states.append(states)
else:
successive_outputs = [output]
successive_states = [states]
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = tf.stack(successive_outputs)
else: # Unroll == False
states = tuple(initial_states)
        # Create the input TensorArrays. If the input is a nested structure
        # of tensors, it is flattened first and one TensorArray is created
        # per flattened tensor.
input_ta = tuple(
tf.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name=f"input_ta_{i}",
)
for i, inp in enumerate(flatted_inputs)
)
input_ta = tuple(
ta.unstack(input_)
if not go_backwards
else ta.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs)
)
        # Get the time(0) input and compute the output for that; the output
        # is used to determine the dtype of the output TensorArrays. Don't
        # read from input_ta because TensorArray's clear_after_read defaults
        # to True.
input_time_zero = tf.nest.pack_sequence_as(
inputs, [inp[0] for inp in flatted_inputs]
)
        # output_time_zero is used to determine the cell output shape and
        # its dtype. The value is discarded.
output_time_zero, _ = step_function(
input_time_zero, tuple(initial_states) + tuple(constants)
)
output_ta_size = time_steps_t if return_all_outputs else 1
output_ta = tuple(
tf.TensorArray(
dtype=out.dtype,
size=output_ta_size,
element_shape=out.shape,
tensor_array_name=f"output_ta_{i}",
)
for i, out in enumerate(tf.nest.flatten(output_time_zero))
)
time = tf.constant(0, dtype="int32", name="time")
if input_length is None:
max_iterations = time_steps_t
else:
max_iterations = tf.reduce_max(input_length)
while_loop_kwargs = {
"cond": lambda time, *_: time < time_steps_t,
"maximum_iterations": max_iterations,
"parallel_iterations": 32,
"swap_memory": True,
}
if mask is not None:
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tf.TensorArray(
dtype=tf.bool, size=time_steps_t, tensor_array_name="mask_ta"
)
mask_ta = mask_ta.unstack(mask)
def masking_fn(time):
return mask_ta.read(time)
def compute_masked_output(mask_t, flat_out, flat_mask):
tiled_mask_t = tuple(
_expand_mask(mask_t, o, fixed_dim=len(mask_t.shape))
for o in flat_out
)
return tuple(
tf.where(m, o, fm)
for m, o, fm in zip(tiled_mask_t, flat_out, flat_mask)
)
elif isinstance(input_length, tf.Tensor):
if go_backwards:
max_len = tf.reduce_max(input_length, axis=0)
rev_input_length = tf.subtract(max_len - 1, input_length)
def masking_fn(time):
return tf.less(rev_input_length, time)
else:
def masking_fn(time):
return tf.greater(input_length, time)
def compute_masked_output(mask_t, flat_out, flat_mask):
return tuple(
tf.compat.v1.where(mask_t, o, zo)
for (o, zo) in zip(flat_out, flat_mask)
)
else:
masking_fn = None
if masking_fn is not None:
            # The mask for the output at step T is based on the output at
            # step T - 1. For T = 0, a zero-filled tensor is used.
flat_zero_output = tuple(
tf.zeros_like(o) for o in tf.nest.flatten(output_time_zero)
)
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Args:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = tf.nest.pack_sequence_as(inputs, current_input)
mask_t = masking_fn(time)
output, new_states = step_function(
current_input, tuple(states) + tuple(constants)
)
# mask output
flat_output = tf.nest.flatten(output)
flat_mask_output = (
flat_zero_output
if zero_output_for_mask
else tf.nest.flatten(prev_output)
)
flat_new_output = compute_masked_output(
mask_t, flat_output, flat_mask_output
)
# mask states
flat_state = tf.nest.flatten(states)
flat_new_state = tf.nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, tf.Tensor):
new_state.set_shape(state.shape)
flat_final_state = compute_masked_output(
mask_t, flat_new_state, flat_state
)
new_states = tf.nest.pack_sequence_as(
new_states, flat_final_state
)
ta_index_to_write = time if return_all_outputs else 0
output_ta_t = tuple(
ta.write(ta_index_to_write, out)
for ta, out in zip(output_ta_t, flat_new_output)
)
return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(
new_states
)
final_outputs = tf.compat.v1.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs,
)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Args:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
                Tuple: `(time + 1, output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = tf.nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(
current_input, tuple(states) + tuple(constants)
)
flat_state = tf.nest.flatten(states)
flat_new_state = tf.nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, tf.Tensor):
new_state.set_shape(state.shape)
flat_output = tf.nest.flatten(output)
ta_index_to_write = time if return_all_outputs else 0
output_ta_t = tuple(
ta.write(ta_index_to_write, out)
for ta, out in zip(output_ta_t, flat_output)
)
new_states = tf.nest.pack_sequence_as(
initial_states, flat_new_state
)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = tf.compat.v1.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs,
)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = tf.nest.pack_sequence_as(output_time_zero, outputs)
last_output = tf.nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
if isinstance(output_, tf.Tensor):
shape = output_.shape.as_list()
if return_all_outputs:
shape[0] = time_steps
else:
shape[0] = 1
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = tf.nest.map_structure(set_shape, outputs)
if not time_major:
outputs = tf.nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
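# Minimal usage sketch: `_rnn_example` is a hypothetical helper that runs
# a stateless cumulative-sum "cell" over batch-major inputs of shape
# (batch, timesteps, features).
def _rnn_example():
    def step_function(inputs, states):
        new_state = states[0] + inputs  # accumulate the inputs over time
        return new_state, [new_state]

    inputs = tf.ones((2, 5, 3))
    initial_states = [tf.zeros((2, 3))]
    last_output, outputs, new_states = rnn(
        step_function, inputs, initial_states
    )
    # last_output: (2, 3) filled with 5.0; outputs: (2, 5, 3).
    return last_output, outputs, new_states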
@keras_core_export("keras_core._legacy.backend.round")
def round(x):
"""DEPRECATED."""
return tf.round(x)
@keras_core_export("keras_core._legacy.backend.separable_conv2d")
def separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
if len(strides) != 2:
raise ValueError("`strides` must be a tuple of 2 integers.")
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == "NHWC":
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = tf.nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
dilations=dilation_rate,
data_format=tf_data_format,
)
if data_format == "channels_first" and tf_data_format == "NHWC":
x = tf.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_core_export("keras_core._legacy.backend.set_value")
def set_value(x, value):
"""DEPRECATED."""
value = np.asarray(value, dtype=x.dtype.name)
x.assign(value)
@keras_core_export("keras_core._legacy.backend.shape")
def shape(x):
"""DEPRECATED."""
return tf.shape(x)
@keras_core_export("keras_core._legacy.backend.sigmoid")
def sigmoid(x):
"""DEPRECATED."""
output = tf.sigmoid(x)
return output
@keras_core_export("keras_core._legacy.backend.sign")
def sign(x):
"""DEPRECATED."""
return tf.sign(x)
@keras_core_export("keras_core._legacy.backend.sin")
def sin(x):
"""DEPRECATED."""
return tf.sin(x)
@keras_core_export("keras_core._legacy.backend.softmax")
def softmax(x, axis=-1):
"""DEPRECATED."""
if x.shape.rank <= 1:
raise ValueError(
f"Cannot apply softmax to a tensor that is 1D. Received input: {x}"
)
if isinstance(axis, int):
output = tf.nn.softmax(x, axis=axis)
else:
# nn.softmax does not support tuple axis.
numerator = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
denominator = tf.reduce_sum(numerator, axis=axis, keepdims=True)
output = numerator / denominator
# Cache the logits to use for crossentropy loss.
output._keras_logits = x
return output
@keras_core_export("keras_core._legacy.backend.softplus")
def softplus(x):
"""DEPRECATED."""
return tf.math.softplus(x)
@keras_core_export("keras_core._legacy.backend.softsign")
def softsign(x):
"""DEPRECATED."""
return tf.math.softsign(x)
@keras_core_export("keras_core._legacy.backend.sparse_categorical_crossentropy")
def sparse_categorical_crossentropy(
target, output, from_logits=False, axis=-1, ignore_class=None
):
"""DEPRECATED."""
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
target = cast(target, "int64")
if not from_logits:
epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype)
output = tf.clip_by_value(output, epsilon_, 1 - epsilon_)
output = tf.math.log(output)
# Permute output so that the last axis contains the logits/probabilities.
if isinstance(output.shape, (tuple, list)):
output_rank = len(output.shape)
else:
output_rank = output.shape.ndims
if output_rank is not None:
axis %= output_rank
if axis != output_rank - 1:
permutation = list(
itertools.chain(
range(axis), range(axis + 1, output_rank), [axis]
)
)
output = tf.transpose(output, perm=permutation)
elif axis != -1:
raise ValueError(
"Cannot compute sparse categorical crossentropy with `axis={}` "
"on an output tensor with unknown rank".format(axis)
)
# Try to adjust the shape so that rank of labels = rank of logits - 1.
output_shape = tf.shape(output)
target_rank = target.shape.ndims
update_shape = (
target_rank is not None
and output_rank is not None
and target_rank != output_rank - 1
)
if update_shape:
target = flatten(target)
output = tf.reshape(output, [-1, output_shape[-1]])
if ignore_class is not None:
valid_mask = tf.not_equal(target, cast(ignore_class, target.dtype))
target = target[valid_mask]
output = output[valid_mask]
res = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target, logits=output
)
if ignore_class is not None:
res_shape = cast(output_shape[:-1], "int64")
valid_mask = tf.reshape(valid_mask, res_shape)
res = tf.scatter_nd(tf.where(valid_mask), res, res_shape)
res._keras_mask = valid_mask
return res
if update_shape and output_rank >= 3:
# If our output includes timesteps or
# spatial dimensions we need to reshape
res = tf.reshape(res, output_shape[:-1])
return res
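# Numeric sketch: with probabilities (not logits), the loss reduces to
# the negative log-probability of the true class. `_sparse_ce_example`
# is a hypothetical helper for illustration only.
def _sparse_ce_example():
    output = tf.constant([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])
    target = tf.constant([0, 1])
    # Approximately [-log(0.7), -log(0.8)].
    return sparse_categorical_crossentropy(target, output)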
@keras_core_export("keras_core._legacy.backend.spatial_2d_padding")
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""DEPRECATED."""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
if data_format == "channels_first":
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return tf.compat.v1.pad(x, pattern)
@keras_core_export("keras_core._legacy.backend.spatial_3d_padding")
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""DEPRECATED."""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
if data_format == "channels_first":
pattern = [
[0, 0],
[0, 0],
[padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]],
[padding[2][0], padding[2][1]],
]
else:
pattern = [
[0, 0],
[padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]],
[padding[2][0], padding[2][1]],
[0, 0],
]
return tf.compat.v1.pad(x, pattern)
@keras_core_export("keras_core._legacy.backend.sqrt")
def sqrt(x):
"""DEPRECATED."""
zero = tf.convert_to_tensor(0.0, x.dtype)
x = tf.maximum(x, zero)
return tf.sqrt(x)
@keras_core_export("keras_core._legacy.backend.square")
def square(x):
"""DEPRECATED."""
return tf.square(x)
@keras_core_export("keras_core._legacy.backend.squeeze")
def squeeze(x, axis):
"""DEPRECATED."""
return tf.squeeze(x, [axis])
@keras_core_export("keras_core._legacy.backend.stack")
def stack(x, axis=0):
"""DEPRECATED."""
return tf.stack(x, axis=axis)
@keras_core_export("keras_core._legacy.backend.std")
def std(x, axis=None, keepdims=False):
"""DEPRECATED."""
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, backend.floatx())
return tf.math.reduce_std(x, axis=axis, keepdims=keepdims)
@keras_core_export("keras_core._legacy.backend.stop_gradient")
def stop_gradient(variables):
"""DEPRECATED."""
if isinstance(variables, (list, tuple)):
return map(tf.stop_gradient, variables)
return tf.stop_gradient(variables)
@keras_core_export("keras_core._legacy.backend.sum")
def sum(x, axis=None, keepdims=False):
"""DEPRECATED."""
return tf.reduce_sum(x, axis, keepdims)
@keras_core_export("keras_core._legacy.backend.switch")
def switch(condition, then_expression, else_expression):
"""DEPRECATED."""
if condition.dtype != tf.bool:
condition = tf.cast(condition, "bool")
cond_ndim = ndim(condition)
if not cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = tf.compat.v1.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError(
"Rank of `condition` should be less than or"
" equal to rank of `then_expression` and "
"`else_expression`. ndim(condition)="
+ str(cond_ndim)
+ ", ndim(then_expression)="
+ str(expr_ndim)
)
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = tf.concat(
[tf.shape(condition), [1] * ndim_diff], axis=0
)
condition = tf.reshape(condition, cond_shape)
expr_shape = tf.shape(then_expression)
shape_diff = expr_shape - cond_shape
tile_shape = tf.where(
shape_diff > 0, expr_shape, tf.ones_like(expr_shape)
)
condition = tf.tile(condition, tile_shape)
x = tf.where(condition, then_expression, else_expression)
return x
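# Elementwise selection sketch; `_switch_example` is a hypothetical
# helper, not part of the public API.
def _switch_example():
    cond = tf.constant([True, False, True])
    out = switch(
        cond, tf.constant([1.0, 2.0, 3.0]), tf.constant([10.0, 20.0, 30.0])
    )
    # out == [1.0, 20.0, 3.0]
    return out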
@keras_core_export("keras_core._legacy.backend.tanh")
def tanh(x):
"""DEPRECATED."""
return tf.tanh(x)
@keras_core_export("keras_core._legacy.backend.temporal_padding")
def temporal_padding(x, padding=(1, 1)):
"""DEPRECATED."""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return tf.compat.v1.pad(x, pattern)
@keras_core_export("keras_core._legacy.backend.tile")
def tile(x, n):
"""DEPRECATED."""
if isinstance(n, int):
n = [n]
return tf.tile(x, n)
@keras_core_export("keras_core._legacy.backend.to_dense")
def to_dense(tensor):
"""DEPRECATED."""
if is_sparse(tensor):
return tf.sparse.to_dense(tensor)
else:
return tensor
@keras_core_export("keras_core._legacy.backend.transpose")
def transpose(x):
"""DEPRECATED."""
return tf.transpose(x)
@keras_core_export("keras_core._legacy.backend.truncated_normal")
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
if seed is None:
seed = np.random.randint(10e6)
return tf.random.truncated_normal(
shape, mean, stddev, dtype=dtype, seed=seed
)
@keras_core_export("keras_core._legacy.backend.update")
def update(x, new_x):
"""DEPRECATED."""
return tf.compat.v1.assign(x, new_x)
@keras_core_export("keras_core._legacy.backend.update_add")
def update_add(x, increment):
"""DEPRECATED."""
return tf.compat.v1.assign_add(x, increment)
@keras_core_export("keras_core._legacy.backend.update_sub")
def update_sub(x, decrement):
"""DEPRECATED."""
return tf.compat.v1.assign_sub(x, decrement)
@keras_core_export("keras_core._legacy.backend.var")
def var(x, axis=None, keepdims=False):
"""DEPRECATED."""
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, backend.floatx())
return tf.math.reduce_variance(x, axis=axis, keepdims=keepdims)
@keras_core_export("keras_core._legacy.backend.variable")
def variable(value, dtype=None, name=None, constraint=None):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
if hasattr(value, "tocoo"):
sparse_coo = value.tocoo()
indices = np.concatenate(
(
np.expand_dims(sparse_coo.row, 1),
np.expand_dims(sparse_coo.col, 1),
),
1,
)
v = tf.SparseTensor(
indices=indices,
values=sparse_coo.data,
dense_shape=sparse_coo.shape,
)
v._keras_shape = sparse_coo.shape
return v
v = tf.Variable(
value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint
)
return v
@keras_core_export("keras_core._legacy.backend.zeros")
def zeros(shape, dtype=None, name=None):
"""DEPRECATED."""
with tf.init_scope():
if dtype is None:
dtype = backend.floatx()
tf_dtype = tf.as_dtype(dtype)
v = tf.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
return v
@keras_core_export("keras_core._legacy.backend.zeros_like")
def zeros_like(x, dtype=None, name=None):
"""DEPRECATED."""
return tf.zeros_like(x, dtype=dtype, name=name)
| keras-core/keras_core/legacy/backend.py/0 | {
"file_path": "keras-core/keras_core/legacy/backend.py",
"repo_id": "keras-core",
"token_count": 33849
} | 36 |
import tree
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.utils.naming import auto_name
@keras_core_export(["keras_core.Loss", "keras_core.losses.Loss"])
class Loss:
"""Loss base class.
To be implemented by subclasses:
* `call()`: Contains the logic for loss calculation using `y_true`,
`y_pred`.
Example subclass implementation:
```python
class MeanSquaredError(Loss):
def call(self, y_true, y_pred):
return ops.mean(ops.square(y_pred - y_true), axis=-1)
```
"""
def __init__(self, name=None, reduction="sum_over_batch_size", dtype=None):
self.name = name or auto_name(self.__class__.__name__)
self.reduction = standardize_reduction(reduction)
self.dtype = dtype or backend.floatx()
def __call__(self, y_true, y_pred, sample_weight=None):
in_mask = getattr(y_pred, "_keras_mask", None)
with ops.name_scope(self.name):
y_pred = tree.map_structure(
lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_pred
)
y_true = tree.map_structure(
lambda x: ops.convert_to_tensor(x, dtype=self.dtype), y_true
)
losses = self.call(y_true, y_pred)
out_mask = getattr(losses, "_keras_mask", None)
if in_mask is not None and out_mask is not None:
mask = in_mask & out_mask
elif in_mask is not None:
mask = in_mask
elif out_mask is not None:
mask = out_mask
else:
mask = None
return reduce_weighted_values(
losses,
sample_weight=sample_weight,
mask=mask,
reduction=self.reduction,
dtype=self.dtype,
)
def call(self, y_true, y_pred):
raise NotImplementedError
def get_config(self):
return {"name": self.name, "reduction": self.reduction}
@classmethod
def from_config(cls, config):
return cls(**config)
def standardize_reduction(reduction):
allowed = {"sum_over_batch_size", "sum", None, "none"}
if reduction not in allowed:
raise ValueError(
"Invalid value for argument `reduction`. "
f"Expected on of {allowed}. Received: "
f"reduction={reduction}"
)
return reduction
def squeeze_to_same_rank(x1, x2):
"""Squeeze last dim if ranks differ from expected by exactly 1."""
x1_rank = len(x1.shape)
x2_rank = len(x2.shape)
if x1_rank == x2_rank:
return x1, x2
if x1_rank == x2_rank + 1:
if x1.shape[-1] == 1:
x1 = ops.squeeze(x1, axis=-1)
if x2_rank == x1_rank + 1:
if x2.shape[-1] == 1:
x2 = ops.squeeze(x2, axis=-1)
return x1, x2
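# Rank-alignment sketch; `_squeeze_example` is a hypothetical helper
# showing how a trailing singleton axis is dropped so elementwise ops
# broadcast cleanly.
def _squeeze_example():
    a, b = squeeze_to_same_rank(ops.ones((8, 1)), ops.ones((8,)))
    # a.shape == (8,) and b.shape == (8,)
    return a, b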
def reduce_values(values, reduction="sum_over_batch_size"):
if (
reduction is None
or reduction == "none"
or tuple(values.shape) == ()
or tuple(values.shape) == (0,)
):
return values
loss = ops.sum(values)
if reduction == "sum_over_batch_size":
loss /= ops.cast(
ops.prod(ops.convert_to_tensor(ops.shape(values), dtype="int32")),
loss.dtype,
)
return loss
def reduce_weighted_values(
values,
sample_weight=None,
mask=None,
reduction="sum_over_batch_size",
dtype=None,
):
reduction = standardize_reduction(reduction)
values = ops.convert_to_tensor(values, dtype=dtype)
if sample_weight is not None:
sample_weight = ops.convert_to_tensor(sample_weight, dtype=dtype)
if mask is not None:
mask = ops.convert_to_tensor(mask, dtype=dtype)
# Merge mask and sample weight into sample weight.
sample_weight = apply_mask(
sample_weight, mask, dtype=values.dtype, reduction=reduction
)
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, values.dtype)
        # Update dimensions of `sample_weight` to match `values`.
values, sample_weight = squeeze_to_same_rank(values, sample_weight)
values = values * sample_weight
# Apply reduction function to the individual weighted losses.
loss = reduce_values(values, reduction)
return loss
def apply_mask(sample_weight, mask, dtype, reduction):
"""Applies any mask on predictions to sample weights."""
if mask is not None:
mask = ops.cast(mask, dtype=dtype)
if reduction == "sum_over_batch_size":
# Valid entries have weight `total/valid`, while invalid ones
# have 0. When summed over batch, they will be reduced to:
#
# mean(loss * sample_weight * total / valid)
# = sum(loss * sample_weight * total / valid) / total
# = sum(loss * sample_weight) / total * total / valid
# = sum(loss * sample_weight) / valid
total = ops.cast(
ops.prod(ops.convert_to_tensor(ops.shape(mask), dtype="int32")),
dtype,
)
valid = ops.sum(mask) # May be 0!
mask *= total / (valid + backend.epsilon())
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, dtype=dtype)
mask, sample_weight = squeeze_to_same_rank(mask, sample_weight)
sample_weight *= mask
else:
sample_weight = mask
return sample_weight
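# Weighting sketch; `_apply_mask_example` is a hypothetical helper. With
# reduction="sum_over_batch_size", a mask with 3 valid entries out of 4
# is rescaled to total / valid = 4/3 per valid position, so dividing the
# masked sum by the full batch size recovers the mean over valid entries.
def _apply_mask_example():
    mask = ops.convert_to_tensor([1.0, 1.0, 1.0, 0.0])
    weights = apply_mask(
        None, mask, dtype="float32", reduction="sum_over_batch_size"
    )
    # weights ~= [4/3, 4/3, 4/3, 0.0]
    return weights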
| keras-core/keras_core/losses/loss.py/0 | {
"file_path": "keras-core/keras_core/losses/loss.py",
"repo_id": "keras-core",
"token_count": 2541
} | 37 |
import numpy as np
from keras_core import backend
from keras_core import initializers
from keras_core import metrics as metrics_module
from keras_core import ops
from keras_core import testing
from keras_core.metrics.metric import Metric
class ExampleMetric(Metric):
def __init__(self, name="mean_square_error", dtype=None):
super().__init__(name=name, dtype=dtype)
self.sum = self.add_variable(
name="sum", shape=(), initializer=initializers.Zeros()
)
self.total = self.add_variable(
name="total",
shape=(),
initializer=initializers.Zeros(),
dtype="int32",
)
def update_state(self, y_true, y_pred):
y_true = ops.convert_to_tensor(y_true)
y_pred = ops.convert_to_tensor(y_pred)
sum = ops.sum((y_true - y_pred) ** 2)
self.sum.assign(self.sum + sum)
batch_size = ops.shape(y_true)[0]
self.total.assign(self.total + batch_size)
def result(self):
return self.sum / (ops.cast(self.total, dtype="float32") + 1e-7)
def reset_state(self):
self.sum.assign(0.0)
self.total.assign(0)
class MetricTest(testing.TestCase):
def test_end_to_end_flow(self):
metric = ExampleMetric(name="mse")
self.assertEqual(metric.name, "mse")
self.assertEqual(len(metric.variables), 2)
num_samples = 20
y_true = np.random.random((num_samples, 3))
y_pred = np.random.random((num_samples, 3))
batch_size = 8
for b in range(0, num_samples // batch_size + 1):
y_true_batch = y_true[b * batch_size : (b + 1) * batch_size]
y_pred_batch = y_pred[b * batch_size : (b + 1) * batch_size]
metric.update_state(y_true_batch, y_pred_batch)
self.assertAllClose(metric.total, 20)
result = metric.result()
self.assertAllClose(
result, np.sum((y_true - y_pred) ** 2) / num_samples
)
metric.reset_state()
self.assertEqual(metric.result(), 0.0)
def test_stateless_update_state(self):
metric = ExampleMetric(name="mse")
self.assertEqual(len(metric.variables), 2)
original_variable_values = (
metric.variables[0].numpy(),
metric.variables[1].numpy(),
)
num_samples = 20
y_true = np.random.random((num_samples, 3))
y_pred = np.random.random((num_samples, 3))
batch_size = 8
metric_variables = metric.variables
for b in range(0, num_samples // batch_size + 1):
y_true_batch = y_true[b * batch_size : (b + 1) * batch_size]
y_pred_batch = y_pred[b * batch_size : (b + 1) * batch_size]
metric_variables = metric.stateless_update_state(
metric_variables, y_true_batch, y_pred_batch
)
self.assertAllClose(metric.variables[0], original_variable_values[0])
self.assertAllClose(metric.variables[1], original_variable_values[1])
metric.variables[0].assign(metric_variables[0])
metric.variables[1].assign(metric_variables[1])
self.assertAllClose(metric.total, 20)
result = metric.result()
self.assertAllClose(
result, np.sum((y_true - y_pred) ** 2) / num_samples
)
if backend.backend() == "jax":
# Check no side effects.
import jax
@jax.jit
def update(metric_variables, y_true_batch, y_pred_batch):
metric_variables = metric.stateless_update_state(
metric_variables, y_true_batch, y_pred_batch
)
update(metric_variables, y_true_batch, y_pred_batch)
def test_stateless_result(self):
metric = ExampleMetric(name="mse")
res = metric.stateless_result([ops.ones(()) * 12, ops.ones(()) * 3])
self.assertAllClose(res, 4.0)
def test_variable_tracking(self):
# In list
metric = ExampleMetric(name="mse")
metric.more_vars = [backend.Variable(0.0), backend.Variable(1.0)]
self.assertEqual(len(metric.variables), 4)
# In dict
metric = ExampleMetric(name="mse")
metric.more_vars = {
"a": backend.Variable(0.0),
"b": backend.Variable(1.0),
}
self.assertEqual(len(metric.variables), 4)
# In nested structured
metric = ExampleMetric(name="mse")
metric.more_vars = {"a": [backend.Variable(0.0), backend.Variable(1.0)]}
self.assertEqual(len(metric.variables), 4)
def test_submetric_tracking(self):
# Plain attr
metric = ExampleMetric(name="mse")
metric.submetric = ExampleMetric(name="submse")
self.assertEqual(len(metric.variables), 4)
# In list
metric = ExampleMetric(name="mse")
metric.submetrics = [
ExampleMetric(name="submse1"),
ExampleMetric(name="submse2"),
]
self.assertEqual(len(metric.variables), 6)
# In dict
metric = ExampleMetric(name="mse")
metric.submetrics = {
"1": ExampleMetric(name="submse1"),
"2": ExampleMetric(name="submse2"),
}
self.assertEqual(len(metric.variables), 6)
def test_serialization(self):
self.run_class_serialization_test(
ExampleMetric(name="mse"),
custom_objects={"ExampleMetric": ExampleMetric},
)
def test_get_method(self):
metric = metrics_module.get("mse")
self.assertIsInstance(metric, metrics_module.MeanSquaredError)
metric = metrics_module.get("mean_squared_error")
self.assertIsInstance(metric, metrics_module.MeanSquaredError)
metric = metrics_module.get("categorical_accuracy")
self.assertIsInstance(metric, metrics_module.CategoricalAccuracy)
metric = metrics_module.get(None)
self.assertEqual(metric, None)
with self.assertRaises(ValueError):
metrics_module.get("typo")
| keras-core/keras_core/metrics/metric_test.py/0 | {
"file_path": "keras-core/keras_core/metrics/metric_test.py",
"repo_id": "keras-core",
"token_count": 2849
} | 38 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import layers
from keras_core import testing
from keras_core.layers.core.input_layer import Input
from keras_core.models.functional import Functional
from keras_core.models.model import Model
from keras_core.models.model import model_from_json
def _get_model():
input_a = Input(shape=(3,), batch_size=2, name="input_a")
input_b = Input(shape=(3,), batch_size=2, name="input_b")
x = input_a + input_b
x = layers.Dense(5)(x)
outputs = layers.Dense(4)(x)
model = Model([input_a, input_b], outputs)
return model
def _get_model_multi_outputs_list():
x = Input(shape=(3,), name="input_a")
output_a = layers.Dense(1, name="output_a")(x)
output_b = layers.Dense(1, name="output_b", activation="sigmoid")(x)
model = Model(x, [output_a, output_b])
return model
def _get_model_multi_outputs_list_no_output_names():
x = Input(shape=(3,), name="input_a")
output_a = layers.Dense(1)(x)
output_b = layers.Dense(1, activation="sigmoid")(x)
model = Model(x, [output_a, output_b])
return model
def _get_model_single_output():
x = Input(shape=(3,), name="input_a")
output_a = layers.Dense(1, name="output_a")(x)
model = Model(x, output_a)
return model
def _get_model_single_output_list():
x = Input(shape=(3,), name="input_a")
output_a = layers.Dense(1, name="output_a")(x)
model = Model(x, [output_a])
return model
def _get_model_single_output_dict():
x = Input(shape=(3,), name="input_a")
output_a = layers.Dense(1, name="output_a")(x)
model = Model(x, {"output_a": output_a})
return model
def _get_model_multi_outputs_dict():
x = Input(shape=(3,), name="input_a")
output_a = layers.Dense(1, name="output_a")(x)
output_b = layers.Dense(1, name="output_b", activation="sigmoid")(x)
model = Model(x, {"output_a": output_a, "output_b": output_b})
return model
@pytest.mark.requires_trainable_backend
class ModelTest(testing.TestCase, parameterized.TestCase):
def test_functional_rerouting(self):
model = _get_model()
self.assertIsInstance(model, Functional)
def test_json_serialization(self):
model = _get_model()
json_string = model.to_json()
new_model = model_from_json(json_string)
self.assertEqual(json_string, new_model.to_json())
def test_tuple_input_model_subclass(self):
# https://github.com/keras-team/keras-core/issues/324
class MultiInputModel(Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense1 = layers.Dense(4)
def call(self, inputs):
a, b = inputs
r = self.dense1(a)
return layers.concatenate([r, b])
model = MultiInputModel()
x1 = np.random.rand(3, 3)
x2 = np.random.rand(3, 2)
out = model((x1, x2))
self.assertEqual(out.shape, (3, 6))
def test_reviving_functional_from_config_custom_layer(self):
class CustomDense(layers.Layer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.dense = layers.Dense(units)
def call(self, x):
return self.dense(x)
inputs = layers.Input((4,))
outputs = CustomDense(10)(inputs)
model = Model(inputs, outputs)
config = model.get_config()
new_model = Model.from_config(
config, custom_objects={"CustomDense": CustomDense}
)
self.assertIsInstance(new_model, Functional)
@parameterized.named_parameters(
("single_output_1", _get_model_single_output, None),
("single_output_2", _get_model_single_output, "list"),
("single_output_3", _get_model_single_output, "dict"),
("single_output_4", _get_model_single_output, "dict_list"),
("single_list_output_1", _get_model_single_output_list, None),
("single_list_output_2", _get_model_single_output_list, "list"),
("single_list_output_3", _get_model_single_output_list, "dict"),
("single_list_output_4", _get_model_single_output_list, "dict_list"),
("single_dict_output_1", _get_model_single_output_dict, None),
("single_dict_output_2", _get_model_single_output_dict, "list"),
("single_dict_output_3", _get_model_single_output_dict, "dict"),
("single_dict_output_4", _get_model_single_output_dict, "dict_list"),
)
def test_functional_single_output(self, model_fn, loss_type):
model = model_fn()
self.assertIsInstance(model, Functional)
loss = "mean_squared_error"
if loss_type == "list":
loss = [loss]
elif loss_type == "dict":
loss = {"output_a": loss}
elif loss_type == "dict_lsit":
loss = {"output_a": [loss]}
model.compile(
optimizer="sgd",
loss=loss,
metrics={
"output_a": ["mean_squared_error", "mean_absolute_error"],
},
weighted_metrics={
"output_a": "mean_squared_error",
},
)
# Fit the model to make sure compile_metrics are built
x = np.random.rand(8, 3)
y = np.random.rand(8, 1)
hist = model.fit(
x,
y,
batch_size=2,
epochs=1,
verbose=0,
)
hist_keys = sorted(hist.history.keys())
ref_keys = sorted(
[
"loss",
"mean_absolute_error",
"mean_squared_error",
"weighted_mean_squared_error",
]
)
self.assertListEqual(hist_keys, ref_keys)
def test_functional_list_outputs_list_losses(self):
model = _get_model_multi_outputs_list()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss=["mean_squared_error", "binary_crossentropy"],
metrics=[
"mean_squared_error",
["mean_squared_error", "accuracy"],
],
loss_weights=[0.1, 2],
)
# Fit the model to make sure compile_metrics are built
hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
hist_keys = sorted(hist.history.keys())
# TODO `tf.keras` also outputs individual losses for outputs
ref_keys = sorted(
[
"loss",
# "output_a_loss",
"output_a_mean_squared_error",
"output_b_accuracy",
# "output_b_loss",
"output_b_mean_squared_error",
]
)
self.assertListEqual(hist_keys, ref_keys)
def test_functional_list_outputs_list_losses_abbr(self):
model = _get_model_multi_outputs_list()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss=["mse", "bce"],
metrics=[
["bce", "mse", "mae"],
["mse", "acc"],
],
loss_weights=[0.1, 2],
)
# Fit the model to make sure compile_metrics are built
hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
hist_keys = sorted(hist.history.keys())
# TODO `tf.keras` also outputs individual losses for outputs
ref_keys = sorted(
[
"loss",
# "output_a_loss",
"output_a_bce",
"output_a_mae",
"output_a_mse",
"output_b_acc",
# "output_b_loss",
"output_b_mse",
]
)
self.assertListEqual(hist_keys, ref_keys)
def test_functional_list_outputs_nested_list_losses(self):
model = _get_model_multi_outputs_list()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss=["mean_squared_error", ["binary_crossentropy"]],
metrics=[
"mean_squared_error",
["mean_squared_error", "accuracy"],
],
loss_weights=[0.1, 2],
)
# Fit the model to make sure compile_metrics are built
hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
hist_keys = sorted(hist.history.keys())
# TODO `tf.keras` also outputs individual losses for outputs
ref_keys = sorted(
[
"loss",
# "output_a_loss",
"output_a_mean_squared_error",
"output_b_accuracy",
# "output_b_loss",
"output_b_mean_squared_error",
]
)
self.assertListEqual(hist_keys, ref_keys)
def test_functional_dict_outputs_dict_losses(self):
model = _get_model_multi_outputs_dict()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss={
"output_a": "mean_squared_error",
"output_b": ["binary_crossentropy"],
},
metrics={
"output_a": ["mean_squared_error"],
"output_b": ["mean_squared_error", "accuracy"],
},
weighted_metrics={
"output_a": ["mean_squared_error"],
"output_b": ["mean_squared_error", "accuracy"],
},
)
# Fit the model to make sure compile_metrics are built
hist = model.fit(
x,
{"output_a": y1, "output_b": y2},
batch_size=2,
epochs=1,
verbose=0,
)
hist_keys = sorted(hist.history.keys())
# TODO `tf.keras` also outputs individual losses for outputs
ref_keys = sorted(
[
"loss",
# "output_a_loss",
"output_a_mean_squared_error",
"output_a_weighted_mean_squared_error",
"output_b_accuracy",
# "output_b_loss",
"output_b_mean_squared_error",
"output_b_weighted_accuracy",
"output_b_weighted_mean_squared_error",
]
)
self.assertListEqual(hist_keys, ref_keys)
def test_functional_list_outputs_dict_losses_metrics(self):
model = _get_model_multi_outputs_list()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss={
"output_a": "mean_squared_error",
"output_b": "binary_crossentropy",
},
metrics={
"output_a": ["mean_squared_error"],
"output_b": ["mean_squared_error", "accuracy"],
},
weighted_metrics={
"output_a": ["mean_squared_error"],
"output_b": ["mean_squared_error", "accuracy"],
},
)
# Fit the model to make sure compile_metrics are built
hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
hist_keys = sorted(hist.history.keys())
# TODO `tf.keras` also outputs individual losses for outputs
ref_keys = sorted(
[
"loss",
# "output_a_loss",
"output_a_mean_squared_error",
"output_a_weighted_mean_squared_error",
"output_b_accuracy",
# "output_b_loss",
"output_b_mean_squared_error",
"output_b_weighted_accuracy",
"output_b_weighted_mean_squared_error",
]
)
self.assertListEqual(hist_keys, ref_keys)
def test_functional_list_outputs_dict_losses_metrics_uniq_weighted(self):
model = _get_model_multi_outputs_list()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss={
"output_a": "mean_squared_error",
"output_b": "binary_crossentropy",
},
metrics={
"output_a": ["mean_squared_error"],
"output_b": ["mean_squared_error"],
},
weighted_metrics={
"output_a": ["mean_squared_error"],
"output_b": ["accuracy"],
},
)
# Fit the model to make sure compile_metrics are built
hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
hist_keys = sorted(hist.history.keys())
# TODO `tf.keras` also outputs individual losses for outputs
# `output_b_accuracy` doesn't have `weighted_` in metric name.
# When a metric is only in weighted metrics, it skips `weighted_`
# prefix. This behavior matches `tf.keras`.
ref_keys = sorted(
[
"loss",
# "output_a_loss",
"output_a_mean_squared_error",
"output_a_weighted_mean_squared_error",
"output_b_accuracy",
# "output_b_loss",
"output_b_mean_squared_error",
]
)
self.assertListEqual(hist_keys, ref_keys)
def test_functional_list_outputs_dict_losses_partial_metrics(self):
model = _get_model_multi_outputs_list()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss={
"output_a": "mean_squared_error",
"output_b": "binary_crossentropy",
},
metrics={
"output_b": ["mean_squared_error", "accuracy"],
},
)
# Fit the model to make sure compile_metrics are built
hist = model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
hist_keys = sorted(hist.history.keys())
# TODO `tf.keras` also outputs individual losses for outputs
ref_keys = sorted(
[
"loss",
# "output_a_loss",
"output_b_accuracy",
# "output_b_loss",
"output_b_mean_squared_error",
]
)
self.assertListEqual(hist_keys, ref_keys)
def test_functional_list_outputs_dict_losses_invalid_keys(self):
model = _get_model_multi_outputs_list()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss={
"output_a": "mean_squared_error",
"output_c": "binary_crossentropy",
},
)
# Fit the model to make sure compile_metrics are built
with self.assertRaisesRegex(
ValueError,
"In the dict argument `loss`, "
"key 'output_c' does not correspond to any model output",
):
model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
def test_functional_list_outputs_dict_losses_no_output_names(self):
model = _get_model_multi_outputs_list_no_output_names()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss={"output_a": "mean_squared_error"},
)
# Fit the model to make sure compile_metrics are built
with self.assertRaisesRegex(
ValueError,
"In the dict argument `loss`, "
"key 'output_a' does not correspond to any model output",
):
model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
def test_functional_list_outputs_dict_metrics_invalid_keys(self):
model = _get_model_multi_outputs_list()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss={
"output_a": "mean_squared_error",
"output_b": "binary_crossentropy",
},
metrics={
"output_c": ["mean_squared_error", "accuracy"],
},
)
# Fit the model to make sure compile_metrics are built
with self.assertRaisesRegex(
ValueError,
"In the dict argument `metrics`, "
"key 'output_c' does not correspond to any model output",
):
model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
def test_functional_dict_outputs_dict_losses_invalid_keys(self):
model = _get_model_multi_outputs_dict()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss={
"output_a": "mean_squared_error",
"output_c": "binary_crossentropy",
},
)
# Fit the model to make sure compile_metrics are built
with self.assertRaisesRegex(
ValueError,
"In the dict argument `loss`, "
"key 'output_c' does not correspond to any model output",
):
model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
def test_functional_dict_outputs_dict_metrics_invalid_keys(self):
model = _get_model_multi_outputs_dict()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss={
"output_a": "mean_squared_error",
"output_b": "binary_crossentropy",
},
metrics={
"output_c": ["mean_squared_error", "accuracy"],
},
)
# Fit the model to make sure compile_metrics are built
with self.assertRaisesRegex(
ValueError,
"In the dict argument `metrics`, "
"key 'output_c' does not correspond to any model output",
):
model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
def test_functional_list_outputs_invalid_nested_list_losses(self):
model = _get_model_multi_outputs_list()
self.assertIsInstance(model, Functional)
x = np.random.rand(8, 3)
y1 = np.random.rand(8, 1)
y2 = np.random.randint(0, 2, (8, 1))
model.compile(
optimizer="sgd",
loss=[
"mean_squared_error",
["mean_squared_error", "binary_crossentropy"],
],
)
# Fit the model to make sure compile_metrics are built
with self.assertRaisesRegex(
ValueError,
"when providing the `loss` argument as a list, "
"it should have as many entries as the model has outputs",
):
model.fit(x, (y1, y2), batch_size=2, epochs=1, verbose=0)
| keras-core/keras_core/models/model_test.py/0 | {
"file_path": "keras-core/keras_core/models/model_test.py",
"repo_id": "keras-core",
"token_count": 10368
} | 39 |
import collections
import tree
from keras_core.backend import KerasTensor
from keras_core.ops.symbolic_arguments import SymbolicArguments
class Node:
"""A `Node` describes an operation `__call__()` event.
A Keras Function is a DAG with `Node` instances as nodes, and
`KerasTensor` instances as edges. Nodes aren't `Operation` instances,
because a single operation could be called multiple times, which would
result in graph cycles.
A `__call__()` event involves input tensors (and other input arguments),
the operation that was called, and the resulting output tensors.
A `Node` will include all this information.
Since a single `Operation` could be called multiple times,
the `Node` instances are stored on operations as a list.
Each time an operation is called, a node is added to `op._inbound_nodes`.
Each time the output of an operation is used by another operation,
a node is added to `op._outbound_nodes`.
Every `KerasTensor` instance has a `KerasHistory` object attached,
which tracks the `Node` that records the `__call__()` event that created
the tensor. By recursively walking through `Node` instances
via the `KerasHistory` metadata of `KerasTensor` instances, one can
retrieve the entire DAG of a Keras Function.
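Example (illustrative):
```python
# Given an output `KerasTensor` `y`, recover the node that produced it,
# then walk to its upstream dependencies:
history = y._keras_history
node = history.operation._inbound_nodes[history.node_index]
parents = node.parent_nodes  # Nodes producing this node's inputs.
```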
Args:
operation: The Operation that was called in the `op.__call__()`
event that this node represents.
call_args: The positional arguments the operation was called with.
call_kwargs: The keyword arguments the operation was called with.
outputs: The output tensors of the `op.__call__()` call.
"""
def __init__(
self, operation, call_args=None, call_kwargs=None, outputs=None
):
self.operation = operation
self.arguments = SymbolicArguments(*call_args, **call_kwargs)
self.outputs = [] if outputs is None else tree.flatten(outputs)
for x in self.outputs:
if not isinstance(x, KerasTensor):
raise ValueError(
"All operation outputs must be tensors. "
f"Operation {operation} returned a non-tensor. "
f"Non-tensor received: {x}"
)
zero_history = any(
not x.record_history for x in self.arguments.keras_tensors
)
# If inputs don't have metadata yet, add it.
if not zero_history:
for tensor in self.arguments.keras_tensors:
if not hasattr(tensor, "_keras_history"):
tensor._keras_history = KerasHistory(
operation=None, node_index=0, tensor_index=0
)
# Wire up Node to Operations.
self.operation._inbound_nodes.append(self)
for kt in self.arguments.keras_tensors:
inbound_op = kt._keras_history.operation
if inbound_op is not None:  # `None` only for `Input` tensors (graph entry points).
inbound_op._outbound_nodes.append(self)
# Set metadata on outputs.
if not zero_history:
node_index = len(self.operation._inbound_nodes) - 1
for i, tensor in enumerate(self.outputs):
tensor._keras_history = KerasHistory(
operation=operation, node_index=node_index, tensor_index=i
)
# Whether this is a root node.
self.is_input = not self.arguments.keras_tensors
def __repr__(self):
return f"<Node operation={self.operation}, id={id(self)}>"
@property
def input_tensors(self):
return self.arguments.keras_tensors
@property
def output_tensors(self):
return self.outputs
@property
def parent_nodes(self):
"""The parent `Node`s.
Returns:
all the `Node`s whose output this node immediately depends on.
"""
node_deps = []
for kt in self.arguments.keras_tensors:
op = kt._keras_history.operation
node_index = kt._keras_history.node_index
if op is not None: # `None` for `Input` tensors.
node_deps.append(op._inbound_nodes[node_index])
return node_deps
class KerasHistory(
collections.namedtuple(
"KerasHistory", ["operation", "node_index", "tensor_index"]
)
):
"""Tracks the Operation call that created a Tensor.
During construction of Keras Functions, this metadata is added to
each Tensor produced as the output of an Operation.
This allows Keras to track how each Tensor was produced, and
this information is later retraced by the `Function` class to
reconstruct the Operations graph.
Attributes:
operation: The Operation instance that produced the Tensor.
node_index: The specific call to the Operation that produced this Tensor.
Operations can be called multiple times in order to share weights. A new
node is created every time an Operation is called. The corresponding
node that represents the call event that produced the Tensor can be
found at `op._inbound_nodes[node_index]`.
tensor_index: The output index for this Tensor.
Always zero if the Operation that produced this Tensor
only has one output. Nested structures of
Tensors are deterministically assigned an index via `tree.flatten`.
"""
# Added to maintain memory and performance characteristics of `namedtuple`
# while subclassing.
__slots__ = ()
def is_keras_tensor(obj):
return hasattr(obj, "_keras_history")
| keras-core/keras_core/ops/node.py/0 | {
"file_path": "keras-core/keras_core/ops/node.py",
"repo_id": "keras-core",
"token_count": 2171
} | 40 |
# flake8: noqa
import numpy as np
from keras_core import backend
from keras_core import ops
from keras_core import testing
from keras_core.optimizers.adagrad import Adagrad
class AdagradTest(testing.TestCase):
def test_config(self):
optimizer = Adagrad(
learning_rate=0.5,
initial_accumulator_value=0.2,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adagrad(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.5233, 1.5007, 2.5005, 3.5061], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adagrad(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adagrad(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adagrad(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, places=6)
self.assertAlmostEqual(var2.numpy(), 2.0, places=6)
self.assertAlmostEqual(var3.numpy(), 2.0, places=6)
def test_correctness_with_golden(self):
optimizer = Adagrad(
learning_rate=0.2, initial_accumulator_value=0.3, epsilon=1e-6
)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963],
[0.9604, 0.9278, 0.9003, 0.8784, 0.8615, 0.8487, 0.8388, 0.8313, 0.8255, 0.8209],
[0.9251, 0.8629, 0.8137, 0.7768, 0.7497, 0.7298, 0.7151, 0.704, 0.6956, 0.6891],
[0.8903, 0.8012, 0.7342, 0.6862, 0.6521, 0.6277, 0.6099, 0.5967, 0.5867, 0.579],
[0.856, 0.7422, 0.6604, 0.6037, 0.5644, 0.5367, 0.5168, 0.5021, 0.491, 0.4825]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adagrad(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adagrad(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| keras-core/keras_core/optimizers/adagrad_test.py/0 | {
"file_path": "keras-core/keras_core/optimizers/adagrad_test.py",
"repo_id": "keras-core",
"token_count": 1719
} | 41 |
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.optimizers import base_optimizer
if backend.backend() == "tensorflow":
from keras_core.backend.tensorflow.optimizer import TFOptimizer
BackendOptimizer = TFOptimizer
elif backend.backend() == "torch":
from keras_core.backend.torch.optimizers import TorchOptimizer
BackendOptimizer = TorchOptimizer
else:
BackendOptimizer = base_optimizer.BaseOptimizer
@keras_core_export(["keras_core.Optimizer", "keras_core.optimizers.Optimizer"])
class Optimizer(BackendOptimizer):
pass
base_optimizer_keyword_args = base_optimizer.base_optimizer_keyword_args
Optimizer.__doc__ = base_optimizer.BaseOptimizer.__doc__
| keras-core/keras_core/optimizers/optimizer.py/0 | {
"file_path": "keras-core/keras_core/optimizers/optimizer.py",
"repo_id": "keras-core",
"token_count": 263
} | 42 |
from keras_core.saving.object_registration import CustomObjectScope
from keras_core.saving.object_registration import custom_object_scope
from keras_core.saving.object_registration import get_custom_objects
from keras_core.saving.object_registration import get_registered_name
from keras_core.saving.object_registration import get_registered_object
from keras_core.saving.object_registration import register_keras_serializable
from keras_core.saving.saving_api import load_model
from keras_core.saving.serialization_lib import deserialize_keras_object
from keras_core.saving.serialization_lib import serialize_keras_object
| keras-core/keras_core/saving/__init__.py/0 | {
"file_path": "keras-core/keras_core/saving/__init__.py",
"repo_id": "keras-core",
"token_count": 173
} | 43 |
from keras_core.utils.audio_dataset_utils import audio_dataset_from_directory
from keras_core.utils.dataset_utils import split_dataset
from keras_core.utils.file_utils import get_file
from keras_core.utils.image_dataset_utils import image_dataset_from_directory
from keras_core.utils.image_utils import array_to_img
from keras_core.utils.image_utils import img_to_array
from keras_core.utils.image_utils import load_img
from keras_core.utils.image_utils import save_img
from keras_core.utils.io_utils import disable_interactive_logging
from keras_core.utils.io_utils import enable_interactive_logging
from keras_core.utils.io_utils import is_interactive_logging_enabled
from keras_core.utils.model_visualization import model_to_dot
from keras_core.utils.model_visualization import plot_model
from keras_core.utils.numerical_utils import normalize
from keras_core.utils.numerical_utils import to_categorical
from keras_core.utils.progbar import Progbar
from keras_core.utils.python_utils import default
from keras_core.utils.python_utils import is_default
from keras_core.utils.python_utils import removeprefix
from keras_core.utils.python_utils import removesuffix
from keras_core.utils.rng_utils import set_random_seed
from keras_core.utils.sequence_utils import pad_sequences
from keras_core.utils.text_dataset_utils import text_dataset_from_directory
from keras_core.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
| keras-core/keras_core/utils/__init__.py/0 | {
"file_path": "keras-core/keras_core/utils/__init__.py",
"repo_id": "keras-core",
"token_count": 466
} | 44 |
# API Design Guidelines
In general, KerasCV abides by the
[API design guidelines of Keras](https://github.com/keras-team/governance/blob/master/keras_api_design_guidelines.md).
There are a few API guidelines that apply only to KerasCV. These are discussed
in this document.
# Label Names
When working with `bounding_box` and `segmentation_map` labels the
abbreviations `bbox` and `segm` are often used. In KerasCV, we will *not* be
using these abbreviations. This is done to ensure full consistency in our
naming convention. While the team is fond of the abbreviation `bbox`, we are
less fond of `segm`. In order to ensure full consistency, we have decided to
use the full names for label types in our code base.
# Preprocessing Layers
## Strength Parameters
Many augmentation layers take a parameter representing a strength, often called
`factor`. When possible, factor values must conform to the range: `[0, 1]`, with
1 representing the strongest transformation and 0 representing a no-op transform.
The strength of an augmentation should scale linearly with this factor. If needed,
a transformation can be performed to map to a large value range internally. If
this is done, please provide a thorough explanation of the value range semantics in
the docstring.
Additionally, factors should support both float and tuples as inputs. If a float is
passed, such as `factor=0.5`, the layer should default to the range `[0, factor]`.
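A minimal sketch of this convention (using a hypothetical helper name, not the
actual KerasCV implementation):
```python
def parse_factor_input(factor):
    """Normalize a `factor` given as a float or a `(lower, upper)` tuple."""
    if isinstance(factor, (int, float)):
        # A single float defaults to the range [0, factor].
        return (0.0, float(factor))
    lower, upper = factor
    return (float(lower), float(upper))
```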
## BaseImageAugmentationLayer
When implementing preprocessing, we encourage users to subclass the
`keras_cv.layers.preprocessing.BaseImageAugmentationLayer`. This layer provides
a common `call()` method, auto vectorization, and more.
When subclassing `BaseImageAugmentationLayer`, several methods can be overridden:
- `BaseImageAugmentationLayer.augment_image()` must be overridden
- `augment_label()` allows updates to be made to labels
- `augment_bounding_box()` allows updates to bounding boxes to be made
[`RandomShear` serves as a canonical example of how to subclass `BaseImageAugmentationLayer`](https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing/random_shear.py)
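For illustration, a toy subclass might look like the following (this layer is
not part of KerasCV; it simply inverts pixel values in the `[0, 255]` range):
```python
from keras_cv.layers import BaseImageAugmentationLayer


class RandomInvert(BaseImageAugmentationLayer):
    def augment_image(self, image, transformation=None, **kwargs):
        return 255.0 - image

    def augment_label(self, label, transformation=None, **kwargs):
        # A color inversion leaves labels unchanged.
        return label
```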
## Vectorization
`BaseImageAugmentationLayer` requires you to implement augmentations on an
image-wise basis instead of using a vectorized approach. This design choice
was made based on the results found in the
[vectorization\_strategy\_benchmark.py](../benchmarks/vectorization_strategy_benchmark.py)
benchmark.
In short, the benchmark shows that making use of `tf.vectorized_map()` performs
almost identically to a manually vectorized implementation. As such, we have
decided to rely on `tf.vectorized_map()` for performance.
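For reference, the pattern that `tf.vectorized_map()` enables looks like this
(standalone illustration):
```python
import tensorflow as tf

def augment_single_image(image):
    # Written for a single image; `tf.vectorized_map` handles the batch.
    return tf.image.flip_left_right(image)

images = tf.random.uniform((8, 64, 64, 3))
augmented = tf.vectorized_map(augment_single_image, images)
```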

## Color Based Preprocessing Layers
Some preprocessing layers in KerasCV perform color based transformations. This
includes `RandomBrightness`, `Equalize`, `Solarization`, and more.
Preprocessing layers that perform color based transformations make the
following assumptions:
- these layers must accept a `value_range`, which is a tuple of numbers.
- `value_range` must default to `(0, 255)`
- input images may be of any `dtype`
The decision to support inputs of any `dtype` is made based on the nuance that
some Keras layers cast user inputs without the user knowing. For example, if
`Solarization` expected user inputs to be of type `int`, and a custom layer
was accidentally casting inputs to `float32`, it would be a bad user experience
to raise an error asserting that all inputs must be of type `int`.
New preprocessing layers should be consistent with these decisions.
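In practice, such layers follow a transform-operate-restore pattern. A sketch
(using `keras_cv.utils.preprocessing.transform_value_range`, the helper used
by layers such as `Solarization`):
```python
from keras_cv.utils import preprocessing

def color_op(image, value_range):
    # Map user inputs into the canonical [0, 255] range...
    image = preprocessing.transform_value_range(
        image, original_range=value_range, target_range=(0, 255)
    )
    # ...apply the color transformation (here, a simple inversion)...
    image = 255.0 - image
    # ...then map back to the caller's value range.
    return preprocessing.transform_value_range(
        image, original_range=(0, 255), target_range=value_range
    )
```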
# Code samples
- Reference symbols via their top-level namespace in code samples (for example, in usage docstrings).
Prefer:
```python
keras_cv.layers.StochasticDepth
```
to:
```python
keras_cv.layers.regularization.stochastic_depth.StochasticDepth
```
| keras-cv/API_DESIGN.md/0 | {
"file_path": "keras-cv/API_DESIGN.md",
"repo_id": "keras-cv",
"token_count": 1029
} | 45 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import Grayscale
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
class OldGrayscale(BaseImageAugmentationLayer):
"""Grayscale is a preprocessing layer that transforms RGB images to
Grayscale images.
Input images should have values in the range of [0, 255].
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
output_channels: Number of color channels present in the output image.
The output_channels can be 1 or 3. RGB image with shape
(..., height, width, 3) will have the following shapes
after the `Grayscale` operation:
a. (..., height, width, 1) if output_channels = 1
b. (..., height, width, 3) if output_channels = 3.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
to_grayscale = keras_cv.layers.preprocessing.Grayscale()
augmented_images = to_grayscale(images)
```
"""
def __init__(self, output_channels=1, **kwargs):
super().__init__(**kwargs)
self.output_channels = output_channels
# This layer may raise an error when running on GPU using auto_vectorize
self.auto_vectorize = False
def compute_image_signature(self, images):
# required because of the `output_channels` argument
if isinstance(images, tf.RaggedTensor):
ragged_spec = tf.RaggedTensorSpec(
shape=images.shape[1:3] + [self.output_channels],
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
return tf.TensorSpec(
images.shape[1:3] + [self.output_channels], self.compute_dtype
)
def _check_input_params(self, output_channels):
if output_channels not in [1, 3]:
raise ValueError(
"Received invalid argument output_channels. "
f"output_channels must be in 1 or 3. Got {output_channels}"
)
self.output_channels = output_channels
def augment_image(self, image, transformation=None, **kwargs):
grayscale = tf.image.rgb_to_grayscale(image)
if self.output_channels == 1:
return grayscale
elif self.output_channels == 3:
return tf.image.grayscale_to_rgb(grayscale)
else:
raise ValueError("Unsupported value for `output_channels`.")
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"output_channels": self.output_channels,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(float)
print("x_train shape:", x_train.shape)
images = []
num_images = [1000, 2000, 5000, 10000]
results = {}
for aug in [Grayscale, OldGrayscale]:
c = aug.__name__
layer = aug()
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
c = aug.__name__ + " Graph Mode"
layer = aug()
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
# So we can actually see more relevant margins
del results["OldGrayscale"]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
| keras-cv/benchmarks/vectorized_grayscale.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_grayscale.py",
"repo_id": "keras-cv",
"token_count": 2285
} | 46 |
import time
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow import keras
from keras_cv.layers import BaseImageAugmentationLayer
from keras_cv.layers import Solarization
from keras_cv.utils import preprocessing
class OldSolarization(BaseImageAugmentationLayer):
def __init__(
self,
value_range,
addition_factor=0.0,
threshold_factor=0.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.seed = seed
self.addition_factor = preprocessing.parse_factor(
addition_factor,
max_value=255,
seed=seed,
param_name="addition_factor",
)
self.threshold_factor = preprocessing.parse_factor(
threshold_factor,
max_value=255,
seed=seed,
param_name="threshold_factor",
)
self.value_range = value_range
def get_random_transformation(self, **kwargs):
return (
self.addition_factor(dtype=self.compute_dtype),
self.threshold_factor(dtype=self.compute_dtype),
)
def augment_image(self, image, transformation=None, **kwargs):
(addition, threshold) = transformation
image = preprocessing.transform_value_range(
image,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
result = image + addition
result = tf.clip_by_value(result, 0, 255)
result = tf.where(result < threshold, result, 255 - result)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return result
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"threshold_factor": self.threshold_factor,
"addition_factor": self.addition_factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["threshold_factor"], dict):
config["threshold_factor"] = keras.utils.deserialize_keras_object(
config["threshold_factor"]
)
if isinstance(config["addition_factor"], dict):
config["addition_factor"] = keras.utils.deserialize_keras_object(
config["addition_factor"]
)
return cls(**config)
class SolarizationTest(tf.test.TestCase):
def test_consistency_with_old_implementation(self):
images = tf.random.uniform(shape=(16, 32, 32, 3))
output = Solarization(
value_range=(0, 1),
threshold_factor=(200, 200),
addition_factor=(100, 100),
)(images)
old_output = OldSolarization(
value_range=(0, 1),
threshold_factor=(200, 200),
addition_factor=(100, 100),
)(images)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [Solarization, OldSolarization]
aug_args = {"value_range": (0, 255), "threshold_factor": 0.5}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_solarization.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_solarization.py",
"repo_id": "keras-cv",
"token_count": 2943
} | 47 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Title: Train an Object Detection Model on Pascal VOC 2007 using KerasCV
Author: [tanzhenyu](https://github.com/tanzhenyu)
Date created: 2022/09/27
Last modified: 2022/09/27
Description: Use KerasCV to train a Faster R-CNN on Pascal VOC 2007.
"""
import sys
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import flags
from tensorflow import keras
import keras_cv
from keras_cv.callbacks import PyCOCOCallback
flags.DEFINE_string(
"weights_path",
"weights_{epoch:02d}.h5",
"Directory which will be used to store weight checkpoints.",
)
flags.DEFINE_integer(
"epochs",
18,
"Number of epochs to run for.",
)
flags.DEFINE_string(
"tensorboard_path",
"logs",
"Directory which will be used to store tensorboard logs.",
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
# parameters from FasterRCNN [paper](https://arxiv.org/pdf/1506.01497.pdf)
# Try to detect an available TPU. If none is present, defaults to
# MirroredStrategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
strategy = tf.distribute.TPUStrategy(tpu)
except ValueError:
# MirroredStrategy is best for a single machine with one or multiple GPUs
strategy = tf.distribute.MirroredStrategy()
print("Number of accelerators: ", strategy.num_replicas_in_sync)
local_batch = 4
global_batch = local_batch * strategy.num_replicas_in_sync
base_lr = 0.01 * global_batch / 16
image_size = [640, 640, 3]
train_ds = tfds.load(
"voc/2007", split="train+validation", with_info=False, shuffle_files=True
)
train_ds = train_ds.concatenate(
tfds.load(
"voc/2012",
split="train+validation",
with_info=False,
shuffle_files=True,
)
)
eval_ds = tfds.load("voc/2007", split="test", with_info=False)
with strategy.scope():
inputs = keras.layers.Input(shape=image_size)
x = inputs
x = keras.applications.resnet.preprocess_input(x)
backbone = keras.applications.ResNet50(
include_top=False, input_tensor=x, weights="imagenet"
)
c2_output, c3_output, c4_output, c5_output = [
backbone.get_layer(layer_name).output
for layer_name in [
"conv2_block3_out",
"conv3_block4_out",
"conv4_block6_out",
"conv5_block3_out",
]
]
backbone = keras.Model(
inputs=inputs,
outputs={2: c2_output, 3: c3_output, 4: c4_output, 5: c5_output},
)
# keras_cv backbone gives 2mAP lower result.
# TODO(ian): should eventually use keras_cv backbone.
# backbone = keras_cv.models.ResNet50(
# include_top=False, weights="imagenet", include_rescaling=False
# ).as_backbone()
model = keras_cv.models.FasterRCNN(
num_classes=20, bounding_box_format="yxyx", backbone=backbone
)
# TODO (tanzhenyu): migrate to KPL, as this is mostly a duplicate of
# https://github.com/tensorflow/models/blob/master/official/vision/ops/preprocess_ops.py#L138
def resize_and_crop_image(
image,
desired_size,
padded_size,
aug_scale_min=1.0,
aug_scale_max=1.0,
seed=1,
method=tf.image.ResizeMethod.BILINEAR,
):
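"""Resize `image` to `desired_size` with optional random scale jittering in
`[aug_scale_min, aug_scale_max]`, then pad to `padded_size`.

Returns the padded image and a 4x2 `image_info` tensor stacking the original
image size, the desired size, the 2D image scale, and the crop offset (used
downstream to adjust the ground-truth boxes).
"""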
with tf.name_scope("resize_and_crop_image"):
image_size = tf.cast(tf.shape(image)[0:2], tf.float32)
random_jittering = aug_scale_min != 1.0 or aug_scale_max != 1.0
if random_jittering:
random_scale = tf.random.uniform(
[], aug_scale_min, aug_scale_max, seed=seed
)
scaled_size = tf.round(random_scale * desired_size)
else:
scaled_size = desired_size
scale = tf.minimum(
scaled_size[0] / image_size[0], scaled_size[1] / image_size[1]
)
scaled_size = tf.round(image_size * scale)
# Computes 2D image_scale.
image_scale = scaled_size / image_size
# Selects non-zero random offset (x, y) if scaled image is larger than
# desired_size.
if random_jittering:
max_offset = scaled_size - desired_size
max_offset = tf.where(
tf.less(max_offset, 0), tf.zeros_like(max_offset), max_offset
)
offset = max_offset * tf.random.uniform(
[
2,
],
0,
1,
seed=seed,
)
offset = tf.cast(offset, tf.int32)
else:
offset = tf.zeros((2,), tf.int32)
scaled_image = tf.image.resize(
image, tf.cast(scaled_size, tf.int32), method=method
)
if random_jittering:
scaled_image = scaled_image[
offset[0] : offset[0] + desired_size[0],
offset[1] : offset[1] + desired_size[1],
:,
]
output_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, padded_size[0], padded_size[1]
)
image_info = tf.stack(
[
image_size,
tf.constant(desired_size, dtype=tf.float32),
image_scale,
tf.cast(offset, tf.float32),
]
)
return output_image, image_info
def resize_and_crop_boxes(boxes, image_scale, output_size, offset):
with tf.name_scope("resize_and_crop_boxes"):
# Adjusts box coordinates based on image_scale and offset.
boxes *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
boxes -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
# Clips the boxes.
boxes = clip_boxes(boxes, output_size)
return boxes
def clip_boxes(boxes, image_shape):
if boxes.shape[-1] != 4:
raise ValueError(
"boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])
)
with tf.name_scope("clip_boxes"):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
max_length = [height, width, height, width]
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.unstack(image_shape, axis=-1)
max_length = tf.stack([height, width, height, width], axis=-1)
clipped_boxes = tf.math.maximum(tf.math.minimum(boxes, max_length), 0.0)
return clipped_boxes
def get_non_empty_box_indices(boxes):
# Selects indices of boxes whose height and width are both greater than 0.
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
indices = tf.where(
tf.logical_and(tf.greater(height, 0), tf.greater(width, 0))
)
return indices[:, 0]
def resize_fn(image, gt_boxes, gt_classes):
image, image_info = resize_and_crop_image(
image, image_size[:2], image_size[:2], 0.8, 1.25
)
gt_boxes = resize_and_crop_boxes(
gt_boxes, image_info[2, :], image_info[1, :], image_info[3, :]
)
indices = get_non_empty_box_indices(gt_boxes)
gt_boxes = tf.gather(gt_boxes, indices)
gt_classes = tf.gather(gt_classes, indices)
return image, gt_boxes, gt_classes
def flip_fn(image, boxes):
if tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32) > 0.5:
image = tf.image.flip_left_right(image)
y1, x1, y2, x2 = tf.split(boxes, num_or_size_splits=4, axis=-1)
boxes = tf.concat([y1, 1.0 - x2, y2, 1.0 - x1], axis=-1)
return image, boxes
def proc_train_fn(bounding_box_format, img_size):
def apply(inputs):
image = inputs["image"]
image = tf.cast(image, tf.float32)
gt_boxes = inputs["objects"]["bbox"]
image, gt_boxes = flip_fn(image, gt_boxes)
gt_boxes = keras_cv.bounding_box.convert_format(
gt_boxes,
images=image,
source="rel_yxyx",
target=bounding_box_format,
)
gt_classes = tf.cast(inputs["objects"]["label"], tf.float32)
image, gt_boxes, gt_classes = resize_fn(image, gt_boxes, gt_classes)
return {
"images": image,
"gt_boxes": gt_boxes,
"gt_classes": gt_classes,
}
return apply
# TODO(tanzhenyu): consider remove padding while reduce function tracing.
def pad_fn(examples):
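"""Converts ragged ground-truth tensors to dense ones with a static shape of
32 boxes per image, using -1 as the padding value for boxes and classes."""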
gt_boxes = examples.pop("gt_boxes")
gt_classes = examples.pop("gt_classes")
gt_boxes = gt_boxes.to_tensor(
default_value=-1.0, shape=[global_batch, 32, 4]
)
gt_classes = gt_classes.to_tensor(
default_value=-1.0, shape=[global_batch, 32]
)
return examples["images"], {
"boxes": gt_boxes,
"classes": gt_classes,
}
train_ds = train_ds.map(
proc_train_fn(bounding_box_format="yxyx", img_size=image_size),
num_parallel_calls=tf.data.AUTOTUNE,
)
train_ds = train_ds.apply(
tf.data.experimental.dense_to_ragged_batch(
global_batch, drop_remainder=True
)
)
train_ds = train_ds.map(pad_fn, num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.shuffle(8)
train_ds = train_ds.prefetch(2)
eval_ds = eval_ds.map(
proc_train_fn(bounding_box_format="yxyx", img_size=image_size),
num_parallel_calls=tf.data.AUTOTUNE,
)
eval_ds = eval_ds.apply(
tf.data.experimental.dense_to_ragged_batch(
global_batch, drop_remainder=True
)
)
eval_ds = eval_ds.map(pad_fn, num_parallel_calls=tf.data.AUTOTUNE)
eval_ds = eval_ds.prefetch(2)
with strategy.scope():
lr_decay = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[12000 * 16 / global_batch, 16000 * 16 / global_batch],
values=[base_lr, 0.1 * base_lr, 0.01 * base_lr],
)
optimizer = keras.optimizers.SGD(
learning_rate=lr_decay, momentum=0.9, global_clipnorm=10.0
)
weight_decay = 0.0001
step = 0
callbacks = [
keras.callbacks.ModelCheckpoint(FLAGS.weights_path, save_weights_only=True),
keras.callbacks.TensorBoard(
log_dir=FLAGS.tensorboard_path, write_steps_per_second=True
),
PyCOCOCallback(eval_ds, bounding_box_format="yxyx"),
]
model.compile(
optimizer=optimizer,
box_loss="Huber",
classification_loss="SparseCategoricalCrossentropy",
rpn_box_loss="Huber",
rpn_classification_loss="BinaryCrossentropy",
)
model.fit(
train_ds, epochs=FLAGS.epochs, validation_data=eval_ds, callbacks=callbacks
)
| keras-cv/examples/training/object_detection/pascal_voc/faster_rcnn.py/0 | {
"file_path": "keras-cv/examples/training/object_detection/pascal_voc/faster_rcnn.py",
"repo_id": "keras-cv",
"token_count": 4874
} | 48 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
from keras_cv import backend
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend import tf_ops
from keras_cv.backend.config import keras_3
_ORIGINAL_OPS = copy.copy(backend.ops.__dict__)
_ORIGINAL_SUPPORTS_RAGGED = backend.supports_ragged
# A counter for potentially nested TF data scopes
_IN_TF_DATA_SCOPE = 0
def tf_data(function):
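"""Decorator ensuring a function runs with TF-compatible ops inside `tf.data`.

Under Keras 3, when the wrapped function is traced inside a TF graph (e.g. in
`tf.data.Dataset.map`), KerasCV's `ops` module is temporarily re-pointed at
TensorFlow implementations via `TFDataScope`.
"""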
@functools.wraps(function)
def wrapper(*args, **kwargs):
if keras_3() and keras.src.utils.backend_utils.in_tf_graph():
with TFDataScope():
return function(*args, **kwargs)
else:
return function(*args, **kwargs)
return wrapper
class TFDataScope:
def __enter__(self):
global _IN_TF_DATA_SCOPE
if _IN_TF_DATA_SCOPE == 0:
for k, v in ops.__dict__.items():
if k in tf_ops.__dict__:
setattr(ops, k, getattr(tf_ops, k))
backend.supports_ragged = lambda: True
_IN_TF_DATA_SCOPE += 1
def __exit__(self, exc_type, exc_value, exc_tb):
global _IN_TF_DATA_SCOPE
_IN_TF_DATA_SCOPE -= 1
if _IN_TF_DATA_SCOPE == 0:
for k, v in ops.__dict__.items():
setattr(ops, k, _ORIGINAL_OPS[k])
backend.supports_ragged = _ORIGINAL_SUPPORTS_RAGGED
_IN_TF_DATA_SCOPE = 0
| keras-cv/keras_cv/backend/scope.py/0 | {
"file_path": "keras-cv/keras_cv/backend/scope.py",
"repo_id": "keras-cv",
"token_count": 817
} | 49 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from keras_cv import backend
from keras_cv import bounding_box
from keras_cv.tests.test_case import TestCase
class ToRaggedTest(TestCase):
@pytest.mark.tf_keras_only
def test_converts_to_ragged(self):
bounding_boxes = {
"boxes": np.array(
[[[0, 0, 0, 0], [0, 0, 0, 0]], [[2, 3, 4, 5], [0, 1, 2, 3]]]
),
"classes": np.array([[-1, -1], [-1, 1]]),
"confidence": np.array([[0.5, 0.7], [0.23, 0.12]]),
}
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
self.assertEqual(bounding_boxes["boxes"][1].shape, [1, 4])
self.assertEqual(bounding_boxes["classes"][1].shape, [1])
self.assertEqual(
bounding_boxes["confidence"][1].shape,
[
1,
],
)
self.assertEqual(bounding_boxes["classes"][0].shape, [0])
self.assertEqual(bounding_boxes["boxes"][0].shape, [0, 4])
self.assertEqual(
bounding_boxes["confidence"][0].shape,
[
0,
],
)
@pytest.mark.tf_keras_only
def test_round_trip(self):
original = {
"boxes": np.array(
[
[[0, 0, 0, 0], [-1, -1, -1, -1]],
[[-1, -1, -1, -1], [-1, -1, -1, -1]],
]
),
"classes": np.array([[1, -1], [-1, -1]]),
"confidence": np.array([[0.5, -1], [-1, -1]]),
}
bounding_boxes = bounding_box.to_ragged(original)
bounding_boxes = bounding_box.to_dense(bounding_boxes, max_boxes=2)
self.assertEqual(bounding_boxes["boxes"][1].shape, [2, 4])
self.assertEqual(bounding_boxes["classes"][1].shape, [2])
self.assertEqual(bounding_boxes["classes"][0].shape, [2])
self.assertEqual(bounding_boxes["boxes"][0].shape, [2, 4])
self.assertEqual(bounding_boxes["confidence"][0].shape, [2])
self.assertAllEqual(bounding_boxes["boxes"], original["boxes"])
self.assertAllEqual(bounding_boxes["classes"], original["classes"])
self.assertAllEqual(
bounding_boxes["confidence"], original["confidence"]
)
@pytest.mark.skipif(
backend.supports_ragged(),
reason="Only applies to backends which don't support raggeds",
)
def test_backend_without_raggeds_throws(self):
bounding_boxes = {
"boxes": np.array(
[[[0, 0, 0, 0], [0, 0, 0, 0]], [[2, 3, 4, 5], [0, 1, 2, 3]]]
),
"classes": np.array([[-1, -1], [-1, 1]]),
"confidence": np.array([[0.5, 0.7], [0.23, 0.12]]),
}
with self.assertRaisesRegex(NotImplementedError, "support ragged"):
bounding_box.to_ragged(bounding_boxes)
| keras-cv/keras_cv/bounding_box/to_ragged_test.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/to_ragged_test.py",
"repo_id": "keras-cv",
"token_count": 1645
} | 50 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.core.FactorSampler")
class FactorSampler:
"""FactorSampler represents a strength factor for use in an augmentation
layer.
FactorSampler should be subclassed and implement a `__call__()` method that
returns a tf.float32, or a float. This method will be used by preprocessing
layers to determine the strength of their augmentation. The specific range
of values supported may vary by layer, but for most layers is the range
[0, 1].
"""
def __call__(self, shape=None, dtype="float32"):
raise NotImplementedError(
"FactorSampler subclasses must implement a `__call__()` method."
)
def get_config(self):
return {}
| keras-cv/keras_cv/core/factor_sampler/factor_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/core/factor_sampler/factor_sampler.py",
"repo_id": "keras-cv",
"token_count": 411
} | 51 |
### The ImageNet Dataset in keras_cv
In order to load ImageNet with KerasCV, you'll need to download the [original ImageNet dataset](https://image-net.org) and parse the images into TFRecords.
Tensorflow provides a [script](https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py) to perform this parsing and upload images to Google Cloud Storage (or optionally to local storage).
Please reference that script's instructions on producing ImageNet TFRecords, and then use the KerasCV loader to load records from wherever you choose to store them.
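For example (hypothetical bucket path; check the docstring of
`keras_cv.datasets.imagenet.load` for the exact arguments your version
supports):
```python
from keras_cv.datasets import imagenet

# `tfrecord_path` is a placeholder for wherever you stored the records.
train_ds = imagenet.load(
    split="train",
    tfrecord_path="gs://your-bucket/imagenet/tfrecords",
)
```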
| keras-cv/keras_cv/datasets/imagenet/README.md/0 | {
"file_path": "keras-cv/keras_cv/datasets/imagenet/README.md",
"repo_id": "keras-cv",
"token_count": 153
} | 52 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.tests.test_case import TestCase
try:
from keras_cv.datasets.waymo import load
from keras_cv.datasets.waymo import transformer
except ImportError:
# Waymo Open Dataset dependency may be missing, in which case we expect
# these tests will be skipped based on the TEST_WAYMO_DEPS environment var.
pass
class WaymoOpenDatasetTransformerTest(TestCase):
def setUp(self):
super().setUp()
self.test_data_path = os.path.abspath(
os.path.join(os.path.abspath(__file__), os.path.pardir, "test_data")
)
@pytest.mark.skipif(
"TEST_WAYMO_DEPS" not in os.environ
or os.environ["TEST_WAYMO_DEPS"] != "true",
reason="Requires Waymo Open Dataset package",
)
def test_load_and_transform(self):
tf_dataset = load(self.test_data_path)
# Extract records into a list.
dataset = list(tf_dataset)
self.assertEqual(len(dataset), 1)
lidar_tensors = next(iter(dataset))
num_boxes = lidar_tensors["label_box"].shape[0]
self.assertEqual(num_boxes, 16)
self.assertNotEqual(lidar_tensors["frame_id"], 0)
self.assertNotEqual(lidar_tensors["timestamp_micros"], 0)
self.assertEqual(lidar_tensors["timestamp_offset"], 0)
self.assertGreater(lidar_tensors["timestamp_micros"], 0)
self.assertAllEqual(
lidar_tensors["label_box_detection_difficulty"],
np.zeros(num_boxes, dtype="int32"),
)
# Laser points.
point_xyz_mean = tf.reduce_mean(lidar_tensors["point_xyz"], axis=0)
self.assertAllClose(
point_xyz_mean, lidar_tensors["pose"][:3, 3], atol=100
)
point_feature_mean = tf.reduce_mean(
lidar_tensors["point_feature"], axis=0
)
self.assertAllGreater(point_feature_mean[0], 0)
self.assertAllGreater(tf.abs(point_feature_mean[1]), 1e-6)
self.assertAllGreater(point_feature_mean[2:4], 0)
self.assertTrue(tf.math.reduce_all(lidar_tensors["point_mask"]))
# Laser labels.
self.assertEqual(lidar_tensors["label_box_id"].shape[0], num_boxes)
self.assertEqual(lidar_tensors["label_box_meta"].shape[0], num_boxes)
self.assertEqual(lidar_tensors["label_box_class"].shape[0], num_boxes)
self.assertEqual(lidar_tensors["label_box_density"].shape[0], num_boxes)
self.assertTrue(tf.math.reduce_all(lidar_tensors["label_box_mask"]))
self.assertAllGreater(
tf.math.reduce_max(lidar_tensors["label_point_class"]), 0
)
# Multi-frame tensors for augmentation.
augmented_example = next(
iter(tf_dataset.map(transformer.build_tensors_for_augmentation))
)
self.assertEqual(augmented_example["point_clouds"].shape, [183142, 8])
self.assertEqual(augmented_example["bounding_boxes"].shape, [16, 11])
@pytest.mark.skipif(
"TEST_WAYMO_DEPS" not in os.environ
or os.environ["TEST_WAYMO_DEPS"] != "true",
reason="Requires Waymo Open Dataset package",
)
def test_pad_and_transform_to_vehicle(self):
dataset = load(self.test_data_path)
dataset = dataset.map(
lambda x: (
transformer.pad_or_trim_tensors(
transformer.transform_to_vehicle_frame(x)
)
)
)
example = next(iter(dataset))
# Laser points.
self.assertEqual(example["point_xyz"].shape, [199600, 3])
self.assertEqual(example["point_feature"].shape, [199600, 4])
self.assertEqual(example["point_mask"].shape, [199600])
point_xyz_mean = tf.reduce_mean(example["point_xyz"], axis=0)
self.assertAllClose(point_xyz_mean, example["pose"][:3, 3], atol=100)
point_feature_mean = tf.reduce_mean(example["point_feature"], axis=0)
self.assertAllGreater(point_feature_mean[0], 0)
self.assertAllGreater(tf.abs(point_feature_mean[1]), 1e-6)
self.assertAllGreater(point_feature_mean[2:4], 0)
self.assertTrue(tf.math.reduce_any(example["point_mask"]))
# Laser labels.
self.assertEqual(example["label_box_id"].shape[0], 1000)
self.assertEqual(example["label_box_meta"].shape[0], 1000)
self.assertEqual(example["label_box_class"].shape[0], 1000)
self.assertEqual(example["label_box_density"].shape[0], 1000)
self.assertEqual(example["label_box_mask"].shape, [1000])
self.assertTrue(tf.math.reduce_any(example["label_box_mask"]))
self.assertAllGreater(
tf.math.reduce_max(example["label_point_class"]), 0
)
@pytest.mark.skipif(
"TEST_WAYMO_DEPS" not in os.environ
or os.environ["TEST_WAYMO_DEPS"] != "true",
reason="Requires Waymo Open Dataset package",
)
def test_convert_to_center_pillar_inputs(self):
dataset = load(self.test_data_path)
dataset = dataset.map(
lambda x: (
transformer.convert_to_center_pillar_inputs(
transformer.pad_or_trim_tensors(
transformer.transform_to_vehicle_frame(x)
)
)
)
)
example = next(iter(dataset))
# Laser points.
point_clouds = example["point_clouds"]
self.assertEqual(point_clouds["point_xyz"].shape, [199600, 3])
self.assertEqual(point_clouds["point_feature"].shape, [199600, 4])
self.assertEqual(point_clouds["point_mask"].shape, [199600])
point_feature_mean = tf.reduce_mean(
point_clouds["point_feature"], axis=0
)
self.assertAllGreater(point_feature_mean[0], 0)
self.assertAllGreater(tf.abs(point_feature_mean[1]), 1e-6)
self.assertAllGreater(point_feature_mean[2:4], 0)
self.assertTrue(tf.math.reduce_any(point_clouds["point_mask"]))
# Laser labels.
boxes = example["3d_boxes"]
self.assertEqual(boxes["boxes"].shape[0], 1000)
self.assertEqual(boxes["classes"].shape[0], 1000)
self.assertEqual(boxes["difficulty"].shape[0], 1000)
self.assertEqual(boxes["mask"].shape, [1000])
self.assertTrue(tf.math.reduce_any(boxes["mask"]))
self.assertAllGreater(tf.math.reduce_max(boxes["classes"]), 0)
| keras-cv/keras_cv/datasets/waymo/transformer_test.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/waymo/transformer_test.py",
"repo_id": "keras-cv",
"token_count": 3189
} | 53 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers.object_detection.box_matcher import BoxMatcher
from keras_cv.layers.object_detection.roi_sampler import _ROISampler
from keras_cv.tests.test_case import TestCase
@pytest.mark.tf_keras_only
class ROISamplerTest(TestCase):
def test_roi_sampler(self):
box_matcher = BoxMatcher(thresholds=[0.3], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=2,
append_gt_boxes=False,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and not sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
_, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
# given we only choose 1 positive sample, and `append_label` is False,
# only the 2nd ROI is chosen.
expected_gt_boxes = tf.constant(
[[0.0, 0.0, 0, 0.0], [0.0, 0.0, 0, 0.0]]
)
expected_gt_boxes = expected_gt_boxes[tf.newaxis, ...]
# only the 2nd ROI is chosen, and the negative ROI is mapped to 0.
expected_gt_classes = tf.constant([[10], [0]], dtype=tf.int32)
expected_gt_classes = expected_gt_classes[tf.newaxis, ...]
self.assertAllClose(
tf.reduce_max(expected_gt_boxes), tf.reduce_max(sampled_gt_boxes)
)
self.assertAllClose(
tf.reduce_min(expected_gt_classes),
tf.reduce_min(sampled_gt_classes),
)
def test_roi_sampler_small_threshold(self):
box_matcher = BoxMatcher(thresholds=[0.1], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=2,
append_gt_boxes=False,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and not sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
sampled_rois, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
        # given we only choose 1 positive sample, and `append_gt_boxes` is
        # False, only the 2nd ROI is chosen. No negative samples exist given we
# select positive_threshold to be 0.1. (the minimum IOU is 1/7)
# given num_sampled_rois=2, it selects the 1st ROI as well.
expected_rois = tf.constant([[5, 5, 10, 10], [0.0, 0.0, 5.0, 5.0]])
expected_rois = expected_rois[tf.newaxis, ...]
# all ROIs are matched to the 2nd gt box.
# the boxes are encoded by dimensions, so the result is
# tx, ty = (5.1 - 5.0) / 5 = 0.02, tx, ty = (5.1 - 2.5) / 5 = 0.52
# then divide by 0.1 as box variance.
expected_gt_boxes = (
tf.constant([[0.02, 0.02, 0.0, 0.0], [0.52, 0.52, 0.0, 0.0]]) / 0.1
)
expected_gt_boxes = expected_gt_boxes[tf.newaxis, ...]
# only the 2nd ROI is chosen, and the negative ROI is mapped to 0.
expected_gt_classes = tf.constant([[10], [10]], dtype=tf.int32)
expected_gt_classes = expected_gt_classes[tf.newaxis, ...]
self.assertAllClose(
tf.reduce_max(expected_rois, 1), tf.reduce_max(sampled_rois, 1)
)
self.assertAllClose(
tf.reduce_max(expected_gt_boxes, 1),
tf.reduce_max(sampled_gt_boxes, 1),
)
self.assertAllClose(expected_gt_classes, sampled_gt_classes)
def test_roi_sampler_large_threshold(self):
# the 2nd roi and 2nd gt box has IOU of 0.923, setting
# positive_threshold to 0.95 to ignore it.
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=2,
append_gt_boxes=False,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and not sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
_, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
# all ROIs are negative matches, so they are mapped to 0.
expected_gt_boxes = tf.zeros([1, 2, 4], dtype=tf.float32)
# only the 2nd ROI is chosen, and the negative ROI is mapped to 0.
expected_gt_classes = tf.constant([[0], [0]], dtype=tf.int32)
expected_gt_classes = expected_gt_classes[tf.newaxis, ...]
# self.assertAllClose(expected_rois, sampled_rois)
self.assertAllClose(expected_gt_boxes, sampled_gt_boxes)
self.assertAllClose(expected_gt_classes, sampled_gt_classes)
def test_roi_sampler_large_threshold_custom_bg_class(self):
# the 2nd roi and 2nd gt box has IOU of 0.923, setting
# positive_threshold to 0.95 to ignore it.
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
background_class=-1,
num_sampled_rois=2,
append_gt_boxes=False,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and not sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
_, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
# all ROIs are negative matches, so they are mapped to 0.
expected_gt_boxes = tf.zeros([1, 2, 4], dtype=tf.float32)
# only the 2nd ROI is chosen, and the negative ROI is mapped to -1 from
# customization.
expected_gt_classes = tf.constant([[-1], [-1]], dtype=tf.int32)
expected_gt_classes = expected_gt_classes[tf.newaxis, ...]
# self.assertAllClose(expected_rois, sampled_rois)
self.assertAllClose(expected_gt_boxes, sampled_gt_boxes)
self.assertAllClose(expected_gt_classes, sampled_gt_classes)
def test_roi_sampler_large_threshold_append_gt_boxes(self):
# the 2nd roi and 2nd gt box has IOU of 0.923, setting
# positive_threshold to 0.95 to ignore it.
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=2,
append_gt_boxes=True,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and not sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
_, sampled_gt_boxes, _, sampled_gt_classes, _ = roi_sampler(
rois, gt_boxes, gt_classes
)
# the selected gt boxes should be [0, 0, 0, 0], and [10, 10, 15, 15]
# but the 2nd will be encoded to 0.
self.assertAllClose(tf.reduce_min(sampled_gt_boxes), 0)
self.assertAllClose(tf.reduce_max(sampled_gt_boxes), 0)
# the selected gt classes should be [0, 2 or 10]
self.assertAllLessEqual(tf.reduce_max(sampled_gt_classes), 10)
self.assertAllGreaterEqual(tf.reduce_min(sampled_gt_classes), 0)
def test_roi_sampler_large_num_sampled_rois(self):
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=200,
append_gt_boxes=True,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
rois = rois[tf.newaxis, ...]
# the 3rd box will generate 0 IOUs and not sampled.
gt_boxes = tf.constant(
[[10, 10, 15, 15], [2.6, 2.6, 7.6, 7.6], [-1, -1, -1, -1]]
)
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = tf.constant([[2, 10, -1]], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
with self.assertRaisesRegex(ValueError, "must be less than"):
_, _, _ = roi_sampler(rois, gt_boxes, gt_classes)
def test_serialization(self):
box_matcher = BoxMatcher(thresholds=[0.95], match_values=[-1, 1])
roi_sampler = _ROISampler(
bounding_box_format="xyxy",
roi_matcher=box_matcher,
positive_fraction=0.5,
num_sampled_rois=200,
append_gt_boxes=True,
)
sampler_config = roi_sampler.get_config()
new_sampler = _ROISampler.from_config(sampler_config)
self.assertAllEqual(new_sampler.roi_matcher.match_values, [-1, 1])
| keras-cv/keras_cv/layers/object_detection/roi_sampler_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_sampler_test.py",
"repo_id": "keras-cv",
"token_count": 6159
} | 54 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import layers
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.AugMix")
class AugMix(BaseImageAugmentationLayer):
"""Performs the AugMix data augmentation technique.
AugMix aims to produce images with variety while preserving the image
semantics and local statistics. During the augmentation process, each image
is augmented `num_chains` different ways, each way consisting of
`chain_depth` augmentations. Augmentations are sampled from the list:
translation, shearing, rotation, posterization, histogram equalization,
solarization and auto contrast. The results of each chain are then mixed
together with the original image based on random samples from a Dirichlet
distribution.
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written (low, high).
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
severity: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. A value is sampled from the provided
range. If a float is passed, the range is interpreted as
`(0, severity)`. This value represents the level of strength of
augmentations and is in the range [0, 1]. Defaults to 0.3.
num_chains: an integer representing the number of different chains to
be mixed, defaults to 3.
chain_depth: an integer or range representing the number of
            transformations in the chains. If a range is passed, a
            `chain_depth` value is sampled from a uniform distribution over
            the given range at the start of each chain. Defaults to [1, 3].
alpha: a float value used as the probability coefficients for the
Beta and Dirichlet distributions, defaults to 1.0.
seed: Integer. Used to create a random seed.
References:
- [AugMix paper](https://arxiv.org/pdf/1912.02781)
- [Official Code](https://github.com/google-research/augmix)
- [Unofficial TF Code](https://github.com/szacho/augmix-tf)
Sample Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
augmix = keras_cv.layers.AugMix([0, 255])
augmented_images = augmix(images[:100])
```
"""
def __init__(
self,
value_range,
severity=0.3,
num_chains=3,
chain_depth=[1, 3],
alpha=1.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.value_range = value_range
self.num_chains = num_chains
self.chain_depth = chain_depth
if isinstance(self.chain_depth, int):
self.chain_depth = [self.chain_depth, self.chain_depth]
self.alpha = alpha
self.seed = seed
self.auto_vectorize = False
self.severity = severity
self.severity_factor = preprocessing.parse_factor(
self.severity,
min_value=0.01,
max_value=1.0,
param_name="severity",
seed=self.seed,
)
# initialize layers
self.auto_contrast = layers.AutoContrast(value_range=self.value_range)
self.equalize = layers.Equalization(value_range=self.value_range)
def _sample_from_dirichlet(self, alpha):
gamma_sample = tf.random.gamma(
shape=(),
alpha=alpha,
)
return gamma_sample / tf.reduce_sum(
gamma_sample, axis=-1, keepdims=True
)
def _sample_from_beta(self, alpha, beta):
sample_alpha = tf.random.gamma(
(),
alpha=alpha,
)
sample_beta = tf.random.gamma(
(),
alpha=beta,
)
return sample_alpha / (sample_alpha + sample_beta)
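    # A short sketch of the two sampling tricks above (illustrative only,
    # using `tf` as imported in this file): normalizing independent
    # Gamma(alpha, 1) draws yields a Dirichlet sample, and X / (X + Y) with
    # X ~ Gamma(a, 1), Y ~ Gamma(b, 1) is distributed Beta(a, b).
    #   draws = tf.random.gamma(shape=(), alpha=tf.ones([3]))
    #   dirichlet = draws / tf.reduce_sum(draws)  # entries sum to 1.0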
def _sample_depth(self):
return self._random_generator.uniform(
shape=(),
minval=self.chain_depth[0],
maxval=self.chain_depth[1] + 1,
dtype=tf.int32,
)
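    # `_loop_on_depth` is the body of the inner `tf.while_loop` in
    # `_loop_on_width`: each iteration applies one randomly chosen op.
    # `_loop_on_width` is the body of the outer `tf.while_loop` in
    # `augment_image`: it runs one full chain and accumulates the
    # Dirichlet-weighted chain output into `result`.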
def _loop_on_depth(self, depth_level, image_aug):
        # `maxval` is exclusive, so sample over all 9 ops handled by
        # `_apply_op` (indices 0-8).
        op_index = self._random_generator.uniform(
            shape=(), minval=0, maxval=9, dtype=tf.int32
        )
image_aug = self._apply_op(image_aug, op_index)
depth_level += 1
return depth_level, image_aug
def _loop_on_width(self, image, chain_mixing_weights, curr_chain, result):
image_aug = tf.identity(image)
chain_depth = self._sample_depth()
depth_level = tf.constant([0], dtype=tf.int32)
depth_level, image_aug = tf.while_loop(
lambda depth_level, image_aug: tf.less(depth_level, chain_depth),
self._loop_on_depth,
[depth_level, image_aug],
)
result += tf.gather(chain_mixing_weights, curr_chain) * image_aug
curr_chain += 1
return image, chain_mixing_weights, curr_chain, result
def _auto_contrast(self, image):
return self.auto_contrast(image)
def _equalize(self, image):
return self.equalize(image)
def _posterize(self, image):
image = preprocessing.transform_value_range(
images=image,
original_range=self.value_range,
target_range=[0, 255],
)
bits = tf.cast(self.severity_factor() * 3, tf.int32)
shift = tf.cast(4 - bits + 1, tf.uint8)
image = tf.cast(image, tf.uint8)
image = tf.bitwise.left_shift(
tf.bitwise.right_shift(image, shift), shift
)
image = tf.cast(image, self.compute_dtype)
return preprocessing.transform_value_range(
images=image,
original_range=[0, 255],
target_range=self.value_range,
)
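    # Worked example of the bit-shift posterization above (a sketch with
    # illustrative values): with severity 0.7, bits = int(0.7 * 3) = 2 and
    # shift = 4 - 2 + 1 = 3, so each channel keeps its top 5 bits:
    #   173 >> 3  # -> 21  (0b10101101 -> 0b10101)
    #   21 << 3   # -> 168 (173 quantized down to a multiple of 8)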
def _rotate(self, image):
angle = tf.expand_dims(
tf.cast(self.severity_factor() * 30, tf.float32), axis=0
)
shape = tf.cast(tf.shape(image), tf.float32)
return preprocessing.transform(
tf.expand_dims(image, 0),
preprocessing.get_rotation_matrix(angle, shape[0], shape[1]),
)[0]
def _solarize(self, image):
threshold = tf.cast(
tf.cast(self.severity_factor() * 255, tf.int32), tf.float32
)
image = preprocessing.transform_value_range(
image, original_range=self.value_range, target_range=(0, 255)
)
result = tf.clip_by_value(image, 0, 255)
result = tf.where(result < threshold, result, 255 - result)
return preprocessing.transform_value_range(
result, original_range=(0, 255), target_range=self.value_range
)
def _shear_x(self, image):
x = tf.cast(self.severity_factor() * 0.3, tf.float32)
x *= preprocessing.random_inversion(self._random_generator)
        transform_x = self._format_random_shear_transform(
[1.0, x, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
)
return preprocessing.transform(
images=tf.expand_dims(image, 0), transforms=transform_x
)[0]
def _shear_y(self, image):
y = tf.cast(self.severity_factor() * 0.3, tf.float32)
y *= preprocessing.random_inversion(self._random_generator)
        transform_y = self._format_random_shear_transform(
            [1.0, 0.0, 0.0, y, 1.0, 0.0, 0.0, 0.0]
        )
        return preprocessing.transform(
            images=tf.expand_dims(image, 0), transforms=transform_y
        )[0]
@staticmethod
def _format_random_shear_transform(transform):
transform = tf.convert_to_tensor(transform, dtype=tf.float32)
return transform[tf.newaxis]
def _translate_x(self, image):
shape = tf.cast(tf.shape(image), tf.float32)
x = tf.cast(self.severity_factor() * shape[1] / 3, tf.float32)
x = tf.expand_dims(tf.expand_dims(x, axis=0), axis=0)
x *= preprocessing.random_inversion(self._random_generator)
x = tf.cast(x, tf.int32)
translations = tf.cast(
tf.concat([x, tf.zeros_like(x)], axis=1), dtype=tf.float32
)
return preprocessing.transform(
tf.expand_dims(image, 0),
preprocessing.get_translation_matrix(translations),
)[0]
def _translate_y(self, image):
shape = tf.cast(tf.shape(image), tf.float32)
y = tf.cast(self.severity_factor() * shape[0] / 3, tf.float32)
y = tf.expand_dims(tf.expand_dims(y, axis=0), axis=0)
y *= preprocessing.random_inversion(self._random_generator)
y = tf.cast(y, tf.int32)
translations = tf.cast(
tf.concat([tf.zeros_like(y), y], axis=1), dtype=tf.float32
)
return preprocessing.transform(
tf.expand_dims(image, 0),
preprocessing.get_translation_matrix(translations),
)[0]
def _apply_op(self, image, op_index):
augmented = image
augmented = tf.cond(
op_index == tf.constant([0], dtype=tf.int32),
lambda: self._auto_contrast(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([1], dtype=tf.int32),
lambda: self._equalize(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([2], dtype=tf.int32),
lambda: self._posterize(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([3], dtype=tf.int32),
lambda: self._rotate(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([4], dtype=tf.int32),
lambda: self._solarize(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([5], dtype=tf.int32),
lambda: self._shear_x(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([6], dtype=tf.int32),
lambda: self._shear_y(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([7], dtype=tf.int32),
lambda: self._translate_x(augmented),
lambda: augmented,
)
augmented = tf.cond(
op_index == tf.constant([8], dtype=tf.int32),
lambda: self._translate_y(augmented),
lambda: augmented,
)
return augmented
def get_random_transformation(
self,
image=None,
label=None,
bounding_boxes=None,
keypoints=None,
segmentation_mask=None,
):
# Generate random values of chain_mixing_weights and weight_sample
chain_mixing_weights = self._sample_from_dirichlet(
tf.ones([self.num_chains]) * self.alpha
)
weight_sample = self._sample_from_beta(self.alpha, self.alpha)
# Create a transformation config containing the random values
transformation = {
"chain_mixing_weights": chain_mixing_weights,
"weight_sample": weight_sample,
}
return transformation
def augment_image(self, image, transformation=None, **kwargs):
# Extract chain_mixing_weights and weight_sample from the provided transformation # noqa: E501
chain_mixing_weights = transformation["chain_mixing_weights"]
weight_sample = transformation["weight_sample"]
result = tf.zeros_like(image)
curr_chain = tf.constant([0], dtype=tf.int32)
image, chain_mixing_weights, curr_chain, result = tf.while_loop(
lambda image, chain_mixing_weights, curr_chain, result: tf.less(
curr_chain, self.num_chains
),
self._loop_on_width,
[image, chain_mixing_weights, curr_chain, result],
)
result = weight_sample * image + (1 - weight_sample) * result
return result
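    # The blend above follows the AugMix recipe: with chain outputs c_1..c_k,
    # Dirichlet weights w_i and a Beta-sampled m, the output is
    # m * image + (1 - m) * sum_i(w_i * c_i). A toy check (illustrative
    # values, trivial shapes):
    #   image = tf.ones([2, 2, 3])
    #   mixed = 0.25 * (image * 0.5) + 0.75 * (image * 2.0)  # 1.625
    #   out = 0.4 * image + 0.6 * mixed                      # 1.375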
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_masks, transformation=None, **kwargs
):
# Extract chain_mixing_weights and weight_sample from the provided transformation # noqa: E501
chain_mixing_weights = transformation["chain_mixing_weights"]
weight_sample = transformation["weight_sample"]
result = tf.zeros_like(segmentation_masks)
curr_chain = tf.constant([0], dtype=tf.int32)
(
segmentation_masks,
chain_mixing_weights,
curr_chain,
result,
) = tf.while_loop(
lambda segmentation_masks, chain_mixing_weights, curr_chain, result: tf.less( # noqa: E501
curr_chain, self.num_chains
),
self._loop_on_width,
[segmentation_masks, chain_mixing_weights, curr_chain, result],
)
# Apply the mixing of segmentation_masks similar to images
result = (
weight_sample * segmentation_masks + (1 - weight_sample) * result
)
return result
def get_config(self):
config = {
"value_range": self.value_range,
"severity": self.severity,
"num_chains": self.num_chains,
"chain_depth": self.chain_depth,
"alpha": self.alpha,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/aug_mix.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/aug_mix.py",
"repo_id": "keras-cv",
"token_count": 6482
} | 55 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import core
from keras_cv import layers as cv_layers
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import fill_utils
from keras_cv.utils import preprocessing
def _center_crop(mask, width, height):
masks_shape = tf.shape(mask)
h_diff = masks_shape[0] - height
w_diff = masks_shape[1] - width
h_start = tf.cast(h_diff / 2, tf.int32)
w_start = tf.cast(w_diff / 2, tf.int32)
return tf.image.crop_to_bounding_box(mask, h_start, w_start, height, width)
@keras_cv_export("keras_cv.layers.GridMask")
class GridMask(BaseImageAugmentationLayer):
"""GridMask class for grid-mask augmentation.
Input shape:
Int or float tensor with values in the range [0, 255].
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
ratio_factor: A float, tuple of two floats, or `keras_cv.FactorSampler`.
            Ratio determines the size of the grid masks relative to the
            spacing between them. Lower values make the grid masks smaller,
            and higher values make them larger.
Floats should be in the range [0, 1]. 0.5 indicates that grid and
spacing will be of equal size. To always use the same value, pass a
`keras_cv.ConstantFactorSampler()`.
Defaults to `(0, 0.5)`.
rotation_factor:
The rotation_factor will be used to randomly rotate the grid_mask
            during training. Defaults to 0.15, which results in an output
            rotating by a random amount in the range
            [-15% * 2pi, 15% * 2pi].
A float represented as fraction of 2 Pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
counter-clockwise. A positive values means rotating counter
clock-wise, while a negative value means clock-wise. When
represented as a single float, this value is used for both the upper
and lower bound. For instance, factor=(-0.2, 0.3) results in an
output rotation by a random amount in the range [-20% * 2pi,
30% * 2pi]. factor=0.2 results in an output rotating by a random
amount in the range [-20% * 2pi, 20% * 2pi].
fill_mode: Pixels inside the gridblock are filled according to the given
mode (one of `{"constant", "gaussian_noise"}`), defaults to
"constant".
- *constant*: Pixels are filled with the same constant value.
- *gaussian_noise*: Pixels are filled with random gaussian noise.
fill_value: an integer represents of value to be filled inside the
gridblock when `fill_mode="constant"`. Valid integer range
[0 to 255]
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_gridmask = keras_cv.layers.preprocessing.GridMask()
augmented_images = random_gridmask(images)
```
References:
- [GridMask paper](https://arxiv.org/abs/2001.04086)
"""
def __init__(
self,
ratio_factor=(0, 0.5),
rotation_factor=0.15,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.ratio_factor = preprocessing.parse_factor(
ratio_factor, param_name="ratio_factor"
)
if isinstance(rotation_factor, core.FactorSampler):
raise ValueError(
"Currently `GridMask.rotation_factor` does not support the "
"`FactorSampler` API. This will be supported in the next Keras "
"release. For now, please pass a float for the "
"`rotation_factor` argument."
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.rotation_factor = rotation_factor
self.random_rotate = cv_layers.RandomRotation(
factor=rotation_factor,
fill_mode="constant",
fill_value=0.0,
seed=seed,
)
self.auto_vectorize = False
self._check_parameter_values()
self.seed = seed
def _check_parameter_values(self):
fill_mode, fill_value = self.fill_mode, self.fill_value
if fill_value not in range(0, 256):
raise ValueError(
f"fill_value should be in the range [0, 255]. Got {fill_value}"
)
if fill_mode not in ["constant", "gaussian_noise", "random"]:
raise ValueError(
'`fill_mode` should be "constant", '
f'"gaussian_noise", or "random". Got `fill_mode`={fill_mode}'
)
def get_random_transformation(
self, image=None, label=None, bounding_boxes=None, **kwargs
):
ratio = self.ratio_factor()
# compute grid mask
input_shape = tf.shape(image)
mask = self._compute_grid_mask(input_shape, ratio=ratio)
# convert mask to single-channel image
mask = tf.cast(mask, tf.float32)
mask = tf.expand_dims(mask, axis=-1)
# randomly rotate mask
mask = self.random_rotate(mask)
# compute fill
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
fill_value = tf.cast(fill_value, dtype=self.compute_dtype)
else:
# gaussian noise
fill_value = self._random_generator.random_normal(
shape=input_shape, dtype=self.compute_dtype
)
return mask, fill_value
def _compute_grid_mask(self, input_shape, ratio):
height = tf.cast(input_shape[0], tf.float32)
width = tf.cast(input_shape[1], tf.float32)
# mask side length
input_diagonal_len = tf.sqrt(tf.square(width) + tf.square(height))
mask_side_len = tf.math.ceil(input_diagonal_len)
# grid unit size
unit_size = self._random_generator.uniform(
shape=(),
minval=tf.math.minimum(height * 0.5, width * 0.3),
maxval=tf.math.maximum(height * 0.5, width * 0.3) + 1,
dtype=tf.float32,
)
rectangle_side_len = tf.cast((ratio) * unit_size, tf.float32)
# sample x and y offset for grid units randomly between 0 and unit_size
delta_x = self._random_generator.uniform(
shape=(), minval=0.0, maxval=unit_size, dtype=tf.float32
)
delta_y = self._random_generator.uniform(
shape=(), minval=0.0, maxval=unit_size, dtype=tf.float32
)
# grid size (number of diagonal units in grid)
grid_size = mask_side_len // unit_size + 1
grid_size_range = tf.range(1, grid_size + 1)
# diagonal corner coordinates
unit_size_range = grid_size_range * unit_size
x1 = unit_size_range - delta_x
x0 = x1 - rectangle_side_len
y1 = unit_size_range - delta_y
y0 = y1 - rectangle_side_len
# compute grid coordinates
x0, y0 = tf.meshgrid(x0, y0)
x1, y1 = tf.meshgrid(x1, y1)
# flatten mesh grid
x0 = tf.reshape(x0, [-1])
y0 = tf.reshape(y0, [-1])
x1 = tf.reshape(x1, [-1])
y1 = tf.reshape(y1, [-1])
# convert coordinates to mask
corners = tf.stack([x0, y0, x1, y1], axis=-1)
mask_side_len = tf.cast(mask_side_len, tf.int32)
rectangle_masks = fill_utils.corners_to_mask(
corners, mask_shape=(mask_side_len, mask_side_len)
)
grid_mask = tf.reduce_any(rectangle_masks, axis=0)
return grid_mask
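    # Sketch of the helper above (an illustrative call on a private method,
    # shown only to document its contract):
    #   mask = layer._compute_grid_mask(tf.constant([32, 32, 3]), ratio=0.5)
    # `mask` is a square boolean grid sized to cover the image diagonal;
    # True entries mark rectangles that `augment_image` later fills.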
def augment_image(self, image, transformation=None, **kwargs):
mask, fill_value = transformation
input_shape = tf.shape(image)
# center crop mask
input_height = input_shape[0]
input_width = input_shape[1]
mask = _center_crop(mask, input_width, input_height)
# convert back to boolean mask
mask = tf.cast(mask, tf.bool)
return tf.where(mask, fill_value, image)
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"ratio_factor": self.ratio_factor,
"rotation_factor": self.rotation_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/grid_mask.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/grid_mask.py",
"repo_id": "keras-cv",
"token_count": 4219
} | 56 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv import layers
from keras_cv.tests.test_case import TestCase
class RandomAspectRatioTest(TestCase):
def test_train_augments_image(self):
        # Checks that the augmentation changes the image shape.
input_image_shape = (8, 100, 100, 3)
image = tf.random.uniform(shape=input_image_shape)
layer = layers.RandomAspectRatio(factor=(0.9, 1.1))
output = layer(image, training=True)
self.assertNotEqual(output.shape, image.shape)
def test_grayscale(self):
        # Checks that grayscale inputs keep a single channel.
input_image_shape = (8, 100, 100, 1)
image = tf.random.uniform(shape=input_image_shape, seed=1223)
layer = layers.RandomAspectRatio(factor=(0.9, 1.1))
output = layer(image, training=True)
self.assertEqual(output.shape[-1], 1)
def test_augment_boxes_ragged(self):
image = tf.zeros([2, 20, 20, 3])
bounding_boxes = {
"boxes": tf.ragged.constant(
[[[0.2, 0.12, 1, 1], [0, 0, 0.5, 0.73]], [[0, 0, 1, 1]]],
dtype=tf.float32,
),
"classes": tf.ragged.constant([[0, 0], [0]], dtype=tf.float32),
}
input = {"images": image, "bounding_boxes": bounding_boxes}
layer = layers.RandomAspectRatio(
factor=(0.9, 1.1), bounding_box_format="rel_xywh"
)
output = layer(input, training=True)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
bounding_boxes = bounding_box.to_dense(bounding_boxes)
self.assertAllClose(bounding_boxes, output["bounding_boxes"])
| keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_aspect_ratio_test.py",
"repo_id": "keras-cv",
"token_count": 939
} | 57 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv import core
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomCropAndResize")
class RandomCropAndResize(BaseImageAugmentationLayer):
"""Randomly crops a part of an image and resizes it to provided size.
This implementation takes an intuitive approach, where we crop the images to
a random height and width, and then resize them. To do this, we first sample
a random value for area using `crop_area_factor` and a value for aspect
ratio using `aspect_ratio_factor`. Further we get the new height and width
by dividing and multiplying the old height and width by the random area
respectively. We then sample offsets for height and width and clip them such
that the cropped area does not exceed image boundaries. Finally, we do the
actual cropping operation and resize the image to `target_size`.
Args:
target_size: A tuple of two integers used as the target size to
ultimately crop images to.
crop_area_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. The ratio of area of the cropped part to that
of original image is sampled using this factor. Represents the lower
and upper bounds for the area relative to the original image of the
cropped image before resizing it to `target_size`. For
self-supervised pretraining a common value for this parameter is
`(0.08, 1.0)`. For fine tuning and classification a common value for
            this is `(0.8, 1.0)`.
aspect_ratio_factor: A tuple of two floats, ConstantFactorSampler or
UniformFactorSampler. Aspect ratio means the ratio of width to
height of the cropped image. In the context of this layer, the
aspect ratio sampled represents a value to distort the aspect ratio
by. Represents the lower and upper bound for the aspect ratio of the
cropped image before resizing it to `target_size`. For most tasks,
this should be `(3/4, 4/3)`. To perform a no-op provide the value
`(1.0, 1.0)`.
interpolation: (Optional) A string specifying the sampling method for
resizing, defaults to "bilinear".
seed: (Optional) Used to create a random seed, defaults to None.
"""
def __init__(
self,
target_size,
crop_area_factor,
aspect_ratio_factor,
interpolation="bilinear",
bounding_box_format=None,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self._check_class_arguments(
target_size, crop_area_factor, aspect_ratio_factor
)
self.target_size = target_size
self.aspect_ratio_factor = preprocessing.parse_factor(
aspect_ratio_factor,
min_value=0.0,
max_value=None,
param_name="aspect_ratio_factor",
seed=seed,
)
self.crop_area_factor = preprocessing.parse_factor(
crop_area_factor,
max_value=1.0,
param_name="crop_area_factor",
seed=seed,
)
self.interpolation = interpolation
self.seed = seed
self.bounding_box_format = bounding_box_format
self.force_output_dense_images = True
def get_random_transformation(
self, image=None, label=None, bounding_box=None, **kwargs
):
crop_area_factor = self.crop_area_factor()
aspect_ratio = self.aspect_ratio_factor()
new_height = tf.clip_by_value(
tf.sqrt(crop_area_factor / aspect_ratio), 0.0, 1.0
) # to avoid unwanted/unintuitive effects
new_width = tf.clip_by_value(
tf.sqrt(crop_area_factor * aspect_ratio), 0.0, 1.0
)
height_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, 1.0 - new_height),
maxval=tf.maximum(0.0, 1.0 - new_height),
dtype=tf.float32,
)
width_offset = self._random_generator.uniform(
(),
minval=tf.minimum(0.0, 1.0 - new_width),
maxval=tf.maximum(0.0, 1.0 - new_width),
dtype=tf.float32,
)
y1 = height_offset
y2 = height_offset + new_height
x1 = width_offset
x2 = width_offset + new_width
return [[y1, x1, y2, x2]]
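    # Worked example of the sampling above (illustrative numbers): with
    # crop_area_factor = 0.64 and aspect_ratio = 2.0,
    #   new_height = sqrt(0.64 / 2.0) ~= 0.566
    #   new_width  = sqrt(0.64 * 2.0) ~= 1.131 -> clipped to 1.0
    # so height_offset is drawn from [0, 0.434], width_offset is 0, and the
    # returned box [[y1, x1, y2, x2]] is in relative (0-1) coordinates.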
def compute_image_signature(self, images):
return tf.TensorSpec(
shape=(self.target_size[0], self.target_size[1], images.shape[-1]),
dtype=self.compute_dtype,
)
def augment_image(self, image, transformation, **kwargs):
return self._crop_and_resize(image, transformation)
def augment_target(self, target, **kwargs):
return target
    @staticmethod
    def _transform_bounding_boxes(bounding_boxes, transformation):
bounding_boxes = bounding_boxes.copy()
t_y1, t_x1, t_y2, t_x2 = transformation[0]
t_dx = t_x2 - t_x1
t_dy = t_y2 - t_y1
x1, y1, x2, y2 = tf.split(
bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1
)
output = tf.concat(
[
(x1 - t_x1) / t_dx,
(y1 - t_y1) / t_dy,
(x2 - t_x1) / t_dx,
(y2 - t_y1) / t_dy,
],
axis=-1,
)
bounding_boxes["boxes"] = output
return bounding_boxes
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, image=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomCropAndResize()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomCropAndResize(bounding_box_format='xyxy')`"
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
images=image,
)
bounding_boxes = RandomCropAndResize._transform_bounding_boxes(
bounding_boxes, transformation
)
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="rel_xyxy",
images=image,
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
images=image,
)
return bounding_boxes
def _resize(self, image, **kwargs):
outputs = keras.preprocessing.image.smart_resize(
image, self.target_size, **kwargs
)
# smart_resize will always output float32, so we need to re-cast.
return tf.cast(outputs, self.compute_dtype)
def _check_class_arguments(
self, target_size, crop_area_factor, aspect_ratio_factor
):
if (
not isinstance(target_size, (tuple, list))
or len(target_size) != 2
or not isinstance(target_size[0], int)
or not isinstance(target_size[1], int)
or isinstance(target_size, int)
):
raise ValueError(
"`target_size` must be tuple of two integers. "
f"Received target_size={target_size}"
)
if (
not isinstance(crop_area_factor, (tuple, list, core.FactorSampler))
or isinstance(crop_area_factor, float)
or isinstance(crop_area_factor, int)
):
raise ValueError(
"`crop_area_factor` must be tuple of two positive floats less "
"than or equal to 1 or keras_cv.core.FactorSampler instance. "
f"Received crop_area_factor={crop_area_factor}"
)
if (
not isinstance(
aspect_ratio_factor, (tuple, list, core.FactorSampler)
)
or isinstance(aspect_ratio_factor, float)
or isinstance(aspect_ratio_factor, int)
):
raise ValueError(
"`aspect_ratio_factor` must be tuple of two positive floats or "
"keras_cv.core.FactorSampler instance. Received "
f"aspect_ratio_factor={aspect_ratio_factor}"
)
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return self._crop_and_resize(
segmentation_mask, transformation, method="nearest"
)
def get_config(self):
config = super().get_config()
config.update(
{
"target_size": self.target_size,
"crop_area_factor": self.crop_area_factor,
"aspect_ratio_factor": self.aspect_ratio_factor,
"interpolation": self.interpolation,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
if isinstance(config["crop_area_factor"], dict):
config["crop_area_factor"] = keras.utils.deserialize_keras_object(
config["crop_area_factor"]
)
if isinstance(config["aspect_ratio_factor"], dict):
config["aspect_ratio_factor"] = (
keras.utils.deserialize_keras_object(
config["aspect_ratio_factor"]
)
)
return cls(**config)
def _crop_and_resize(self, image, transformation, method=None):
image = tf.expand_dims(image, axis=0)
boxes = transformation
# See bit.ly/tf_crop_resize for more details
augmented_image = tf.image.crop_and_resize(
image, # image shape: [B, H, W, C]
boxes, # boxes: (1, 4) in this case; represents area
# to be cropped from the original image
[0], # box_indices: maps boxes to images along batch axis
# [0] since there is only one image
self.target_size, # output size
method=method or self.interpolation,
)
return tf.squeeze(augmented_image, axis=0)
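# Usage sketch (illustrative shapes, values in [0, 255]):
#   layer = RandomCropAndResize(
#       target_size=(224, 224),
#       crop_area_factor=(0.8, 1.0),
#       aspect_ratio_factor=(3 / 4, 4 / 3),
#   )
#   augmented = layer(tf.random.uniform([8, 256, 256, 3]) * 255.0)
#   # augmented.shape == (8, 224, 224, 3)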
| keras-cv/keras_cv/layers/preprocessing/random_crop_and_resize.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_crop_and_resize.py",
"repo_id": "keras-cv",
"token_count": 5099
} | 58 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv import core
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomSaturation(BaseImageAugmentationLayer):
"""Randomly adjusts the saturation on given images.
This layer will randomly increase/reduce the saturation for the input RGB
images. At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the saturation of the input.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image saturation is impacted. `factor=0.5` makes this layer perform
a no-op operation. `factor=0.0` makes the image to be fully
grayscale. `factor=1.0` makes the image to be fully saturated.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
min_value=0.0,
max_value=1.0,
)
self.seed = seed
def get_random_transformation(self, **kwargs):
return self.factor()
def augment_image(self, image, transformation=None, **kwargs):
# Convert the factor range from [0, 1] to [0, +inf]. Note that the
# tf.image.adjust_saturation is trying to apply the following math
# formula `output_saturation = input_saturation * factor`. We use the
# following method to the do the mapping.
# `y = x / (1 - x)`.
# This will ensure:
# y = +inf when x = 1 (full saturation)
# y = 1 when x = 0.5 (no augmentation)
# y = 0 when x = 0 (full gray scale)
        # Convert the transformation to a tensor in case it is a float. When
        # the transformation is 1.0, a Python float would trigger a
        # divide-by-zero error below, but as a tensor the division evaluates
        # to inf, which is handled correctly.
transformation = tf.convert_to_tensor(transformation)
adjust_factor = transformation / (1 - transformation)
return tf.image.adjust_saturation(
image, saturation_factor=adjust_factor
)
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
class RandomSaturationTest(TestCase):
def test_preserves_output_shape(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=(0.3, 0.8))
output = layer(image)
self.assertEqual(image.shape, output.shape)
self.assertNotAllClose(image, output)
def test_no_adjustment_for_factor_point_five(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=(0.5, 0.5))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
def test_adjust_to_grayscale(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=(0.0, 0.0))
output = ops.convert_to_numpy(layer(image))
channel_mean = np.mean(output, axis=-1)
channel_values = tf.unstack(output, axis=-1)
# Make sure all the pixel has the same value among the channel dim,
# which is a fully gray RGB.
for channel_value in channel_values:
self.assertAllClose(
channel_mean, channel_value, atol=1e-5, rtol=1e-5
)
def test_adjust_to_full_saturation(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=(1.0, 1.0))
output = ops.convert_to_numpy(layer(image))
channel_mean = np.min(output, axis=-1)
# Make sure at least one of the channel is 0.0 (fully saturated image)
self.assertAllClose(channel_mean, np.zeros((4, 8, 8)))
def test_adjustment_for_non_rgb_value_range(self):
image_shape = (4, 8, 8, 3)
# Value range (0, 100)
image = tf.random.uniform(shape=image_shape) * 100.0
layer = preprocessing.RandomSaturation(factor=(0.5, 0.5))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomSaturation(factor=(0.3, 0.8))
output = layer(image)
self.assertNotAllClose(image, output)
    def test_with_uint8(self):
image_shape = (4, 8, 8, 3)
image = tf.cast(
tf.random.uniform(shape=image_shape) * 255.0, dtype=tf.uint8
)
layer = preprocessing.RandomSaturation(factor=(0.5, 0.5))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomSaturation(factor=(0.3, 0.8))
output = layer(image)
self.assertNotAllClose(image, output)
def test_config(self):
layer = preprocessing.RandomSaturation(factor=(0.3, 0.8))
config = layer.get_config()
self.assertTrue(isinstance(config["factor"], core.UniformFactorSampler))
self.assertEqual(config["factor"].get_config()["lower"], 0.3)
self.assertEqual(config["factor"].get_config()["upper"], 0.8)
def test_correctness_with_tf_adjust_saturation_normalized_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape)
layer = preprocessing.RandomSaturation(factor=fixed_factor)
old_layer = OldRandomSaturation(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output, atol=1e-5, rtol=1e-5)
def test_correctness_with_tf_adjust_saturation_rgb_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomSaturation(factor=fixed_factor)
old_layer = OldRandomSaturation(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output, atol=1e-3, rtol=1e-5)
| keras-cv/keras_cv/layers/preprocessing/random_saturation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_saturation_test.py",
"repo_id": "keras-cv",
"token_count": 3401
} | 59 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.layers.preprocessing.solarization import Solarization
from keras_cv.tests.test_case import TestCase
class SolarizationTest(TestCase):
@parameterized.named_parameters(
("0_255", 0, 255),
("64_191", 64, 191),
("127_128", 127, 128),
("191_64", 191, 64),
("255_0", 255, 0),
)
def test_output_values(self, input_value, expected_value):
solarization = Solarization(value_range=(0, 255))
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=expected_value,
dtype="uint8",
)
@parameterized.named_parameters(
("0_245", 0, 245),
("255_0", 255, 0),
)
def test_solarization_with_addition(self, input_value, output_value):
solarization = Solarization(
addition_factor=(10.0, 10.0), value_range=(0, 255)
)
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=output_value,
dtype="float32",
)
@parameterized.named_parameters(
("0_0", 0, 0),
("64_64", 64, 64),
("127_127", 127, 127),
("191_64", 191, 64),
("255_0", 255, 0),
)
def test_only_values_above_threshold_are_solarized(
self, input_value, output_value
):
solarization = Solarization(
threshold_factor=(128, 128), value_range=(0, 255)
)
self._test_input_output(
layer=solarization,
input_value=input_value,
expected_value=output_value,
dtype="uint8",
)
def _test_input_output(self, layer, input_value, expected_value, dtype):
input = np.ones(shape=(2, 224, 224, 3), dtype=dtype) * input_value
expected_output = tf.clip_by_value(
(
np.ones(shape=(2, 224, 224, 3), dtype=layer.compute_dtype)
* expected_value
),
0,
255,
)
output = layer(input)
self.assertAllClose(output, expected_output)
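    # The expected values in the parameterized tests above follow the
    # solarization rule the layer implements (a sketch, threshold-128 case):
    #   def solarize(x, threshold=128, value_max=255):
    #       return value_max - x if x >= threshold else x
    #   solarize(191)  # -> 64 (inverted)
    #   solarize(64)   # -> 64 (below threshold, passes through)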
def test_random_augmentation_applied_per_sample(self):
image = tf.random.uniform((16, 16, 3), minval=0, maxval=255)
images = tf.stack([image, image])
layer = Solarization(
value_range=(0, 255), threshold_factor=127, addition_factor=127
)
outputs = layer(images)
self.assertNotAllEqual(outputs[0], outputs[1])
| keras-cv/keras_cv/layers/preprocessing/solarization_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/solarization_test.py",
"repo_id": "keras-cv",
"token_count": 1405
} | 60 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import random
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GlobalRandomDroppingPoints")
class GlobalRandomDroppingPoints(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
"""A preprocessing layer which randomly drops point during training.
This layer will randomly drop points based on keep_probability.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
Arguments:
        drop_rate: A float scalar that sets the probability of dropping
            each point.
exclude_classes: An optional int scalar or a list of ints. Points with the
specified class(es) will not be dropped.
"""
def __init__(self, drop_rate=None, exclude_classes=None, **kwargs):
super().__init__(**kwargs)
drop_rate = drop_rate if drop_rate else 0.0
if not isinstance(exclude_classes, (tuple, list)):
exclude_classes = [exclude_classes]
        if drop_rate > 1:
            raise ValueError(
                f"drop_rate must be <=1. Received drop_rate={drop_rate}."
            )
keep_probability = 1 - drop_rate
self._keep_probability = keep_probability
self._exclude_classes = exclude_classes
def get_config(self):
return {
"drop_rate": 1 - self._keep_probability,
"exclude_classes": self._exclude_classes,
}
def get_random_transformation(self, point_clouds, **kwargs):
num_points = point_clouds.get_shape().as_list()[-2]
# Generate mask along point dimension.
random_point_mask = (
random.uniform([1, num_points, 1], minval=0.0, maxval=1)
< self._keep_probability
)
return {"point_mask": random_point_mask}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
point_mask = transformation["point_mask"]
        # Do not drop points that belong to the excluded classes: force
        # their entries in point_mask to True so they are always kept.
protected_points = tf.zeros_like(point_clouds[0, :, -1], dtype=tf.bool)
for excluded_class in self._exclude_classes:
protected_points |= point_clouds[0, :, -1] == excluded_class
point_mask = tf.where(
protected_points[tf.newaxis, :, tf.newaxis], True, point_mask
)
point_clouds = tf.where(point_mask, point_clouds, 0.0)
return (point_clouds, bounding_boxes)
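# Usage sketch (illustrative shapes; the dict keys are the constants defined
# at the top of this file):
#   layer = GlobalRandomDroppingPoints(drop_rate=0.05, exclude_classes=1)
#   outputs = layer({
#       POINT_CLOUDS: point_clouds,      # [num_frames, num_points, features]
#       BOUNDING_BOXES: bounding_boxes,  # [num_frames, num_boxes, 7]
#   })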
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_dropping_points.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_dropping_points.py",
"repo_id": "keras-cv",
"token_count": 1365
} | 61 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import is_within_any_box3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
ADDITIONAL_POINT_CLOUDS = base_augmentation_layer_3d.ADDITIONAL_POINT_CLOUDS
ADDITIONAL_BOUNDING_BOXES = base_augmentation_layer_3d.ADDITIONAL_BOUNDING_BOXES
POINTCLOUD_LABEL_INDEX = base_augmentation_layer_3d.POINTCLOUD_LABEL_INDEX
@keras_cv_export("keras_cv.layers.SwapBackground")
class SwapBackground(base_augmentation_layer_3d.BaseAugmentationLayer3D):
"""A preprocessing layer which swaps the backgrounds of two scenes during
training.
    This layer will extract object point clouds and bounding boxes from an
    additional scene and paste them onto the training scene while removing
    the objects in the training scene. First, it removes all object point
    clouds and bounding boxes from the training scene. Second, it extracts
    object point clouds and bounding boxes from the additional scene. Third,
    it removes background points in the training scene that overlap with the
    additional object bounding boxes. Last, it pastes the additional object
    point clouds and bounding boxes onto the training background scene.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A tuple of two Tensors (point_clouds, bounding_boxes) with the same shape
as input Tensors.
"""
def __init__(self, **kwargs):
# TODO(ianstenbit): Support the model input format.
super().__init__(**kwargs)
self.auto_vectorize = False
def get_config(self):
return {}
def get_random_transformation(
self,
point_clouds,
bounding_boxes,
additional_point_clouds,
additional_bounding_boxes,
**kwargs
):
# Use the current frame bounding boxes to determine valid bounding
# boxes.
bounding_boxes = tf.boolean_mask(
bounding_boxes,
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] > 0,
axis=1,
)
additional_bounding_boxes = tf.boolean_mask(
additional_bounding_boxes,
additional_bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] > 0,
axis=1,
)
# Remove objects in point_clouds.
objects_points_in_point_clouds = is_within_any_box3d(
point_clouds[..., :3],
bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.CLASS],
keepdims=True,
)
point_clouds = tf.where(
~objects_points_in_point_clouds, point_clouds, 0.0
)
# Extract objects from additional_point_clouds.
objects_points_in_additional_point_clouds = is_within_any_box3d(
additional_point_clouds[..., :3],
additional_bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.CLASS],
keepdims=True,
)
additional_point_clouds = tf.where(
objects_points_in_additional_point_clouds,
additional_point_clouds,
0.0,
)
# Remove background points in point_clouds overlaps with
# additional_bounding_boxes.
points_overlaps_additional_bounding_boxes = is_within_any_box3d(
point_clouds[..., :3],
additional_bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.CLASS],
keepdims=True,
)
point_clouds = tf.where(
~points_overlaps_additional_bounding_boxes, point_clouds, 0.0
)
return {
POINT_CLOUDS: point_clouds,
ADDITIONAL_POINT_CLOUDS: additional_point_clouds,
ADDITIONAL_BOUNDING_BOXES: additional_bounding_boxes,
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
original_bounding_boxes_shape = bounding_boxes.get_shape().as_list()
original_point_clouds_shape = point_clouds.get_shape().as_list()
point_clouds = transformation[POINT_CLOUDS]
additional_point_clouds = transformation[ADDITIONAL_POINT_CLOUDS]
num_frames = original_point_clouds_shape[0]
point_clouds_list = []
for frame_index in range(num_frames):
background_point_clouds = tf.boolean_mask(
point_clouds[frame_index],
point_clouds[frame_index, :, POINTCLOUD_LABEL_INDEX] > 0,
axis=0,
)
object_point_clouds = tf.boolean_mask(
additional_point_clouds[frame_index],
additional_point_clouds[frame_index, :, POINTCLOUD_LABEL_INDEX]
> 0,
axis=0,
)
point_clouds_list += [
tf.concat(
[object_point_clouds, background_point_clouds], axis=0
)
]
point_clouds = tf.ragged.stack(point_clouds_list)
bounding_boxes = tf.RaggedTensor.from_tensor(
transformation[ADDITIONAL_BOUNDING_BOXES]
)
return (
point_clouds.to_tensor(shape=original_point_clouds_shape),
bounding_boxes.to_tensor(shape=original_bounding_boxes_shape),
)
def _augment(self, inputs):
result = inputs
point_clouds = inputs[POINT_CLOUDS]
bounding_boxes = inputs[BOUNDING_BOXES]
additional_point_clouds = inputs[ADDITIONAL_POINT_CLOUDS]
additional_bounding_boxes = inputs[ADDITIONAL_BOUNDING_BOXES]
transformation = self.get_random_transformation(
point_clouds=point_clouds,
bounding_boxes=bounding_boxes,
additional_point_clouds=additional_point_clouds,
additional_bounding_boxes=additional_bounding_boxes,
)
point_clouds, bounding_boxes = self.augment_point_clouds_bounding_boxes(
point_clouds,
bounding_boxes=bounding_boxes,
transformation=transformation,
)
result.update(
{POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
)
return result
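# Usage sketch (illustrative shapes; the dict keys are the constants defined
# at the top of this file):
#   layer = SwapBackground()
#   outputs = layer({
#       POINT_CLOUDS: point_clouds,
#       BOUNDING_BOXES: bounding_boxes,
#       ADDITIONAL_POINT_CLOUDS: additional_point_clouds,
#       ADDITIONAL_BOUNDING_BOXES: additional_bounding_boxes,
#   })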
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/swap_background.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/swap_background.py",
"repo_id": "keras-cv",
"token_count": 3139
} | 62 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers import TransformerEncoder
from keras_cv.tests.test_case import TestCase
class TransformerEncoderTest(TestCase):
def test_return_type_and_shape(self):
layer = TransformerEncoder(project_dim=128, num_heads=2, mlp_dim=128)
inputs = tf.random.normal([1, 197, 128])
output = layer(inputs, training=True)
self.assertTrue(isinstance(output, tf.Tensor))
self.assertLen(output, 1)
        self.assertEqual(output.shape, [1, 197, 128])
def test_wrong_input_dims(self):
layer = TransformerEncoder(project_dim=128, num_heads=2, mlp_dim=128)
# Input dims must equal output dims because of the addition
# of the residual to the final layer
inputs = tf.random.normal([1, 197, 256])
        with self.assertRaisesRegex(
ValueError,
"The input and output dimensionality must be the same, but the "
"TransformerEncoder was provided with 256 and 128",
):
layer(inputs, training=True)
def test_wrong_project_dims(self):
layer = TransformerEncoder(project_dim=256, num_heads=2, mlp_dim=128)
# Input dims must equal output dims because of the addition
# of the residual to the final layer
inputs = tf.random.normal([1, 197, 128])
        with self.assertRaisesRegex(
ValueError,
"The input and output dimensionality must be the same, but the "
"TransformerEncoder was provided with 128 and 256",
):
layer(inputs, training=True)
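# A minimal usage sketch (dimensions are illustrative; the input and output
# dims must match, as the tests above assert).
if __name__ == "__main__":
    layer = TransformerEncoder(project_dim=64, num_heads=2, mlp_dim=128)
    outputs = layer(tf.random.normal([1, 16, 64]), training=False)
    print(outputs.shape)  # (1, 16, 64)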
| keras-cv/keras_cv/layers/transformer_encoder_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/transformer_encoder_test.py",
"repo_id": "keras-cv",
"token_count": 804
} | 63 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from tensorflow import keras
from keras_cv.backend import ops
from keras_cv.losses import FocalLoss
from keras_cv.tests.test_case import TestCase
class ModelGardenFocalLoss(keras.losses.Loss):
def __init__(
self, alpha, gamma, reduction=keras.losses.Reduction.AUTO, name=None
):
self._alpha = alpha
self._gamma = gamma
super().__init__(reduction=reduction, name=name)
def call(self, y_true, y_pred):
with tf.name_scope("focal_loss"):
y_true = tf.cast(ops.convert_to_numpy(y_true), dtype=tf.float32)
y_pred = tf.cast(ops.convert_to_numpy(y_pred), dtype=tf.float32)
positive_label_mask = tf.equal(y_true, 1.0)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
labels=y_true, logits=y_pred
)
probs = tf.sigmoid(y_pred)
probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs)
# With small gamma, the implementation could produce NaN during back
# prop.
modulator = tf.pow(1.0 - probs_gt, self._gamma)
loss = modulator * cross_entropy
weighted_loss = tf.where(
positive_label_mask,
self._alpha * loss,
(1.0 - self._alpha) * loss,
)
return weighted_loss
class FocalLossModelGardenComparisonTest(TestCase):
@parameterized.named_parameters(
("sum", "sum"),
)
def test_model_garden_implementation_has_same_outputs(self, reduction):
focal_loss = FocalLoss(
alpha=0.25, gamma=2.0, from_logits=False, reduction=reduction
)
model_garden_focal_loss = ModelGardenFocalLoss(
alpha=0.25, gamma=2.0, reduction=reduction
)
for _ in range(10):
y_true = np.random.randint(size=(200,), low=0, high=10)
y_true = tf.one_hot(y_true, depth=10)
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.random.uniform((200, 10), dtype=tf.float32)
self.assertAllClose(
focal_loss(y_true, tf.sigmoid(y_pred)),
model_garden_focal_loss(y_true, y_pred),
)
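# A minimal sketch comparing the two implementations on one toy batch
# (the class count and random logits are illustrative).
if __name__ == "__main__":
    y_true = tf.cast(tf.one_hot(np.array([1, 3]), depth=4), tf.float32)
    logits = tf.random.uniform((2, 4), dtype=tf.float32)
    kcv_loss = FocalLoss(
        alpha=0.25, gamma=2.0, from_logits=False, reduction="sum"
    )
    mg_loss = ModelGardenFocalLoss(alpha=0.25, gamma=2.0, reduction="sum")
    print(float(kcv_loss(y_true, tf.sigmoid(logits))))
    print(float(mg_loss(y_true, logits)))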
| keras-cv/keras_cv/losses/numerical_tests/focal_loss_numerical_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/numerical_tests/focal_loss_numerical_test.py",
"repo_id": "keras-cv",
"token_count": 1285
} | 64 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2SBackbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone import (
EfficientNetV2Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
@pytest.mark.extra_large
class EfficientNetV2PresetFullTest(TestCase):
"""
    Test the full enumeration of our presets.
    This tests every preset for EfficientNetV2 and is only run manually.
Run with:
`pytest keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets_test.py --run_extra_large`
""" # noqa: E501
@parameterized.named_parameters(
*[(preset, preset) for preset in EfficientNetV2Backbone.presets]
)
def test_load_efficientnet(self, preset):
input_data = np.ones(shape=(2, 224, 224, 3))
model = EfficientNetV2Backbone.from_preset(preset)
model(input_data)
def test_efficientnet_feature_extractor(self):
model = EfficientNetV2SBackbone(
include_rescaling=False,
input_shape=[256, 256, 3],
)
levels = ["P3", "P4"]
layer_names = [model.pyramid_level_inputs[level] for level in levels]
backbone_model = get_feature_extractor(model, layer_names, levels)
inputs = keras.Input(shape=[256, 256, 3])
outputs = backbone_model(inputs)
self.assertLen(outputs, 2)
        self.assertEqual(list(outputs.keys()), levels)
        self.assertEqual(outputs["P3"].shape[:3], (None, 32, 32))
        self.assertEqual(outputs["P4"].shape[:3], (None, 16, 16))
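# A quick sketch listing what the parameterized test above enumerates
# (listing preset names downloads no weights).
if __name__ == "__main__":
    print(sorted(EfficientNetV2Backbone.presets))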
| keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 870
} | 65 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet backbone model.
Reference:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
(CVPR 2015)
- [Based on the original keras.applications ResNet](https://github.com/keras-team/keras/blob/master/keras/applications/resnet.py) # noqa: E501
"""
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone_presets import (
backbone_presets,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
BN_AXIS = 3
BN_EPSILON = 1.001e-5
@keras_cv_export("keras_cv.models.ResNetBackbone")
class ResNetBackbone(Backbone):
"""Instantiates the ResNet architecture.
Reference:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
    The difference between ResNetV1 and ResNetV2 rests in the structure of
    their individual building blocks. In ResNetV2, the batch normalization
    and ReLU activation precede the convolution layers, as opposed to
    ResNetV1, where the batch normalization and ReLU activation are applied
    after the convolution layers.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
stackwise_filters: list of ints, number of filters for each stack in
the model.
stackwise_blocks: list of ints, number of blocks for each stack in the
model.
stackwise_strides: list of ints, stride for each stack in the model.
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
block_type: string, one of "basic_block" or "block". The block type to
stack. Use "basic_block" for ResNet18 and ResNet34.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Pretrained backbone
model = keras_cv.models.ResNetBackbone.from_preset("resnet50_imagenet")
output = model(input_data)
# Randomly initialized backbone with a custom config
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
output = model(input_data)
```
""" # noqa: E501
def __init__(
self,
*,
stackwise_filters,
stackwise_blocks,
stackwise_strides,
include_rescaling,
input_shape=(None, None, 3),
input_tensor=None,
block_type="block",
**kwargs,
):
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(1 / 255.0)(x)
x = keras.layers.Conv2D(
64, 7, strides=2, use_bias=False, padding="same", name="conv1_conv"
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name="conv1_bn"
)(x)
x = keras.layers.Activation("relu", name="conv1_relu")(x)
x = keras.layers.MaxPooling2D(
3, strides=2, padding="same", name="pool1_pool"
)(x)
num_stacks = len(stackwise_filters)
pyramid_level_inputs = {}
for stack_index in range(num_stacks):
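            # NOTE: stack layer names keep a "v2_" prefix, presumably so
            # saved preset weights keep resolving by layer name.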
x = apply_stack(
x,
filters=stackwise_filters[stack_index],
blocks=stackwise_blocks[stack_index],
stride=stackwise_strides[stack_index],
block_type=block_type,
first_shortcut=(block_type == "block" or stack_index > 0),
name=f"v2_stack_{stack_index}",
)
pyramid_level_inputs[f"P{stack_index + 2}"] = (
utils.get_tensor_input_name(x)
)
# Create model.
super().__init__(inputs=inputs, outputs=x, **kwargs)
# All references to `self` below this line
self.pyramid_level_inputs = pyramid_level_inputs
self.stackwise_filters = stackwise_filters
self.stackwise_blocks = stackwise_blocks
self.stackwise_strides = stackwise_strides
self.include_rescaling = include_rescaling
self.input_tensor = input_tensor
self.block_type = block_type
def get_config(self):
config = super().get_config()
config.update(
{
"stackwise_filters": self.stackwise_filters,
"stackwise_blocks": self.stackwise_blocks,
"stackwise_strides": self.stackwise_strides,
"include_rescaling": self.include_rescaling,
# Remove batch dimension from `input_shape`
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"block_type": self.block_type,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
def apply_basic_block(
x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None
):
"""A basic residual block (v1).
Args:
x: input tensor.
filters: int, filters of the basic layer.
kernel_size: int, kernel size of the bottleneck layer, defaults to 3.
stride: int, stride of the first layer, defaults to 1.
conv_shortcut: bool, uses convolution shortcut if `True`. If `False`
(default), uses identity or pooling shortcut, based on stride.
name: string, optional prefix for the layer names used in the block.
Returns:
Output tensor for the residual block.
"""
if name is None:
name = f"v1_basic_block_{keras.backend.get_uid('v1_basic_block_')}"
if conv_shortcut:
shortcut = keras.layers.Conv2D(
filters,
1,
strides=stride,
use_bias=False,
name=name + "_0_conv",
)(x)
shortcut = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_0_bn"
)(shortcut)
else:
shortcut = x
x = keras.layers.Conv2D(
filters,
kernel_size,
padding="SAME",
strides=stride,
use_bias=False,
name=name + "_1_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn"
)(x)
x = keras.layers.Activation("relu", name=name + "_1_relu")(x)
x = keras.layers.Conv2D(
filters,
kernel_size,
padding="SAME",
use_bias=False,
name=name + "_2_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_2_bn"
)(x)
x = keras.layers.Add(name=name + "_add")([shortcut, x])
x = keras.layers.Activation("relu", name=name + "_out")(x)
return x
def apply_block(
x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None
):
"""A residual block (v1).
Args:
x: input tensor.
filters: int, filters of the basic layer.
kernel_size: int, kernel size of the bottleneck layer, defaults to 3.
stride: int, stride of the first layer, defaults to 1.
conv_shortcut: bool, uses convolution shortcut if `True`. If `False`
(default), uses identity or pooling shortcut, based on stride.
name: string, optional prefix for the layer names used in the block.
Returns:
Output tensor for the residual block.
"""
if name is None:
name = f"v1_block_{keras.backend.get_uid('v1_block')}"
if conv_shortcut:
shortcut = keras.layers.Conv2D(
4 * filters,
1,
strides=stride,
use_bias=False,
name=name + "_0_conv",
)(x)
shortcut = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_0_bn"
)(shortcut)
else:
shortcut = x
x = keras.layers.Conv2D(
filters, 1, strides=stride, use_bias=False, name=name + "_1_conv"
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn"
)(x)
x = keras.layers.Activation("relu", name=name + "_1_relu")(x)
x = keras.layers.Conv2D(
filters,
kernel_size,
padding="SAME",
use_bias=False,
name=name + "_2_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_2_bn"
)(x)
x = keras.layers.Activation("relu", name=name + "_2_relu")(x)
x = keras.layers.Conv2D(
4 * filters, 1, use_bias=False, name=name + "_3_conv"
)(x)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_3_bn"
)(x)
x = keras.layers.Add(name=name + "_add")([shortcut, x])
x = keras.layers.Activation("relu", name=name + "_out")(x)
return x
def apply_stack(
x,
filters,
blocks,
stride=2,
name=None,
block_type="block",
first_shortcut=True,
):
"""A set of stacked residual blocks.
Args:
x: input tensor.
filters: int, filters of the layer in a block.
blocks: int, blocks in the stacked blocks.
stride: int, stride of the first layer in the first block, defaults to
2.
name: string, optional prefix for the layer names used in the block.
block_type: string, one of "basic_block" or "block". The block type to
stack. Use "basic_block" for ResNet18 and ResNet34.
first_shortcut: bool. Use convolution shortcut if `True` (default),
otherwise uses identity or pooling shortcut, based on stride.
Returns:
Output tensor for the stacked blocks.
"""
if name is None:
name = "v1_stack"
if block_type == "basic_block":
block_fn = apply_basic_block
elif block_type == "block":
block_fn = apply_block
else:
raise ValueError(
"""`block_type` must be either "basic_block" or "block". """
f"Received block_type={block_type}."
)
x = block_fn(
x,
filters,
stride=stride,
name=name + "_block1",
conv_shortcut=first_shortcut,
)
for i in range(2, blocks + 1):
x = block_fn(
x, filters, conv_shortcut=False, name=name + "_block" + str(i)
)
return x
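# A minimal sketch: a randomly initialized ResNet18-style backbone built from
# this module (the stack configuration mirrors the docstring example, with
# "basic_block" selecting the two-conv residual block).
if __name__ == "__main__":
    import numpy as np
    model = ResNetBackbone(
        stackwise_filters=[64, 128, 256, 512],
        stackwise_blocks=[2, 2, 2, 2],
        stackwise_strides=[1, 2, 2, 2],
        include_rescaling=False,
        block_type="basic_block",
    )
    print(model(np.ones((1, 224, 224, 3), "float32")).shape)  # (1, 7, 7, 512)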
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone.py",
"repo_id": "keras-cv",
"token_count": 5269
} | 66 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import pathlib
import numpy as np
import pytest
from keras_cv.backend import ops
from keras_cv.models.backbones.vit_det.vit_det_aliases import ViTDetBBackbone
from keras_cv.models.backbones.vit_det.vit_det_backbone import ViTDetBackbone
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class ViTDetPresetSmokeTest(TestCase):
"""
A smoke test for ViTDet presets we run continuously.
This only tests the smallest weights we have available. Run with:
    `pytest keras_cv/models/backbones/vit_det/vit_det_backbone_presets_test.py --run_large` # noqa: E501
"""
def setUp(self):
self.input_batch = np.ones(shape=(1, 1024, 1024, 3))
def test_backbone_output(self):
model = ViTDetBackbone.from_preset("vitdet_base_sa1b")
outputs = model(self.input_batch)
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
expected = np.load(
pathlib.Path(__file__).parent / "data" / "vitdet_base_out.npz"
)
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs),
expected,
atol=1e-5,
rtol=1e-5,
)
def test_applications_model_output(self):
model = ViTDetBBackbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = ViTDetBackbone.from_preset("vitdet_base")
model(self.input_batch)
def test_applications_model_predict(self):
model = ViTDetBBackbone()
# Test that the model XLA compiles
model.predict(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in ViTDetBackbone.presets:
self.assertRegex(ViTDetBackbone.from_preset.__doc__, name)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
            ViTDetBackbone.from_preset("vitdet_nonexistent")
@pytest.mark.extra_large
class ViTDetPresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This tests every preset for ViTDet and is only run manually.
Run with:
    `pytest keras_cv/models/backbones/vit_det/vit_det_backbone_presets_test.py --run_extra_large` # noqa: E501
"""
def test_load_ViTDet(self):
input_data = np.ones(shape=(1, 1024, 1024, 3))
for preset in ViTDetBackbone.presets:
model = ViTDetBackbone.from_preset(preset)
model(input_data)
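# A quick sketch listing the presets the loop above iterates over (listing
# preset names downloads no weights).
if __name__ == "__main__":
    print(sorted(ViTDetBackbone.presets))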
| keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1354
} | 67 |
# Legacy folder
These are models that we intend to migrate to the unified KerasNLP/KerasCV API
but have not yet had the opportunity. Unit tests in this folder are run on
every PR.
Do not use legacy models unless they fill a short-term need and you are
comfortable moving to the new API once they are migrated. Anything we decide
never to migrate will be deleted.
"file_path": "keras-cv/keras_cv/models/legacy/README.md",
"repo_id": "keras-cv",
"token_count": 91
} | 68 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
from keras_cv.models.legacy import regnet
from keras_cv.tests.test_case import TestCase
from .models_test import ModelsTest
MODEL_LIST = [
(regnet.RegNetX002, 368, {}),
]
"""
Below are other configurations that we omit from our CI but that can/should
be tested manually when making changes to this model.
(regnet.RegNetX004, 384, {}),
(regnet.RegNetX006, 528, {}),
(regnet.RegNetX008, 672, {}),
(regnet.RegNetX016, 912, {}),
(regnet.RegNetX032, 1008, {}),
(regnet.RegNetX040, 1360, {}),
(regnet.RegNetX064, 1624, {}),
(regnet.RegNetX080, 1920, {}),
(regnet.RegNetX120, 2240, {}),
(regnet.RegNetX160, 2048, {}),
(regnet.RegNetX320, 2520, {}),
"""
class RegNetXTest(ModelsTest, TestCase):
@parameterized.parameters(*MODEL_LIST)
def test_application_base(self, app, _, args):
super()._test_application_base(app, _, args)
@parameterized.parameters(*MODEL_LIST)
def test_application_with_rescaling(self, app, last_dim, args):
super()._test_application_with_rescaling(app, last_dim, args)
@parameterized.parameters(*MODEL_LIST)
def test_application_pooling(self, app, last_dim, args):
super()._test_application_pooling(app, last_dim, args)
@parameterized.parameters(*MODEL_LIST)
def test_application_variable_input_channels(self, app, last_dim, args):
super()._test_application_variable_input_channels(app, last_dim, args)
@parameterized.parameters(*MODEL_LIST)
def test_model_can_be_used_as_backbone(self, app, last_dim, args):
super()._test_model_can_be_used_as_backbone(app, last_dim, args)
| keras-cv/keras_cv/models/legacy/regnetx_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/regnetx_test.py",
"repo_id": "keras-cv",
"token_count": 787
} | 69 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export(
"keras_cv.models.retinanet.FeaturePyramid",
package="keras_cv.models.retinanet",
)
class FeaturePyramid(keras.layers.Layer):
"""Builds the Feature Pyramid with the feature maps from the backbone."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.conv_c3_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c4_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c5_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c3_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c4_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c5_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c6_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.conv_c7_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.upsample_2x = keras.layers.UpSampling2D(2)
def call(self, inputs, training=False):
if isinstance(inputs, dict):
c3_output = inputs["P3"]
c4_output = inputs["P4"]
c5_output = inputs["P5"]
else:
c3_output, c4_output, c5_output = inputs
p3_output = self.conv_c3_1x1(c3_output, training=training)
p4_output = self.conv_c4_1x1(c4_output, training=training)
p5_output = self.conv_c5_1x1(c5_output, training=training)
p4_output = p4_output + self.upsample_2x(p5_output, training=training)
p3_output = p3_output + self.upsample_2x(p4_output, training=training)
p3_output = self.conv_c3_3x3(p3_output, training=training)
p4_output = self.conv_c4_3x3(p4_output, training=training)
p5_output = self.conv_c5_3x3(p5_output, training=training)
p6_output = self.conv_c6_3x3(c5_output, training=training)
p7_output = self.conv_c7_3x3(ops.relu(p6_output), training=training)
return p3_output, p4_output, p5_output, p6_output, p7_output
def build(self, input_shape):
p3_channels = input_shape["P3"][-1]
p4_channels = input_shape["P4"][-1]
p5_channels = input_shape["P5"][-1]
self.conv_c3_1x1.build((None, None, None, p3_channels))
self.conv_c4_1x1.build((None, None, None, p4_channels))
self.conv_c5_1x1.build((None, None, None, p5_channels))
self.conv_c3_3x3.build((None, None, None, 256))
self.conv_c4_3x3.build((None, None, None, 256))
self.conv_c5_3x3.build((None, None, None, 256))
self.conv_c6_3x3.build((None, None, None, p5_channels))
self.conv_c7_3x3.build((None, None, None, 256))
self.built = True
def compute_output_shape(self, input_shape):
p3_shape = input_shape["P3"][:-1]
p4_shape = input_shape["P4"][:-1]
p5_shape = input_shape["P5"][:-1]
return (
(tuple(p3_shape) + (256,)),
(tuple(p4_shape) + (256,)),
(tuple(p5_shape) + (256,)),
(tuple(p5_shape) + (256,)),
(tuple(p5_shape) + (256,)),
)
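# A minimal wiring sketch with dummy backbone features; the channel counts
# below are illustrative assumptions for a ResNet-style backbone.
if __name__ == "__main__":
    import numpy as np
    features = {
        "P3": np.zeros((1, 64, 64, 512), "float32"),
        "P4": np.zeros((1, 32, 32, 1024), "float32"),
        "P5": np.zeros((1, 16, 16, 2048), "float32"),
    }
    fpn = FeaturePyramid()
    outputs = fpn(features)
    print([tuple(p.shape) for p in outputs])  # P3 through P7, 256 channels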
| keras-cv/keras_cv/models/object_detection/retinanet/feature_pyramid.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/retinanet/feature_pyramid.py",
"repo_id": "keras-cv",
"token_count": 1746
} | 70 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import tensorflow as tf
from tensorflow import keras
class BinaryCrossentropy(keras.losses.Loss):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss for binary (0 or 1) classification applications.
    This loss is adapted for YoloX by supporting an unreduced, elementwise
    mode in which the mean is taken across no axes.
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
assume that `y_pred` contains probabilities (i.e., values in [0,
1]).
label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When >
0, we compute the loss between the predicted labels and a smoothed
version of the true labels, where the smoothing squeezes the labels
towards 0.5. Larger values of `label_smoothing` correspond to
heavier smoothing.
        axis: reduction axis for the elementwise losses. Defaults to
            `None`. Pass `axis="no_reduction"` to request the unreduced,
            elementwise loss (currently a temporary API that emits a
            warning).
Usage:
```python
model.compile(
loss=BinaryCrossentropy(from_logits=True)
....
)
```
"""
def __init__(
self, from_logits=False, label_smoothing=0.0, axis=None, **kwargs
):
super().__init__(**kwargs)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
def call(self, y_true, y_pred):
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
label_smoothing = tf.convert_to_tensor(
self.label_smoothing, dtype=y_pred.dtype
)
def _smooth_labels():
return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
y_true = tf.__internal__.smart_cond.smart_cond(
label_smoothing, _smooth_labels, lambda: y_true
)
if self.axis == "no_reduction":
warnings.warn(
"`axis='no_reduction'` is a temporary API, and the API"
"contract will be replaced in the future with a more generic "
"solution covering all losses."
)
            # "no_reduction" means the mean is taken across no axes, so
            # the elementwise loss is returned as-is (`tf.reduce_mean`
            # would reject the string axis).
            return keras.backend.binary_crossentropy(
                y_true, y_pred, from_logits=self.from_logits
            )
return keras.backend.binary_crossentropy(
y_true, y_pred, from_logits=self.from_logits
)
def get_config(self):
config = super().get_config()
config.update(
{
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
}
)
return config
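# A minimal sketch of the loss on toy logits (values are illustrative).
if __name__ == "__main__":
    y_true = tf.constant([[1.0, 0.0], [0.0, 1.0]])
    y_pred = tf.constant([[2.0, -1.0], [-2.0, 3.0]])
    loss = BinaryCrossentropy(from_logits=True)
    print(float(loss(y_true, y_pred)))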
| keras-cv/keras_cv/models/object_detection/yolox/binary_crossentropy.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/binary_crossentropy.py",
"repo_id": "keras-cv",
"token_count": 1481
} | 71 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.backbone_presets import backbone_presets
from keras_cv.models.backbones.backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.segmentation.segment_anything.sam_presets import (
sam_presets,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
@keras_cv_export(
[
"keras_cv.models.SegmentAnythingModel",
"keras_cv.models.segmentation.SegmentAnythingModel",
],
package="keras_cv.models",
)
class SegmentAnythingModel(Task):
"""
The Segment Anything (SAM) Model.
Args:
backbone (keras_cv.models.Backbone): A feature extractor for the input
images.
prompt_encoder (keras_cv.models.SAMPromptEncoder): A Keras layer to
compute embeddings for points, box, and mask prompt.
mask_decoder (keras_cv.models.SAMMaskDecoder): A Keras layer to
generate segmentation masks given the embeddings generated by the
backbone and the prompt encoder.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
Examples:
>>> import numpy as np
>>> from keras_cv.models import ViTDetBBackbone
>>> from keras_cv.models import SAMPromptEncoder
>>> from keras_cv.models import SAMMaskDecoder
Create all the components of the SAM model:
>>> backbone = ViTDetBBackbone()
>>> prompt_encoder = SAMPromptEncoder()
>>> mask_decoder = SAMMaskDecoder()
Instantiate the model:
>>> sam = SegmentAnythingModel(
... backbone=backbone,
... prompt_encoder=prompt_encoder,
... mask_decoder=mask_decoder
... )
Define the input of the backbone. This must be a batch of images of shape
`(1024, 1024, 3)` for the ViT backbone we are using:
>>> image = np.ones((1, 1024, 1024, 3))
SAM works by prompting the input images. There are three ways to prompt:
(1) Labelled Points: Foreground points (points with label 1) are encoded
such that the output masks generated by the mask decoder contain them
and background points (points with label 0) are encoded such that the
generated masks don't contain them.
(2) Box: A box tells the model which part/crop of the image to segment.
(3) Mask: An input mask can be used to refine the output of the mask
decoder.
These prompts can be mixed and matched but at least one of the prompts
must be present. To turn off a particular prompt, simply exclude it from
the inputs to the model.
# TODO(ianstenbit): Remove the need for the `1` axes, and fix the box shape.
    (1) For point prompts, the expected shape is `(batch, num_points, 2)`.
The labels must have a corresponding shape of `(batch, num_points)`.
(2) For box prompt, the expected shape is `(batch, 1, 2, 2)`.
(3) Similarly, mask prompts have shape `(batch, 1, H, W, 1)`.
For example, to pass in all the prompts, do:
>>> points = np.array([[[512., 512.], [100., 100.]]])
>>> # For labels: 1 means foreground point, 0 means background
>>> labels = np.array([[1., 0.]])
>>> box = np.array([[[[384., 384.], [640., 640.]]]])
>>> input_mask = np.ones((1, 1, 256, 256, 1))
Prepare an input dictionary:
>>> inputs = {
... "images": image,
... "points": points,
... "labels": labels,
... "boxes": box,
... "masks": input_mask
... }
...
>>> outputs = sam.predict(inputs)
>>> masks, iou_pred = outputs["masks"], outputs["iou_pred"]
The first mask in the output `masks` (i.e. `masks[:, 0, ...]`) is the best
mask predicted by the model based on the prompts. Other `masks`
(i.e. `masks[:, 1:, ...]`) are alternate predictions that can be used if
they are desired over the first one.
Now, in case of only points and box prompts, simply exclude the masks:
>>> inputs = {
... "images": image,
... "points": points,
... "labels": labels,
... "boxes": box,
... }
...
>>> outputs = sam.predict(inputs)
>>> masks, iou_pred = outputs["masks"], outputs["iou_pred"]
# TODO(ianstenbit): Remove the need for this padding.
Another example is that only points prompts are present.
Note that if point prompts are present but no box prompt is present, the
points must be padded using a zero point and -1 label:
>>> padded_points = np.concatenate(
... [points, np.zeros((1, 1, 2))], axis=1
... )
...
>>> padded_labels = np.concatenate(
... [labels, -np.ones((1, 1))], axis=1
... )
>>> inputs = {
... "images": image,
... "points": padded_points,
... "labels": padded_labels,
... }
...
>>> outputs = sam.predict(inputs)
>>> masks, iou_pred = outputs["masks"], outputs["iou_pred"]
    Note that the Segment Anything model currently supports inference only;
    training isn't supported yet, so calling the `fit` method will fail.
""" # noqa: E501
def __init__(self, *, backbone, prompt_encoder, mask_decoder, **kwargs):
# Get the image encoder input -- Images
backbone_input = backbone.input
# Define the prompt encoder inputs -- Prompts
prompt_inputs = {
"points": keras.Input(shape=[None, 2], name="points"),
"labels": keras.Input(shape=[None], name="labels"),
"boxes": keras.Input(shape=[None, 2, 2], name="boxes"),
"masks": keras.Input(shape=[None, None, None, 1], name="masks"),
}
# All Inputs -- Images + Prompts
all_inputs = {"images": backbone_input}
all_inputs.update(prompt_inputs)
# Build the prompt encoder
prompt_embeddings = prompt_encoder(prompt_inputs)
# Define the mask decoder inputs
mask_decoder_inputs = {
"image_embeddings": backbone.output,
"image_pe": prompt_embeddings["dense_positional_embeddings"],
"sparse_prompt_embeddings": prompt_embeddings["sparse_embeddings"],
"dense_prompt_embeddings": prompt_embeddings["dense_embeddings"],
}
# Build the mask decoder
outputs = mask_decoder(mask_decoder_inputs)
super().__init__(inputs=all_inputs, outputs=outputs, **kwargs)
self.backbone = backbone
self.prompt_encoder = prompt_encoder
self.mask_decoder = mask_decoder
# TODO(ianstenbit): Do something more elegant to handle empty prompts.
def predict_step(self, *args, **kwargs):
if len(args) == 2:
args = (args[0], _add_placeholder_prompts(args[-1]))
else:
args = (_add_placeholder_prompts(args[0]),)
return super().predict_step(*args, **kwargs)
def fit(self, *args, **kwargs):
raise NotImplementedError(
"Segment Anything Model only supports inference for now. Training"
" the model isn't supported yet."
)
def get_config(self):
config = super().get_config()
config.update(
{
"backbone": keras.saving.serialize_keras_object(self.backbone),
"prompt_encoder": keras.saving.serialize_keras_object(
self.prompt_encoder
),
"mask_decoder": keras.saving.serialize_keras_object(
self.mask_decoder
),
}
)
return config
@classmethod
def from_config(cls, config):
config.update(
{
"prompt_encoder": keras.layers.deserialize(
config["prompt_encoder"]
),
"mask_decoder": keras.layers.deserialize(
config["mask_decoder"]
),
}
)
return super().from_config(config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy({**backbone_presets, **sam_presets})
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy({**backbone_presets_with_weights, **sam_presets})
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configurations of compatible
backbones."""
return copy.deepcopy(backbone_presets)
def _add_placeholder_prompts(inputs):
"""Adds placeholder prompt inputs for a call to SAM.
Because SAM is a functional subclass model, all inputs must be specified in
calls to the model. However, prompt inputs are all optional, so we have to
add placeholders when they're not specified by the user.
"""
inputs = inputs.copy()
# Get the batch shape based on the image input
B = ops.shape(inputs["images"])[0]
# The type of the placeholders must match the existing inputs with respect
# to whether or not they are tensors (as opposed to Numpy arrays).
zeros = ops.zeros if ops.is_tensor(inputs["images"]) else np.zeros
# Fill in missing inputs.
if "points" not in inputs:
inputs["points"] = zeros((B, 0, 2))
if "labels" not in inputs:
inputs["labels"] = zeros((B, 0))
if "boxes" not in inputs:
inputs["boxes"] = zeros((B, 0, 2, 2))
if "masks" not in inputs:
inputs["masks"] = zeros((B, 0, 256, 256, 1))
return inputs
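# A minimal sketch of the placeholder logic above, using NumPy inputs; the
# image resolution is an illustrative assumption matching the ViT backbone.
if __name__ == "__main__":
    filled = _add_placeholder_prompts({"images": np.ones((2, 1024, 1024, 3))})
    print({key: value.shape for key, value in filled.items()})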
| keras-cv/keras_cv/models/segmentation/segment_anything/sam.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam.py",
"repo_id": "keras-cv",
"token_count": 4041
} | 72 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import ops
class TestCase(tf.test.TestCase, parameterized.TestCase):
"""Base test case class for KerasCV. (Copied from KerasNLP)."""
def assertAllClose(self, x1, x2, atol=1e-6, rtol=1e-6, msg=None):
x1 = tf.nest.map_structure(convert_to_numpy, x1)
x2 = tf.nest.map_structure(convert_to_numpy, x2)
super().assertAllClose(x1, x2, atol=atol, rtol=rtol, msg=msg)
def assertAllEqual(self, x1, x2, msg=None):
x1 = tf.nest.map_structure(convert_to_numpy, x1)
x2 = tf.nest.map_structure(convert_to_numpy, x2)
super().assertAllEqual(x1, x2, msg=msg)
def assertAllGreaterEqual(self, x1, x2):
x1 = tf.nest.map_structure(convert_to_numpy, x1)
x2 = tf.nest.map_structure(convert_to_numpy, x2)
super().assertAllGreaterEqual(x1, x2)
def assertAllLessEqual(self, x1, x2):
x1 = tf.nest.map_structure(convert_to_numpy, x1)
x2 = tf.nest.map_structure(convert_to_numpy, x2)
super().assertAllLessEqual(x1, x2)
def convert_to_numpy(x):
if ops.is_tensor(x) and not isinstance(x, tf.RaggedTensor):
return ops.convert_to_numpy(x)
return x
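# A quick illustration of the conversion helper (ragged tensors are
# deliberately passed through unchanged).
if __name__ == "__main__":
    print(type(convert_to_numpy(tf.constant([1.0, 2.0]))))
    print(type(convert_to_numpy(tf.ragged.constant([[1.0], []]))))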
| keras-cv/keras_cv/tests/test_case.py/0 | {
"file_path": "keras-cv/keras_cv/tests/test_case.py",
"repo_id": "keras-cv",
"token_count": 752
} | 73 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.tests.test_case import TestCase
from keras_cv.utils import preprocessing
class MockRandomGenerator:
def __init__(self, value):
self.value = value
def uniform(self, shape, minval, maxval, dtype=None):
del minval, maxval
return tf.constant(self.value, dtype=dtype)
class PreprocessingTestCase(TestCase):
def setUp(self):
super().setUp()
def test_transform_to_standard_range_neg_one_range(self):
x = tf.constant([-1, 0, 1])
x = preprocessing.transform_value_range(
x, original_range=[-1, 1], target_range=[0, 255]
)
self.assertAllClose(x, [0.0, 127.5, 255.0])
def test_transform_to_same_range(self):
x = tf.constant([-1, 0, 1])
x = preprocessing.transform_value_range(
x, original_range=[0, 255], target_range=[0, 255]
)
self.assertAllClose(x, [-1, 0, 1])
def test_transform_to_standard_range(self):
x = tf.constant([8 / 255, 9 / 255, 255 / 255])
x = preprocessing.transform_value_range(
x, original_range=[0, 1], target_range=[0, 255]
)
self.assertAllClose(x, [8.0, 9.0, 255.0])
def test_transform_to_value_range(self):
x = tf.constant([128.0, 255.0, 0.0])
x = preprocessing.transform_value_range(
x, original_range=[0, 255], target_range=[0, 1]
)
self.assertAllClose(x, [128 / 255, 1, 0])
def test_random_inversion(self):
generator = MockRandomGenerator(0.75)
self.assertEqual(preprocessing.random_inversion(generator), -1.0)
generator = MockRandomGenerator(0.25)
self.assertEqual(preprocessing.random_inversion(generator), 1.0)
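# A quick numeric sketch of the range transform exercised by the tests above.
if __name__ == "__main__":
    x = tf.constant([0.0, 0.5, 1.0])
    print(
        preprocessing.transform_value_range(
            x, original_range=[0, 1], target_range=[0, 255]
        ).numpy()
    )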
| keras-cv/keras_cv/utils/preprocessing_test.py/0 | {
"file_path": "keras-cv/keras_cv/utils/preprocessing_test.py",
"repo_id": "keras-cv",
"token_count": 936
} | 74 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import tensorflow as tf
from keras_cv import utils
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.utils import assert_matplotlib_installed
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
def _extract_image_batch(images, num_images, batch_size):
def unpack_images(inputs):
return inputs["image"]
num_batches_required = math.ceil(num_images / batch_size)
if isinstance(images, tf.data.Dataset):
images = images.map(unpack_images)
if batch_size == 1:
images = images.ragged_batch(num_batches_required)
sample = next(iter(images.take(1)))
else:
sample = next(iter(images.take(num_batches_required)))
return sample
else:
if len(ops.shape(images)) != 4:
raise ValueError(
"`plot_images_gallery()` requires you to "
"batch your `np.array` samples together."
)
else:
num_samples = (
num_images if num_images <= batch_size else num_batches_required
)
sample = images[:num_samples, ...]
return sample
@keras_cv_export("keras_cv.visualization.plot_image_gallery")
def plot_image_gallery(
images,
value_range,
scale=2,
rows=None,
cols=None,
path=None,
show=None,
transparent=True,
dpi=60,
legend_handles=None,
):
"""Displays a gallery of images.
Usage:
```python
train_ds = tfds.load(
"cats_vs_dogs",
split="train",
with_info=False,
shuffle_files=True,
)
keras_cv.visualization.plot_image_gallery(
train_ds,
value_range=(0, 255),
scale=3,
)
```

Args:
images: a Tensor, `tf.data.Dataset` or NumPy array containing images
to show in the gallery. Note: If using a `tf.data.Dataset`,
images should be present in the `FeaturesDict` under
the key `image`.
value_range: value range of the images. Common examples include
`(0, 255)` and `(0, 1)`.
scale: how large to scale the images in the gallery
rows: (Optional) number of rows in the gallery to show.
Required if inputs are unbatched.
cols: (Optional) number of columns in the gallery to show.
Required if inputs are unbatched.
path: (Optional) path to save the resulting gallery to.
show: (Optional) whether to show the gallery of images.
transparent: (Optional) whether to give the image a transparent
background, defaults to `True`.
dpi: (Optional) the dpi to pass to matplotlib.savefig(), defaults to
`60`.
legend_handles: (Optional) matplotlib.patches List of legend handles.
I.e. passing: `[patches.Patch(color='red', label='mylabel')]` will
produce a legend with a single red patch and the label 'mylabel'.
"""
    assert_matplotlib_installed("plot_image_gallery")
if path is not None and show:
raise ValueError(
"plot_gallery() expects either `path` to be set, or `show` "
"to be true."
)
if isinstance(images, tf.data.Dataset):
sample = next(iter(images.take(1)))
batch_size = (
sample["image"].shape[0] if len(sample["image"].shape) == 4 else 1
) # batch_size from within passed `tf.data.Dataset`
else:
batch_size = (
ops.shape(images)[0] if len(ops.shape(images)) == 4 else 1
) # batch_size from np.array or single image
rows = rows or int(math.ceil(math.sqrt(batch_size)))
    cols = cols or int(math.ceil(batch_size / rows))
num_images = rows * cols
images = _extract_image_batch(images, num_images, batch_size)
# Generate subplots
fig, axes = plt.subplots(
nrows=rows,
ncols=cols,
figsize=(cols * scale, rows * scale),
frameon=False,
layout="tight",
squeeze=True,
sharex="row",
sharey="col",
)
fig.subplots_adjust(wspace=0, hspace=0)
if isinstance(axes, np.ndarray) and len(axes.shape) == 1:
expand_axis = 0 if rows == 1 else -1
axes = np.expand_dims(axes, expand_axis)
if legend_handles is not None:
fig.legend(handles=legend_handles, loc="lower center")
# Perform image range transform
images = utils.transform_value_range(
images, original_range=value_range, target_range=(0, 255)
)
images = utils.to_numpy(images)
for row in range(rows):
for col in range(cols):
index = row * cols + col
current_axis = (
axes[row, col] if isinstance(axes, np.ndarray) else axes
)
current_axis.imshow(images[index].astype("uint8"))
current_axis.margins(x=0, y=0)
current_axis.axis("off")
if path is None and show is None:
return fig
if path is not None:
plt.savefig(
fname=path,
pad_inches=0,
bbox_inches="tight",
transparent=transparent,
dpi=dpi,
)
plt.close()
elif show:
plt.show()
plt.close()
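# A minimal sketch with random NumPy images (requires matplotlib; the shapes
# and value range below are illustrative).
if __name__ == "__main__":
    images = np.random.uniform(0, 255, size=(8, 64, 64, 3))
    plot_image_gallery(
        images, value_range=(0, 255), rows=2, cols=4, show=True
    )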
| keras-cv/keras_cv/visualization/plot_image_gallery.py/0 | {
"file_path": "keras-cv/keras_cv/visualization/plot_image_gallery.py",
"repo_id": "keras-cv",
"token_count": 2581
} | 75 |