text (stringlengths 5-261k) | id (stringlengths 16-106) | metadata (dict) | __index_level_0__ (int64, 0-266) |
---|---|---|---|
import numpy as np
import pytest
from absl import logging
from keras import callbacks
from keras import layers
from keras import losses
from keras import optimizers
from keras import testing
from keras.models.sequential import Sequential
class LambdaCallbackTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_lambda_callback(self):
"""Test standard LambdaCallback functionalities with training."""
batch_size = 4
model = Sequential(
[layers.Input(shape=(2,), batch_size=batch_size), layers.Dense(1)]
)
model.compile(
optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
)
x = np.random.randn(16, 2)
y = np.random.randn(16, 1)
lambda_log_callback = callbacks.LambdaCallback(
on_train_begin=lambda logs: logging.warning("on_train_begin"),
on_epoch_begin=lambda epoch, logs: logging.warning(
"on_epoch_begin"
),
on_epoch_end=lambda epoch, logs: logging.warning("on_epoch_end"),
on_train_end=lambda logs: logging.warning("on_train_end"),
)
with self.assertLogs(level="WARNING") as logs:
model.fit(
x,
y,
batch_size=batch_size,
validation_split=0.2,
callbacks=[lambda_log_callback],
epochs=5,
verbose=0,
)
self.assertTrue(any("on_train_begin" in log for log in logs.output))
self.assertTrue(any("on_epoch_begin" in log for log in logs.output))
self.assertTrue(any("on_epoch_end" in log for log in logs.output))
self.assertTrue(any("on_train_end" in log for log in logs.output))
@pytest.mark.requires_trainable_backend
def test_lambda_callback_with_batches(self):
"""Test LambdaCallback's behavior with batch-level callbacks."""
batch_size = 4
model = Sequential(
[layers.Input(shape=(2,), batch_size=batch_size), layers.Dense(1)]
)
model.compile(
optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
)
x = np.random.randn(16, 2)
y = np.random.randn(16, 1)
lambda_log_callback = callbacks.LambdaCallback(
on_train_batch_begin=lambda batch, logs: logging.warning(
"on_train_batch_begin"
),
on_train_batch_end=lambda batch, logs: logging.warning(
"on_train_batch_end"
),
)
with self.assertLogs(level="WARNING") as logs:
model.fit(
x,
y,
batch_size=batch_size,
validation_split=0.2,
callbacks=[lambda_log_callback],
epochs=5,
verbose=0,
)
self.assertTrue(
any("on_train_batch_begin" in log for log in logs.output)
)
self.assertTrue(
any("on_train_batch_end" in log for log in logs.output)
)
@pytest.mark.requires_trainable_backend
def test_lambda_callback_with_kwargs(self):
"""Test LambdaCallback's behavior with custom defined callback."""
batch_size = 4
model = Sequential(
[layers.Input(shape=(2,), batch_size=batch_size), layers.Dense(1)]
)
model.compile(
optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
)
x = np.random.randn(16, 2)
y = np.random.randn(16, 1)
model.fit(
x, y, batch_size=batch_size, epochs=1, verbose=0
) # Train briefly for evaluation to work.
def custom_on_test_begin(logs):
logging.warning("custom_on_test_begin_executed")
lambda_log_callback = callbacks.LambdaCallback(
on_test_begin=custom_on_test_begin
)
with self.assertLogs(level="WARNING") as logs:
model.evaluate(
x,
y,
batch_size=batch_size,
callbacks=[lambda_log_callback],
verbose=0,
)
self.assertTrue(
any(
"custom_on_test_begin_executed" in log
for log in logs.output
)
)
@pytest.mark.requires_trainable_backend
def test_lambda_callback_no_args(self):
"""Test initializing LambdaCallback without any arguments."""
lambda_callback = callbacks.LambdaCallback()
self.assertIsInstance(lambda_callback, callbacks.LambdaCallback)
@pytest.mark.requires_trainable_backend
def test_lambda_callback_with_additional_kwargs(self):
"""Test initializing LambdaCallback with non-predefined kwargs."""
def custom_callback(logs):
pass
lambda_callback = callbacks.LambdaCallback(
custom_method=custom_callback
)
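# LambdaCallback is expected to attach any unrecognized keyword argument to
# the instance as an attribute, which is what the assertion below checks.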
self.assertTrue(hasattr(lambda_callback, "custom_method"))
@pytest.mark.requires_trainable_backend
def test_lambda_callback_during_prediction(self):
"""Test LambdaCallback's functionality during model prediction."""
batch_size = 4
model = Sequential(
[layers.Input(shape=(2,), batch_size=batch_size), layers.Dense(1)]
)
model.compile(
optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
)
x = np.random.randn(16, 2)
def custom_on_predict_begin(logs):
logging.warning("on_predict_begin_executed")
lambda_callback = callbacks.LambdaCallback(
on_predict_begin=custom_on_predict_begin
)
with self.assertLogs(level="WARNING") as logs:
model.predict(
x, batch_size=batch_size, callbacks=[lambda_callback], verbose=0
)
self.assertTrue(
any("on_predict_begin_executed" in log for log in logs.output)
)
| keras/keras/callbacks/lambda_callback_test.py/0 | {
"file_path": "keras/keras/callbacks/lambda_callback_test.py",
"repo_id": "keras",
"token_count": 2950
} | 142 |
import inspect
from keras.api_export import keras_export
from keras.constraints.constraints import Constraint
from keras.constraints.constraints import MaxNorm
from keras.constraints.constraints import MinMaxNorm
from keras.constraints.constraints import NonNeg
from keras.constraints.constraints import UnitNorm
from keras.saving import serialization_lib
from keras.utils.naming import to_snake_case
ALL_OBJECTS = {
Constraint,
MaxNorm,
MinMaxNorm,
NonNeg,
UnitNorm,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
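# Both spellings resolve to the same class, e.g. "MaxNorm" and "max_norm"
# both map to MaxNorm.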
@keras_export("keras.constraints.serialize")
def serialize(constraint):
return serialization_lib.serialize_keras_object(constraint)
@keras_export("keras.constraints.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras constraint object via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.constraints.get")
def get(identifier):
"""Retrieve a Keras constraint object via an identifier."""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret constraint identifier: {identifier}"
)
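# Illustrative usage (built-in constraints only):
#   get("max_norm")    -> a MaxNorm instance (string lookup, then instantiation)
#   get(MaxNorm(2.0))  -> returned unchanged
#   get(None)          -> None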
| keras/keras/constraints/__init__.py/0 | {
"file_path": "keras/keras/constraints/__init__.py",
"repo_id": "keras",
"token_count": 671
} | 143 |
from keras import backend
from keras.dtype_policies import dtype_policy
from keras.saving import serialization_lib
def get(identifier):
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, dtype_policy.DTypePolicy):
return identifier
if isinstance(identifier, dict):
return serialization_lib.deserialize_keras_object(identifier)
if isinstance(identifier, str):
return dtype_policy.DTypePolicy(identifier)
try:
return dtype_policy.DTypePolicy(backend.standardize_dtype(identifier))
except Exception:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
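# Illustrative usage: get("mixed_float16") wraps the string in a DTypePolicy,
# get(None) returns the current global policy, and an existing DTypePolicy
# instance is returned unchanged.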
| keras/keras/dtype_policies/__init__.py/0 | {
"file_path": "keras/keras/dtype_policies/__init__.py",
"repo_id": "keras",
"token_count": 292
} | 144 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras import backend
from keras import initializers
from keras import layers
from keras import testing
class GroupedQueryAttentionTest(testing.TestCase, parameterized.TestCase):
def test_basics(self):
self.run_layer_test(
layers.GroupedQueryAttention,
init_kwargs={
"num_query_heads": 2,
"num_key_value_heads": 2,
"head_dim": 2,
},
input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)},
expected_output_shape=(2, 8, 16),
expected_num_trainable_weights=8,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
self.run_layer_test(
layers.GroupedQueryAttention,
init_kwargs={
"num_query_heads": 2,
"num_key_value_heads": 2,
"head_dim": 2,
"use_bias": False,
"dropout": 0.5,
},
input_shape={"query_shape": (2, 8, 16), "value_shape": (2, 4, 16)},
expected_output_shape=(2, 8, 16),
expected_num_trainable_weights=4,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
@parameterized.named_parameters(
("without_key_proj_mha", (4, 8), (2, 8), None, 2, 2),
("with_key_proj_mha", (4, 8), (2, 8), (2, 3), 2, 2),
("without_key_proj_gqa", (4, 8), (2, 8), None, 4, 2),
("with_key_proj_gqa", (4, 8), (2, 8), (2, 3), 4, 2),
("without_key_value_proj_mqa", (4, 8), (2, 8), None, 4, 1),
("with_key_value_proj_mqa", (4, 8), (2, 8), (2, 3), 4, 1),
)
def test_compute_output_shape(
self,
query_dims,
value_dims,
key_dims,
num_query_heads,
num_key_value_heads,
):
"""Test computed shape is equal to the layer output's shape."""
layer = layers.GroupedQueryAttention(
num_query_heads=num_query_heads,
num_key_value_heads=num_key_value_heads,
head_dim=2,
)
batch_size = 7
query_shape = (batch_size,) + query_dims
value_shape = (batch_size,) + value_dims
key_shape = (batch_size,) + key_dims if key_dims else None
query = np.ones(query_shape)
value = np.ones(value_shape)
key = np.ones(key_shape) if key_shape else None
output = layer(query=query, value=value, key=key)
comp_output_shape = layer.compute_output_shape(
query_shape, value_shape, key_shape
)
self.assertEqual(output.shape, comp_output_shape)
@parameterized.named_parameters(
("query_value_dim_mismatch", (2, 4, 8), (2, 2, 7), 2),
("key_value_dim_mismatch", (2, 4, 8), (2, 2, 8), (2, 1, 7)),
)
def test_shape_mismatch_error(self, query_shape, value_shape, key_shape):
"""Test dimension mismatches"""
layer = layers.GroupedQueryAttention(
num_query_heads=4,
num_key_value_heads=4,
head_dim=2,
)
with self.assertRaisesRegex(ValueError, r"must be equal"):
layer.compute_output_shape(query_shape, value_shape, key_shape)
def test_initializer(self):
# Test with a specified initializer.
layer = layers.GroupedQueryAttention(
num_query_heads=16,
num_key_value_heads=16,
head_dim=64,
kernel_initializer=initializers.TruncatedNormal(stddev=0.02),
)
layer.build((2, 4, 8), (2, 4, 8))
# Make sure the sub layers have different kernel init value.
self.assertNotAllClose(
layer._query_dense.kernel,
layer._key_dense.kernel,
)
self.assertNotAllClose(
layer._query_dense.kernel,
layer._value_dense.kernel,
)
self.assertNotAllClose(
layer._query_dense.kernel,
layer._output_dense.kernel,
)
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_query_mask_propagation(self):
"""Test automatic propagation of the query's mask."""
layer = layers.GroupedQueryAttention(
num_query_heads=2, num_key_value_heads=2, head_dim=2
)
self.assertTrue(layer.supports_masking)
query = np.array([[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]])
masked_query = layers.Embedding(4, 8, mask_zero=True)(query)
value = np.random.normal(size=(3, 3, 8))
output = layer(query=masked_query, value=value)
self.assertAllClose(masked_query._keras_mask, output._keras_mask)
@parameterized.named_parameters(("causal", True), ("not_causal", 0))
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support masking.",
)
def test_masking(self, use_causal_mask):
"""Test that the value and causal masks are taken into account."""
layer = layers.GroupedQueryAttention(
num_query_heads=2, num_key_value_heads=2, head_dim=2
)
query = np.array([[1, 2, 3, 0, 0], [3, 3, 1, 1, 2], [1, 0, 0, 0, 0]])
masked_query = layers.Embedding(4, 8, mask_zero=True)(query)
value = np.array([[5, 4, 0], [3, 0, 0], [2, 1, 1]])
masked_value = layers.Embedding(6, 8, mask_zero=True)(value)
output = layer(
query=masked_query,
value=masked_value,
use_causal_mask=use_causal_mask,
)
mask = np.array(
[[[1, 1, 0]] * 3 + [[0, 0, 0]] * 2]
+ [[[1, 0, 0]] * 5]
+ [[[1, 1, 1]] + [[0, 0, 0]] * 4]
).astype(bool)
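# The expected mask is the outer product of each sample's query mask (rows)
# and value mask (columns), before the optional causal mask below.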
if use_causal_mask:
mask = mask & np.array(
[[[1, 0, 0], [1, 1, 0]] + [[1, 1, 1]] * 3]
).astype(bool)
del masked_query._keras_mask
del masked_value._keras_mask
output_with_manual_mask = layer(
query=masked_query, value=masked_value, attention_mask=mask
)
self.assertAllClose(output, output_with_manual_mask)
def test_correctness(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
# Setup layer.
num_heads = 2
key_dim = 2
layer = layers.MultiHeadAttention(
num_heads=num_heads,
key_dim=key_dim,
)
layer.build(query.shape, key.shape, value.shape)
# Set layer weights.
kernel = np.identity(key_dim)
# To get an identity kernel we need to add a head dim and repeat on it.
kernel = np.repeat(kernel[:, np.newaxis, :], num_heads, axis=1)
# Zeros for all biases.
bias = np.zeros((2, 2))
output_bias = np.zeros((2,))
layer.set_weights([kernel, bias] * 3 + [kernel, output_bias])
# Call layer and assert output.
output, scores = layer(
query=query,
value=value,
key=key,
return_attention_scores=True,
)
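# With identity projections and zero biases, the attention logits reduce to
# q . k / sqrt(key_dim); softmax([0, 1 / sqrt(2)]) ~= [0.33, 0.67], which
# matches the scores asserted below.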
self.assertAllClose(output, [[[5.679, 5.679], [4.32, 4.32]]], atol=1e-3)
self.assertAllClose(
scores,
[[[[0.33, 0.67], [0.67, 0.33]], [[0.33, 0.67], [0.67, 0.33]]]],
atol=1e-3,
)
| keras/keras/layers/attention/grouped_query_attention_test.py/0 | {
"file_path": "keras/keras/layers/attention/grouped_query_attention_test.py",
"repo_id": "keras",
"token_count": 3952
} | 145 |
import numpy as np
from absl.testing import parameterized
from keras import backend
from keras import testing
from keras.backend import KerasTensor
from keras.layers import InputLayer
class InputLayerTest(testing.TestCase, parameterized.TestCase):
# Testing happy path for layer without input tensor
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
def test_input_basic(self, sparse):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
ndim = len(tuple((batch_size,) + input_shape))
init_kwargs = {
"shape": input_shape,
"batch_size": batch_size,
"dtype": dtype,
"sparse": sparse,
}
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
with self.assertRaisesRegex(
ValueError, "`sparse=True` is not supported"
):
InputLayer(**init_kwargs)
return
values = InputLayer(**init_kwargs)
self.assertEqual(values.dtype, dtype)
self.assertEqual(values.batch_shape[0], batch_size)
self.assertEqual(values.batch_shape[1:], input_shape)
self.assertEqual(values.sparse, sparse)
self.assertEqual(values.trainable, True)
self.assertIsInstance(values.output, KerasTensor)
self.assertEqual(values.output.ndim, ndim)
self.assertEqual(values.output.dtype, dtype)
self.assertEqual(values.output.sparse, sparse)
# Testing shape is not None and batch_shape is not None condition
def test_input_error1(self):
input_shape = (2, 3)
with self.assertRaisesRegex(
ValueError, "cannot pass both `shape` and `batch_shape`"
):
InputLayer(shape=input_shape, batch_shape=input_shape)
# Testing batch_size is not None and batch_shape is not None
def test_input_error2(self):
input_shape = (2, 3)
batch_size = 4
with self.assertRaisesRegex(
ValueError, "cannot pass both `batch_size` and `batch_shape`"
):
InputLayer(batch_size=batch_size, batch_shape=input_shape)
# Testing shape is None and batch_shape is None
def test_input_error3(self):
with self.assertRaisesRegex(ValueError, "pass a `shape` argument."):
InputLayer(shape=None, batch_shape=None)
# Testing Input tensor is not Keras tensor
def test_input_tensor_error(self):
input_shape = (2, 3)
batch_size = 4
input_tensor = np.zeros(input_shape)
with self.assertRaisesRegex(
ValueError, "Argument `input_tensor` must be a KerasTensor"
):
InputLayer(
shape=input_shape,
batch_size=batch_size,
input_tensor=input_tensor,
)
# Testing happy path for layer with input tensor
def testing_input_tensor(self):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
input_tensor = KerasTensor(shape=input_shape, dtype=dtype)
values = InputLayer(
shape=input_shape,
batch_size=batch_size,
input_tensor=input_tensor,
dtype=dtype,
)
self.assertEqual(values.dtype, dtype)
self.assertEqual(values.batch_shape[0], batch_size)
self.assertEqual(values.batch_shape[1:], input_shape)
self.assertEqual(values.trainable, True)
self.assertIsInstance(values.output, KerasTensor)
self.assertEqual(values.output, input_tensor)
self.assertEqual(values.output.ndim, input_tensor.ndim)
self.assertEqual(values.output.dtype, dtype)
def test_input_shape_deprecated(self):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
with self.assertWarnsRegex(
UserWarning,
"Argument `input_shape` is deprecated. Use `shape` instead.",
):
layer = InputLayer(
input_shape=input_shape, batch_size=batch_size, dtype=dtype
)
self.assertEqual(layer.batch_shape[0], batch_size)
self.assertEqual(layer.batch_shape[1:], input_shape)
self.assertEqual(layer.dtype, dtype)
self.assertIsInstance(layer.output, KerasTensor)
def test_call_method(self):
layer = InputLayer(shape=(32,))
output = layer.call()
self.assertIsNone(output)
def test_numpy_shape(self):
# non-python int type shapes should be ok
InputLayer(shape=(np.int64(32),))
| keras/keras/layers/core/input_layer_test.py/0 | {
"file_path": "keras/keras/layers/core/input_layer_test.py",
"repo_id": "keras",
"token_count": 2137
} | 146 |
from keras import ops
from keras.api_export import keras_export
from keras.layers.merging.base_merge import Merge
@keras_export("keras.layers.Maximum")
class Maximum(Merge):
"""Computes element-wise maximum on a list of inputs.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Maximum()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras.layers.maximum([x1, x2])`
>>> y = keras.layers.Maximum()([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.maximum(output, inputs[i])
return output
@keras_export("keras.layers.maximum")
def maximum(inputs, **kwargs):
"""Functional interface to the `keras.layers.Maximum` layer.
Args:
inputs: A list of input tensors, all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor with the element-wise maximum of the inputs, with the same
shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.maximum([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> y = keras.layers.maximum([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Maximum(**kwargs)(inputs)
| keras/keras/layers/merging/maximum.py/0 | {
"file_path": "keras/keras/layers/merging/maximum.py",
"repo_id": "keras",
"token_count": 898
} | 147 |
from keras import backend
from keras import ops
from keras.api_export import keras_export
from keras.layers.layer import Layer
@keras_export("keras.layers.AlphaDropout")
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps the mean and variance of its inputs
at their original values, in order to ensure the self-normalizing property
even after this dropout.
Alpha Dropout pairs well with Scaled Exponential Linear Units (SELU) by
randomly setting activations to the negative saturation value.
Args:
rate: Float between 0 and 1. The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
noise_shape: 1D integer tensor representing the shape of the
binary alpha dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the alpha dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding alpha dropout) or in inference mode
(doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
noise_shape = self._get_concrete_noise_shape(
inputs, self.noise_shape
)
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
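# alpha_p is SELU's negative saturation value, roughly -1.7581.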
kept_idx = ops.greater_equal(
ops.random.uniform(noise_shape, seed=self.seed_generator),
self.rate,
)
kept_idx = ops.cast(kept_idx, inputs.dtype)
# Compute affine transformation parameters
a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5
b = -a * alpha_p * self.rate
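# These affine parameters keep the output's mean and variance equal to the
# input's, as described in the class docstring.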
# Apply mask
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
return a * x + b
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def _get_concrete_noise_shape(self, inputs, noise_shape):
if noise_shape is None:
return inputs.shape
concrete_inputs_shape = inputs.shape
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
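# Illustrative usage in a self-normalizing (SELU) network; the layer sizes and
# tensor names are placeholders:
#   x = keras.layers.Dense(64, activation="selu",
#                          kernel_initializer="lecun_normal")(inputs)
#   x = keras.layers.AlphaDropout(0.1)(x)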
| keras/keras/layers/regularization/alpha_dropout.py/0 | {
"file_path": "keras/keras/layers/regularization/alpha_dropout.py",
"repo_id": "keras",
"token_count": 1555
} | 148 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras import backend
from keras import layers
from keras import ops
from keras import testing
class Cropping3DTest(testing.TestCase, parameterized.TestCase):
@parameterized.product(
(
{"dim1_cropping": (1, 2), "dim1_expected": (1, 5)}, # both
{"dim1_cropping": (0, 2), "dim1_expected": (0, 5)}, # left only
{"dim1_cropping": (1, 0), "dim1_expected": (1, 7)}, # right only
),
(
{"dim2_cropping": (3, 4), "dim2_expected": (3, 5)}, # both
{"dim2_cropping": (0, 4), "dim2_expected": (0, 5)}, # left only
{"dim2_cropping": (3, 0), "dim2_expected": (3, 9)}, # right only
),
(
{"dim3_cropping": (5, 6), "dim3_expected": (5, 7)}, # both
{"dim3_cropping": (0, 6), "dim3_expected": (0, 7)}, # left only
{"dim3_cropping": (5, 0), "dim3_expected": (5, 13)}, # right only
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
@pytest.mark.requires_trainable_backend
def test_cropping_3d(
self,
dim1_cropping,
dim2_cropping,
dim3_cropping,
data_format,
dim1_expected,
dim2_expected,
dim3_expected,
):
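# Each `dim*_expected` tuple is the slice that remains after cropping, e.g.
# cropping (1, 2) on an axis of length 7 keeps indices 1:5.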
if data_format == "channels_first":
inputs = np.random.rand(3, 5, 7, 9, 13)
expected_output = ops.convert_to_tensor(
inputs[
:,
:,
dim1_expected[0] : dim1_expected[1],
dim2_expected[0] : dim2_expected[1],
dim3_expected[0] : dim3_expected[1],
]
)
else:
inputs = np.random.rand(3, 7, 9, 13, 5)
expected_output = ops.convert_to_tensor(
inputs[
:,
dim1_expected[0] : dim1_expected[1],
dim2_expected[0] : dim2_expected[1],
dim3_expected[0] : dim3_expected[1],
:,
]
)
cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
self.run_layer_test(
layers.Cropping3D,
init_kwargs={"cropping": cropping, "data_format": data_format},
input_data=inputs,
expected_output=expected_output,
)
@parameterized.product(
(
# same cropping values with 3 tuples
{
"cropping": ((2, 2), (2, 2), (2, 2)),
"expected": ((2, 5), (2, 7), (2, 11)),
},
# same cropping values with 1 tuple
{"cropping": (2, 2, 2), "expected": ((2, 5), (2, 7), (2, 11))},
# same cropping values with an integer
{"cropping": 2, "expected": ((2, 5), (2, 7), (2, 11))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
@pytest.mark.requires_trainable_backend
def test_cropping_3d_with_same_cropping(
self, cropping, data_format, expected
):
if data_format == "channels_first":
inputs = np.random.rand(3, 5, 7, 9, 13)
expected_output = ops.convert_to_tensor(
inputs[
:,
:,
expected[0][0] : expected[0][1],
expected[1][0] : expected[1][1],
expected[2][0] : expected[2][1],
]
)
else:
inputs = np.random.rand(3, 7, 9, 13, 5)
expected_output = ops.convert_to_tensor(
inputs[
:,
expected[0][0] : expected[0][1],
expected[1][0] : expected[1][1],
expected[2][0] : expected[2][1],
:,
]
)
self.run_layer_test(
layers.Cropping3D,
init_kwargs={"cropping": cropping, "data_format": data_format},
input_data=inputs,
expected_output=expected_output,
)
def test_cropping_3d_with_dynamic_spatial_dim(self):
if backend.config.image_data_format() == "channels_last":
input_layer = layers.Input(batch_shape=(1, 7, None, 13, 5))
else:
input_layer = layers.Input(batch_shape=(1, 5, 7, None, 13))
cropped = layers.Cropping3D(((1, 2), (3, 4), (5, 6)))(input_layer)
if backend.config.image_data_format() == "channels_last":
self.assertEqual(cropped.shape, (1, 4, None, 2, 5))
else:
self.assertEqual(cropped.shape, (1, 5, 4, None, 2))
@parameterized.product(
(
{"cropping": ((3, 6), (0, 0), (0, 0))},
{"cropping": ((0, 0), (5, 8), (0, 0))},
{"cropping": ((0, 0), (0, 0), (7, 6))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_cropping_3d_errors_if_cropping_more_than_available(
self, cropping, data_format
):
input_layer = layers.Input(batch_shape=(3, 7, 9, 13, 5))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=cropping, data_format=data_format)(
input_layer
)
def test_cropping_3d_errors_if_cropping_argument_invalid(self):
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=(1,))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=(1, 2))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=(1, 2, 3, 4))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping="1")
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=((1, 2), (3, 4), (5, 6, 7)))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=((1, 2), (3, 4), (5, -6)))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=((1, 2), (3, 4), "5"))
@parameterized.product(
(
{"cropping": ((8, 1), (1, 1), (1, 1))},
{"cropping": ((1, 1), (10, 1), (1, 1))},
{"cropping": ((1, 1), (1, 1), (14, 1))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_cropping_3d_with_excessive_cropping(self, cropping, data_format):
if data_format == "channels_first":
shape = (3, 5, 7, 9, 13)
input_layer = layers.Input(batch_shape=shape)
else:
shape = (3, 7, 9, 13, 5)
input_layer = layers.Input(batch_shape=shape)
expected_error_msg = (
"Values in `cropping` argument should be smaller than the"
)
with self.assertRaisesRegex(ValueError, expected_error_msg):
layers.Cropping3D(cropping=cropping, data_format=data_format)(
input_layer
)
| keras/keras/layers/reshaping/cropping3d_test.py/0 | {
"file_path": "keras/keras/layers/reshaping/cropping3d_test.py",
"repo_id": "keras",
"token_count": 3896
} | 149 |
from unittest import mock
import pytest
from absl.testing import parameterized
from keras import backend
from keras import ops
from keras import optimizers
from keras import testing
TEST_CASES = [
{
"testcase_name": "adadelta",
"optimizer_class": optimizers.Adadelta,
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "adafactor",
"optimizer_class": optimizers.Adafactor,
"init_kwargs": {"clip_threshold": 0.5},
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "adagrad",
"optimizer_class": optimizers.Adagrad,
"expect_model_sparse_variable_updates": True,
"expect_optimizer_sparse_variable_updates": True,
},
{
"testcase_name": "adam",
"optimizer_class": optimizers.Adam,
},
{
"testcase_name": "adam_amsgrad",
"optimizer_class": optimizers.Adam,
"init_kwargs": {"amsgrad": True},
},
{
"testcase_name": "adamax",
"optimizer_class": optimizers.Adamax,
},
{
"testcase_name": "adamw",
"optimizer_class": optimizers.AdamW,
},
{
"testcase_name": "adamw_amsgrad",
"optimizer_class": optimizers.AdamW,
"init_kwargs": {"amsgrad": True},
},
{
"testcase_name": "ftrl",
"optimizer_class": optimizers.Ftrl,
},
{
"testcase_name": "lion",
"optimizer_class": optimizers.Lion,
},
{
"testcase_name": "loss_scale_optimizer_sgd",
"optimizer_class": lambda: optimizers.LossScaleOptimizer(
optimizers.SGD(learning_rate=0.5)
),
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "nadam",
"optimizer_class": optimizers.Nadam,
},
{
"testcase_name": "rmsprop",
"optimizer_class": optimizers.RMSprop,
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "rmsprop_momentum",
"optimizer_class": optimizers.RMSprop,
"init_kwargs": {"momentum": 0.05},
},
{
"testcase_name": "rmsprop_momentum_centered",
"optimizer_class": optimizers.RMSprop,
"init_kwargs": {"momentum": 0.05, "centered": True},
},
{
"testcase_name": "sgd",
"optimizer_class": optimizers.SGD,
"expect_model_sparse_variable_updates": True,
},
{
"testcase_name": "sgd_momentum",
"optimizer_class": optimizers.SGD,
"init_kwargs": {"momentum": 0.05},
},
{
"testcase_name": "sgd_momentum_nesterov",
"optimizer_class": optimizers.SGD,
"init_kwargs": {"momentum": 0.05, "nesterov": True},
},
]
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
class OptimizerSparseTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(TEST_CASES)
def test_sparse_gradients(
self,
optimizer_class,
init_kwargs={},
expect_model_sparse_variable_updates=False,
expect_optimizer_sparse_variable_updates=False,
):
# This test verifies that:
# - Optimizers use Keras ops everywhere instead of native operators
# (e.g. `ops.add()` instead of `+`) where sparse gradients are handled
# - The used ops handle sparse gradients
# - Optimizers use `self.assign/assign_add/assign_sub` instead of
# calling the method on the variable directly. Otherwise, the sparse
# updates are densified before being applied.
# - For some optimizers, a sparse gradient actually results in a sparse
# variable update as per `expect_model_sparse_variable_updates` and
# `expect_optimizer_sparse_variable_updates`
model_variable = backend.Variable(initializer="ones", shape=(5, 10))
optimizer = optimizer_class(**init_kwargs)
# Mocking "tensorflow.Variable" won't work as it gets substituted with
# the resource variable class.
if backend.backend() == "tensorflow":
import tensorflow as tf
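# Build a sparse gradient that only touches rows 0, 2 and 4 of the (5, 10)
# variable; the JAX branch below builds the equivalent BCOO matrix.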
grad = tf.IndexedSlices(0.5 * ops.ones((3, 10)), (0, 2, 4), (5, 10))
sparse_class = tf.IndexedSlices
variable_class = model_variable._value.__class__
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
grad = jax_sparse.BCOO(
(0.5 * ops.ones((3, 10)), ((0,), (2,), (4,))), shape=(5, 10)
)
sparse_class = jax_sparse.JAXSparse
variable_class = model_variable.__class__
else:
self.fail(f"Sparse is unsupported with backend {backend.backend()}")
optimizer_to_patch = (
optimizer.inner_optimizer
if isinstance(optimizer, optimizers.LossScaleOptimizer)
else optimizer
)
model_sparse_variable_updates = False
optimizer_sparse_variable_updates = False
def mock_optimizer_assign(variable, value):
nonlocal model_sparse_variable_updates
nonlocal optimizer_sparse_variable_updates
if isinstance(variable, backend.Variable):
variable = variable._value
if isinstance(value, sparse_class):
if variable is model_variable._value:
model_sparse_variable_updates = True
elif any(variable is v._value for v in optimizer.variables):
optimizer_sparse_variable_updates = True
def mock_variable_assign(variable, value):
# Make an exception for scalar variables
if len(variable.shape):
pytest.fail(
"Optimizer is calling `assign`, `assign_add` or "
"`assign_sub` directly on a variable. Use "
"`self.assign/assign_add/assign_sub(variable, value)` "
"instead to support sparse updates."
)
# patch "_apply_weight_decay" to exclude this special case.
# patch the optimizer "assign" methods to detect sparse updates.
# patch the tf.Variable "assign" methods to detect direct assign calls.
with mock.patch.object(
optimizer_to_patch, "_apply_weight_decay", autospec=True
), mock.patch.object(
optimizer_to_patch, "assign", autospec=True
) as optimizer_assign, mock.patch.object(
optimizer_to_patch, "assign_add", autospec=True
) as optimizer_assign_add, mock.patch.object(
optimizer_to_patch, "assign_sub", autospec=True
) as optimizer_assign_sub, mock.patch.object(
variable_class, "assign", autospec=True
) as variable_assign, mock.patch.object(
variable_class, "assign_add", autospec=True
) as variable_assign_add, mock.patch.object(
variable_class, "assign_sub", autospec=True
) as variable_assign_sub:
optimizer_assign.side_effect = mock_optimizer_assign
optimizer_assign_add.side_effect = mock_optimizer_assign
optimizer_assign_sub.side_effect = mock_optimizer_assign
variable_assign.side_effect = mock_variable_assign
variable_assign_add.side_effect = mock_variable_assign
variable_assign_sub.side_effect = mock_variable_assign
optimizer.apply([grad], [model_variable])
self.assertEqual(
model_sparse_variable_updates, expect_model_sparse_variable_updates
)
self.assertEqual(
optimizer_sparse_variable_updates,
expect_optimizer_sparse_variable_updates,
)
@parameterized.named_parameters(TEST_CASES)
def test_sparse_correctness(
self, optimizer_class, init_kwargs={}, **kwargs
):
# This test verifies that applying a sparse gradient gives the same
# numerical results as the same dense gradient.
optimizer_sparse = optimizer_class(**init_kwargs)
optimizer_dense = optimizer_class(**init_kwargs)
var_sparse = backend.Variable(initializer="ones", shape=(5, 3, 2))
var_dense = backend.Variable(initializer="ones", shape=(5, 3, 2))
stateless = backend.backend() == "jax"
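# The JAX backend is stateless: updates go through `stateless_apply`, which
# takes and returns explicit variable values instead of mutating in place.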
if stateless:
optimizer_sparse.build([var_sparse])
optimizer_dense.build([var_dense])
optimizer_sparse_vars = optimizer_sparse.variables
optimizer_dense_vars = optimizer_dense.variables
var_sparse_values = [var_sparse.value]
var_dense_values = [var_dense.value]
for i in range(5):
if backend.backend() == "tensorflow":
import tensorflow as tf
grad_sparse = tf.IndexedSlices(
values=ops.ones((3, 3, 2)) * (10.0 - i),
indices=(0, 2, 4),
dense_shape=(5, 3, 2),
)
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
grad_sparse = jax_sparse.BCOO(
(ops.ones((3, 3, 2)) * (10.0 - i), ((0,), (2,), (4,))),
shape=(5, 3, 2),
)
else:
self.fail(
f"Sparse is unsupported with backend {backend.backend()}"
)
grad_dense = ops.convert_to_tensor(grad_sparse, sparse=False)
if stateless:
(
var_sparse_values,
optimizer_sparse_vars,
) = optimizer_sparse.stateless_apply(
optimizer_sparse_vars, [grad_sparse], var_sparse_values
)
(
var_dense_values,
optimizer_dense_vars,
) = optimizer_dense.stateless_apply(
optimizer_dense_vars, [grad_dense], var_dense_values
)
self.assertAllClose(var_sparse_values[0], var_dense_values[0])
else:
optimizer_sparse.apply([grad_sparse], [var_sparse])
optimizer_dense.apply([grad_dense], [var_dense])
self.assertAllClose(var_sparse.value, var_dense.value)
| keras/keras/optimizers/optimizer_sparse_test.py/0 | {
"file_path": "keras/keras/optimizers/optimizer_sparse_test.py",
"repo_id": "keras",
"token_count": 4992
} | 150 |
from keras import backend
from keras import testing
from keras.utils import tracking
class TrackingTest(testing.TestCase):
def test_untracking_in_tracked_list(self):
tracked_variables = []
tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
tracked_variables,
),
}
)
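# Any object appended to a tracked collection that matches the predicate
# (here: backend.Variable instances) is mirrored into `tracked_variables`,
# and removing it from the collection untracks it again.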
v1 = backend.Variable(1)
v2 = backend.Variable(2)
lst = tracking.TrackedList([], tracker)
lst.append(v1)
lst.append(None)
lst.append(v2)
lst.append(0)
self.assertLen(tracked_variables, 2)
self.assertEqual(tracked_variables[0], v1)
self.assertEqual(tracked_variables[1], v2)
lst.remove(v1)
self.assertLen(lst, 3)
self.assertLen(tracked_variables, 1)
lst.remove(v2)
self.assertLen(lst, 2)
self.assertLen(tracked_variables, 0)
lst2 = tracking.TrackedList([], tracker)
lst2.append(v1)
lst2.append(None)
lst2.append(v2)
lst2.append(0)
popped_value = lst2.pop()
self.assertEqual(popped_value, 0)
self.assertLen(lst2, 3)
self.assertLen(tracked_variables, 2)
lst2.clear()
self.assertLen(lst2, 0)
self.assertLen(tracked_variables, 0)
lst2.append(v1)
lst2.append(v2)
del lst2[0]
self.assertLen(lst2, 1)
self.assertLen(tracked_variables, 1)
| keras/keras/utils/tracking_test.py/0 | {
"file_path": "keras/keras/utils/tracking_test.py",
"repo_id": "keras",
"token_count": 814
} | 151 |
{
"dockerFile": "Dockerfile",
"postCreateCommand": "sh /setup.sh",
"extensions": ["ms-python.python"],
"settings": {
"files.watcherExclude": {
"**/bazel-*/**": true
},
"search.exclude": {
"**/bazel-*/**": true
}
}
} | tf-keras/.devcontainer/devcontainer.json/0 | {
"file_path": "tf-keras/.devcontainer/devcontainer.json",
"repo_id": "tf-keras",
"token_count": 154
} | 152 |
workspace(name = "org_keras")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# Needed by protobuf
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "bazel_skylib",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz",
],
sha256 = "74d544d96f4a5bb630d465ca8bbcfe231e3594e5aae57e1edbf17a6eb3ca2506",
)
load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace")
bazel_skylib_workspace()
# Needed by protobuf
http_archive(
name = "six_archive",
build_file = "//third_party:six.BUILD",
sha256 = "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
strip_prefix = "six-1.16.0",
urls = ["https://pypi.python.org/packages/source/s/six/six-1.16.0.tar.gz"],
)
bind(
name = "six",
actual = "@six_archive//:six",
)
http_archive(
name = "com_google_protobuf",
sha256 = "f66073dee0bc159157b0bd7f502d7d1ee0bc76b3c1eac9836927511bdc4b3fc1",
strip_prefix = "protobuf-3.21.9",
urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.9.zip"],
)
# ZLIB. Need by com_google_protobuf.
http_archive(
name = "zlib",
build_file = "@com_google_protobuf//:third_party/zlib.BUILD",
sha256 = "b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30",
strip_prefix = "zlib-1.2.13",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.13.tar.gz",
"https://zlib.net/zlib-1.2.13.tar.gz",
],
)
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
protobuf_deps()
| tf-keras/WORKSPACE/0 | {
"file_path": "tf-keras/WORKSPACE",
"repo_id": "tf-keras",
"token_count": 872
} | 153 |
# TensorFlow API backwards compatibility test goldens.
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = ["//visibility:public"],
licenses = ["notice"], # Apache 2.0
)
filegroup(
name = "api_golden_v1",
srcs = glob(["v1/*.pbtxt"]),
)
filegroup(
name = "api_golden_v2",
srcs = glob(["v2/*.pbtxt"]),
)
| tf-keras/tf_keras/api/golden/BUILD/0 | {
"file_path": "tf-keras/tf_keras/api/golden/BUILD",
"repo_id": "tf-keras",
"token_count": 157
} | 154 |
# TensorFlow API backwards compatibility tests.
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = ["//tf_keras/api:__subpackages__"],
licenses = ["notice"], # Apache 2.0
)
exports_files([
"README.txt",
"API_UPDATE_WARNING.txt",
])
tf_py_test(
name = "api_compatibility_test",
srcs = ["api_compatibility_test.py"],
data = [
"//tf_keras/api/golden:api_golden_v1",
"//tf_keras/api/golden:api_golden_v2",
"//tf_keras/api/tests:API_UPDATE_WARNING.txt",
"//tf_keras/api/tests:README.txt",
],
python_version = "PY3",
srcs_version = "PY3",
tags = [
"no_oss", # TODO(scottzhu): Fix this in OSS test.
"no_pip",
"no_rocm",
"no_windows", # Bugs due to some paths.
],
deps = [
"//:expect_six_installed",
"//third_party/py/tensorflow",
"//third_party/tensorflow/python/lib/io:file_io",
"//third_party/tensorflow/tools/api/lib:python_object_to_proto_visitor",
"//third_party/tensorflow/tools/common:public_api",
"//third_party/tensorflow/tools/common:traverse",
],
)
| tf-keras/tf_keras/api/tests/BUILD/0 | {
"file_path": "tf-keras/tf_keras/api/tests/BUILD",
"repo_id": "tf-keras",
"token_count": 588
} | 155 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet v2 models for TF-Keras.
MobileNetV2 is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNetV2 is very similar to the original MobileNet,
except that it uses inverted residual blocks with
bottlenecking features. It has a drastically lower
parameter count than the original MobileNet.
MobileNets support any input size greater
than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 22 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
0.35, 0.5, 0.75, 1.0 (also called 100% MobileNet), 1.3, and 1.4.
For each of these `alpha` values, weights for 5 different input image sizes
are provided (224, 192, 160, 128, and 96).
The following table describes the performance of
MobileNet on various input sizes:
------------------------------------------------------------------------
MACs stands for Multiply Adds
Classification Checkpoint|MACs (M)|Parameters (M)|Top 1 Accuracy|Top 5 Accuracy
--------------------------|------------|---------------|---------|------------
| [mobilenet_v2_1.4_224] | 582 | 6.06 | 75.0 | 92.5 |
| [mobilenet_v2_1.3_224] | 509 | 5.34 | 74.4 | 92.1 |
| [mobilenet_v2_1.0_224] | 300 | 3.47 | 71.8 | 91.0 |
| [mobilenet_v2_1.0_192] | 221 | 3.47 | 70.7 | 90.1 |
| [mobilenet_v2_1.0_160] | 154 | 3.47 | 68.8 | 89.0 |
| [mobilenet_v2_1.0_128] | 99 | 3.47 | 65.3 | 86.9 |
| [mobilenet_v2_1.0_96] | 56 | 3.47 | 60.3 | 83.2 |
| [mobilenet_v2_0.75_224] | 209 | 2.61 | 69.8 | 89.6 |
| [mobilenet_v2_0.75_192] | 153 | 2.61 | 68.7 | 88.9 |
| [mobilenet_v2_0.75_160] | 107 | 2.61 | 66.4 | 87.3 |
| [mobilenet_v2_0.75_128] | 69 | 2.61 | 63.2 | 85.3 |
| [mobilenet_v2_0.75_96] | 39 | 2.61 | 58.8 | 81.6 |
| [mobilenet_v2_0.5_224] | 97 | 1.95 | 65.4 | 86.4 |
| [mobilenet_v2_0.5_192] | 71 | 1.95 | 63.9 | 85.4 |
| [mobilenet_v2_0.5_160] | 50 | 1.95 | 61.0 | 83.2 |
| [mobilenet_v2_0.5_128] | 32 | 1.95 | 57.7 | 80.8 |
| [mobilenet_v2_0.5_96] | 18 | 1.95 | 51.2 | 75.8 |
| [mobilenet_v2_0.35_224] | 59 | 1.66 | 60.3 | 82.9 |
| [mobilenet_v2_0.35_192] | 43 | 1.66 | 58.2 | 81.2 |
| [mobilenet_v2_0.35_160] | 30 | 1.66 | 55.7 | 79.1 |
| [mobilenet_v2_0.35_128] | 20 | 1.66 | 50.8 | 75.0 |
| [mobilenet_v2_0.35_96] | 11 | 1.66 | 45.5 | 70.4 |
Reference:
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
https://arxiv.org/abs/1801.04381) (CVPR 2018)
"""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.applications import imagenet_utils
from tf_keras.engine import training
from tf_keras.layers import VersionAwareLayers
from tf_keras.utils import data_utils
from tf_keras.utils import layer_utils
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/"
)
layers = None
@keras_export(
"keras.applications.mobilenet_v2.MobileNetV2",
"keras.applications.MobileNetV2",
)
def MobileNetV2(
input_shape=None,
alpha=1.0,
include_top=True,
weights="imagenet",
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the MobileNetV2 architecture.
MobileNetV2 is very similar to the original MobileNet,
except that it uses inverted residual blocks with
bottlenecking features. It has a drastically lower
parameter count than the original MobileNet.
MobileNets support any input size greater
than 32 x 32, with larger image sizes
offering better performance.
Reference:
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
https://arxiv.org/abs/1801.04381) (CVPR 2018)
This function returns a TF-Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each TF-Keras Application expects a specific kind of input
preprocessing. For MobileNetV2, call
`tf.keras.applications.mobilenet_v2.preprocess_input` on your inputs before
passing them to the model. `mobilenet_v2.preprocess_input` will scale input
pixels between -1 and 1.
Args:
input_shape: Optional shape tuple, to be specified if you would
like to use a model with an input image resolution that is not
(224, 224, 3).
It should have exactly 3 input channels, e.g. (224, 224, 3).
You can also omit this option if you would like
to infer input_shape from an input_tensor.
If you provide both input_tensor and input_shape,
input_shape will be used if they match; if the shapes
do not match, an error is raised.
E.g. `(160, 160, 3)` would be one valid value.
alpha: Float, larger than zero, controls the width of the network. This is
known as the width multiplier in the MobileNetV2 paper, but the name is
kept for consistency with `applications.MobileNetV1` model in TF-Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1.0, default number of filters from the paper
are used at each layer.
include_top: Boolean, whether to include the fully-connected layer at the
top of the network. Defaults to `True`.
weights: String, one of `None` (random initialization), 'imagenet'
(pre-training on ImageNet), or the path to the weights file to be
loaded.
input_tensor: Optional TF-Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: String, optional pooling mode for feature extraction when
`include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional integer number of classes to classify images into, only
to be specified if `include_top` is True, and if no `weights` argument
is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
"""
global layers
if "layers" in kwargs:
layers = kwargs.pop("layers")
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError(f"Unknown argument(s): {kwargs}")
if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded. "
f"Received `weights={weights}`"
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
'If using `weights` as `"imagenet"` with `include_top` '
f"as true, `classes` should be 1000. Received `classes={classes}`"
)
# Determine proper input shape and default size.
# If both input_shape and input_tensor are used, they should match
if input_shape is not None and input_tensor is not None:
try:
is_input_t_tensor = backend.is_keras_tensor(input_tensor)
except ValueError:
try:
is_input_t_tensor = backend.is_keras_tensor(
layer_utils.get_source_inputs(input_tensor)
)
except ValueError:
raise ValueError(
f"input_tensor: {input_tensor}"
"is not type input_tensor. "
f"Received `type(input_tensor)={type(input_tensor)}`"
)
if is_input_t_tensor:
if backend.image_data_format() == "channels_first":
if backend.int_shape(input_tensor)[1] != input_shape[1]:
raise ValueError(
"input_shape[1] must equal shape(input_tensor)[1] "
"when `image_data_format` is `channels_first`; "
"Received `input_tensor.shape="
f"{input_tensor.shape}`"
f", `input_shape={input_shape}`"
)
else:
if backend.int_shape(input_tensor)[2] != input_shape[1]:
raise ValueError(
"input_tensor.shape[2] must equal input_shape[1]; "
"Received `input_tensor.shape="
f"{input_tensor.shape}`, "
f"`input_shape={input_shape}`"
)
else:
raise ValueError(
"input_tensor is not a TF-Keras tensor; "
f"Received `input_tensor={input_tensor}`"
)
# If input_shape is None, infer shape from input_tensor.
if input_shape is None and input_tensor is not None:
try:
backend.is_keras_tensor(input_tensor)
except ValueError:
raise ValueError(
"input_tensor must be a valid TF-Keras tensor type; "
f"Received {input_tensor} of type {type(input_tensor)}"
)
if input_shape is None and not backend.is_keras_tensor(input_tensor):
default_size = 224
elif input_shape is None and backend.is_keras_tensor(input_tensor):
if backend.image_data_format() == "channels_first":
rows = backend.int_shape(input_tensor)[2]
cols = backend.int_shape(input_tensor)[3]
else:
rows = backend.int_shape(input_tensor)[1]
cols = backend.int_shape(input_tensor)[2]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
# If input_shape is None and no input_tensor
elif input_shape is None:
default_size = 224
# If input_shape is not None, assume default size.
else:
if backend.image_data_format() == "channels_first":
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if backend.image_data_format() == "channels_last":
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == "imagenet":
if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
raise ValueError(
"If imagenet weights are being loaded, "
"alpha must be one of `0.35`, `0.50`, `0.75`, "
"`1.0`, `1.3` or `1.4` only;"
f" Received `alpha={alpha}`"
)
if rows != cols or rows not in [96, 128, 160, 192, 224]:
rows = 224
logging.warning(
"`input_shape` is undefined or non-square, "
"or `rows` is not in [96, 128, 160, 192, 224]. "
"Weights for input shape (224, 224) will be "
"loaded as the default."
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
first_block_filters = _make_divisible(32 * alpha, 8)
x = layers.Conv2D(
first_block_filters,
kernel_size=3,
strides=(2, 2),
padding="same",
use_bias=False,
name="Conv1",
)(img_input)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3, momentum=0.999, name="bn_Conv1"
)(x)
x = layers.ReLU(6.0, name="Conv1_relu")(x)
x = _inverted_res_block(
x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0
)
x = _inverted_res_block(
x, filters=24, alpha=alpha, stride=2, expansion=6, block_id=1
)
x = _inverted_res_block(
x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2
)
x = _inverted_res_block(
x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3
)
x = _inverted_res_block(
x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4
)
x = _inverted_res_block(
x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5
)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6
)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7
)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8
)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9
)
x = _inverted_res_block(
x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10
)
x = _inverted_res_block(
x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11
)
x = _inverted_res_block(
x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12
)
x = _inverted_res_block(
x, filters=160, alpha=alpha, stride=2, expansion=6, block_id=13
)
x = _inverted_res_block(
x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14
)
x = _inverted_res_block(
x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15
)
x = _inverted_res_block(
x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16
)
# No alpha applied to the last conv, as stated in the paper: if the width
# multiplier is greater than 1, we increase the number of output channels.
if alpha > 1.0:
last_block_filters = _make_divisible(1280 * alpha, 8)
else:
last_block_filters = 1280
x = layers.Conv2D(
last_block_filters, kernel_size=1, use_bias=False, name="Conv_1"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv_1_bn"
)(x)
x = layers.ReLU(6.0, name="out_relu")(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes, activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account any potential predecessors of
# `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name=f"mobilenetv2_{alpha:0.2f}_{rows}")
# Load weights.
if weights == "imagenet":
if include_top:
model_name = (
"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_"
+ str(float(alpha))
+ "_"
+ str(rows)
+ ".h5"
)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir="models"
)
else:
model_name = (
"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_"
+ str(float(alpha))
+ "_"
+ str(rows)
+ "_no_top"
+ ".h5"
)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir="models"
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
"""Inverted ResNet block."""
channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
in_channels = backend.int_shape(inputs)[channel_axis]
pointwise_conv_filters = int(filters * alpha)
# Ensure the number of filters on the last 1x1 convolution is divisible by
# 8.
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = f"block_{block_id}_"
if block_id:
# Expand with a pointwise 1x1 convolution.
x = layers.Conv2D(
expansion * in_channels,
kernel_size=1,
padding="same",
use_bias=False,
activation=None,
name=prefix + "expand",
)(x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + "expand_BN",
)(x)
x = layers.ReLU(6.0, name=prefix + "expand_relu")(x)
else:
prefix = "expanded_conv_"
# Depthwise 3x3 convolution.
if stride == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, 3), name=prefix + "pad"
)(x)
x = layers.DepthwiseConv2D(
kernel_size=3,
strides=stride,
activation=None,
use_bias=False,
padding="same" if stride == 1 else "valid",
name=prefix + "depthwise",
)(x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + "depthwise_BN",
)(x)
x = layers.ReLU(6.0, name=prefix + "depthwise_relu")(x)
# Project with a pointwise 1x1 convolution.
x = layers.Conv2D(
pointwise_filters,
kernel_size=1,
padding="same",
use_bias=False,
activation=None,
name=prefix + "project",
)(x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + "project_BN",
)(x)
if in_channels == pointwise_filters and stride == 1:
return layers.Add(name=prefix + "add")([inputs, x])
return x
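# To make the control flow above concrete: when block_id is 0 the 1x1
# expansion conv is skipped and the layer prefix becomes "expanded_conv_",
# and the residual `Add` is applied only when stride == 1 and the input
# channel count equals the projected channel count, mirroring the
# MobileNetV2 design.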
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that rounding down does not reduce the value by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
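# A minimal sketch of the rounding behavior above (values chosen purely for
# illustration):
#   _make_divisible(33, 8) -> 32  (round to the nearest multiple of 8)
#   _make_divisible(20, 8) -> 24  (ties round up)
#   _make_divisible(10, 8) -> 16  (8 would undershoot 10 by more than 10%,
#                                  so one extra divisor is added)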
@keras_export("keras.applications.mobilenet_v2.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="tf"
)
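# Usage sketch (the input array below is made up for illustration): with
# mode="tf" the pixel values are scaled from the [0, 255] range to [-1, 1],
# e.g.
#   images = np.random.uniform(0, 255, size=(1, 224, 224, 3))
#   images = preprocess_input(images)  # values now lie in [-1, 1]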
@keras_export("keras.applications.mobilenet_v2.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| tf-keras/tf_keras/applications/mobilenet_v2.py/0 | {
"file_path": "tf-keras/tf_keras/applications/mobilenet_v2.py",
"repo_id": "tf-keras",
"token_count": 9947
} | 156 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Text classification with Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.benchmarks import benchmark_util
class TextWithTransformerBenchmark(tf.test.Benchmark):
"""Benchmarks for Text classification with Transformer
using `tf.test.Benchmark`.
"""
def __init__(self):
super().__init__()
self.max_feature = 20000
self.max_len = 200
(self.imdb_x, self.imdb_y), _ = keras.datasets.imdb.load_data(
num_words=self.max_feature
)
self.imdb_x = keras.preprocessing.sequence.pad_sequences(
self.imdb_x, maxlen=self.max_len
)
def _build_model(self):
"""Model from
https://keras.io/examples/nlp/text_classification_with_transformer/."""
embed_dim = 32
num_heads = 2
ff_dim = 32
inputs = keras.layers.Input(shape=(self.max_len,))
embedding_layer = TokenAndPositionEmbedding(
self.max_len, self.max_feature, embed_dim
)
x = embedding_layer(inputs)
transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
x = transformer_block(x)
x = keras.layers.GlobalAvgPool1D()(x)
x = keras.layers.Dropout(0.1)(x)
x = keras.layers.Dense(20, activation="relu")(x)
x = keras.layers.Dropout(0.1)(x)
outputs = keras.layers.Dense(2, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
# In each benchmark test, the required arguments for the
# method `measure_performance` include:
# x: Input data, it could be Numpy or loaded from tfds.
# y: Target data. If `x` is a dataset or generator instance,
# `y` should not be specified.
# loss: Loss function for model.
# optimizer: Optimizer for model.
# Check more details in `measure_performance()` method of
# benchmark_util.
def benchmark_text_classification_bs_128(self):
"""Measure performance with batch_size=128."""
batch_size = 128
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.imdb_x,
y=self.imdb_y,
batch_size=batch_size,
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"transformer", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_text_classification_bs_256(self):
"""Measure performance with batch_size=256."""
batch_size = 256
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.imdb_x,
y=self.imdb_y,
batch_size=batch_size,
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"transformer", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_text_classification_bs_512(self):
"""Measure performance with batch_size=512."""
batch_size = 512
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.imdb_x,
y=self.imdb_y,
batch_size=batch_size,
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"transformer", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_text_classification_bs_512_gpu_2(self):
"""Measure performance with batch_size=512, gpu=1 and
distribution_strategy='mirrored'
"""
batch_size = 512
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.imdb_x,
y=self.imdb_y,
batch_size=batch_size,
num_gpus=2,
distribution_strategy="mirrored",
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"transformer", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
class MultiHeadSelfAttention(keras.layers.Layer):
"""Implement multi head self attention as a TF-Keras layer."""
def __init__(self, embed_dim, num_heads=8):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
if embed_dim % num_heads != 0:
raise ValueError(
f"embedding dimension = {embed_dim} should be divisible"
f"by number of heads = {num_heads}"
)
self.projection_dim = embed_dim // num_heads
self.query_dense = keras.layers.Dense(embed_dim)
self.key_dense = keras.layers.Dense(embed_dim)
self.value_dense = keras.layers.Dense(embed_dim)
self.combine_heads = keras.layers.Dense(embed_dim)
def attention(self, query, key, value):
score = tf.matmul(query, key, transpose_b=True)
dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_score = score / tf.math.sqrt(dim_key)
weights = tf.nn.softmax(scaled_score, axis=-1)
output = tf.matmul(weights, value)
return output, weights
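# The `attention` method above is plain scaled dot-product attention,
#   Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V,
# where d_k is the size of the last axis of the keys.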
def separate_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, inputs):
# x.shape = [batch_size, seq_len, embedding_dim]
batch_size = tf.shape(inputs)[0]
query = self.query_dense(inputs) # (batch_size, seq_len, embed_dim)
key = self.key_dense(inputs) # (batch_size, seq_len, embed_dim)
value = self.value_dense(inputs) # (batch_size, seq_len, embed_dim)
query = self.separate_heads(
query, batch_size
) # (batch_size, num_heads, seq_len, projection_dim)
key = self.separate_heads(
key, batch_size
) # (batch_size, num_heads, seq_len, projection_dim)
value = self.separate_heads(
value, batch_size
) # (batch_size, num_heads, seq_len, projection_dim)
attention, _ = self.attention(query, key, value)
attention = tf.transpose(
attention, perm=[0, 2, 1, 3]
) # (batch_size, seq_len, num_heads, projection_dim)
concat_attention = tf.reshape(
attention, (batch_size, -1, self.embed_dim)
) # (batch_size, seq_len, embed_dim)
output = self.combine_heads(
concat_attention
) # (batch_size, seq_len, embed_dim)
return output
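# Note: a rough, illustrative alternative to the custom layer above is the
# built-in keras.layers.MultiHeadAttention, e.g.
#   mha = keras.layers.MultiHeadAttention(num_heads=2, key_dim=16)
#   output = mha(query=x, value=x, key=x)  # self-attention over `x`
# The hand-written version is kept here to stay close to the keras.io
# example this benchmark is based on.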
class TransformerBlock(keras.layers.Layer):
"""Implement a Transformer block as a layer."""
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super().__init__()
self.att = MultiHeadSelfAttention(embed_dim, num_heads)
self.ffn = keras.Sequential(
[
keras.layers.Dense(ff_dim, activation="relu"),
keras.layers.Dense(embed_dim),
]
)
self.layernorm1 = keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = keras.layers.Dropout(rate)
self.dropout2 = keras.layers.Dropout(rate)
def call(self, inputs, training):
attn_output = self.att(inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
class TokenAndPositionEmbedding(keras.layers.Layer):
"""Implement embedding layer."""
def __init__(self, maxlen, vocab_size, embed_dim):
super().__init__()
self.token_emb = keras.layers.Embedding(
input_dim=vocab_size, output_dim=embed_dim
)
self.pos_emb = keras.layers.Embedding(
input_dim=maxlen, output_dim=embed_dim
)
def call(self, x):
maxlen = tf.shape(x)[-1]
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
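# Shape walk-through of `call` above: an input batch of token ids with shape
# (batch, maxlen) is mapped by `token_emb` to (batch, maxlen, embed_dim),
# while `pos_emb(positions)` yields (maxlen, embed_dim); the final addition
# broadcasts the position embeddings across the batch dimension.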
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py/0 | {
"file_path": "tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py",
"repo_id": "tf-keras",
"token_count": 4457
} | 157 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for saved model benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import time
import tensorflow.compat.v2 as tf
import tf_keras as keras
def save_and_load_benchmark(app):
"""Util for saved model benchmarks."""
trials = 3
model = app(weights=None)
model_name = app.__name__
tmp_dir = tf.compat.v1.test.get_temp_dir()
tf.io.gfile.makedirs(tmp_dir)
save_dir = tempfile.mkdtemp(dir=tmp_dir)
total_save_time = 0
total_load_time = 0
# Run one untimed iteration of saving/loading.
model.save(save_dir, save_format="tf")
keras.models.load_model(save_dir)
for _ in range(trials):
start_time = time.time()
model.save(save_dir, save_format="tf")
total_save_time += time.time() - start_time
start_time = time.time()
keras.models.load_model(save_dir)
total_load_time += time.time() - start_time
save_result = {
"iters": trials,
"wall_time": total_save_time / trials,
"name": f"{model_name}.save",
}
load_result = {
"iters": trials,
"wall_time": total_load_time / trials,
"name": f"{model_name}.load",
}
tf.compat.v1.gfile.DeleteRecursively(save_dir)
return save_result, load_result
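# Usage sketch (the application below is an arbitrary illustration, not part
# of this module):
#   from tf_keras.applications import mobilenet_v2
#   save_result, load_result = save_and_load_benchmark(
#       mobilenet_v2.MobileNetV2
#   )
# Each returned dict carries "iters", the averaged "wall_time", and a "name"
# such as "MobileNetV2.save" or "MobileNetV2.load".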
| tf-keras/tf_keras/benchmarks/saved_model_benchmarks/saved_model_benchmark_util.py/0 | {
"file_path": "tf-keras/tf_keras/benchmarks/saved_model_benchmarks/saved_model_benchmark_util.py",
"repo_id": "tf-keras",
"token_count": 730
} | 158 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using tf.distribute.Strategy."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import backend
from tf_keras.distribute import distributed_training_utils
from tf_keras.distribute import distributed_training_utils_v1
from tf_keras.distribute import multi_worker_testing_utils
from tf_keras.distribute import optimizer_combinations
from tf_keras.distribute.strategy_combinations import all_strategies
from tf_keras.distribute.strategy_combinations import (
multi_worker_mirrored_strategies,
)
from tf_keras.distribute.strategy_combinations import (
strategies_minus_default_minus_tpu,
)
from tf_keras.distribute.strategy_combinations import strategies_minus_tpu
from tf_keras.distribute.strategy_combinations import tpu_strategies
from tf_keras.engine import base_layer_utils
from tf_keras.mixed_precision import policy
from tf_keras.optimizers import optimizer as optimizer_base
from tf_keras.optimizers.legacy import (
gradient_descent as gradient_descent_keras,
)
from tf_keras.testing_infra import test_utils
from tf_keras.utils import losses_utils
from tf_keras.utils import np_utils
# isort: off
from tensorflow.python.distribute.cluster_resolver import (
SimpleClusterResolver,
)
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(16, activation="relu", input_shape=_INPUT_SIZE)
)
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(_NUM_CLASS, activation="softmax"))
return model
def simple_subclassed_model(num_labels=_NUM_CLASS):
class _SimpleMLP(keras.Model):
def __init__(self, num_labels):
super().__init__()
self.dense = keras.layers.Dense(num_labels)
def call(self, inputs):
return self.dense(inputs)
return _SimpleMLP(num_labels)
def simple_multi_inputs_multi_outputs_model():
input_a = keras.layers.Input(shape=(16,), name="input_a")
input_b = keras.layers.Input(shape=(16,), name="input_b")
merged = keras.layers.concatenate([input_a, input_b], name="merge")
output_c = keras.layers.Dense(3, activation="softmax", name="dense_2")(
merged
)
output_d = keras.layers.Dense(2, activation="softmax", name="dense_3")(
merged
)
model = keras.models.Model(
inputs=[input_a, input_b], outputs=[output_c, output_d]
)
return model
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = test_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED,
)
(b_train, d_train), (b_test, d_test) = test_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED,
)
(m_train, _), (m_test, _) = test_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED,
)
c_train = np_utils.to_categorical(c_train)
c_test = np_utils.to_categorical(c_test)
d_train = np_utils.to_categorical(d_train)
d_test = np_utils.to_categorical(d_test)
train_data = {
"input_a": a_train,
"input_b": b_train,
"input_m": m_train,
"output_c": c_train,
"output_d": d_train,
}
test_data = {
"input_a": a_test,
"input_b": b_test,
"input_m": m_test,
"output_c": c_test,
"output_d": d_test,
}
return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
# TPUs currently require fully defined input shapes; drop_remainder ensures
# the input will have fully defined shapes.
if backend.is_tpu_strategy(distribution):
return dataset.batch(batch_size, drop_remainder=True)
else:
return dataset.batch(batch_size)
def get_model():
x = keras.layers.Input(shape=(3,), name="input")
y = keras.layers.Dense(4, name="dense")(x)
model = keras.Model(x, y)
return model
def get_sample_weights_model():
x = keras.layers.Input(shape=(1,), name="input")
y = keras.layers.Dense(
1, kernel_initializer="ones", bias_initializer="zeros", name="dense"
)(x)
model = keras.Model(x, y)
return model
def get_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def get_predict_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices(inputs)
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def convert_numpy_to_dataset_with_unknown_cardinality(inputs, targets=None):
if targets is not None:
input_slices = (inputs, targets)
dummy_op = lambda inp, target: True
else:
input_slices = inputs
dummy_op = lambda inp: True
original_dataset = tf.data.Dataset.from_tensor_slices(input_slices)
ds_with_unknown_cardinality = original_dataset.filter(dummy_op).batch(
10, drop_remainder=True
)
return ds_with_unknown_cardinality
def multi_input_output_model():
a = keras.layers.Input(shape=(3,), name="input_a")
b = keras.layers.Input(shape=(5,), name="input_b")
# TODO(anjalisridhar): Change the output dimension of the second Dense layer
# once the iterator output validation issue has been fixed.
dense_1 = keras.layers.Dense(7, name="dense_1")
dense_2 = keras.layers.Dense(7, name="dense_2")
c = dense_1(a)
d = dense_2(b)
e = keras.layers.Dropout(0.5, name="dropout")(c)
model = keras.models.Model([a, b], [d, e])
return model
def strategy_minus_tpu_combinations():
return tf.__internal__.test.combinations.combine(
distribution=strategies_minus_tpu, mode=["graph", "eager"]
)
def tpu_strategy_combinations():
return tf.__internal__.test.combinations.combine(
distribution=tpu_strategies, mode=["graph", "eager"]
)
def tpu_strategy_combinations_graph_only():
return tf.__internal__.test.combinations.combine(
distribution=tpu_strategies, mode=["graph"]
)
def multi_worker_strategy_combinations_eager_only():
return tf.__internal__.test.combinations.combine(
distribution=multi_worker_mirrored_strategies, mode=["eager"]
)
def all_strategy_combinations():
return (
strategy_minus_tpu_combinations()
+ tpu_strategy_combinations()
+ multi_worker_strategy_combinations_eager_only()
)
def all_strategy_minus_default_and_tpu_combinations():
return tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["graph", "eager"],
)
def all_strategy_combinations_minus_default():
return (
all_strategy_minus_default_and_tpu_combinations()
+ tpu_strategy_combinations()
+ multi_worker_strategy_combinations_eager_only()
)
def strategy_and_optimizer_combinations():
non_tpu_strategies = tf.__internal__.test.combinations.times(
strategy_minus_tpu_combinations(),
tf.__internal__.test.combinations.combine(
optimizer=[
optimizer_combinations.adagrad_optimizer_v1_fn,
optimizer_combinations.adam_optimizer_v1_fn,
optimizer_combinations.gradient_descent_optimizer_v1_fn,
optimizer_combinations.rmsprop_optimizer_v1_fn,
optimizer_combinations.adadelta_optimizer_keras_v2_fn,
optimizer_combinations.adagrad_optimizer_keras_v2_fn,
optimizer_combinations.adam_optimizer_keras_v2_fn,
optimizer_combinations.adamax_optimizer_keras_v2_fn,
optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
optimizer_combinations.nadam_optimizer_keras_v2_fn,
optimizer_combinations.rmsprop_optimizer_keras_v2_fn,
optimizer_combinations.ftrl_optimizer_keras_v2_fn,
]
),
)
tpu_strategies_graph = tf.__internal__.test.combinations.combine(
distribution=tpu_strategies,
mode=["graph"],
optimizer=[
optimizer_combinations.adagrad_optimizer_v1_fn,
optimizer_combinations.adam_optimizer_v1_fn,
optimizer_combinations.gradient_descent_optimizer_v1_fn,
optimizer_combinations.rmsprop_optimizer_v1_fn,
optimizer_combinations.adagrad_optimizer_keras_v2_fn,
optimizer_combinations.adam_optimizer_keras_v2_fn,
optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
optimizer_combinations.rmsprop_optimizer_keras_v2_fn,
],
)
tpu_strategies_eager = tf.__internal__.test.combinations.combine(
distribution=tpu_strategies,
mode=["eager"],
optimizer=[
optimizer_combinations.adagrad_optimizer_keras_v2_fn,
optimizer_combinations.adam_optimizer_keras_v2_fn,
optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
optimizer_combinations.rmsprop_optimizer_keras_v2_fn,
],
)
multi_worker_eager = tf.__internal__.test.combinations.combine(
distribution=multi_worker_mirrored_strategies,
mode=["eager"],
optimizer=[
optimizer_combinations.adadelta_optimizer_keras_v2_fn,
optimizer_combinations.adagrad_optimizer_keras_v2_fn,
optimizer_combinations.adam_optimizer_keras_v2_fn,
optimizer_combinations.adamax_optimizer_keras_v2_fn,
optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
optimizer_combinations.nadam_optimizer_keras_v2_fn,
optimizer_combinations.rmsprop_optimizer_keras_v2_fn,
optimizer_combinations.ftrl_optimizer_keras_v2_fn,
],
)
return (
non_tpu_strategies
+ tpu_strategies_eager
+ tpu_strategies_graph
+ multi_worker_eager
)
class BatchCountingCB(keras.callbacks.Callback):
def __init__(self):
super().__init__()
self.train_begin_batches = []
self.train_end_batches = []
self.test_begin_batches = []
self.test_end_batches = []
self.predict_begin_batches = []
self.predict_end_batches = []
def on_train_batch_begin(self, batch, logs=None):
self.train_begin_batches.append(batch)
def on_train_batch_end(self, batch, logs=None):
self.train_end_batches.append(batch)
def on_test_batch_begin(self, batch, logs=None):
self.test_begin_batches.append(batch)
def on_test_batch_end(self, batch, logs=None):
self.test_end_batches.append(batch)
def on_predict_batch_begin(self, batch, logs=None):
self.predict_begin_batches.append(batch)
def on_predict_batch_end(self, batch, logs=None):
self.predict_end_batches.append(batch)
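# Usage sketch (illustrative): pass an instance of the callback above via
# `callbacks=[cb]` to `model.fit(...)`; afterwards `cb.train_begin_batches`
# holds the zero-based batch indices that were seen, e.g.
# [0, 1, ..., steps_per_epoch - 1] for each epoch.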
class TestDistributionStrategyWithNumpyArrays(
tf.test.TestCase, parameterized.TestCase
):
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_calculating_input_params_no_steps_no_batch_size(
self, distribution
):
# Calculate the per_replica_batch_size scaling factor for strategies
# that use per_core_batch_size
replica_scale_factor = 1.0
if not distributed_training_utils.global_batch_size_supported(
distribution
):
replica_scale_factor = distribution.num_replicas_in_sync
with self.cached_session():
# Default global batch size 32 for input with 64 samples run in 2
# steps
steps, batch_size = distributed_training_utils_v1.get_input_params(
distribution, 64, steps=None, batch_size=None
)
self.assertEqual(batch_size, 32 // replica_scale_factor)
self.assertEqual(steps, 2)
# Computed global batch size 20 is lower than 32 if we pass less
# samples.
steps, batch_size = distributed_training_utils_v1.get_input_params(
distribution, 20, steps=None, batch_size=None
)
self.assertEqual(batch_size, 20 // replica_scale_factor)
self.assertEqual(steps, 1)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_calculating_input_params_with_steps_no_batch_size(
self, distribution
):
# Calculate the per_replica_batch_size scaling factor for strategies
# that use per_core_batch_size
replica_scale_factor = 1.0
if not distributed_training_utils.global_batch_size_supported(
distribution
):
replica_scale_factor = distribution.num_replicas_in_sync
with self.cached_session():
# Computed global batch size is correct when steps=1 is specified.
steps, batch_size = distributed_training_utils_v1.get_input_params(
distribution, 64, steps=1, batch_size=None
)
self.assertEqual(batch_size, 64 // replica_scale_factor)
self.assertEqual(steps, 1)
# Computed global batch size is correct when steps=2 is specified.
steps, batch_size = distributed_training_utils_v1.get_input_params(
distribution, 64, steps=2, batch_size=None
)
self.assertEqual(batch_size, 32 // replica_scale_factor)
self.assertEqual(steps, 2)
# Not all samples can be consumed in the specified number of steps.
with self.assertRaisesRegex(ValueError, "not divisible by steps"):
distributed_training_utils_v1.get_input_params(
distribution, 63, steps=2, batch_size=None
)
# This case differs across strategies because the supported batch size
# may be either global or per-replica.
if replica_scale_factor == 1:
# Computed global batch size is correct even if not shardable
(
steps,
batch_size,
) = distributed_training_utils_v1.get_input_params(
distribution, 63, steps=3, batch_size=None
)
self.assertEqual(batch_size, 21)
self.assertEqual(steps, 3)
else:
# Computed global batch size cannot be sharded across replicas
with self.assertRaisesRegex(
ValueError,
"could not be sharded evenly across the sync replicas",
):
distributed_training_utils_v1.get_input_params(
distribution, 63, steps=1, batch_size=None
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_calculating_input_params_no_steps_with_batch_size(
self, distribution
):
# Calculate the per_replica_batch_size scaling factor for strategies
# that use per_core_batch_size
replica_scale_factor = 1.0
if not distributed_training_utils.global_batch_size_supported(
distribution
):
replica_scale_factor = distribution.num_replicas_in_sync
with self.cached_session():
# Computed steps is correct for specified batch size
steps, batch_size = distributed_training_utils_v1.get_input_params(
distribution, 64, steps=None, batch_size=16
)
self.assertEqual(batch_size, 16)
self.assertEqual(steps, 4 // replica_scale_factor)
# Computed steps is correct for specified batch size
steps, batch_size = distributed_training_utils_v1.get_input_params(
distribution, 64, steps=None, batch_size=32
)
self.assertEqual(batch_size, 32)
self.assertEqual(steps, 2 // replica_scale_factor)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_calculating_input_params_with_steps_with_batch_size(
self, distribution
):
with self.cached_session():
# No change to steps and batch size if both specified and feasible
steps, batch_size = distributed_training_utils_v1.get_input_params(
distribution, 64, steps=5, batch_size=3
)
self.assertEqual(batch_size, 3)
self.assertEqual(steps, 5)
# Number of samples is less than global batch size * steps
with self.assertRaisesRegex(
ValueError, "less than samples required"
):
distributed_training_utils_v1.get_input_params(
distribution, 64, steps=10, batch_size=13
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_calling_model_with_numpy_arrays(self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = get_model()
loss = "mse"
metrics = ["mae"]
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
# Call fit with validation data
model.fit(
inputs,
targets,
epochs=1,
batch_size=2,
verbose=0,
validation_data=(inputs, targets),
)
# TODO(anjalisridhar): We need tests for when the batch size and
# steps are smaller and result in a 0 batch_size and steps
# value.
model.evaluate(inputs, targets)
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
model.predict(inputs, batch_size=8)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_calling_model_with_mixed_precision(self, distribution):
if isinstance(
distribution,
(
tf.compat.v1.distribute.experimental.ParameterServerStrategy,
tf.distribute.experimental.ParameterServerStrategy,
tf.distribute.experimental.CentralStorageStrategy,
tf.compat.v1.distribute.experimental.CentralStorageStrategy,
),
):
self.skipTest("b/152097775")
if backend.is_tpu_strategy(distribution):
policy_name = "mixed_bfloat16"
else:
policy_name = "mixed_float16"
with self.cached_session(), distribution.scope(), policy.policy_scope(
policy_name
):
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
x = keras.layers.Input(shape=(3,), name="input")
y = keras.layers.Dense(4, name="dense")(x)
y = keras.layers.Activation("softmax", dtype="float32")(y)
model = keras.Model(x, y)
loss = "mse"
metrics = ["mae"]
model.compile(optimizer, loss, metrics=metrics)
# We need to pass float32 since TPUs do not support float64, even
# though these arrays will immediately be cast to bfloat16 on
# TPUs. We also cannot pass bfloat16, as Numpy does not support it.
inputs = np.zeros((64, 3), dtype="float32")
targets = np.zeros((64, 4), dtype="float32")
model.fit(
inputs,
targets,
epochs=1,
batch_size=2,
verbose=0,
validation_data=(inputs, targets),
)
model.evaluate(inputs, targets)
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
model.predict(inputs, batch_size=8)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_operator_overload_mixed_precision(self, distribution):
# Regression test verifying that a fixed bug does not reoccur. Adding an
# AutoCastVariable to a tensor on a TPU, where the variable was the LHS
# of the '+' operator, used to cause the gradient w.r.t. the variable to
# be None.
if isinstance(
distribution,
(
tf.compat.v1.distribute.experimental.ParameterServerStrategy,
tf.distribute.experimental.ParameterServerStrategy,
tf.distribute.experimental.CentralStorageStrategy,
tf.compat.v1.distribute.experimental.CentralStorageStrategy,
),
):
self.skipTest("b/152097775")
if backend.is_tpu_strategy(distribution):
policy_name = "mixed_bfloat16"
else:
policy_name = "mixed_float16"
class MyLayer(keras.layers.Layer):
def build(self, _):
self.v1 = self.add_weight("v", ())
self.v2 = self.add_weight("v", ())
def call(self, inp):
inp += self.v1
return self.v2 + inp
with self.cached_session(), distribution.scope():
layer = MyLayer(dtype=policy_name)
def run_fn():
x = np.array([1.0])
with tf.GradientTape() as tape:
y = layer(x)
grad_v1, grad_v2 = tape.gradient(y, [layer.v1, layer.v2])
return grad_v1, grad_v2
if tf.executing_eagerly():
run_fn = tf.function(run_fn)
grad_v1, grad_v2 = distribution.run(run_fn)
self.assertIsNotNone(grad_v1)
self.assertIsNotNone(grad_v2)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy
],
mode=["graph", "eager"],
)
)
def test_optimizer_in_cross_replica_context_raises_error(
self, distribution
):
with self.cached_session(), distribution.scope():
model = keras.models.Sequential([keras.layers.Dense(1)])
x = np.array([[1.0]])
with tf.GradientTape() as tape:
y = model(x)
gradients = tape.gradient(y, model.trainable_variables)
optimizer = gradient_descent_keras.SGD()
with self.assertRaisesRegex(
RuntimeError, "cannot be called in cross-replica context"
):
optimizer.apply_gradients(
zip(gradients, model.trainable_variables)
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_calling_model_with_nested_numpy_arrays(self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = multi_input_output_model()
loss = "mse"
model.compile(optimizer, loss)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
output_d_np = np.asarray(
np.random.random((64, 7)), dtype=np.float32
)
output_e_np = np.asarray(
np.random.random((64, 7)), dtype=np.float32
)
targets = [output_d_np, output_e_np]
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
# TODO(anjalisridhar): We need tests for when the batch size and
# steps are smaller and result in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
model.predict(inputs, batch_size=8)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=strategies_minus_tpu, mode=["graph", "eager"]
)
+ tf.__internal__.test.combinations.combine(
distribution=multi_worker_mirrored_strategies, mode=["eager"]
)
)
def test_numpy_with_sample_weights(self, distribution):
with self.cached_session(), distribution.scope():
model = get_sample_weights_model()
optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001)
loss = "mse"
model.compile(optimizer, loss)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
result = model.evaluate(
inputs,
targets,
batch_size=2,
sample_weight=sample_weights,
verbose=1,
)
# The per sample loss is multiplied by the corresponding sample
# weight. The average of these weighted losses is the return value
# of the `evaluate` call. For example, in the test above the average
# weighted loss is calculated in the following manner:
# batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 =
# 2.75
# batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
# final result = (batch_1 + batch_2) / 2 = 10.625.
# The first division is by the batch size (the number of samples in each
# batch) and the second is by the number of steps/batches that the loss
# is aggregated over.
self.assertAllClose(result, 10.625)
# We now test without passing sample_weights:
# batch_1 = (((2-0)^2) + ((4-1)^2)) / 2 = 13 / 2 = 6.5
# batch_2 = (((6-2)^2) + ((8-3)^2)) / 2 = 41 / 2 = 20.5
# final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
result = model.evaluate(inputs, targets, batch_size=2, verbose=1)
self.assertAllClose(result, 13.5)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_flatten_predict_outputs(self, distribution):
with self.cached_session():
with distribution.scope():
model = multi_input_output_model()
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
loss = "mse"
model.compile(optimizer, loss)
# We take 6 input samples with each input having a dimension of 3 or
# 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
outs = model.predict(inputs)
# `predict` returns a list that is equal in length to the number of model
# outputs. In this test our model has two outputs and each element
# of `outs` corresponds to all the samples of one of the model
# outputs.
self.assertLen(outs, 2)
# Each of the output samples has a dimension of 7. We should
# process all the available input samples (6).
self.assertAllEqual([6, 7], outs[0].shape)
self.assertAllEqual([6, 7], outs[1].shape)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
tpu_strategy_combinations_graph_only(),
tf.__internal__.test.combinations.combine(batch_size=[4, 6]),
)
)
def test_evaluate_with_partial_batch(self, distribution, batch_size):
with self.cached_session():
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
loss = "mse"
metrics = ["mae", keras.metrics.CategoricalAccuracy()]
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)
cpu_model = get_model()
cpu_model.compile(optimizer, loss, metrics=metrics)
x = np.random.random((10, 3)).astype("float32")
y = np.random.random((10, 4)).astype("float32")
# As the sample size is 10, we batch by 4 so that the last batch is a
# partial batch. Also, `evaluate()` with numpy arrays as inputs and
# without a distribution strategy uses the entire sample set as a
# single batch, so we omit the `batch_size` and `steps` parameters.
cpu_model.set_weights(model_with_ds_strategy.get_weights())
evaluate_ground_truth = cpu_model.evaluate(x, y)
# We don't compare the loss, as loss is currently not computed as a
# metric in Keras, and the loss value is inaccurate for the last
# partial batch because its samples carry proportionally more weight.
steps = np.ceil(10.0 / batch_size)
self.assertAllClose(
model_with_ds_strategy.evaluate(
x, y, batch_size=batch_size, steps=steps
)[1:],
evaluate_ground_truth[1:],
atol=1e-5,
rtol=1e-5,
)
# Test that `steps` is inferred correctly when final partial batch
# exists.
self.assertAllClose(
model_with_ds_strategy.evaluate(x, y, batch_size=batch_size)[
1:
],
evaluate_ground_truth[1:],
atol=1e-5,
rtol=1e-5,
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
tpu_strategy_combinations_graph_only()
)
)
def test_predict_with_partial_batch(self, distribution):
with self.cached_session():
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
loss = "mse"
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(optimizer, loss)
cpu_model = get_model()
cpu_model.compile(optimizer, loss)
inputs = np.random.random((10, 3)).astype(np.float32)
# As the sample size is 10, we batch by 4 so that the last batch is
# a partial batch. Also, `predict()` with numpy arrays as inputs and
# without a distribution strategy uses the entire sample set as a
# single batch, so we omit the `batch_size` and `steps` parameters.
cpu_model.set_weights(model_with_ds_strategy.get_weights())
predict_ground_truth = cpu_model.predict(inputs)
self.assertAllClose(
model_with_ds_strategy.predict(inputs, batch_size=4, steps=3),
predict_ground_truth,
atol=1e-5,
rtol=1e-5,
)
# Test that `steps` is inferred correctly when final partial batch
# exists.
self.assertAllClose(
model_with_ds_strategy.predict(inputs, batch_size=4),
predict_ground_truth,
atol=1e-5,
rtol=1e-5,
)
@tf.__internal__.distribute.combinations.generate(
tpu_strategy_combinations_graph_only()
)
def test_no_target_model(self, distribution):
with self.cached_session():
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
class MyLayer(keras.layers.Layer):
def call(self, inputs, training=None):
self.add_loss(tf.reduce_sum(inputs), inputs=True)
return inputs
with distribution.scope():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
16, activation="relu", input_shape=_INPUT_SIZE
)
)
model.add(MyLayer())
model.add(keras.layers.Dense(_NUM_CLASS, activation="softmax"))
model.compile(optimizer)
inputs = np.zeros((20, 10), np.float32)
model.fit(inputs, epochs=1, steps_per_epoch=2)
model.predict(inputs, steps=1)
model.evaluate(inputs, steps=1)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
tpu_strategy_combinations_graph_only()
)
)
def test_predict_multi_output_model_with_partial_batch(self, distribution):
with self.cached_session():
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
loss = "mse"
with distribution.scope():
model_with_ds_strategy = (
simple_multi_inputs_multi_outputs_model()
)
model_with_ds_strategy.compile(optimizer, loss)
cpu_model = simple_multi_inputs_multi_outputs_model()
cpu_model.compile(optimizer, loss)
input_data, _ = get_multi_inputs_multi_outputs_data()
input_dict = {
"input_a": input_data["input_a"],
"input_b": input_data["input_b"],
}
# As the sample size is 200, we batch by 18 so that the last batch is
# a partial batch. Also, `predict()` with numpy arrays as inputs and
# without a distribution strategy uses the entire sample set as a
# single batch, so we omit the `batch_size` and `steps` parameters.
cpu_model.set_weights(model_with_ds_strategy.get_weights())
self.assertAllClose(
model_with_ds_strategy.predict(
input_dict, batch_size=18, steps=12
),
cpu_model.predict(input_dict),
atol=1e-4,
rtol=1e-4,
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_gradients_are_none(self, distribution):
if not tf.executing_eagerly():
self.skipTest("None gradients are not supported in graph mode")
class DenseWithExtraWeight(keras.layers.Dense):
def build(self, input_shape):
# Gradients w.r.t. extra_weights are None
self.extra_weight_1 = self.add_weight(
"extra_weight_1", shape=(), initializer="ones"
)
super().build(input_shape)
self.extra_weight_2 = self.add_weight(
"extra_weight_2", shape=(), initializer="ones"
)
with distribution.scope():
model = keras.Sequential(
[DenseWithExtraWeight(4, input_shape=(4,))]
)
model.compile("adam", "mse")
inputs = np.random.normal(size=(64, 4))
targets = np.random.normal(size=(64, 4))
old_kernel = model.get_weights()[1]
model.fit(inputs, targets)
new_kernel = model.get_weights()[1]
self.assertNotAllEqual(old_kernel, new_kernel)
class TestDistributionStrategyWithDatasets(
tf.test.TestCase, parameterized.TestCase
):
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_calling_model_on_same_dataset(self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = get_model()
loss = "mse"
metrics = ["mae", keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
dataset = get_dataset(distribution)
# Call fit with validation data
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=dataset,
validation_steps=2,
)
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=dataset,
validation_steps=2,
)
model.predict(get_predict_dataset(distribution), steps=2)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
user_controlled_model = get_model()
user_controlled_model.compile(
optimizer_fn(0.001),
loss="mse",
metrics=["mae", keras.metrics.CategoricalAccuracy()],
)
interleaved_model = get_model()
interleaved_model.set_weights(
user_controlled_model.get_weights()
)
interleaved_model.compile(
optimizer_fn(0.001),
loss="mse",
metrics=["mae", keras.metrics.CategoricalAccuracy()],
)
dataset = get_dataset(distribution)
# Call fit with validation interleaved
interleaved_output = interleaved_model.fit(
dataset,
epochs=2,
steps_per_epoch=2,
verbose=1,
validation_data=dataset,
validation_steps=2,
shuffle=False,
)
# Manually control the validation running after each epoch.
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=1,
shuffle=False,
)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2)
)
self.assertEqual(
interleaved_output.history["val_loss"],
[x[0] for x in user_controlled_output],
)
val_mean_absolute_error = interleaved_output.history.get(
"val_mean_absolute_error"
)
if not val_mean_absolute_error:
# The name of the metric changed in TF2.0
val_mean_absolute_error = interleaved_output.history["val_mae"]
self.assertEqual(
val_mean_absolute_error, [x[1] for x in user_controlled_output]
)
self.assertEqual(
interleaved_output.history["val_categorical_accuracy"],
[x[2] for x in user_controlled_output],
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = multi_input_output_model()
loss = "mse"
metrics = ["mae", keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
input_a_np = np.random.random((10, 3)).astype("float32")
input_b_np = np.random.random((10, 5)).astype("float32")
output_d_np = np.random.random((10, 7)).astype("float32")
output_e_np = np.random.random((10, 7)).astype("float32")
# Test with tuples
dataset_tuple = tf.data.Dataset.from_tensor_slices(
((input_a_np, input_b_np), (output_d_np, output_e_np))
)
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = tf.data.Dataset.from_tensor_slices(
(
{"input_a": input_a_np, "input_b": input_b_np},
(output_d_np, output_e_np),
)
)
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_fit_with_dictionary_in_the_dataset_b135161171(self, distribution):
if backend.is_tpu_strategy(distribution):
self.skipTest("b/142805125")
def custom_loss(predict, label, weight):
bce = keras.losses.binary_crossentropy(label, predict)
return tf.reduce_mean(bce * weight)
with self.cached_session():
with distribution.scope():
input_img = keras.layers.Input([64, 64, 3], name="img")
input_lbl = keras.layers.Input([64, 64, 1], name="lbl")
input_weight = keras.layers.Input([64, 64], name="weight")
predict = keras.layers.Conv2D(2, [1, 1], padding="same")(
input_img
)
loss_lambda = keras.layers.Lambda(
lambda x: custom_loss(*x), name="my_loss"
)
my_loss = loss_lambda([predict, input_lbl, input_weight])
model = keras.models.Model(
inputs=[input_img, input_lbl, input_weight],
outputs=[predict, my_loss],
)
model.add_loss(model.get_layer("my_loss").output)
model.compile(optimizer="adam")
if tf.executing_eagerly():
def map_fn(img, lbl, weight):
inputs = {"img": img, "lbl": lbl, "weight": weight}
return (inputs,)
else:
def map_fn(img, lbl, weight):
inputs = {"img": img, "lbl": lbl, "weight": weight}
return inputs, {}
fake_imgs = np.ones([50, 64, 64, 3], dtype=np.float32)
fake_lbls = np.ones([50, 64, 64, 1], dtype=np.float32)
fake_weights = np.ones([50, 64, 64], dtype=np.float32)
data = (
tf.data.Dataset.from_tensor_slices(
(fake_imgs, fake_lbls, fake_weights)
)
.map(map_fn)
.batch(10)
)
model.fit(data)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_fit_eval_and_predict_methods_on_dataset_without_steps(
self, distribution
):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = get_model()
loss = "mse"
metrics = ["mae", keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((1000, 3), dtype=np.float32)
targets = np.zeros((1000, 4), dtype=np.float32)
# steps/steps_per_epoch are calculated when using numpy arrays as
# input data.
fit_with_numpy = model.fit(
inputs, targets, epochs=1, batch_size=10
).history
eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
predict_with_numpy = model.predict(inputs, batch_size=10)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.batch(10, drop_remainder=True)
fit_with_ds = model.fit(dataset, epochs=1).history
eval_with_ds = model.evaluate(dataset)
predict_dataset = tf.data.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.batch(10, drop_remainder=True)
predict_with_ds = model.predict(predict_dataset)
self.assertAllClose(
fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4
)
self.assertAllClose(
eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4
)
self.assertAllClose(
predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_predict_on_dataset_with_unknown_cardinality_without_steps(
self, distribution, mode
):
if mode == "graph" and backend.is_tpu_strategy(distribution):
self.skipTest("partial batch not supported with TPU in graph mode.")
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = get_model()
loss = "mse"
metrics = ["mae", keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((20, 3), dtype=np.float32)
# steps/steps_per_epoch are calculated when using numpy arrays as
# input data.
predict_with_numpy = model.predict(inputs, batch_size=10)
predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
inputs
)
self.assertEqual(
keras.backend.get_value(
tf.data.experimental.cardinality(predict_dataset)
),
tf.data.experimental.UNKNOWN_CARDINALITY,
)
predict_with_ds = model.predict(predict_dataset)
self.assertAllClose(
predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_on_dataset_with_unknown_cardinality_without_steps(
self, distribution, mode
):
# TODO(b/155867206): Investigate why this test occasionally segfaults on
# TPU in eager mode.
if mode == "eager" and backend.is_tpu_strategy(distribution):
self.skipTest("caused segfault with TPU in eager mode.")
if mode == "graph" and backend.is_tpu_strategy(distribution):
self.skipTest("partial batch not supported with TPU in graph mode.")
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = get_model()
loss = "mse"
metrics = ["mae", keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.zeros((100, 4), dtype=np.float32)
# steps/steps_per_epoch are calculated when using numpy arrays as
# input data.
fit_with_numpy = model.fit(
inputs, targets, epochs=1, batch_size=10
).history
fit_with_numpy_multiple_epochs = model.fit(
inputs, targets, epochs=2, batch_size=10
).history
eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
predict_with_numpy = model.predict(inputs, batch_size=10)
dataset = convert_numpy_to_dataset_with_unknown_cardinality(
inputs, targets
)
predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
inputs
)
self.assertEqual(
keras.backend.get_value(
tf.data.experimental.cardinality(dataset)
),
tf.data.experimental.UNKNOWN_CARDINALITY,
)
self.assertEqual(
keras.backend.get_value(
tf.data.experimental.cardinality(predict_dataset)
),
tf.data.experimental.UNKNOWN_CARDINALITY,
)
eval_with_ds = model.evaluate(dataset)
predict_with_ds = model.predict(predict_dataset)
self.assertAllClose(
eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4
)
self.assertAllClose(
predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4
)
fit_with_ds = model.fit(dataset, epochs=1).history
fit_with_ds_multiple_epochs = model.fit(dataset, epochs=2).history
self.assertAllClose(
fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4
)
self.assertAllClose(
fit_with_numpy_multiple_epochs,
fit_with_ds_multiple_epochs,
atol=1e-4,
rtol=1e-4,
)
@tf.__internal__.distribute.combinations.generate(
tpu_strategy_combinations_graph_only()
)
def test_on_dataset_with_unknown_cardinality(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
loss = "mse"
metrics = ["mae", keras.metrics.CategoricalAccuracy()]
model.compile(
tf.compat.v1.train.GradientDescentOptimizer(0.001),
loss,
metrics=metrics,
)
inputs = np.zeros((1000, 3), dtype=np.float32)
targets = np.zeros((1000, 4), dtype=np.float32)
# steps/steps_per_epoch are calculated when using numpy arrays as
# input data.
eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
predict_with_numpy = model.predict(inputs, batch_size=10)
dataset = convert_numpy_to_dataset_with_unknown_cardinality(
inputs, targets
)
predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
inputs
)
self.assertEqual(
keras.backend.get_value(
tf.data.experimental.cardinality(dataset)
),
tf.data.experimental.UNKNOWN_CARDINALITY,
)
self.assertEqual(
keras.backend.get_value(
tf.data.experimental.cardinality(predict_dataset)
),
tf.data.experimental.UNKNOWN_CARDINALITY,
)
eval_with_ds = model.evaluate(dataset, steps=100)
predict_with_ds = model.predict(predict_dataset, steps=100)
self.assertAllClose(
eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4
)
self.assertAllClose(
predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4
)
with self.assertRaisesRegex(
ValueError, "Number of steps could not be inferred"
):
model.fit(dataset, epochs=1)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = get_model()
loss = "mse"
metrics = ["mae", keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@tf.__internal__.distribute.combinations.generate(
strategy_and_optimizer_combinations()
)
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
with self.cached_session():
with distribution.scope():
model = get_model()
loss = "mse"
model.compile(optimizer(), loss)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.one_device_strategy,
],
mode=["graph", "eager"],
)
)
def test_dataset_wrong_input_shape(self, distribution, mode):
if mode == "graph":
self.skipTest(
"TODO(b/120943676, b/120957836): Re-enable for graph once the "
"validation code is restored."
)
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = get_model()
loss = "mse"
model.compile(optimizer, loss)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegex(ValueError, "is incompatible with"):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu # noqa: E501
],
mode=["graph", "eager"],
)
)
def test_dataset_external_batch_input_validation(self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = get_model()
loss = "mse"
model.compile(optimizer, loss)
# Batching is done outside tf.data's `batch`
inputs = np.zeros((100, 10, 3), dtype=np.float32)
targets = np.zeros((100, 10, 4), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["graph", "eager"],
)
)
def test_learning_phase_value(self, distribution):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can
# compare meaningful values. Currently we don't pass the learning phase
# if the Lambda layer uses the learning phase.
with self.cached_session():
with distribution.scope():
x = keras.layers.Input(shape=(1,), name="input")
y = keras.layers.Dense(1, kernel_initializer="ones")(x)
z = keras.layers.Dropout(0.9999)(y)
model = keras.Model(x, z)
initial_weights = model.get_weights()
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.005)
loss = "mse"
metrics = ["acc"]
model.compile(optimizer, loss, metrics=metrics)
batch_size = 8
if isinstance(
distribution,
(
tf.distribute.MirroredStrategy,
tf.compat.v1.distribute.MirroredStrategy,
),
):
# MirroredStrategy uses global batch size.
batch_size = 8 * distribution.num_replicas_in_sync
inputs = np.ones((10, 1), dtype=np.float32)
targets = np.ones((10, 1), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat().batch(batch_size)
hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
self.assertAlmostEqual(hist.history["acc"][0], 0, 0)
with distribution.scope():
model.set_weights(initial_weights)
# TODO(psv/anjalisridhar): Enable these lines after we fix
            # b/117431185.
            # evaluate_output = model.evaluate(dataset, steps=20)
# self.assertAlmostEqual(evaluate_output[1], 1, 0)
inputs = np.ones((10, 1), dtype=np.float32)
predict_dataset = tf.data.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.repeat().batch(batch_size)
output = model.predict(predict_dataset, steps=10)
# `predict` runs for 10 steps
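        # With 2 replicas the global batch size is 16, so 10 predict steps
        # yield 16 * 10 = 160 rows (see `ref_output` below).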
ref_output = np.ones((160, 1), dtype=np.float32)
self.assertArrayNear(output, ref_output, 1e-1)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def testOptimizerWithCallbacks(self, distribution):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = gradient_descent_keras.SGD(0.01)
loss = "mse"
model.compile(optimizer, loss)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)],
)
self.assertAllClose(
0.001, keras.backend.get_value(model.optimizer.lr)
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
tpu_strategy_combinations_graph_only(),
tf.__internal__.test.combinations.combine(batch_size=[4, 6]),
)
)
def test_evaluate_with_dataset_with_partial_batch(
self, distribution, batch_size
):
with self.cached_session():
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
loss = "mse"
metrics = ["mae", keras.metrics.CategoricalAccuracy()]
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)
cpu_model = get_model()
cpu_model.compile(optimizer, loss, metrics=metrics)
x = np.random.random((10, 3)).astype("float32")
y = np.random.random((10, 4)).astype("float32")
dataset = tf.data.Dataset.from_tensor_slices((x, y))
# As sample size is 10, we make the last batch a partial batch.
cpu_model.set_weights(model_with_ds_strategy.get_weights())
dataset_with_partial_batch = dataset.batch(batch_size)
            # We don't compare the loss because Keras does not currently
            # compute loss as a metric, and the loss value is inaccurate for
            # the last partial batch since its samples are weighted more
            # heavily when batch losses are averaged.
steps = np.ceil(10.0 / batch_size)
self.assertAllClose(
model_with_ds_strategy.evaluate(
dataset_with_partial_batch, steps=steps
)[1:],
cpu_model.evaluate(dataset_with_partial_batch, steps=steps)[1:],
atol=1e-5,
rtol=1e-5,
)
self.assertAllClose(
model_with_ds_strategy.evaluate(dataset_with_partial_batch)[1:],
cpu_model.evaluate(dataset_with_partial_batch)[1:],
atol=1e-5,
rtol=1e-5,
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
tpu_strategy_combinations_graph_only()
)
)
def test_predict_with_dataset_with_partial_batch(self, distribution):
with self.cached_session():
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
loss = "mse"
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(optimizer, loss)
cpu_model = get_model()
cpu_model.compile(optimizer, loss)
inputs = np.random.random((10, 3)).astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs))
# As sample size is 10, we batch by 4 so that the last batch is
# a partial batch.
dataset_with_partial_batch = dataset.batch(4)
cpu_model.set_weights(model_with_ds_strategy.get_weights())
self.assertAllClose(
model_with_ds_strategy.predict(
dataset_with_partial_batch, steps=3
),
cpu_model.predict(dataset_with_partial_batch, steps=3),
atol=1e-5,
rtol=1e-5,
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
tpu_strategy_combinations_graph_only()
)
)
def test_predict_multi_output_model_with_dataset_with_partial_batch(
self, distribution
):
with self.cached_session():
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001)
loss = "mse"
with distribution.scope():
model_with_ds_strategy = (
simple_multi_inputs_multi_outputs_model()
)
model_with_ds_strategy.compile(optimizer, loss)
cpu_model = simple_multi_inputs_multi_outputs_model()
cpu_model.compile(optimizer, loss)
input_data, _ = get_multi_inputs_multi_outputs_data()
input_dict = {
"input_a": input_data["input_a"],
"input_b": input_data["input_b"],
}
dataset = tf.data.Dataset.from_tensor_slices(input_dict)
            # As the sample size is 200, batching by 18 gives 12 steps and
            # makes the last batch a partial batch.
dataset_with_partial_batch = dataset.batch(18)
cpu_model.set_weights(model_with_ds_strategy.get_weights())
self.assertAllClose(
model_with_ds_strategy.predict(
dataset_with_partial_batch, steps=12
),
cpu_model.predict(dataset_with_partial_batch, steps=12),
atol=1e-4,
rtol=1e-4,
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations_minus_default()
)
def test_match_model_input_matches_with_dataset_tensors(self, distribution):
def _create_model_input_output_tensors():
input_a = keras.layers.Input(
shape=(16,), name="z_input_sorted_last"
)
input_b = keras.layers.Input(
shape=(32,), name="a_input_sorted_first"
)
intermediate_a = keras.layers.Dense(10)(input_a)
intermediate_b = keras.layers.Dense(10)(input_b)
merged = keras.layers.Add()([intermediate_a, intermediate_b])
output = keras.layers.Dense(2)(merged)
return input_a, input_b, output
input_dict = {
"z_input_sorted_last": np.random.rand(32, 16).astype(np.float32),
"a_input_sorted_first": np.random.rand(32, 32).astype(np.float32),
}
target = np.ones((32, 2), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices((input_dict, target))
dataset = dataset.batch(4, drop_remainder=True)
with self.cached_session():
with distribution.scope():
input_a, input_b, output = _create_model_input_output_tensors()
                # `input_a`, whose input name comes last in alphanumeric
                # order, is the first input of the model's input layers. If
                # the tensors from `input_dict` were blindly flattened and
                # passed to the model inputs, the `input_a` layer would be
                # matched with the `a_input_sorted_first` tensor, causing a
                # shape mismatch.
model_with_array_input = keras.models.Model(
inputs=[input_a, input_b], outputs=output
)
model_with_array_input.compile("sgd", "mse")
model_weights = model_with_array_input.get_weights()
model_with_array_input_fit = model_with_array_input.fit(
dataset, steps_per_epoch=1, epochs=1
).history
input_a, input_b, output = _create_model_input_output_tensors()
model_with_dict_input = keras.models.Model(
inputs={
"z_input_sorted_last": input_a,
"a_input_sorted_first": input_b,
},
outputs=output,
)
model_with_dict_input.compile("sgd", "mse")
model_with_dict_input.set_weights(model_weights)
model_with_dict_input_fit = model_with_dict_input.fit(
dataset, steps_per_epoch=1, epochs=1
).history
self.assertAllClose(
model_with_dict_input_fit,
model_with_array_input_fit,
atol=1e-4,
rtol=1e-4,
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=strategies_minus_tpu, mode=["graph", "eager"]
)
+ tf.__internal__.test.combinations.combine(
distribution=multi_worker_mirrored_strategies, mode=["eager"]
)
)
def test_dataset_with_sample_weights(self, distribution):
with self.cached_session(), distribution.scope():
model = get_sample_weights_model()
optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.001)
loss = "mse"
model.compile(optimizer, loss)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
ds = tf.data.Dataset.from_tensor_slices(
(inputs, targets, sample_weights)
).batch(2)
result = model.evaluate(ds, verbose=1)
# The per sample loss is multiplied by the corresponding sample
# weight. The average of these weighted losses is the return value
# of the `evaluate` call. For example, in the test above the average
# weighted loss is calculated in the following manner:
# batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 =
# 2.75
# batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
# final result = (batch_1 + batch_2) / 2 = 10.625.
            # Within each batch we divide by the number of samples in that
            # batch, and then we divide by the number of steps/batches that
            # the loss is aggregated over.
self.assertAllClose(result, 10.625)
# We now test without passing sample_weights:
            # batch_1 = (((2-0)^2) + ((4-1)^2)) / 2 = 13 / 2 = 6.5
            # batch_2 = (((6-2)^2) + ((8-3)^2)) / 2 = 41 / 2 = 20.5
# final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
ds = tf.data.Dataset.from_tensor_slices((inputs, targets)).batch(2)
result = model.evaluate(ds, verbose=1)
self.assertAllClose(result, 13.5)
class TestDistributionStrategyWithDatasetsFile(
tf.test.TestCase, parameterized.TestCase
):
def setUp(self):
super().setUp()
self.input_file_name = os.path.join(
self.get_temp_dir(), "input.tfrecord"
)
inputs = np.zeros((20, 3), dtype=np.float32)
input_dataset = tf.data.Dataset.from_tensor_slices(inputs)
input_dataset = input_dataset.map(tf.io.serialize_tensor)
writer = tf.data.experimental.TFRecordWriter(self.input_file_name)
writer.write(input_dataset)
# TODO(wxinyi): add a multi-worker test for TPU
@tf.__internal__.distribute.combinations.generate(
multi_worker_strategy_combinations_eager_only()
)
def test_predict_on_dataset_shard_options_file_multi_worker_mirrored(
self, distribution, mode
):
        # This test verifies that we successfully switch the
        # auto_shard_policy of an input dataset inside model.predict with
        # MultiWorkerMirroredStrategy to AutoShardPolicy.DATA. Since there
        # is only one input file for multiple workers, AutoShardPolicy.AUTO
        # or AutoShardPolicy.FILE would lead to an error. However, since we
        # switch to AutoShardPolicy.DATA in model.predict, no error is
        # raised.
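        # For reference, forcing the sharding policy by hand would look
        # roughly like the sketch below (illustrative only; model.predict
        # performs an equivalent switch internally):
        #   options = tf.data.Options()
        #   options.experimental_distribute.auto_shard_policy = (
        #       tf.data.experimental.AutoShardPolicy.DATA
        #   )
        #   dataset = dataset.with_options(options)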
del mode
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = get_model()
loss = "mse"
model.compile(optimizer, loss)
dataset = tf.data.TFRecordDataset(self.input_file_name)
dataset = dataset.map(lambda x: tf.io.parse_tensor(x, tf.float32))
dummy_op = lambda inp: True
dataset = dataset.filter(dummy_op).batch(8, drop_remainder=True)
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.FILE
)
dataset = dataset.with_options(options)
model.predict(dataset, steps=1)
class TestRegularizerLoss(tf.test.TestCase, parameterized.TestCase):
class IdentityRegularizer(keras.regularizers.Regularizer):
def __call__(self, x):
return tf.identity(x)
class AddLayer(keras.layers.Layer):
def build(self, _):
self.v = self.add_weight(
"v",
(),
initializer="ones",
regularizer=TestRegularizerLoss.IdentityRegularizer(),
)
def call(self, inputs):
return inputs + self.v
@staticmethod
def loss_fn(_, y_pred):
return tf.reduce_mean(y_pred)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
all_strategy_combinations_minus_default()
)
)
def test_regularizer_loss(self, distribution):
batch_size = 2
if not distributed_training_utils.global_batch_size_supported(
distribution
):
batch_size //= distribution.num_replicas_in_sync
# Given an input x, which is always 1, and variable v, this model
# computes Loss=x+v+regularizer_loss, where regularizer_loss=v and
# the variable is initialized to 1. Therefore, this model computes
# Loss=1+2v, and so the gradient dLoss/dv = 2. This gradient of 2 is
# averaged over all examples in a batch and then multiplied by the
# learning rate of 1. As a result, the model update for one batch
# should subtract 2 from v, resulting in v being -1. If the
# regularizer loss is not scaled correctly by number of replicas,
# the variable value will be incorrect when number of replicas >1.
        # For example, it will be -2 if the number of replicas is 2.
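        # As a rough check of the arithmetic above: one SGD step with
        # learning rate 1.0 gives v_new = v - lr * dLoss/dv = 1 - 2 = -1.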
with distribution.scope():
x = keras.layers.Input(shape=(1,), batch_size=batch_size)
y = TestRegularizerLoss.AddLayer()(x)
model = keras.models.Model(inputs=x, outputs=y)
opt = gradient_descent_keras.SGD(1.0)
model.compile(opt, loss=TestRegularizerLoss.loss_fn)
model.fit(
x=np.array([[1.0], [1.0]], dtype=np.float32),
y=np.array([[1.0], [1.0]], dtype=np.float32),
batch_size=batch_size,
)
v = model.get_weights()[0]
self.assertEqual(-1.0, v)
@test_utils.run_all_without_tensor_float_32(
"Uses Dense layers, which call matmul"
)
class TestDistributionStrategyWithKerasModels(
tf.test.TestCase, parameterized.TestCase
):
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_distribution_strategy_on_sequential_model(self, distribution):
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = simple_sequential_model()
loss = "mse"
model.compile(optimizer, loss)
inputs = np.zeros((20, 10), np.float32)
targets = np.zeros((20, 2), np.float32)
model.fit(inputs, targets, epochs=1, batch_size=10)
model.predict(inputs, batch_size=10)
model.evaluate(inputs, targets, batch_size=10)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations()
)
def test_distribution_strategy_on_functional_model(self, distribution):
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = get_model()
loss = "mse"
model.compile(optimizer, loss)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
model.fit(inputs, targets, epochs=1)
model.predict(inputs)
model.evaluate(inputs, targets)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_distributed_dataset(self, distribution):
with distribution.scope():
class CBCounter(keras.callbacks.Callback):
def __init__(self):
self.epochs = 0
self.train_batches = 0
self.test_batches = 0
def on_epoch_end(self, batch, logs=None):
self.epochs += 1
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
model = keras.Sequential([keras.layers.Dense(1)])
model.compile("sgd", "mse")
cb_counter = CBCounter()
x, y = np.ones((100, 10)), np.ones((100, 1))
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.batch(10).repeat(2)
ds = distribution.experimental_distribute_dataset(ds)
val_ds = tf.data.Dataset.from_tensor_slices((x, y))
val_ds = val_ds.batch(20)
val_ds = distribution.experimental_distribute_dataset(val_ds)
model.fit(
ds,
steps_per_epoch=10,
validation_data=val_ds,
validation_steps=5,
epochs=2,
callbacks=[cb_counter],
)
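            # 10 steps_per_epoch x 2 epochs = 20 train batches, and
            # 5 validation_steps x 2 epochs = 10 test batches.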
self.assertEqual(cb_counter.train_batches, 20)
self.assertEqual(cb_counter.test_batches, 10)
self.assertEqual(cb_counter.epochs, 2)
            # Check that fitting a distributed dataset without
            # `steps_per_epoch` raises a ValueError.
if distribution.num_replicas_in_sync > 1:
with self.assertRaisesRegex(
ValueError, "distributed dataset, you must specify"
):
model.fit(ds, epochs=2)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_distributed_datasets_from_function(self, distribution):
with distribution.scope():
class CBCounter(keras.callbacks.Callback):
def __init__(self):
self.epochs = 0
self.train_batches = 0
self.test_batches = 0
def on_epoch_end(self, batch, logs=None):
self.epochs += 1
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
model = keras.Sequential([keras.layers.Dense(1)])
model.compile("sgd", "mse")
cb_counter = CBCounter()
def make_dataset(_):
x, y = np.ones((100, 10)), np.ones((100, 1))
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.batch(5).repeat()
return ds
ds = distribution.distribute_datasets_from_function(make_dataset)
val_ds = distribution.distribute_datasets_from_function(
make_dataset
)
model.fit(
ds,
steps_per_epoch=10,
validation_data=val_ds,
validation_steps=5,
epochs=2,
callbacks=[cb_counter],
)
self.assertEqual(cb_counter.train_batches, 20)
self.assertEqual(cb_counter.test_batches, 10)
self.assertEqual(cb_counter.epochs, 2)
            # Check that fitting a distributed dataset without
            # `steps_per_epoch` raises a ValueError.
if distribution.num_replicas_in_sync > 1:
with self.assertRaisesRegex(
ValueError, "distributed dataset, you must specify"
):
model.fit(ds, epochs=2)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_host_training_loop(self, distribution):
if isinstance(distribution, tf.distribute.MultiWorkerMirroredStrategy):
self.skipTest("b/172032817")
with distribution.scope():
inputs = keras.Input((10, 10, 3))
x = keras.layers.Conv2D(3, kernel_size=3)(inputs)
x = keras.layers.Flatten()(x)
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.compile("sgd", "mse", steps_per_execution=10)
bc = BatchCountingCB()
x, y = np.ones((100, 10, 10, 3)), np.ones((100, 1))
model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
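            # 100 samples / batch_size 2 = 50 batches; with
            # steps_per_execution=10 the batch-level callbacks fire once
            # per execution, i.e. every 10 batches.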
self.assertEqual(bc.train_begin_batches, [0, 10, 20, 30, 40])
self.assertEqual(bc.train_end_batches, [9, 19, 29, 39, 49])
model.evaluate(x, y, batch_size=2, callbacks=[bc])
self.assertEqual(bc.test_begin_batches, [0, 10, 20, 30, 40])
self.assertEqual(bc.test_end_batches, [9, 19, 29, 39, 49])
model.predict(x, batch_size=2, callbacks=[bc])
self.assertEqual(bc.predict_begin_batches, [0, 10, 20, 30, 40])
self.assertEqual(bc.predict_end_batches, [9, 19, 29, 39, 49])
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_host_training_loop_last_partial_execution(self, distribution):
if isinstance(distribution, tf.distribute.MultiWorkerMirroredStrategy):
self.skipTest("b/172032817")
with distribution.scope():
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.compile("sgd", "mse", steps_per_execution=20)
bc = BatchCountingCB()
x, y = np.ones((100, 10)), np.ones((100, 1))
model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
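        # 50 batches with steps_per_execution=20 run as executions of 20,
        # 20 and a final partial execution of 10 batches.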
self.assertEqual(bc.train_begin_batches, [0, 20, 40])
self.assertEqual(bc.train_end_batches, [19, 39, 49])
model.evaluate(x, y, batch_size=2, callbacks=[bc])
self.assertEqual(bc.test_begin_batches, [0, 20, 40])
self.assertEqual(bc.test_end_batches, [19, 39, 49])
model.predict(x, batch_size=2, callbacks=[bc])
self.assertEqual(bc.predict_begin_batches, [0, 20, 40])
self.assertEqual(bc.predict_end_batches, [19, 39, 49])
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_host_training_loop_dataset_unknown_size(self, distribution):
if isinstance(distribution, tf.distribute.MultiWorkerMirroredStrategy):
self.skipTest("b/172032817")
with distribution.scope():
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.compile("sgd", "mse", steps_per_execution=20)
x, y = np.ones((100, 10)), np.ones((100, 1))
ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
ds = ds.filter(lambda *args, **kwargs: True) # Makes the size UNKNOWN.
bc = BatchCountingCB()
with self.assertRaisesRegex(ValueError, "steps_per_execution"):
model.fit(ds, epochs=2, callbacks=[bc])
train_ds = ds.repeat(2)
model.fit(train_ds, steps_per_epoch=50, epochs=2, callbacks=[bc])
self.assertEqual(bc.train_begin_batches, [0, 20, 40, 0, 20, 40])
self.assertEqual(bc.train_end_batches, [19, 39, 49, 19, 39, 49])
with self.assertRaisesRegex(ValueError, "steps_per_execution"):
model.evaluate(ds, callbacks=[bc])
test_ds = ds.repeat(2)
model.evaluate(test_ds, steps=50, callbacks=[bc])
self.assertEqual(bc.test_begin_batches, [0, 20, 40])
self.assertEqual(bc.test_end_batches, [19, 39, 49])
predict_ds = ds.repeat(2)
model.predict(predict_ds, steps=50, callbacks=[bc])
self.assertEqual(bc.predict_begin_batches, [0, 20, 40])
self.assertEqual(bc.predict_end_batches, [19, 39, 49])
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_host_training_loop_truncate_to_epoch(self, distribution):
if isinstance(distribution, tf.distribute.MultiWorkerMirroredStrategy):
self.skipTest("b/172032817")
with distribution.scope():
inputs = keras.Input(10)
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.compile("sgd", "mse", steps_per_execution=500)
x, y = np.ones((100, 10)), np.ones((100, 1))
bc = BatchCountingCB()
model.fit(x, y, batch_size=2, epochs=2, callbacks=[bc])
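        # steps_per_execution=500 is truncated to the 50 batches in each
        # epoch, so every epoch runs as a single execution.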
self.assertEqual(bc.train_begin_batches, [0, 0])
self.assertEqual(bc.train_end_batches, [49, 49])
x, y = np.ones((50, 10)), np.ones((50, 1))
model.evaluate(x, y, batch_size=2, callbacks=[bc])
self.assertEqual(bc.test_begin_batches, [0])
self.assertEqual(bc.test_end_batches, [24])
x = np.ones((50, 10))
model.predict(x, batch_size=2, callbacks=[bc])
self.assertEqual(bc.predict_begin_batches, [0])
self.assertEqual(bc.predict_end_batches, [24])
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_gradient_clipping(self, distribution):
class MyLayer(keras.layers.Layer):
def build(self, _):
self.v1 = tf.Variable(1.0)
self.v2 = tf.Variable(1.0)
def call(self, x):
return 3 * self.v1 - 3 * self.v2
x, y = np.ones((10, 1)), np.ones((10, 1))
with distribution.scope():
layer = MyLayer()
model = keras.Sequential([layer])
optimizer = gradient_descent_keras.SGD(
1.0, clipnorm=2.0, clipvalue=2.0
)
model.compile(optimizer, "mae")
if isinstance(
distribution,
(
tf.distribute.experimental.CentralStorageStrategy,
tf.compat.v1.distribute.experimental.CentralStorageStrategy,
),
):
with self.assertRaisesRegex(ValueError, "not supported"):
model.fit(x, y, batch_size=10, epochs=1)
else:
model.fit(x, y, batch_size=10, epochs=1)
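            # Roughly: d(loss)/dv1 = -3 and d(loss)/dv2 = 3 are clipped to
            # +/-2, so one SGD step with lr=1 moves v1 from 1 to 3 and v2
            # from 1 to -1.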
self.assertAllClose(self.evaluate(layer.v1), 3.0)
self.assertAllClose(self.evaluate(layer.v2), -1.0)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_custom_gradient_transformation(self, distribution):
if isinstance(
distribution,
(
tf.distribute.experimental.CentralStorageStrategy,
tf.compat.v1.distribute.experimental.CentralStorageStrategy,
),
):
self.skipTest("Not supported with `CentralStorageStrategy`")
class MyLayer(keras.layers.Layer):
def build(self, _):
self.v1 = tf.Variable(1.0)
self.v2 = tf.Variable(-1.0)
def call(self, x):
return x + self.v1 + self.v2
def custom_transform(grads_and_vars):
# Always set gradients to 1.
return [(tf.ones_like(g), v) for g, v in grads_and_vars]
x, y = np.ones((10, 1)), np.ones((10, 1))
with distribution.scope():
layer = MyLayer()
model = keras.Sequential([layer])
optimizer = gradient_descent_keras.SGD(
1.0, gradient_transformers=[custom_transform]
)
model.compile(optimizer, "mae")
model.fit(x, y, batch_size=10, epochs=1)
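            # The transformer replaces every gradient with 1, so one SGD
            # step with lr=1 moves v1 from 1 to 0 and v2 from -1 to -2.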
self.assertAllClose(self.evaluate(layer.v1), 0.0)
self.assertAllClose(self.evaluate(layer.v2), -2.0)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
all_strategy_combinations_minus_default()
)
)
def test_distribution_strategy_one_dimensional(self, distribution):
with distribution.scope():
inp = keras.layers.Input(shape=(10,))
out = keras.layers.Dense(3, activation="softmax")(inp)
model = keras.Model(inputs=[inp], outputs=[out])
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
x = np.random.random((64, 10)).astype("float32")
y = np.random.randint(3, size=64)
model.fit(x, y, epochs=1, steps_per_epoch=2)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["graph", "eager"],
reduction=[
losses_utils.ReductionV2.AUTO,
losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
losses_utils.ReductionV2.SUM,
],
)
)
def test_distribution_strategy_with_loss_reduction_types(
self, distribution, reduction
):
np.random.seed(_RANDOM_SEED)
def _get_model():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer="zeros")(inputs)
x2 = keras.layers.Dense(10, kernel_initializer="zeros")(x1)
outputs = keras.layers.Dense(1, kernel_initializer="zeros")(x2)
model = keras.Model(inputs, outputs)
return model
x = np.random.random((64, 10))
y = np.random.random((64, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(32)
model = _get_model()
model.compile(
"sgd", loss=keras.losses.MeanSquaredError(reduction=reduction)
)
history = model.fit(dataset, steps_per_epoch=2, epochs=1, shuffle=False)
with distribution.scope():
ds_model = _get_model()
ds_model.compile(
"sgd", loss=keras.losses.MeanSquaredError(reduction=reduction)
)
ds_history = ds_model.fit(
dataset, steps_per_epoch=2, epochs=1, shuffle=False
)
self.assertArrayNear(
history.history["loss"], ds_history.history["loss"], 1e-5
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
all_strategy_combinations_minus_default()
)
)
def test_distribution_strategy_with_symbolic_add_loss(
self, mode, distribution
):
def _make_model_with_add_loss():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer="zeros")(inputs)
x2 = keras.layers.Dense(10, kernel_initializer="zeros")(x1)
outputs = keras.layers.Dense(1, kernel_initializer="zeros")(x2)
model = keras.Model(inputs, outputs)
model.add_loss(tf.reduce_mean(x1))
model.add_loss(tf.reduce_mean(outputs))
return model
x = np.ones((64, 10)).astype("float32")
model = _make_model_with_add_loss()
model.compile("sgd")
history = model.fit(x, epochs=1)
with distribution.scope():
ds_model = _make_model_with_add_loss()
ds_model.compile("sgd")
ds_history = ds_model.fit(x, epochs=1)
self.assertAllClose(history.history, ds_history.history)
# TODO(omalleyt): Investigate flakiness and re-enable.
@tf.__internal__.distribute.combinations.generate(
all_strategy_minus_default_and_tpu_combinations()
)
def test_distribution_strategy_with_callable_add_loss(self, distribution):
def _make_model():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer="zeros")(inputs)
x2 = keras.layers.Dense(10, kernel_initializer="zeros")(x1)
d = keras.layers.Dense(1, kernel_initializer="zeros")
outputs = d(x2)
model = keras.Model(inputs, outputs)
model.add_loss(lambda: 100.0 * tf.reduce_mean(d.kernel))
return model
x = np.ones((64, 10)).astype("float32")
y = np.ones((64, 1)).astype("float32")
model = _make_model()
self.assertLen(model.losses, 1)
model.compile("sgd", "mse")
history = model.fit(x, y, steps_per_epoch=2, epochs=1)
with distribution.scope():
ds_model = _make_model()
self.assertLen(ds_model.losses, 1)
ds_model.compile("sgd", "mse")
ds_history = ds_model.fit(x, y, steps_per_epoch=2, epochs=1)
self.assertAllClose(history.history, ds_history.history)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
all_strategy_minus_default_and_tpu_combinations()
)
)
def test_distribution_strategy_with_add_metric_in_call(self, distribution):
class Bias(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight(
name="bias", initializer="zeros", shape=()
)
def call(self, inputs):
self.add_metric(
tf.reduce_mean(inputs), name="bias", aggregation="mean"
)
return inputs + self.bias
def _make_model_with_add_metric():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer="zeros")(inputs)
x2 = Bias()(x1)
outputs = keras.layers.Dense(1, kernel_initializer="zeros")(x2)
model = keras.Model(inputs, outputs)
return model
x = np.ones((64, 10)).astype("float32")
y = np.ones((64, 1)).astype("float32")
model = _make_model_with_add_metric()
self.assertLen(model.metrics, 1)
model.compile("sgd", "mse")
history = model.fit(
x, y, validation_data=(x, y), validation_steps=2, epochs=2
)
with distribution.scope():
ds_model = _make_model_with_add_metric()
self.assertLen(ds_model.metrics, 1)
ds_model.compile("sgd", "mse")
ds_history = ds_model.fit(
x, y, validation_data=(x, y), validation_steps=2, epochs=2
)
# includes stateful loss metric in eager.
metrics_len = 2 if tf.executing_eagerly() else 1
self.assertLen(ds_model.metrics, metrics_len)
self.assertAllClose(history.history, ds_history.history)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["eager"],
)
)
def test_distribution_strategy_with_add_metric_object(self, distribution):
class Bias(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight(
name="bias", initializer="zeros", shape=()
)
self.mean = keras.metrics.Mean(name="mean")
def call(self, inputs):
self.add_metric(self.mean(inputs))
return inputs + self.bias
def _make_model_with_add_metric_object():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer="zeros")(inputs)
x2 = Bias()(x1)
outputs = keras.layers.Dense(1, kernel_initializer="zeros")(x2)
model = keras.Model(inputs, outputs)
return model
x = np.ones((64, 10)).astype("float32")
y = np.ones((64, 1)).astype("float32")
model = _make_model_with_add_metric_object()
self.assertLen(model.metrics, 1)
model.compile("sgd", "mse")
history = model.fit(
x, y, validation_data=(x, y), validation_steps=2, epochs=2
)
with distribution.scope():
ds_model = _make_model_with_add_metric_object()
self.assertLen(ds_model.metrics, 1)
ds_model.compile("sgd", "mse")
ds_history = ds_model.fit(
x, y, validation_data=(x, y), validation_steps=2, epochs=2
)
# includes stateful loss metric in eager.
metrics_len = 2 if tf.executing_eagerly() else 1
self.assertLen(ds_model.metrics, metrics_len)
self.assertAllClose(history.history, ds_history.history)
@tf.__internal__.distribute.combinations.generate(
# TODO(phillypham): Why does validation_steps > 1 not work on TPUs?
tf.__internal__.test.combinations.times(
all_strategy_minus_default_and_tpu_combinations()
)
)
def test_distribution_strategy_with_add_metric_outside_call(
self, distribution
):
def _make_model_with_add_metric():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer="zeros")(inputs)
outputs = keras.layers.Dense(1, kernel_initializer="zeros")(x1)
model = keras.Model(inputs, outputs)
model.add_metric(
tf.reduce_mean(x1), name="mid_mean", aggregation="mean"
)
return model
x = np.ones((64, 10)).astype("float32")
y = np.ones((64, 1)).astype("float32")
model = _make_model_with_add_metric()
self.assertLen(model.metrics, 1)
model.compile("sgd", "mse")
history = model.fit(
x, y, validation_data=(x, y), validation_steps=2, epochs=2
)
with distribution.scope():
ds_model = _make_model_with_add_metric()
self.assertLen(ds_model.metrics, 1)
ds_model.compile("sgd", "mse")
ds_history = ds_model.fit(
x, y, validation_data=(x, y), validation_steps=2, epochs=2
)
# includes stateful loss metric in eager.
metrics_len = 2 if tf.executing_eagerly() else 1
self.assertLen(ds_model.metrics, metrics_len)
self.assertAllClose(history.history, ds_history.history)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=strategies_minus_tpu
+ multi_worker_mirrored_strategies,
mode=["eager"],
)
)
def test_sparse_tensor_outputs(self, distribution):
class ToSparse(keras.layers.Layer):
"""Create a sparse tensor based on a given dense tensor."""
def call(self, inputs):
indices = tf.where(tf.not_equal(inputs, 0))
values = tf.gather_nd(inputs, indices)
shape = tf.shape(inputs, out_type="int64")
return tf.SparseTensor(indices, values, dense_shape=shape)
model = keras.Sequential([ToSparse()])
# Define some input data with additional padding.
input_data = np.array([[1, 0, 0], [2, 3, 0]])
output = model.predict(input_data, batch_size=2)
expected_indices = np.array([[0, 0], [1, 0], [1, 1]])
expected_values = np.array([1, 2, 3])
expected_dense_shape = np.array([2, 3])
self.assertAllEqual(output.indices, expected_indices)
self.assertAllEqual(output.values, expected_values)
self.assertAllEqual(output.dense_shape, expected_dense_shape)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=strategies_minus_tpu
+ multi_worker_mirrored_strategies,
mode=["eager"],
)
)
def test_ragged_tensor_outputs(self, distribution):
class ToRagged(keras.layers.Layer):
"""Create a ragged tensor based on a given dense tensor."""
def __init__(self, padding, ragged_rank=1, **kwargs):
super().__init__(**kwargs)
self._padding = padding
self._ragged_rank = ragged_rank
def call(self, inputs):
return tf.RaggedTensor.from_tensor(
inputs, padding=self._padding, ragged_rank=self._ragged_rank
)
model = keras.Sequential([ToRagged(padding=0)])
# Define some input data with additional padding.
input_data = np.array([[1, 0, 0], [2, 3, 0]])
output = model.predict(input_data, batch_size=2)
expected_values = [[1], [2, 3]]
self.assertAllEqual(expected_values, output)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=strategies_minus_default_minus_tpu
+ tpu_strategies
+ multi_worker_mirrored_strategies,
mode=["eager"],
)
)
def test_correctness_of_add_loss_with_merge_call(self, distribution):
batch_size = 32
def _get_model():
inputs = keras.layers.Input(shape=(1,))
labels = keras.layers.Input(shape=(1,))
x = keras.layers.Dense(10, activation="relu")(inputs)
y = keras.layers.Dense(1)(x)
model = keras.models.Model([inputs, labels], y)
model.add_loss(keras.losses.mean_squared_error(labels, y))
return model
def _get_data():
x_train = np.random.rand(64, 1)
y_train = 3 * x_train
x_train = x_train.astype("float32")
y_train = y_train.astype("float32")
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.batch(batch_size)
return dataset
with distribution.scope():
model = _get_model()
optimizer = gradient_descent_keras.SGD(0.2)
@tf.function
def train_step(dist_inputs):
def step_fn(inputs):
with tf.GradientTape() as tape:
logits = model(inputs)
# Invoke a merge_call()
tf.distribute.get_replica_context().merge_call(
lambda d: None
)
# Verify that there is only one loss on the model.
assert len(model.losses) == 1
loss_from_model = (
tf.reduce_sum(model.losses) * 1.0 / batch_size
)
# Compute loss in this loop.
loss = keras.losses.mean_squared_error(
inputs[1], logits
)
loss = tf.nn.compute_average_loss(
loss, global_batch_size=batch_size
)
# Verify that the loss computed in this loop is
# equivalent to the loss from the model that was added
# via add_loss.
tf.compat.v1.assert_equal(loss, loss_from_model)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(
zip(grads, model.trainable_variables)
)
return loss
per_replica_losses = distribution.run(
step_fn, args=(dist_inputs,)
)
return distribution.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
)
dataset = distribution.experimental_distribute_dataset(_get_data())
for _ in range(2):
for x in dataset:
train_step(x)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(mode=["graph", "eager"])
)
def test_unimplemented_parameter_server_strategy(self):
cluster_spec = multi_worker_testing_utils.create_in_process_cluster(
num_workers=3, num_ps=2
)
cluster_resolver = SimpleClusterResolver(
cluster_spec=tf.train.ClusterSpec(cluster_spec),
task_type="worker",
task_id=1,
num_accelerators={"GPU": 0},
)
distribution = (
tf.compat.v1.distribute.experimental.ParameterServerStrategy(
cluster_resolver
)
)
self.assertIsInstance(
distribution,
tf.compat.v1.distribute.experimental.ParameterServerStrategy,
)
with self.assertRaisesRegex(
NotImplementedError, "ParameterServerStrategy*"
):
with distribution.scope():
model = simple_sequential_model()
optimizer = tf.compat.v1.train.RMSPropOptimizer(
learning_rate=0.001
)
loss = "mse"
model.compile(optimizer, loss)
# Models to exercise inserting ancillary layers with add_loss and add_metric.
def _functional_with_add_loss_and_metric(input_shape, num_classes, l1, l2):
inputs = keras.Input(input_shape, name="images")
x = keras.layers.Conv2D(32, kernel_size=5, activation="relu")(inputs)
x = keras.layers.MaxPooling2D(pool_size=2)(x)
x = keras.layers.Conv2D(64, kernel_size=5, activation="relu")(x)
x = keras.layers.MaxPooling2D(pool_size=2)(x)
# Apply L2 regularization to embedding. Use a mix of TensorFlow ops and
# layers to exercise all code paths.
x = keras.layers.Flatten(name="embedding")(x)
l2_loss = tf.reduce_mean(tf.reduce_sum(tf.square(x), -1))
# Apply L1 regularization to next layer.
x = keras.layers.Dense(1024, activation="relu", name="sparse_embedding")(x)
l1_loss = keras.layers.Lambda(
lambda x: tf.reduce_mean(tf.reduce_sum(x, -1)), name="l1_loss"
)(x)
outputs = keras.layers.Dense(num_classes, name="logits")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Weight regularization terms.
model.add_loss(keras.layers.Lambda(lambda x: x * l2)(l2_loss))
model.add_metric(l2_loss, aggregation="mean", name="l2_loss")
model.add_loss(l1_loss * l1)
model.add_metric(l1_loss, aggregation="mean", name="l1_loss")
return model
def _sequential_with_add_loss_and_metric(input_shape, num_classes, l1, l2):
model = keras.Sequential(
[
keras.layers.Conv2D(
32, kernel_size=5, activation="relu", input_shape=input_shape
),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Conv2D(64, kernel_size=5, activation="relu"),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Flatten(name="embedding"),
keras.layers.Dense(
1024, activation="relu", name="sparse_embedding"
),
keras.layers.Dense(num_classes, name="logits"),
]
)
# Extract layer outputs, add regularization terms, and rescale the metric.
# Use a mix of TensorFlow ops and layers to exercise all code paths.
x = model.get_layer("sparse_embedding").get_output_at(-1)
l1_loss = l1 * tf.reduce_mean(tf.reduce_sum(x, -1))
model.add_loss(l1_loss)
model.add_metric(
keras.layers.Lambda(lambda x: tf.divide(x, l1))(l1_loss),
aggregation="mean",
name="l1_loss",
)
x = model.get_layer("embedding").get_output_at(-1)
l2_loss = keras.layers.Lambda(
lambda x: l2 * tf.reduce_mean(tf.reduce_sum(x * x, -1)), name="l2_loss"
)(x)
model.add_loss(l2_loss)
model.add_metric(l2_loss / l2, aggregation="mean", name="l2_loss")
return model
def _functional_with_layer_reuse(input_shape, num_classes, l1, l2):
base_model = keras.Sequential(
[
keras.layers.Conv2D(
32, kernel_size=5, activation="relu", input_shape=input_shape
),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Conv2D(64, kernel_size=5, activation="relu"),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Flatten(),
keras.layers.Dense(1024, activation="relu"),
keras.layers.Dense(num_classes, name="logits"),
]
)
inputs = keras.Input(input_shape, name="images")
logits = base_model(inputs)
model = keras.Model(inputs=inputs, outputs=logits)
# Reuse sequential layer and create new nodes.
zero_logits = base_model(tf.zeros_like(inputs))
one_logits = base_model(tf.ones_like(inputs))
# L2 loss.
l2_loss = tf.reduce_mean(tf.reduce_sum(tf.square(logits - zero_logits), -1))
model.add_loss(l2_loss * l2)
model.add_metric(l2_loss, aggregation="mean", name="l2_loss")
# L1 loss.
l1_loss = tf.reduce_mean(tf.reduce_sum(tf.abs(logits - one_logits), -1))
model.add_loss(l1_loss * l1)
model.add_metric(l1_loss, aggregation="mean", name="l1_loss")
return model
class TestDistributionStrategyWithMultipleAddLossAndMetricCalls(
tf.test.TestCase, parameterized.TestCase
):
"""Tests complex models with multiple add loss and metric calls."""
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
all_strategy_combinations_minus_default(),
tf.__internal__.test.combinations.combine(
model_fn=[
_functional_with_add_loss_and_metric,
_sequential_with_add_loss_and_metric,
_functional_with_layer_reuse,
],
l1=[0.01],
l2=[0.1],
),
)
)
def test_fit_and_evaluate(self, distribution, model_fn, l1, l2):
# Make fake MNIST-like image data.
np.random.seed(_RANDOM_SEED)
dataset = tf.data.Dataset.from_tensor_slices(
(
np.random.uniform(size=(64, 28, 28, 1)).astype(np.float32),
np.random.randint(0, 10, size=(64,)),
)
)
dataset = dataset.shuffle(64).batch(
8 * distribution.num_replicas_in_sync, drop_remainder=True
)
# Make model with distribution strategy and initialize with dataset
# shape.
input_shape = tf.data.experimental.get_structure(dataset)[0].shape[1:]
with distribution.scope():
model = model_fn(input_shape, 10, l1, l2)
model.compile(
optimizer=keras.optimizers.adam_legacy.Adam(1e-4),
loss=keras.losses.SparseCategoricalCrossentropy(
from_logits=True,
reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
),
metrics=[
keras.metrics.SparseCategoricalAccuracy(),
keras.metrics.SparseCategoricalCrossentropy(
from_logits=True
),
],
)
# Non-eager training doesn't support steps_per_epoch=None.
for unused_epoch in range(2):
model.fit(dataset)
results = dict(zip(model.metrics_names, model.evaluate(dataset)))
# Sanity checks.
self.assertBetween(results["sparse_categorical_accuracy"], 0.02, 1.0)
self.assertGreater(results["l2_loss"], 0.0)
self.assertGreater(results["l1_loss"], 0.0)
# Assert correctness of the loss calculation and updating of metrics.
self.assertNear(
results["l1_loss"] * l1
+ results["l2_loss"] * l2
+ results["sparse_categorical_crossentropy"],
results["loss"],
1e-6,
)
class DeterministicModel(keras.Model):
"""Deterministic Model that always outputs the same initial result.
    It verifies that the `call` method runs inside the same distribution
    strategy that was passed to the model at construction time.
"""
def __init__(self, strategy):
super().__init__()
self.x = None
self.strategy = strategy
def build(self, input_shape):
self.x = tf.Variable(tf.ones(shape=()))
def call(self, inputs, training=None, mask=None):
active_strategy = tf.distribute.get_strategy()
if active_strategy is not self.strategy:
raise ValueError("Model must execute call w/ the original strategy")
return self.x * inputs
class TestModelCapturesStrategy(tf.test.TestCase, parameterized.TestCase):
"""Tests that model creation captures the strategy."""
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_fit_and_evaluate(self, distribution):
dataset = tf.data.Dataset.from_tensor_slices(
(tf.ones(shape=(64,)), tf.ones(shape=(64,)))
)
dataset = dataset.batch(8 * distribution.num_replicas_in_sync)
# Make model with distribution strategy
with distribution.scope():
model = DeterministicModel(distribution)
optimizer = keras.optimizers.adam_legacy.Adam(1e-4)
# Compile & evaluate the model outside of the distribution strategy
# scope
model.compile(
optimizer=optimizer,
loss=keras.losses.MeanSquaredError(),
metrics=["binary_accuracy"],
)
# Call `optimizer.iterations` out of strategy scope.
self.assertEqual(model.optimizer.iterations.numpy(), 0)
# Non-eager training doesn't support steps_per_epoch=None.
for unused_epoch in range(2):
model.fit(dataset)
results = model.evaluate(dataset)
results = dict(zip(model.metrics_names, results))
# Check that the metrics have a result we expect
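        # (The model acts as the identity on these all-ones inputs because
        # `x` is initialized to 1, so the loss is 0, gradients are 0, and
        # the weights never move during `fit`.)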
self.assertEqual(results["binary_accuracy"], 1.0)
self.assertAllClose(results["loss"], 0.0)
        # Assert that all metric/optimizer/model variables were created in
        # the distribution strategy's scope (i.e. `compile` uses the
        # captured distribution strategy).
metric_vars = tf.nest.flatten(
[metric.variables for metric in model.metrics]
)
for var in metric_vars:
self.assertTrue(
distribution.extended.variable_created_in_scope(var)
)
for var in model.optimizer._weights:
self.assertTrue(
distribution.extended.variable_created_in_scope(var)
)
for var in model.variables:
self.assertTrue(
distribution.extended.variable_created_in_scope(var)
)
        # A metric created in the same scope as the model is allowed:
        # this shouldn't raise any validation errors.
with distribution.scope():
metric = keras.metrics.BinaryAccuracy()
model.compile(
optimizer=optimizer,
loss=keras.losses.MeanSquaredError(),
metrics=[metric],
)
# This should raise an error because the metric is constructed
# outside of the scope, and not by compile
if tf.distribute.has_strategy():
with self.assertRaisesRegex(
ValueError, "All metrics must be created in"
):
model.compile(
optimizer=keras.optimizers.adam_v2.Adam(1e-4),
loss=keras.losses.MeanSquaredError(),
metrics=[keras.metrics.BinaryAccuracy()],
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu, # noqa: E501
mode=["eager"],
)
)
def test_optimizer(self, distribution):
temp_dir = os.path.join(self.get_temp_dir(), "ckpt")
def create_model():
model = keras.models.Sequential(
[
keras.layers.Dense(1),
]
)
model.compile(optimizer=keras.optimizers.Adam(), loss="mse")
model.build([None, 1]) # create weights.
return model
model = create_model()
x = y = tf.ones(shape=(1, 1))
model.fit(x=x, y=y, batch_size=1)
model.save_weights(temp_dir)
with distribution.scope():
model = create_model()
model.load_weights(temp_dir)
if isinstance(model.optimizer, optimizer_base.Optimizer):
model.optimizer.build(model.trainable_variables)
variables = model.optimizer.variables
else:
variables = model.optimizer.variables()
self.assertNotEmpty(variables)
self.assertTrue(
distributed_training_utils.is_distributed_variable(variables[0])
)
with distribution.scope():
model = create_model()
        # Creating/restoring slot variables outside of the scope is fine.
model.load_weights(temp_dir)
if isinstance(model.optimizer, optimizer_base.Optimizer):
# V3 optimizer has to restore variables in scope.
return
# From this point on, the optimizer must be a V2 optimizer.
self.assertNotEmpty(model.optimizer.variables())
self.assertTrue(
distributed_training_utils.is_distributed_variable(
model.optimizer.variables()[0]
)
)
if __name__ == "__main__":
base_layer_utils.enable_v2_dtype_behavior()
tf.__internal__.distribute.multi_process_runner.test_main()
| tf-keras/tf_keras/distribute/distribute_strategy_test.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/distribute_strategy_test.py",
"repo_id": "tf-keras",
"token_count": 59914
} | 159 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateful tf.keras LSTM models using DistributionStrategy."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.distribute import keras_correctness_test_base
from tf_keras.optimizers.legacy import (
gradient_descent as gradient_descent_keras,
)
def strategies_for_stateful_embedding_model():
"""Returns TPUStrategy with single core device assignment."""
return [
tf.__internal__.distribute.combinations.tpu_strategy_one_core,
]
def test_combinations_for_stateful_embedding_model():
return tf.__internal__.test.combinations.combine(
distribution=strategies_for_stateful_embedding_model(),
mode="graph",
use_numpy=False,
use_validation_data=False,
)
class DistributionStrategyStatefulLstmModelCorrectnessTest(
keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase # noqa: E501
):
def get_model(
self,
max_words=10,
initial_weights=None,
distribution=None,
input_shapes=None,
):
del input_shapes
batch_size = keras_correctness_test_base._GLOBAL_BATCH_SIZE
with keras_correctness_test_base.MaybeDistributionScope(distribution):
word_ids = keras.layers.Input(
shape=(max_words,),
batch_size=batch_size,
dtype=np.int32,
name="words",
)
word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(
word_ids
)
lstm_embed = keras.layers.LSTM(
units=4, return_sequences=False, stateful=True
)(word_embed)
preds = keras.layers.Dense(2, activation="softmax")(lstm_embed)
model = keras.Model(inputs=[word_ids], outputs=[preds])
if initial_weights:
model.set_weights(initial_weights)
optimizer_fn = gradient_descent_keras.SGD
model.compile(
optimizer=optimizer_fn(learning_rate=0.1),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
return model
# TODO(jhseu): Disabled to fix b/130808953. Need to investigate why it
# doesn't work and enable for DistributionStrategy more generally.
@tf.__internal__.distribute.combinations.generate(
test_combinations_for_stateful_embedding_model()
)
def disabled_test_stateful_lstm_model_correctness(
self, distribution, use_numpy, use_validation_data
):
self.run_correctness_test(
distribution, use_numpy, use_validation_data, is_stateful_model=True
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501
)
)
def test_incorrectly_use_multiple_cores_for_stateful_lstm_model(
self, distribution, use_numpy, use_validation_data
):
with self.assertRaisesRegex(
ValueError, "not yet supported with tf.distribute.Strategy"
):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
is_stateful_model=True,
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/distribute/keras_stateful_lstm_model_correctness_test.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/keras_stateful_lstm_model_correctness_test.py",
"repo_id": "tf-keras",
"token_count": 1746
} | 160 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing saving/loading with DS."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.distribute import model_combinations
_RANDOM_SEED = 1337
_DEFAULT_FUNCTION_KEY = "serving_default"
_TOLERANCE = 1e-30
# TPUs use bfloat16 for computation in the underlying hardware, so they
# have less precision than CPUs/GPUs.
_TPU_TOLERANCE = 1e-7
PREDICT_STEPS = 1
simple_models = [
model_combinations.simple_functional_model,
model_combinations.simple_sequential_model,
model_combinations.simple_subclass_model,
]
strategies = [
tf.__internal__.distribute.combinations.default_strategy,
tf.__internal__.distribute.combinations.one_device_strategy,
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_one_gpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,
tf.__internal__.distribute.combinations.tpu_strategy,
tf.__internal__.distribute.combinations.tpu_strategy_packed_var,
tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, # noqa: E501
]
def simple_models_with_strategies():
return tf.__internal__.test.combinations.combine(
model_and_input=simple_models, distribution=strategies, mode=["eager"]
)
def simple_models_with_strategy_pairs():
return tf.__internal__.test.combinations.combine(
model_and_input=simple_models,
distribution_for_saving=strategies,
distribution_for_restoring=strategies,
mode=["eager"],
)
def tfmodule_models_with_strategies():
return tf.__internal__.test.combinations.combine(
model_and_input=[model_combinations.simple_tfmodule_model],
distribution=strategies,
mode=["eager"],
)
def tfmodule_models_with_strategy_pairs():
return tf.__internal__.test.combinations.combine(
model_and_input=[model_combinations.simple_tfmodule_model],
distribution_for_saving=strategies,
distribution_for_restoring=strategies,
mode=["eager"],
)
def load_and_run_with_saved_model_api(
distribution, saved_dir, predict_dataset, output_name
):
"""Loads a saved_model using tf.saved_model API, and runs it."""
func = tf.saved_model.load(saved_dir)
if distribution:
dist_predict_dataset = distribution.experimental_distribute_dataset(
predict_dataset
)
per_replica_predict_data = next(iter(dist_predict_dataset))
result = distribution.run(
func.signatures[_DEFAULT_FUNCTION_KEY],
args=(per_replica_predict_data,),
)
result = result[output_name]
# Convert the per_replica value to a list, then concatenate them
reduced = distribution.experimental_local_results(result)
concat = tf.concat(reduced, 0)
return concat
else:
result = func.signatures[_DEFAULT_FUNCTION_KEY](
next(iter(predict_dataset))
)
return result[output_name]
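# An illustrative usage sketch of the helper above (not executed by the
# tests; the saved-model path and output name here are placeholders):
#   strategy = tf.distribute.MirroredStrategy()
#   predictions = load_and_run_with_saved_model_api(
#       strategy, "/tmp/saved_model", predict_dataset, "output_1"
#   )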
class TestSavedModelBase(tf.test.TestCase, parameterized.TestCase):
"""Base class for testing saving/loading with DS."""
def setUp(self):
np.random.seed(_RANDOM_SEED)
tf.compat.v1.set_random_seed(_RANDOM_SEED)
self._root_dir = "base"
super().setUp()
def _save_model(self, model, saved_dir):
"""Save the given model to the given saved_dir.
This method needs to be implemented by the subclasses.
Args:
model: a keras model object to save.
saved_dir: a string representing the path to save the keras model
"""
raise NotImplementedError("must be implemented in descendants")
def _load_and_run_model(
self, distribution, saved_dir, predict_dataset, output_name="output_1"
):
"""Load the model and run 1 step of predict with it.
This method must be implemented by the subclasses.
Args:
distribution: the distribution strategy used to load the model. None
if no distribution strategy is used
saved_dir: the string representing the path where the model is saved.
predict_dataset: the data used to do the predict on the model for
cross_replica context.
output_name: the string representing the name of the output layer of
the model.
"""
raise NotImplementedError("must be implemented in descendants")
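    # Subclasses typically provide the two abstract methods above. An
    # illustrative sketch (the real subclasses may differ and use other
    # save/load APIs):
    #   def _save_model(self, model, saved_dir):
    #       model.save(saved_dir, save_format="tf")
    #
    #   def _load_and_run_model(self, distribution, saved_dir,
    #                           predict_dataset, output_name="output_1"):
    #       return load_and_run_with_saved_model_api(
    #           distribution, saved_dir, predict_dataset, output_name
    #       )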
def _train_model(self, model, x_train, y_train, batch_size):
training_dataset = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)
)
training_dataset = training_dataset.repeat()
training_dataset = training_dataset.batch(batch_size)
# Train the model for 1 epoch
model.fit(x=training_dataset, epochs=1, steps_per_epoch=100)
def _predict_with_model(self, distribution, model, predict_dataset):
return model.predict(predict_dataset, steps=PREDICT_STEPS)
def _get_predict_dataset(self, x_predict, batch_size):
predict_dataset = tf.data.Dataset.from_tensor_slices(x_predict)
predict_dataset = predict_dataset.repeat()
predict_dataset = predict_dataset.batch(batch_size)
return predict_dataset
def run_test_save_no_strategy_restore_strategy(
self, model_and_input, distribution
):
"""Save a model without DS, and restore it with DS."""
saved_dir = os.path.join(self.get_temp_dir(), "0")
model = model_and_input.get_model()
x_train, y_train, x_predict = model_and_input.get_data()
batch_size = model_and_input.get_batch_size()
predict_dataset = self._get_predict_dataset(x_predict, batch_size)
self._train_model(model, x_train, y_train, batch_size)
result_before_save = self._predict_with_model(
None, model, predict_dataset
)
self._save_model(model, saved_dir)
with distribution.scope():
result_after_save = self._load_and_run_model(
distribution=distribution,
saved_dir=saved_dir,
predict_dataset=predict_dataset,
)
self.assertAllClose(result_before_save, result_after_save)
def run_test_save_strategy_restore_no_strategy(
self, model_and_input, distribution, save_in_scope
):
"""Save a model with DS, and restore it without DS."""
saved_dir = os.path.join(self.get_temp_dir(), "1")
with distribution.scope():
model = model_and_input.get_model()
x_train, y_train, x_predict = model_and_input.get_data()
batch_size = model_and_input.get_batch_size()
self._train_model(model, x_train, y_train, batch_size)
predict_dataset = self._get_predict_dataset(x_predict, batch_size)
result_before_save = self._predict_with_model(
distribution, model, predict_dataset
)
if save_in_scope:
with distribution.scope():
self._save_model(model, saved_dir)
else:
self._save_model(model, saved_dir)
load_result = self._load_and_run_model(
distribution=None,
saved_dir=saved_dir,
predict_dataset=predict_dataset,
)
self.assertAllClose(result_before_save, load_result)
def run_test_save_strategy_restore_strategy(
self,
model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope,
):
"""Save a model with DS, and restore it with potentially different
DS."""
saved_dir = os.path.join(self.get_temp_dir(), "2")
with distribution_for_saving.scope():
model = model_and_input.get_model()
x_train, y_train, x_predict = model_and_input.get_data()
batch_size = model_and_input.get_batch_size()
self._train_model(model, x_train, y_train, batch_size)
predict_dataset = self._get_predict_dataset(x_predict, batch_size)
result_before_save = self._predict_with_model(
distribution_for_saving, model, predict_dataset
)
if save_in_scope:
with distribution_for_saving.scope():
self._save_model(model, saved_dir)
else:
self._save_model(model, saved_dir)
with distribution_for_restoring.scope():
load_result = self._load_and_run_model(
distribution=distribution_for_restoring,
saved_dir=saved_dir,
predict_dataset=predict_dataset,
)
self.assertAllClose(result_before_save, load_result)
def run_test_save_strategy(
self, model_and_input, distribution, save_in_scope
):
"""Save a model with DS."""
saved_dir = os.path.join(self.get_temp_dir(), "3")
with distribution.scope():
model = model_and_input.get_model()
x_train, y_train, _ = model_and_input.get_data()
batch_size = model_and_input.get_batch_size()
self._train_model(model, x_train, y_train, batch_size)
if save_in_scope:
with distribution.scope():
self._save_model(model, saved_dir)
else:
self._save_model(model, saved_dir)
return saved_dir
| tf-keras/tf_keras/distribute/saved_model_test_base.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/saved_model_test_base.py",
"repo_id": "tf-keras",
"token_count": 4348
} | 161 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import metrics
from tf_keras.dtensor import dtensor_api as dtensor
from tf_keras.dtensor import test_util
from tf_keras.utils import tf_utils
class MetricsTest(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
global_ids = test_util.create_device_ids_array((2, 2))
local_device_ids = np.ravel(global_ids).tolist()
mesh_dict = {
"CPU": dtensor.Mesh(
["X", "Y"],
global_ids,
local_device_ids,
test_util.create_device_list((2, 2), "CPU"),
)
}
self.mesh = self.configTestMesh(mesh_dict)
tf_utils.set_random_seed(1337)
@parameterized.parameters(
(metrics.Accuracy, {}),
(metrics.AUC, {}),
(metrics.BinaryAccuracy, {}),
(metrics.BinaryCrossentropy, {}),
(metrics.BinaryIoU, {}),
(metrics.CategoricalAccuracy, {}),
(metrics.CategoricalCrossentropy, {}),
(metrics.CategoricalHinge, {}),
(metrics.CosineSimilarity, {}),
(metrics.FalseNegatives, {}),
(metrics.FalsePositives, {}),
(metrics.Hinge, {}),
(metrics.IoU, {"num_classes": 3, "target_class_ids": [1]}),
(metrics.KLDivergence, {}),
(metrics.LogCoshError, {}),
(metrics.Mean, {}),
(metrics.MeanAbsoluteError, {}),
(metrics.MeanAbsolutePercentageError, {}),
(metrics.MeanIoU, {"num_classes": 3}),
(metrics.MeanRelativeError, {"normalizer": [1, 3, 2, 3]}),
(metrics.MeanSquaredError, {}),
(metrics.MeanSquaredLogarithmicError, {}),
(metrics.OneHotIoU, {"num_classes": 3, "target_class_ids": [1]}),
(metrics.OneHotMeanIoU, {"num_classes": 3}),
(metrics.Poisson, {}),
(metrics.Precision, {}),
(metrics.PrecisionAtRecall, {"recall": 0.5}),
(metrics.Recall, {}),
(metrics.RecallAtPrecision, {"precision": 0.5}),
(metrics.RootMeanSquaredError, {}),
(metrics.SensitivityAtSpecificity, {"specificity": 0.5}),
(metrics.SparseCategoricalAccuracy, {}),
(metrics.SparseCategoricalCrossentropy, {}),
(metrics.SparseTopKCategoricalAccuracy, {}),
(metrics.SpecificityAtSensitivity, {"sensitivity": 0.5}),
(metrics.SquaredHinge, {}),
(metrics.Sum, {}),
(metrics.TopKCategoricalAccuracy, {}),
(metrics.TrueNegatives, {}),
(metrics.TruePositives, {}),
)
def test_metric_layout(self, metric_cls, init_args):
metric = metric_cls(**init_args, mesh=self.mesh)
for weight in metric.non_trainable_weights:
self.assertIsInstance(weight, dtensor.DVariable)
self.assertTrue(weight.layout.is_fully_replicated())
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/dtensor/metrics_test.py/0 | {
"file_path": "tf-keras/tf_keras/dtensor/metrics_test.py",
"repo_id": "tf-keras",
"token_count": 1571
} | 162 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras' base preprocessing layer."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.engine import base_preprocessing_layer
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# Define a test-only implementation of BasePreprocessingLayer to validate
# its correctness directly.
class AddingPreprocessingLayer(base_preprocessing_layer.PreprocessingLayer):
def build(self, input_shape):
super().build(input_shape)
self.sum = tf.Variable(0.0, dtype=tf.float32)
def update_state(self, data):
self.sum.assign_add(tf.reduce_sum(tf.cast(data, tf.float32)))
def reset_state(self):
self.sum.assign(0.0)
def set_total(self, sum_value):
"""This is an example of how a subclass would implement a direct setter.
Args:
sum_value: The total to set.
"""
self.sum.assign(sum_value)
def call(self, inputs):
return inputs + self.sum
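# Illustrative usage sketch (mirrors the numpy adapt tests below): after
# `adapt`, the layer adds the running sum of the adapt data to every input:
#   layer = AddingPreprocessingLayer()
#   layer.adapt(np.array([1, 2, 3, 4, 5]))     # layer.sum -> 15.0
#   layer(tf.constant([[1.0], [2.0], [3.0]]))  # -> [[16.0], [17.0], [18.0]]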
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class PreprocessingLayerTest(test_combinations.TestCase):
def test_adapt_bad_input_fails(self):
"""Test that non-Dataset/Numpy inputs cause a reasonable error."""
input_dataset = {"foo": 0}
layer = AddingPreprocessingLayer()
if tf.executing_eagerly():
with self.assertRaisesRegex(
ValueError, "Failed to find data adapter"
):
layer.adapt(input_dataset)
else:
with self.assertRaisesRegex(ValueError, "requires a"):
layer.adapt(input_dataset)
def test_adapt_infinite_dataset_fails(self):
"""Test that preproc layers fail if an infinite dataset is passed."""
input_dataset = tf.data.Dataset.from_tensor_slices(
np.array([[1], [2], [3], [4], [5], [0]])
).repeat()
layer = AddingPreprocessingLayer()
if tf.executing_eagerly():
with self.assertRaisesRegex(ValueError, "infinite dataset"):
layer.adapt(input_dataset)
else:
with self.assertRaisesRegex(
ValueError, ".*infinite number of elements.*"
):
layer.adapt(input_dataset)
def test_setter_update(self):
"""Test the prototyped setter method."""
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = test_utils.should_run_eagerly()
layer.set_total(15)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
def test_pre_build_adapt_update_numpy(self):
"""Test that preproc layers can adapt() before build() is called."""
input_dataset = np.array([1, 2, 3, 4, 5])
layer = AddingPreprocessingLayer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = test_utils.should_run_eagerly()
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
def test_post_build_adapt_update_numpy(self):
"""Test that preproc layers can adapt() after build() is called."""
input_dataset = np.array([1, 2, 3, 4, 5])
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = test_utils.should_run_eagerly()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
def test_pre_build_adapt_update_dataset(self):
"""Test that preproc layers can adapt() before build() is called."""
input_dataset = tf.data.Dataset.from_tensor_slices(
np.array([[1], [2], [3], [4], [5], [0]])
)
layer = AddingPreprocessingLayer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = test_utils.should_run_eagerly()
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
def test_post_build_adapt_update_dataset(self):
"""Test that preproc layers can adapt() after build() is called."""
input_dataset = tf.data.Dataset.from_tensor_slices(
np.array([[1], [2], [3], [4], [5], [0]])
)
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = test_utils.should_run_eagerly()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
def test_weight_based_state_transfer(self):
"""Test that preproc layers can transfer state via get/set weights.."""
def get_model():
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = test_utils.should_run_eagerly()
return (model, layer)
input_dataset = np.array([1, 2, 3, 4, 5])
model, layer = get_model()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
# Create a new model and verify it has no state carryover.
weights = model.get_weights()
model_2, _ = get_model()
self.assertAllEqual([[1], [2], [3]], model_2.predict([1.0, 2.0, 3.0]))
# Transfer state from model to model_2 via get/set weights.
model_2.set_weights(weights)
self.assertAllEqual(
[[16], [17], [18]], model_2.predict([1.0, 2.0, 3.0])
)
def test_loading_without_providing_class_fails(self):
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.variables_initializer(model.variables))
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
with self.assertRaisesRegex(
ValueError, "Unknown layer: 'AddingPreprocessingLayer'"
):
_ = keras.models.load_model(output_path)
def test_adapt_sets_input_shape_rank(self):
"""Check that `.adapt()` sets the `input_shape`'s rank."""
# Shape: (3,1,2)
adapt_dataset = np.array(
[[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]], dtype=np.float32
)
layer = AddingPreprocessingLayer()
layer.adapt(adapt_dataset)
input_dataset = np.array(
[[[1.0, 2.0], [3.0, 4.0]], [[3.0, 4.0], [5.0, 6.0]]],
dtype=np.float32,
)
layer(input_dataset)
model = keras.Sequential([layer])
self.assertTrue(model.built)
self.assertEqual(model.input_shape, (None, None, None))
def test_adapt_doesnt_overwrite_input_shape(self):
"""Check that `.adapt()` doesn't change the `input_shape`."""
# Shape: (3, 1, 2)
adapt_dataset = np.array(
[[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]], dtype=np.float32
)
layer = AddingPreprocessingLayer(input_shape=[1, 2])
layer.adapt(adapt_dataset)
model = keras.Sequential([layer])
self.assertTrue(model.built)
self.assertEqual(model.input_shape, (None, 1, 2))
class PreprocessingLayerV1Test(test_combinations.TestCase):
def test_adapt_fails(self):
"""Test that calling adapt leads to a runtime error."""
input_dataset = {"foo": 0}
with tf.Graph().as_default():
layer = AddingPreprocessingLayer()
with self.assertRaisesRegex(
RuntimeError, "`adapt` is only supported in tensorflow v2"
):
layer.adapt(input_dataset)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/engine/base_preprocessing_layer_test.py/0 | {
"file_path": "tf-keras/tf_keras/engine/base_preprocessing_layer_test.py",
"repo_id": "tf-keras",
"token_count": 3991
} | 163 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""InputSpec tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tf_keras.engine import input_spec
class InputSpecTest(tf.test.TestCase):
def test_axes_initialization(self):
input_spec.InputSpec(shape=[1, None, 2, 3], axes={3: 5, "2": 2})
with self.assertRaisesRegex(ValueError, "Axis 4 is greater than"):
input_spec.InputSpec(shape=[1, None, 2, 3], axes={4: 5})
with self.assertRaisesRegex(
TypeError, "Argument `axes` must be a dict"
):
input_spec.InputSpec(shape=[1, None, 2, 3], axes={"string": 5})
class InputSpecToTensorShapeTest(tf.test.TestCase):
def test_defined_shape(self):
spec = input_spec.InputSpec(shape=[1, None, 2, 3])
self.assertAllEqual(
[1, None, 2, 3], input_spec.to_tensor_shape(spec).as_list()
)
def test_defined_ndims(self):
spec = input_spec.InputSpec(ndim=5)
self.assertAllEqual(
[None] * 5, input_spec.to_tensor_shape(spec).as_list()
)
spec = input_spec.InputSpec(ndim=0)
self.assertAllEqual([], input_spec.to_tensor_shape(spec).as_list())
spec = input_spec.InputSpec(ndim=3, axes={1: 3, -1: 2})
self.assertAllEqual(
[None, 3, 2], input_spec.to_tensor_shape(spec).as_list()
)
def test_undefined_shapes(self):
spec = input_spec.InputSpec(max_ndim=5)
with self.assertRaisesRegex(ValueError, "unknown TensorShape"):
input_spec.to_tensor_shape(spec).as_list()
spec = input_spec.InputSpec(min_ndim=5, max_ndim=5)
with self.assertRaisesRegex(ValueError, "unknown TensorShape"):
input_spec.to_tensor_shape(spec).as_list()
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/engine/input_spec_test.py/0 | {
"file_path": "tf-keras/tf_keras/engine/input_spec_test.py",
"repo_id": "tf-keras",
"token_count": 1006
} | 164 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras training and evaluation routines for eager execution."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import training_utils
from tf_keras.engine import training_utils_v1
from tf_keras.mixed_precision import loss_scale_optimizer
from tf_keras.utils import losses_utils
# isort: off
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.platform import tf_logging as logging
def _eager_loss_fn(outputs, targets, loss_fn, output_name):
with backend.name_scope(output_name + "_loss"):
loss = loss_fn(targets, outputs)
return loss
def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None):
"""Calculates the metrics for each output of the given model.
Args:
model: The model on which metrics are being calculated.
outputs: The outputs of the given model.
targets: The predictions or targets of the given model.
sample_weights: Optional list of sample weights for each output.
masks: Optional list of masks for each output.
Returns:
Returns the metric results for each output of the model.
"""
outputs = tf.nest.flatten(outputs)
targets = tf.nest.flatten(targets)
# Invoke all(weighted and unweighted) metrics.
metric_results = []
if targets:
# Insert None values corresponding to the targets that need to be
# skipped on the model.
if len(model._targets) != len(targets):
new_targets = [
None if t is None else targets.pop(0) for t in model._targets
]
targets = new_targets
metric_results = model._handle_metrics(
outputs,
targets=targets,
sample_weights=sample_weights,
masks=masks,
return_weighted_and_unweighted_metrics=True,
skip_target_masks=model._prepare_skip_target_masks(),
)
# Add metric results from the `add_metric` metrics.
metric_results.extend(
[
m.result()
for m in model.metrics
if m not in model._compile_metric_functions
]
)
return metric_results
def _model_loss(
model,
inputs,
targets,
output_loss_metrics=None,
sample_weights=None,
training=False,
):
"""Calculates the loss for a given model.
Args:
model: The model on which metrics are being calculated.
inputs: Either a dictionary of inputs to the model or a list of input
arrays.
targets: List of target arrays.
output_loss_metrics: List of metrics that are used to aggregate output
loss values.
sample_weights: Optional list of sample weight arrays.
training: Whether the model should be run in inference or training mode.
Returns:
Returns the model output, total loss, loss value calculated using the
specified loss function and masks for each output. The total loss
includes regularization losses and applies masking and sample weighting
to the loss value.
"""
# TODO(psv): Dedup code here with graph mode prepare_total_loss() fn.
# Used to keep track of the total loss value (stateless).
# eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +
# loss_weight_2 * output_2_loss_fn(...) +
# layer losses.
total_loss = 0
kwargs = {}
if model._expects_training_arg:
kwargs["training"] = training
if len(inputs) == 1 and not isinstance(inputs, dict):
inputs = inputs[0]
# Allow mixed `NumPy` and `EagerTensor` input here.
if any(
isinstance(input_t, (np.ndarray, float, int))
for input_t in tf.nest.flatten(inputs)
):
inputs = tf.nest.map_structure(tf.convert_to_tensor, inputs)
outs = model(inputs, **kwargs)
outs = tf.nest.flatten(outs)
if targets:
targets = training_utils_v1.cast_if_floating_dtype_and_mismatch(
targets, outs
)
# TODO(sallymatson/psv): check if we should do same mismatch fix for weights
if sample_weights:
sample_weights = [
training_utils_v1.cast_if_floating_dtype(tf.convert_to_tensor(val))
if val is not None
else None
for val in sample_weights
]
masks = [getattr(t, "_keras_mask", None) for t in outs]
targets = tf.nest.flatten(targets)
# Used to keep track of individual output losses.
output_losses = []
with backend.name_scope("loss"):
loss_fns = [
loss_fn for loss_fn in model.loss_functions if loss_fn is not None
]
custom_losses = model.losses # Regularization losses
if not loss_fns and not custom_losses:
if training:
raise ValueError(
"The model cannot be trained "
"because it has no loss to optimize."
)
else:
raise ValueError(
"The model cannot be evaluated "
"because it has no loss to compute."
)
for i, loss_fn in enumerate(loss_fns):
weights = sample_weights[i] if sample_weights else None
mask = masks[i]
with backend.name_scope(model.output_names[i] + "_loss"):
if mask is not None:
mask = tf.cast(mask, outs[i].dtype)
# Update weights with mask.
if weights is None:
weights = mask
else:
# Update dimensions of weights to match with mask if
# possible.
weights = tf.cast(weights, outs[i].dtype)
(
mask,
_,
weights,
) = losses_utils.squeeze_or_expand_dimensions(
mask, sample_weight=weights
)
weights *= mask
if hasattr(loss_fn, "reduction"):
per_sample_losses = loss_fn.call(targets[i], outs[i])
weighted_losses = losses_utils.compute_weighted_loss(
per_sample_losses,
sample_weight=weights,
reduction=losses_utils.ReductionV2.NONE,
)
loss_reduction = loss_fn.reduction
# `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE`
# for all compile use cases.
if loss_reduction == losses_utils.ReductionV2.AUTO:
loss_reduction = (
losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
)
# Compute the stateless loss value.
output_loss = losses_utils.reduce_weighted_loss(
weighted_losses, reduction=loss_reduction
)
else:
# Compute the stateless loss value for a custom loss class.
# Here we assume that the class takes care of loss reduction
# because if this class returns a vector value we cannot
# differentiate between use case where a custom optimizer
# expects a vector loss value vs unreduced per-sample loss
# value.
output_loss = loss_fn(
targets[i], outs[i], sample_weight=weights
)
loss_reduction = (
losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
)
# If the number of outputs is 1 then we don't append the loss metric
# associated with each model output. When there are multiple outputs
# associated with a model, each output's loss is calculated and
# returned as part of the loss_metrics.
if len(model.outputs) > 1:
# Keep track of the stateful output loss result.
output_losses.append(output_loss_metrics[i](output_loss))
# Scale output loss for distribution. For custom losses we assume
# reduction was mean.
if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:
output_loss = losses_utils.scale_loss_for_distribution(
output_loss
)
total_loss += model._loss_weights_list[i] * output_loss
# Add regularization losses
if custom_losses:
total_loss += losses_utils.scale_loss_for_distribution(
tf.add_n(custom_losses)
)
return outs, total_loss, output_losses, masks
def _process_single_batch(
model,
inputs,
targets,
output_loss_metrics=None,
sample_weights=None,
training=False,
):
"""Calculate the loss and gradient for one input batch.
The model weights are updated if training is set to True.
Args:
model: Model whose loss has to be calculated.
inputs: List of input arrays.
targets: List of target arrays.
output_loss_metrics: List of metrics that are used to aggregate output
loss values.
sample_weights: Optional list of sample weight arrays.
training: Boolean indicating whether the weights of the model should be
updated. 'fit' methods will set this to True while 'evaluate' methods
will set this to False.
Returns:
output of the model, total loss, the loss and the mask
associated with each output.
Raises:
ValueError: If the model has no loss to optimize.
"""
with backend.eager_learning_phase_scope(
1 if training else 0
), training_utils.RespectCompiledTrainableState(model):
with GradientTape() as tape:
outs, total_loss, output_losses, masks = _model_loss(
model,
inputs,
targets,
output_loss_metrics=output_loss_metrics,
sample_weights=sample_weights,
training=training,
)
if isinstance(
model.optimizer, loss_scale_optimizer.LossScaleOptimizer
):
scaled_total_loss = model.optimizer.get_scaled_loss(total_loss)
else:
scaled_total_loss = total_loss
if training:
trainable_weights = model.trainable_weights
if trainable_weights:
# TODO(tanzheny) b/132690565: Provide mechanism for user to
# override model.train_on_batch.
if hasattr(model, "_backwards"):
model._backwards(tape, scaled_total_loss)
else:
grads = tape.gradient(scaled_total_loss, trainable_weights)
if isinstance(
model.optimizer, loss_scale_optimizer.LossScaleOptimizer
):
grads = model.optimizer.get_unscaled_gradients(grads)
model.optimizer.apply_gradients(
zip(grads, trainable_weights)
)
else:
logging.warning(
"The list of trainable weights is empty. Make sure that"
" you are not setting model.trainable to False before "
"compiling the model."
)
return outs, total_loss, output_losses, masks
def train_on_batch(
model, inputs, targets, sample_weights=None, output_loss_metrics=None
):
"""Calculates the loss and gradient updates for one input batch.
Args:
model: Model whose loss has to be calculated.
inputs: Input batch data.
targets: Target batch data.
sample_weights: Sample weight batch data.
output_loss_metrics: List of metrics that are used to aggregate output
loss values.
Returns:
Dict with three items:
'total_loss': list with a single tensor for overall loss,
'output_losses': list of tensors for the loss corresponding to each of the
model outputs. Could be an empty list when the model has only one output.
'metrics': list of tensors for the metrics specified.
"""
inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)
outs, total_loss, output_losses, masks = _process_single_batch(
model,
inputs,
targets,
sample_weights=sample_weights,
training=True,
output_loss_metrics=output_loss_metrics,
)
if not isinstance(outs, list):
outs = [outs]
metrics_results = _eager_metrics_fn(
model, outs, targets, sample_weights=sample_weights, masks=masks
)
total_loss = tf.nest.flatten(total_loss)
return {
"total_loss": total_loss,
"output_losses": output_losses,
"metrics": metrics_results,
}
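# Illustrative sketch of the returned structure, assuming a single-output
# model compiled with one loss and one metric:
#   results = train_on_batch(model, [x], [y])
#   results["total_loss"]     # list holding one scalar loss tensor
#   results["output_losses"]  # [] for a single-output model
#   results["metrics"]        # list of metric result tensors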
def test_on_batch(
model, inputs, targets, sample_weights=None, output_loss_metrics=None
):
"""Calculates the loss for one input batch.
Args:
model: Model whose loss has to be calculated.
inputs: Input batch data.
targets: Target batch data.
sample_weights: Sample weight batch data.
output_loss_metrics: List of metrics that are used to aggregate output
loss values.
Returns:
Dict with three items:
'total_loss': list with a single tensor for overall loss,
'output_losses': list of tensors for the loss corresponding to each of the
model outputs. Could be an empty list when the model has only one output.
'metrics': list of tensors for the metrics specified.
"""
inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)
with backend.eager_learning_phase_scope(0):
outs, total_loss, output_losses, masks = _model_loss(
model,
inputs,
targets,
sample_weights=sample_weights,
training=False,
output_loss_metrics=output_loss_metrics,
)
if not isinstance(outs, list):
outs = [outs]
metrics_results = _eager_metrics_fn(
model, outs, targets, sample_weights=sample_weights, masks=masks
)
total_loss = tf.nest.flatten(total_loss)
return {
"total_loss": total_loss,
"output_losses": output_losses,
"metrics": metrics_results,
}
| tf-keras/tf_keras/engine/training_eager_v1.py/0 | {
"file_path": "tf-keras/tf_keras/engine/training_eager_v1.py",
"repo_id": "tf-keras",
"token_count": 6905
} | 165 |
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = [
"//tf_keras:friends",
"//third_party/tensorflow/python/feature_column:__subpackages__", # For unit testing
"//third_party/tensorflow/python/tpu:__subpackages__", # For unit testing
"//third_party/tensorflow_decision_forests:__subpackages__", # for DenseFeatures
],
licenses = ["notice"],
)
py_library(
name = "feature_column",
srcs = ["__init__.py"],
srcs_version = "PY3",
deps = [
":base_feature_layer",
":dense_features",
":dense_features_v2",
":sequence_feature_column",
],
)
py_library(
name = "base_feature_layer",
srcs = ["base_feature_layer.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/engine:base_layer",
"//tf_keras/utils:generic_utils",
],
)
py_library(
name = "dense_features",
srcs = [
"dense_features.py",
],
srcs_version = "PY3",
deps = [
":base_feature_layer",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
],
)
py_library(
name = "dense_features_v2",
srcs = [
"dense_features_v2.py",
],
srcs_version = "PY3",
deps = [
":base_feature_layer",
":dense_features",
"//:expect_tensorflow_installed",
"//tf_keras/utils:tf_contextlib",
],
)
tf_py_test(
name = "dense_features_test",
srcs = ["dense_features_test.py"],
tags = ["no_pip"],
deps = [
":dense_features",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "dense_features_v2_test",
srcs = ["dense_features_v2_test.py"],
tags = ["no_pip"],
deps = [
":dense_features_v2",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
py_library(
name = "sequence_feature_column",
srcs = ["sequence_feature_column.py"],
srcs_version = "PY3",
deps = [
":base_feature_layer",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
],
)
tf_py_test(
name = "sequence_feature_column_test",
srcs = ["sequence_feature_column_test.py"],
deps = [
":sequence_feature_column",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "sequence_feature_column_integration_test",
srcs = ["sequence_feature_column_integration_test.py"],
python_version = "PY3",
srcs_version = "PY3",
tags = ["no_pip"],
deps = [
":dense_features",
":sequence_feature_column",
"//:expect_tensorflow_installed",
"//tf_keras/layers/core",
"//tf_keras/layers/merging",
"//tf_keras/layers/rnn",
"//tf_keras/metrics", # Import it here since base_layer didn't import it due to circular dependency.
],
)
| tf-keras/tf_keras/feature_column/BUILD/0 | {
"file_path": "tf-keras/tf_keras/feature_column/BUILD",
"repo_id": "tf-keras",
"token_count": 1574
} | 166 |
# TF-Keras Integration Test
This package contains integration tests that ensure the correct interaction
between TF-Keras and other TensorFlow high-level APIs, such as tf.data,
tf.function, and distribution strategies.
There are a few guidelines for the tests under this package:
* Only use the public TF API.
* Tests should focus on the end-to-end use case between TF-Keras and other TF
high-level APIs; unit tests are a better place for testing the behavior of
individual APIs. A minimal sketch of such a test is shown below.
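For illustration, here is a minimal, hypothetical sketch (not an actual test in
this package) that follows these guidelines: it uses only public TF APIs and
exercises TF-Keras end to end together with `tf.data`.

```python
import tensorflow as tf


class DatasetFitTest(tf.test.TestCase):
    def test_fit_on_dataset(self):
        # End-to-end: build a model and train it on a tf.data pipeline.
        dataset = tf.data.Dataset.from_tensor_slices(
            (tf.random.normal((32, 4)), tf.random.normal((32, 1)))
        ).batch(8)
        model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
        model.compile(optimizer="sgd", loss="mse")
        history = model.fit(dataset, epochs=1, verbose=0)
        self.assertIn("loss", history.history)


if __name__ == "__main__":
    tf.test.main()
```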
| tf-keras/tf_keras/integration_test/README.md/0 | {
"file_path": "tf-keras/tf_keras/integration_test/README.md",
"repo_id": "tf-keras",
"token_count": 119
} | 167 |
import tensorflow as tf
from tensorflow import keras
from tf_keras.integration_test.models.input_spec import InputSpec
TIMESTEPS = 64
INPUT_DIM = 50
OUTPUT_DIM = 40
NUM_RNN_LAYERS = 2
RNN_UNITS = 32
def get_input_preprocessor():
return None
def get_data_spec(batch_size):
return (
InputSpec((batch_size, TIMESTEPS, INPUT_DIM)),
InputSpec((batch_size, 1), dtype="int64", range=[0, OUTPUT_DIM]),
)
def ctc_loss(y_true, y_pred):
batch_length = tf.cast(tf.shape(y_true)[0], dtype="int64")
input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
input_length = input_length * tf.ones(
shape=(batch_length, 1), dtype="int64"
)
label_length = label_length * tf.ones(
shape=(batch_length, 1), dtype="int64"
)
return keras.backend.ctc_batch_cost(
y_true, y_pred, input_length, label_length
)
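# Shape sketch (based on the specs above, for illustration): `y_true` is
# (batch, label_length) with integer ids in [0, OUTPUT_DIM), and `y_pred` is
# (batch, timesteps, OUTPUT_DIM + 1), where the extra class acts as the CTC
# blank token. `ctc_batch_cost` returns a per-sample loss of shape (batch, 1).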
def get_model(
build=False, compile=False, jit_compile=False, include_preprocessing=True
):
input_spectrogram = keras.layers.Input((None, INPUT_DIM), name="input")
x = keras.layers.Reshape((-1, INPUT_DIM, 1), name="expand_dim")(
input_spectrogram
)
x = keras.layers.Conv2D(
filters=32,
kernel_size=[11, 41],
strides=[2, 2],
padding="same",
use_bias=False,
name="conv_1",
)(x)
x = keras.layers.BatchNormalization(name="conv_1_bn")(x)
x = keras.layers.ReLU(name="conv_1_relu")(x)
x = keras.layers.Conv2D(
filters=32,
kernel_size=[11, 21],
strides=[1, 2],
padding="same",
use_bias=False,
name="conv_2",
)(x)
x = keras.layers.BatchNormalization(name="conv_2_bn")(x)
x = keras.layers.ReLU(name="conv_2_relu")(x)
x = keras.layers.Reshape((-1, x.shape[-2] * x.shape[-1]))(x)
for i in range(1, NUM_RNN_LAYERS + 1):
recurrent = keras.layers.GRU(
units=RNN_UNITS,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
return_sequences=True,
reset_after=True,
name=f"gru_{i}",
)
x = keras.layers.Bidirectional(
recurrent, name=f"bidirectional_{i}", merge_mode="concat"
)(x)
if i < NUM_RNN_LAYERS:
x = keras.layers.Dropout(rate=0.5)(x)
x = keras.layers.Dense(units=RNN_UNITS * 2, name="dense_1")(x)
x = keras.layers.ReLU(name="dense_1_relu")(x)
x = keras.layers.Dropout(rate=0.5)(x)
output = keras.layers.Dense(units=OUTPUT_DIM + 1, activation="softmax")(x)
model = keras.Model(input_spectrogram, output, name="DeepSpeech_2")
if compile:
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1e-4),
loss=ctc_loss,
jit_compile=jit_compile,
)
return model
def get_custom_objects():
return {"ctc_loss": ctc_loss}
| tf-keras/tf_keras/integration_test/models/ctc_speech_rnn.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/models/ctc_speech_rnn.py",
"repo_id": "tf-keras",
"token_count": 1472
} | 168 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Softmax activation layer."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine.base_layer import Layer
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
def _large_compatible_negative(tensor_type):
"""Large negative number as Tensor.
This function is necessary because the standard large negative masking
value used in this module (-1e9) cannot be represented using tf.float16.
Args:
tensor_type: a dtype to determine the type.
Returns:
a large negative number.
"""
# In case of dtype=float16 (e.g., for mixed-precision), the largest
# negative number (dtypes.float16.min) is divided by 2, in order to
# avoid overflows when summing negative inputs.
if tensor_type == tf.float16:
return tf.float16.min / 2.0
return -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
"""Softmax activation function.
Example without mask:
>>> inp = np.asarray([[1., 2., 1.]])
>>> layer = tf.keras.layers.Softmax()
>>> layer(inp).numpy()
array([[0.21194157, 0.5761169 , 0.21194157]], dtype=float32)
>>> mask = np.asarray([[True, False, True]], dtype=bool)
>>> layer(inp, mask).numpy()
array([[0.5, 0. , 0.5]], dtype=float32)
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
Call arguments:
inputs: The inputs, or logits to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.axis = axis
def call(self, inputs, mask=None):
if mask is not None:
# Since mask is 1.0 for positions we want to keep and 0.0 for masked
# positions, this operation will create a tensor which is 0.0 for
# positions we want to attend to and -1e9 (or the float16-safe
# equivalent) for masked positions.
adder = (1.0 - tf.cast(mask, inputs.dtype)) * (
_large_compatible_negative(inputs.dtype)
)
# Since we are adding it to the raw scores before the softmax, this
# is effectively the same as removing these entirely.
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return tf.exp(
inputs
- tf.reduce_logsumexp(inputs, axis=self.axis, keepdims=True)
)
else:
return backend.softmax(inputs, axis=self.axis[0])
return backend.softmax(inputs, axis=self.axis)
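# Illustration of the multi-axis branch above: with axis=(1, 2) the softmax
# is computed jointly over both axes as exp(x - logsumexp(x, axis=(1, 2))),
# so each (axis 1, axis 2) slice of the output sums to 1.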
def get_config(self):
config = {"axis": self.axis}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
| tf-keras/tf_keras/layers/activation/softmax.py/0 | {
"file_path": "tf-keras/tf_keras/layers/activation/softmax.py",
"repo_id": "tf-keras",
"token_count": 1566
} | 169 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras base class for convolution layers."""
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.utils import conv_utils
class Conv(Layer):
"""Abstract N-D convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Note: layer attributes cannot be modified after the layer has been called
once (except the `trainable` attribute).
Args:
rank: An integer, the rank of the convolution, e.g. "2" for 2D
convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution). Can be `None`, e.g. in the case of
depthwise convolution.
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros
evenly to the left/right or up/down of the input such that output has
the same height/width dimension as the input. `"causal"` results in
causal (dilated) convolutions, e.g. `output[t]` does not depend on
`input[t+1:]`.
data_format: A string, one of `channels_last` (default) or
`channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
groups: A positive integer specifying the number of groups in which the
input is split along the channel axis. Each group is convolved
separately with `filters / groups` filters. The output is the
concatenation of all the `groups` results along the channel axis.
Input channels and `filters` must both be divisible by `groups`.
activation: Activation function to use.
If you don't specify anything, no activation is applied.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel. If None,
the default initializer (glorot_uniform) will be used.
bias_initializer: An initializer for the bias vector. If None, the default
initializer (zeros) will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
"""
def __init__(
self,
rank,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
groups=1,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
conv_op=None,
**kwargs,
):
super().__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs,
)
self.rank = rank
if isinstance(filters, float):
filters = int(filters)
if filters is not None and filters <= 0:
raise ValueError(
"Invalid value for argument `filters`. "
"Expected a strictly positive value. "
f"Received filters={filters}."
)
self.filters = filters
self.groups = groups or 1
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, "kernel_size"
)
self.strides = conv_utils.normalize_tuple(
strides, rank, "strides", allow_zero=True
)
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, "dilation_rate"
)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=self.rank + 2)
self._validate_init()
self._is_causal = self.padding == "causal"
self._channels_first = self.data_format == "channels_first"
self._tf_data_format = conv_utils.convert_data_format(
self.data_format, self.rank + 2
)
def _validate_init(self):
if self.filters is not None and self.filters % self.groups != 0:
raise ValueError(
"The number of filters must be evenly divisible by the "
"number of groups. Received: groups={}, filters={}".format(
self.groups, self.filters
)
)
if not all(self.kernel_size):
raise ValueError(
"The argument `kernel_size` cannot contain 0(s). Received: %s"
% (self.kernel_size,)
)
if not all(self.strides):
raise ValueError(
"The argument `strides` cannot contain 0(s). Received: %s"
% (self.strides,)
)
if self.padding == "causal":
from tf_keras.layers.convolutional.conv1d import Conv1D
from tf_keras.layers.convolutional.separable_conv1d import (
SeparableConv1D,
)
if not isinstance(self, (Conv1D, SeparableConv1D)):
raise ValueError(
"Causal padding is only supported for `Conv1D`"
"and `SeparableConv1D`."
)
if max(self.strides) > 1 and max(self.dilation_rate) > 1:
raise ValueError(
"`strides > 1` not supported in conjunction with "
f"`dilation_rate > 1`. Received: strides={self.strides} and "
f"dilation_rate={self.dilation_rate}"
)
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
if input_channel % self.groups != 0:
raise ValueError(
"The number of input channels must be evenly divisible by "
"the number of groups. Received groups={}, but the input "
"has {} channels (full input shape is {}).".format(
self.groups, input_channel, input_shape
)
)
kernel_shape = self.kernel_size + (
input_channel // self.groups,
self.filters,
)
# compute_output_shape contains some validation logic for the input
# shape, and make sure the output shape has all positive dimensions.
self.compute_output_shape(input_shape)
self.kernel = self.add_weight(
name="kernel",
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype,
)
if self.use_bias:
self.bias = self.add_weight(
name="bias",
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype,
)
else:
self.bias = None
channel_axis = self._get_channel_axis()
self.input_spec = InputSpec(
min_ndim=self.rank + 2, axes={channel_axis: input_channel}
)
self.built = True
def convolution_op(self, inputs, kernel):
if self.padding == "causal":
tf_padding = "VALID" # Causal padding handled in `call`.
elif isinstance(self.padding, str):
tf_padding = self.padding.upper()
else:
tf_padding = self.padding
return tf.nn.convolution(
inputs,
kernel,
strides=list(self.strides),
padding=tf_padding,
dilations=list(self.dilation_rate),
data_format=self._tf_data_format,
name=self.__class__.__name__,
)
# TODO(b/213173659): remove this when grouped convolutions are fully
# supported on the CPU for compiled functions. For now, we need this as a
# workaround for CPU support.
@tf.function(jit_compile=True)
def _jit_compiled_convolution_op(self, inputs, kernel):
return self.convolution_op(inputs, kernel)
def call(self, inputs):
input_shape = inputs.shape
if self._is_causal: # Apply causal padding to inputs for Conv1D.
inputs = tf.pad(inputs, self._compute_causal_padding(inputs))
if self.groups > 1:
outputs = self._jit_compiled_convolution_op(
inputs, tf.convert_to_tensor(self.kernel)
)
else:
outputs = self.convolution_op(inputs, self.kernel)
if self.use_bias:
output_rank = outputs.shape.rank
if self.rank == 1 and self._channels_first:
# nn.bias_add does not accept a 1D input tensor.
bias = tf.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
# Handle multiple batch dimensions.
if output_rank is not None and output_rank > 2 + self.rank:
def _apply_fn(o):
return tf.nn.bias_add(
o, self.bias, data_format=self._tf_data_format
)
outputs = conv_utils.squeeze_batch_dims(
outputs, _apply_fn, inner_rank=self.rank + 1
)
else:
outputs = tf.nn.bias_add(
outputs, self.bias, data_format=self._tf_data_format
)
if not tf.executing_eagerly() and input_shape.rank:
# Infer the static output shape:
out_shape = self.compute_output_shape(input_shape)
outputs.set_shape(out_shape)
if self.activation is not None:
return self.activation(outputs)
return outputs
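# Note (sketch of the helper's behavior; see tf_keras.utils.conv_utils for
# the exact implementation): per spatial dimension, `conv_output_length`
# computes ceil((in - dilation * (k - 1)) / stride) for "valid" padding and
# ceil(in / stride) for "same"/"causal" padding.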
def _spatial_output_shape(self, spatial_input_shape):
return [
conv_utils.conv_output_length(
length,
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i],
)
for i, length in enumerate(spatial_input_shape)
]
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
batch_rank = len(input_shape) - self.rank - 1
try:
if self.data_format == "channels_last":
return tf.TensorShape(
input_shape[:batch_rank]
+ self._spatial_output_shape(input_shape[batch_rank:-1])
+ [self.filters]
)
else:
return tf.TensorShape(
input_shape[:batch_rank]
+ [self.filters]
+ self._spatial_output_shape(input_shape[batch_rank + 1 :])
)
except ValueError:
raise ValueError(
"One of the dimensions in the output is <= 0 "
f"due to downsampling in {self.name}. Consider "
"increasing the input size. "
f"Received input shape {input_shape} which would produce "
"output shape with a zero or negative value in a "
"dimension."
)
def _recreate_conv_op(self, inputs):
return False
def get_config(self):
config = {
"filters": self.filters,
"kernel_size": self.kernel_size,
"strides": self.strides,
"padding": self.padding,
"data_format": self.data_format,
"dilation_rate": self.dilation_rate,
"groups": self.groups,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self, inputs):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
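# Illustration: kernel_size=3 with dilation_rate=2 gives
# left_pad = 2 * (3 - 1) = 4, i.e. four zeros are prepended along the
# time axis so that output[t] never depends on input[t+1:].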
if getattr(inputs.shape, "ndims", None) is None:
batch_rank = 1
else:
batch_rank = len(inputs.shape) - 2
if self.data_format == "channels_last":
causal_padding = [[0, 0]] * batch_rank + [[left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0]] * batch_rank + [[0, 0], [left_pad, 0]]
return causal_padding
def _get_channel_axis(self):
if self.data_format == "channels_first":
return -1 - self.rank
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError(
"The channel dimension of the inputs should be defined. "
f"The input_shape received is {input_shape}, "
f"where axis {channel_axis} (0-based) "
"is the channel dimension, which found to be `None`."
)
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == "causal":
op_padding = "valid"
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
| tf-keras/tf_keras/layers/convolutional/base_conv.py/0 | {
"file_path": "tf-keras/tf_keras/layers/convolutional/base_conv.py",
"repo_id": "tf-keras",
"token_count": 7841
} | 170 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for separable convolutional layers."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes
class SeparableConv1DTest(test_combinations.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
length = 7
with self.cached_session():
test_utils.layer_test(
keras.layers.SeparableConv1D,
kwargs=kwargs,
input_shape=(num_samples, length, stack_size),
)
@parameterized.named_parameters(
("padding_valid", {"padding": "valid"}),
("padding_same", {"padding": "same"}),
("padding_same_dilation_2", {"padding": "same", "dilation_rate": 2}),
("padding_causal", {"padding": "causal"}),
("strides", {"strides": 2}),
("dilation_rate", {"dilation_rate": 2}),
("depth_multiplier", {"depth_multiplier": 2}),
)
def test_separable_conv1d(self, kwargs):
kwargs["filters"] = 2
kwargs["kernel_size"] = 3
self._run_test(kwargs)
def test_separable_conv1d_regularizers(self):
kwargs = {
"filters": 3,
"kernel_size": 3,
"padding": "valid",
"depthwise_regularizer": "l2",
"pointwise_regularizer": "l2",
"bias_regularizer": "l2",
"activity_regularizer": "l2",
"strides": 1,
}
with self.cached_session():
layer = keras.layers.SeparableConv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((1, 5, 2))))
self.assertEqual(len(layer.losses), 4)
def test_separable_conv1d_constraints(self):
d_constraint = lambda x: x
p_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
"filters": 3,
"kernel_size": 3,
"padding": "valid",
"pointwise_constraint": p_constraint,
"depthwise_constraint": d_constraint,
"bias_constraint": b_constraint,
"strides": 1,
}
with self.cached_session():
layer = keras.layers.SeparableConv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_separable_conv1d_invalid_strides_and_dilation_rate(self):
kwargs = {"strides": 2, "dilation_rate": 2}
with self.assertRaisesRegex(
ValueError, r"""`strides > 1` not supported in conjunction"""
):
keras.layers.SeparableConv1D(filters=1, kernel_size=2, **kwargs)
@test_combinations.run_all_keras_modes
class SeparableConv2DTest(test_combinations.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session():
test_utils.layer_test(
keras.layers.SeparableConv2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size),
)
@parameterized.named_parameters(
("padding_valid", {"padding": "valid"}),
("padding_same", {"padding": "same"}),
("padding_same_dilation_2", {"padding": "same", "dilation_rate": 2}),
("strides", {"strides": 2}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
("data_format", {"data_format": "channels_first"}),
("dilation_rate", {"dilation_rate": 2}),
("depth_multiplier", {"depth_multiplier": 2}),
)
def test_separable_conv2d(self, kwargs):
kwargs["filters"] = 2
kwargs["kernel_size"] = 3
if "data_format" not in kwargs or tf.test.is_gpu_available(
cuda_only=True
):
self._run_test(kwargs)
def test_separable_conv2d_regularizers(self):
kwargs = {
"filters": 3,
"kernel_size": 3,
"padding": "valid",
"depthwise_regularizer": "l2",
"pointwise_regularizer": "l2",
"bias_regularizer": "l2",
"activity_regularizer": "l2",
"strides": 1,
}
with self.cached_session():
layer = keras.layers.SeparableConv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 4)
def test_separable_conv2d_constraints(self):
d_constraint = lambda x: x
p_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
"filters": 3,
"kernel_size": 3,
"padding": "valid",
"pointwise_constraint": p_constraint,
"depthwise_constraint": d_constraint,
"bias_constraint": b_constraint,
"strides": 1,
}
with self.cached_session():
layer = keras.layers.SeparableConv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_separable_conv2d_invalid_strides_and_dilation_rate(self):
kwargs = {"strides": [2, 1], "dilation_rate": [2, 1]}
with self.assertRaisesRegex(
ValueError, r"""`strides > 1` not supported in conjunction"""
):
keras.layers.SeparableConv2D(filters=1, kernel_size=2, **kwargs)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/convolutional/separable_conv_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/convolutional/separable_conv_test.py",
"repo_id": "tf-keras",
"token_count": 3224
} | 171 |
"""A layer that updates its vocab and embedding matrix during training."""
import tensorflow as tf
from absl import logging
from tensorflow.python.util.tf_export import keras_export
import tf_keras
from tf_keras.layers import Layer
from tf_keras.layers.experimental import dynamic_lookup
from tf_keras.utils import warmstart_embedding_matrix
@keras_export("keras.layers.experimental.DynamicEmbedding")
class DynamicEmbedding(Layer):
"""A layer that updates its vocab and embedding matrix during training.
DynamicEmbedding allows for the continuous updating of the vocabulary
and embeddings during the training process. In traditional methods, the
vocabulary and mapping to the embedding vector are set at the beginning of
the training process and remain fixed throughout the training process.
However, in many real-world scenarios, the vocabulary and mapping to the
embeddings need to be updated to reflect the changing nature of the data.
For instance, in natural language processing tasks, the vocabulary of
words in a corpus may change over time, and it's important to update the
embeddings to reflect these changes. Similarly, in recommendation systems,
the items in the vocabulary may change over time.
A layer that supports dynamic embeddings addresses this issue by allowing
for the continuous updating of the vocabulary and embeddings during the
    training process, and it also updates the embedding matrix to reflect the
    new vocabulary.
This layer maintains a hash table to track the most up-to-date vocabulary
based on the inputs received by the layer and the eviction policy. When
this layer is used with an `UpdateEmbeddingCallback`, which is a
time-based callback, the vocabulary lookup tensor is updated at the time
interval set in the `UpdateEmbeddingCallback` based on the most up-to-date
vocabulary hash table maintained by the layer. If this layer is not used
    in conjunction with `UpdateEmbeddingCallback`, the behavior of the layer
    would be the same as `keras.layers.Embedding`.
Args:
input_dim: Size of the vocabulary in the input data. Expects an integer.
output_dim: The size of the embedding space. Expects an integer.
initial_vocabulary: The vocabulary to initialize the layer with. If a 1D
tensor is provided, the vocabulary will be initialized with that tensor.
If a `tf.DType` object is provided, a random tensor of that dtype and of
length `input_dim` will be generated as the initial vocabulary.
Supported `tf.DType` values include `tf.int32`, `tf.int64` and
`tf.string`.
eviction_policy: The eviction policy for the vocabulary. Available options
are "LFU" (Least Frequently Used) and *more to come*. Defaults to "LFU".
Expects a string.
input_length: Length of input sequences, when it is constant. This
argument is required if you are going to connect `Flatten` then `Dense`
layers upstream (without it, the shape of the dense outputs cannot be
        computed). Expects an integer.
embedding_initializer: Initializer for embedding vectors for new input
vocab tokens to be added to the updated embedding matrix (see
keras.initializers). Defaults to "uniform".
      num_oov_indices: Number of out-of-vocabulary tokens to use. Currently
supports 1. Expects an integer.
**kwargs: Additional keyword arguments for the parent class.
Attributes:
embedding_layer: Embedding layer of DynamicEmbedding layer.
dynamic_lookup_layer: DynamicLookup layer of DynamicEmbedding layer.
embedding_initializer: Initializer for embedding vectors for new input
vocab tokens to be added to the updated embedding matrix (see
keras.initializers).
      num_oov_indices: Number of out-of-vocabulary tokens to use.
Example:
```
# Generate dummy data
train_data = np.array([
['a', 'j', 'c', 'd', 'e'],
['a', 'h', 'i', 'j', 'b'],
['i', 'h', 'c', 'j', 'e'],
])
train_labels = np.array([0, 1, 2])
vocab = tf.constant(['a', 'b', 'c', 'd', 'e'])
eviction_policy = 'LFU'
# Define the model
model = tf.keras.models.Sequential([
DynamicEmbedding(
input_dim=5,
output_dim=2,
input_length=5,
eviction_policy=eviction_policy,
initial_vocabulary=vocab,
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(3, activation='softmax'),
])
# Compile the model
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
)
# update the vocabulary every 1 second
update_embedding_callback = UpdateEmbeddingCallback(
model.layers[0], interval=1
)
with update_embedding_callback:
result = model.fit(
train_data,
train_labels,
epochs=100,
batch_size=1,
callbacks=[update_embedding_callback],
)
```
"""
def __init__(
self,
input_dim,
output_dim,
initial_vocabulary,
eviction_policy="LFU",
input_length=None,
embedding_initializer="uniform",
num_oov_indices=1,
**kwargs,
):
"""Initialize DynamicEmbedding layer."""
super().__init__(**kwargs)
# assuming one oov bucket for now
self.embedding_layer = tf_keras.layers.Embedding(
input_dim=input_dim + num_oov_indices,
output_dim=output_dim,
embeddings_initializer=embedding_initializer,
input_length=input_length,
**kwargs,
)
self.dynamic_lookup_layer = dynamic_lookup.DynamicLookup(
vocabulary_size=input_dim,
eviction_policy=eviction_policy,
initial_vocabulary=initial_vocabulary,
**kwargs,
)
self.embedding_initializer = embedding_initializer
self.num_oov_indices = num_oov_indices
def build(self, input_shape=None):
self.embedding_layer.build(input_shape)
self.dynamic_lookup_layer.build(input_shape)
def call(self, inputs, learn_vocab=True):
# get vocab to index mapped for dynamic_lookup_layer
output = self.dynamic_lookup_layer(inputs, learn_vocab=learn_vocab)
# pass the indices as inputs to embedding_layer
return self.embedding_layer(output)
def get_config(self):
config = super().get_config()
config.update(
{
"input_dim": self.embedding_layer.input_dim,
"output_dim": self.embedding_layer.output_dim,
"input_length": self.embedding_layer.input_length,
"eviction_policy": self.dynamic_lookup_layer.eviction_policy,
"initial_vocabulary": (
self.dynamic_lookup_layer.initial_vocabulary.numpy().tolist() # noqa E501
),
"embedding_initializer": self.embedding_initializer,
"num_oov_indices": self.num_oov_indices,
}
)
return config
def get_vocabulary(self):
return self.dynamic_lookup_layer.get_vocabulary()
def save_assets(self, dir_path):
initial_vocabulary = (
self.dynamic_lookup_layer.initial_vocabulary.numpy().tolist()
)
initial_vocabulary_filepath = tf.io.gfile.join(
dir_path, "initial_vocabulary.txt"
)
with open(initial_vocabulary_filepath, "w") as f:
f.write("\n".join([str(w) for w in initial_vocabulary]))
def update_embeddings(self, strategy):
"""Update embedding matrix of dynamic embedding layer."""
try:
if isinstance(strategy, tf.distribute.ParameterServerStrategy):
                # If using PSS, aggregate values across all workers.
keys_list = (
self.dynamic_lookup_layer.vocabulary_table_keys.read_all()
)
values_list = (
self.dynamic_lookup_layer.vocabulary_table_values.read_all()
)
keys, values = self.aggregate_lookup_table(
keys_list, values_list
)
else:
                # If using an on-device strategy, just read the values.
keys, values = (
self.dynamic_lookup_layer.vocabulary_table_keys,
self.dynamic_lookup_layer.vocabulary_table_values,
)
old_vocab = self.dynamic_lookup_layer.vocabulary
new_vocab = self.get_top_vocabulary(
keys,
values,
self.dynamic_lookup_layer.vocabulary_size,
)
# remap and update the embedding matrix
embedding_matrix = self.embedding_layer.embeddings
oov_token = tf.fill([self.num_oov_indices], "UNK")
updated_new_vocab = tf.concat([new_vocab, oov_token], axis=0)
embedding_matrix = warmstart_embedding_matrix(
base_vocabulary=list(old_vocab.numpy()),
new_vocabulary=updated_new_vocab,
base_embeddings=embedding_matrix,
new_embeddings_initializer=self.embedding_initializer,
)
self.dynamic_lookup_layer.vocabulary.assign(new_vocab)
self.embedding_layer.embeddings.assign(embedding_matrix)
except AttributeError:
logging.info(
"Time interval specified by the UpdateEmbeddingCallback may be"
" too small, please try increasing the value of `interval`."
)
def aggregate_lookup_table(self, keys_list, values_list):
# Flatten the keys and values matrices
keys_1d = tf.reshape(keys_list, [-1])
values_1d = tf.reshape(values_list, [-1])
# Get unique keys and their corresponding summed values
unique_keys, idx, _ = tf.unique_with_counts(keys_1d)
summed_values = tf.math.unsorted_segment_sum(
values_1d, idx, tf.shape(unique_keys)[0]
)
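        # Illustration with hypothetical values: if keys_1d were
        # ["a", "b", "b", "c"] and values_1d were [1, 2, 3, 4], unique_keys
        # would be ["a", "b", "c"] and summed_values [1, 5, 4], i.e. the
        # per-key counts aggregated across all workers' tables.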
return unique_keys, summed_values
def get_top_vocabulary(self, keys, values, k):
"""Get Top vocabulary keys and values."""
values_len = tf.shape(keys)[0]
if values_len > k:
_, indices = tf.nn.top_k(values, k=k)
else:
_, indices = tf.nn.top_k(values, k=values_len)
top_k_vocab = tf.gather(keys, indices)
return top_k_vocab
| tf-keras/tf_keras/layers/experimental/dynamic_embedding.py/0 | {
"file_path": "tf-keras/tf_keras/layers/experimental/dynamic_embedding.py",
"repo_id": "tf-keras",
"token_count": 4598
} | 172 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras merging layers."""
# Merging functions.
# Merging layers.
from tf_keras.layers.merging.add import Add
from tf_keras.layers.merging.add import add
from tf_keras.layers.merging.average import Average
from tf_keras.layers.merging.average import average
from tf_keras.layers.merging.concatenate import Concatenate
from tf_keras.layers.merging.concatenate import concatenate
from tf_keras.layers.merging.dot import Dot
from tf_keras.layers.merging.dot import dot
from tf_keras.layers.merging.maximum import Maximum
from tf_keras.layers.merging.maximum import maximum
from tf_keras.layers.merging.minimum import Minimum
from tf_keras.layers.merging.minimum import minimum
from tf_keras.layers.merging.multiply import Multiply
from tf_keras.layers.merging.multiply import multiply
from tf_keras.layers.merging.subtract import Subtract
from tf_keras.layers.merging.subtract import subtract
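# Each merge operation is exported in two spellings: the layer class (e.g.
# `Add()([x1, x2])`) for explicit layer construction, and the lowercase
# functional helper (e.g. `add([x1, x2])`) that wraps the same layer for
# one-off use in the functional API.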
| tf-keras/tf_keras/layers/merging/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/layers/merging/__init__.py",
"repo_id": "tf-keras",
"token_count": 462
} | 173 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.layers.normalization import batch_normalization
from tf_keras.layers.normalization import batch_normalization_v1
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
class BatchNormalizationTest(test_combinations.TestCase):
@test_combinations.run_all_keras_modes
def test_basic_batchnorm(self):
test_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
"momentum": 0.9,
"epsilon": 0.1,
"gamma_regularizer": keras.regularizers.l2(0.01),
"beta_regularizer": keras.regularizers.l2(0.01),
},
input_shape=(3, 4, 2),
)
test_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
"gamma_initializer": "ones",
"beta_initializer": "ones",
"moving_mean_initializer": "zeros",
"moving_variance_initializer": "ones",
},
input_shape=(3, 4, 2),
)
test_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={"scale": False, "center": False},
input_shape=(3, 3),
)
test_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
"gamma_initializer": "ones",
"beta_initializer": "ones",
"moving_mean_initializer": "zeros",
"moving_variance_initializer": "ones",
},
input_shape=(3, 2, 4, 2),
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_batchnorm_weights(self):
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_batchnorm_regularization(self):
layer = keras.layers.BatchNormalization(
gamma_regularizer="l1", beta_regularizer="l1"
)
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm
)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_batchnorm_sync_fused_error(self):
with self.assertRaises(ValueError):
_ = batch_normalization.BatchNormalization(
synchronized=True, fused=True
)
def _test_batchnorm_convnet(self, synchronized=False):
if tf.test.is_gpu_available(cuda_only=True):
with self.session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1,
input_shape=(3, 4, 4),
momentum=0.8,
synchronized=synchronized,
)
model.add(norm)
model.compile(
loss="mse",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
                # Data centered on 5.0 with standard deviation 10.0.
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(
np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1
)
np.testing.assert_allclose(
np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1
)
@test_combinations.run_all_keras_modes
def test_batchnorm_convnet(self):
self._test_batchnorm_convnet(synchronized=False)
@test_combinations.run_all_keras_modes
def test_batchnorm_convnet_synchronized(self):
self._test_batchnorm_convnet(synchronized=True)
@test_combinations.run_all_keras_modes
def test_batchnorm_convnet_channel_last(self):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8
)
model.add(norm)
model.compile(
loss="mse",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
        # Data centered on 5.0 with standard deviation 10.0.
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
@test_combinations.run_all_keras_modes
def test_batchnorm_correctness(self):
_run_batchnorm_correctness_test(
batch_normalization_v1.BatchNormalization, dtype="float32"
)
_run_batchnorm_correctness_test(
batch_normalization.BatchNormalization, dtype="float32"
)
_run_batchnorm_correctness_test(
batch_normalization.BatchNormalization,
dtype="float32",
synchronized=True,
)
@test_combinations.run_all_keras_modes
def test_batchnorm_float16(self):
_run_batchnorm_correctness_test(
batch_normalization_v1.BatchNormalization, dtype="float16"
)
_run_batchnorm_correctness_test(
batch_normalization.BatchNormalization, dtype="float16"
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
@test_utils.enable_v2_dtype_behavior
def test_batchnorm_mixed_precision(self):
norm = keras.layers.BatchNormalization(
axis=-1, momentum=0.8, dtype="mixed_float16"
)
x = np.random.normal(size=(10, 4, 4, 3))
y = norm(x)
self.assertEqual(y.dtype, "float16")
self.assertEqual(norm.beta.dtype.base_dtype, "float32")
self.assertEqual(norm.gamma.dtype.base_dtype, "float32")
x = np.arange(10 * 4 * 4 * 3).reshape((10, 4, 4, 3))
y = norm(x)
self.assertEqual(y.dtype, "float16")
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"], fused=[True, False])
)
@test_utils.enable_v2_dtype_behavior
def test_batchnorm_mixed_precision_does_not_overflow(self, fused):
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(1, 1, 1), fused=fused, dtype="mixed_float16"
)
x = np.array([-1000.0, 1000.0]).reshape((2, 1, 1, 1))
y = norm(x, training=True)
expected_y = np.array([-1.0, 1.0]).reshape((2, 1, 1, 1))
self.assertAllClose(keras.backend.eval(y), expected_y)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_batchnorm_non_trainable_with_fit(self):
# We use the same data shape for all the data we use in this test.
# This will prevent any used tf.functions from retracing.
# This helps us verify that changing trainable and recompiling really
# does update the training loop, rather than a different data shape
# triggering a retrace.
data_shape = (100, 3)
inputs = keras.Input((3,))
bn = batch_normalization.BatchNormalization()
outputs = bn(inputs)
model = keras.Model(inputs, outputs)
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
model.fit(np.random.random(data_shape), np.random.random(data_shape))
test_data = np.random.random(data_shape)
test_targets = np.random.random(data_shape)
test_loss = model.evaluate(test_data, test_targets)
bn.trainable = False
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
train_loss = model.train_on_batch(test_data, test_targets)
self.assertAlmostEqual(test_loss, train_loss)
@test_combinations.run_all_keras_modes
def test_batchnorm_ignore_masked_values(self):
padded_data = np.array(
[[[1, 5], [2, 5], [0, 0], [0, 0]] for _ in range(10)],
dtype="float32",
) # Pad value of 0
inputs = keras.layers.Input((None, 2))
masked = keras.layers.Masking()(inputs)
normed = keras.layers.BatchNormalization(momentum=0.0)(masked)
model = keras.models.Model(inputs, normed)
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
model.fit(x=padded_data, y=padded_data, batch_size=10, epochs=5)
self.assertAllEqual(model.layers[2].moving_mean, [1.5, 5.0])
self.assertAllEqual(model.layers[2].moving_variance, [0.25, 0.0])
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_sync_batchnorm_with_mask(self):
padded_data = np.array(
[[[1, 5], [2, 5], [0, 0], [0, 0]] for _ in range(10)],
dtype="float32",
) # Pad value of 0
strategy = tf.distribute.MirroredStrategy(["CPU:0"])
distributed_data = strategy.distribute_datasets_from_function(
dataset_fn=lambda _: tf.data.Dataset.from_tensors(
(padded_data, padded_data)
).repeat(),
options=None,
)
with strategy.scope():
inputs = keras.layers.Input((None, 2))
masked = keras.layers.Masking()(inputs)
normed = keras.layers.BatchNormalization(
momentum=0.0, synchronized=True
)(masked)
model = keras.models.Model(inputs, normed)
# MirroredStrategy will be very slow when run eagerly.
model.compile("rmsprop", "mse", run_eagerly=False)
model.fit(distributed_data, steps_per_epoch=1, epochs=5)
self.assertAllEqual(model.layers[2].moving_mean, [1.5, 5.0])
self.assertAllEqual(model.layers[2].moving_variance, [0.25, 0.0])
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_eager_batchnorm_in_custom_model_call_with_tf_function(self):
class MyModel(keras.Model):
def __init__(self):
super().__init__()
self.bn = keras.layers.BatchNormalization()
@tf.function()
def call(self, x, training):
return self.bn(x, training=training)
model = MyModel()
for _ in range(10):
x = tf.constant(0.5, shape=[1, 1])
model(x, training=True)
# Make sure the moving mean and variance have been updated
self.assertAllClose(model.bn.moving_mean.numpy(), [0.047], atol=3e-3)
self.assertAllClose(model.bn.moving_variance.numpy(), [0.9], atol=3e-2)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_bessels_correction(self):
# Bessel's correction is currently only used in the fused case. In the
# future, it may be used in the nonfused case as well.
x = tf.constant([0.0, 2.0], shape=[2, 1, 1, 1])
layer = batch_normalization.BatchNormalization(
momentum=0.5, moving_variance_initializer="zeros"
)
layer(x, training=True)
self.assertTrue(layer.fused)
# Since fused is used, Bessel's correction is used. The variance of [0,
# 2] is 2 with Bessel's correction. Since the momentum is 0.5, the
# variance is 2 * 0.5 == 1.
self.assertAllEqual(self.evaluate(layer.moving_variance), [1.0])
x = tf.constant([0.0, 2.0], shape=[2, 1, 1, 1, 1])
layer = batch_normalization.BatchNormalization(
momentum=0.5, moving_variance_initializer="zeros"
)
layer(x, training=True)
self.assertTrue(layer.fused)
# Since fused is used, Bessel's correction is used. The variance of [0,
# 2] is 2 with Bessel's correction. Since the momentum is 0.5, the
# variance is 2 * 0.5 == 1.
self.assertAllEqual(self.evaluate(layer.moving_variance), [1.0])
@test_combinations.run_all_keras_modes
def test_can_be_used_in_multiple_graphs(self):
norm = keras.layers.BatchNormalization(
scale=False, center=False, fused=True
)
@tf.function
def fn1(x):
return norm(x, training=True)
@tf.function
def fn2(x):
return norm(x, training=True)
x = np.array([-1000.0, 1000.0]).reshape((2, 1, 1, 1))
y = norm(fn2(fn1(x)), training=True)
expected_y = np.array([-0.9995, 0.9995]).reshape((2, 1, 1, 1))
self.assertAllClose(keras.backend.eval(y), expected_y)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@test_combinations.generate(
test_combinations.combine(synchronized=[True, False])
)
def test_input_fully_masked(self, synchronized):
norm = keras.layers.BatchNormalization(
scale=False, center=False, synchronized=synchronized
)
x = tf.zeros((4, 5), dtype=tf.float32)
mask = tf.zeros((4,), dtype=tf.float32)
y = norm(x, mask=mask, training=True)
self.assertAllClose(y, tf.zeros_like(x, dtype=tf.float32))
class BatchNormalizationV1Test(test_combinations.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_v1_fused_attribute(self):
norm = batch_normalization_v1.BatchNormalization()
inp = keras.layers.Input((4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
norm = batch_normalization_v1.BatchNormalization(fused=False)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = batch_normalization_v1.BatchNormalization(virtual_batch_size=2)
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(2, 2, 2))
norm(inp)
self.assertEqual(norm.fused, False)
class BatchNormalizationV2Test(test_combinations.TestCase):
@test_combinations.run_all_keras_modes
def test_basic_batchnorm_v2(self):
test_utils.layer_test(
batch_normalization.BatchNormalization,
kwargs={"fused": True},
input_shape=(3, 3, 3, 3),
)
test_utils.layer_test(
batch_normalization.BatchNormalization,
kwargs={"fused": None},
input_shape=(3, 3, 3),
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_v2_fused_attribute(self):
norm = batch_normalization.BatchNormalization()
self.assertIsNone(norm.fused)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
norm = batch_normalization.BatchNormalization()
self.assertIsNone(norm.fused)
inp = keras.layers.Input(shape=(4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = batch_normalization.BatchNormalization()
self.assertIsNone(norm.fused)
inp = keras.layers.Input(shape=(4, 4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
norm = batch_normalization.BatchNormalization(virtual_batch_size=2)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = batch_normalization.BatchNormalization(fused=False)
self.assertEqual(norm.fused, False)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, False)
norm = batch_normalization.BatchNormalization(fused=True, axis=[3])
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(4, 4, 4))
norm(inp)
self.assertEqual(norm.fused, True)
with self.assertRaisesRegex(ValueError, "fused.*renorm"):
batch_normalization.BatchNormalization(fused=True, renorm=True)
with self.assertRaisesRegex(ValueError, "fused.*when axis is 1 or 3"):
batch_normalization.BatchNormalization(fused=True, axis=2)
with self.assertRaisesRegex(ValueError, "fused.*when axis is 1 or 3"):
batch_normalization.BatchNormalization(fused=True, axis=[1, 3])
with self.assertRaisesRegex(ValueError, "fused.*virtual_batch_size"):
batch_normalization.BatchNormalization(
fused=True, virtual_batch_size=2
)
with self.assertRaisesRegex(ValueError, "fused.*adjustment"):
batch_normalization.BatchNormalization(
fused=True, adjustment=lambda _: (1, 0)
)
norm = batch_normalization.BatchNormalization(fused=True)
self.assertEqual(norm.fused, True)
inp = keras.layers.Input(shape=(4, 4))
with self.assertRaisesRegex(ValueError, "4D or 5D input tensors"):
norm(inp)
def test_updates_in_wrap_function(self):
def my_func():
layer = batch_normalization_v1.BatchNormalization()
x = tf.ones((10, 1))
y = layer(x, training=True)
# Updates should be tracked in a `wrap_function`.
self.assertLen(layer.updates, 2)
return y
wrapped_fn = tf.compat.v1.wrap_function(my_func, [])
wrapped_fn()
@test_combinations.run_all_keras_modes
@test_utils.run_v2_only
def test_basic_batchnorm_v2_input_shape_and_virtual_batch_size(self):
        # Test case for GitHub issue 32380.
norm = batch_normalization.BatchNormalization(virtual_batch_size=8)
inp = keras.layers.Input(shape=(None, None, 3))
_ = norm(inp)
# Test case for https://github.com/tensorflow/tensorflow/issues/23050
norm = batch_normalization.BatchNormalization(virtual_batch_size=8)
_ = norm(np.ones((1, 28, 28)))
with self.assertRaisesRegex(Exception, "Reshape"):
norm = batch_normalization.BatchNormalization(virtual_batch_size=8)
_ = norm(np.ones((1, 28, 28)), training=True)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_fused_batchnorm_empty_batch(self):
# Test case for https://github.com/tensorflow/tensorflow/issues/52986
# create a simple strategy with the enable_partial_batch_handling flag
# turned on, to trigger the empty batch code path in fused batchnorm
strategy = tf.distribute.OneDeviceStrategy("/cpu:0")
strategy.extended.enable_partial_batch_handling = True
with strategy.scope():
layer = batch_normalization.BatchNormalization()
def fn():
with tf.GradientTape() as tape:
x = tf.ones((0, 2, 2, 2))
layer(x, training=True)
return tape
tape = strategy.run(fn)
self.assertTrue(layer.fused)
self.assertIsNotNone(layer.moving_mean)
self.assertIsNotNone(layer.moving_variance)
tape_vars = tape.watched_variables()
self.assertAllEqual(layer.gamma, tape_vars[0])
self.assertAllEqual(layer.beta, tape_vars[1])
def _run_batchnorm_correctness_test(
layer, dtype="float32", fused=False, synchronized=False
):
model = keras.models.Sequential()
model.add(keras.Input(shape=(2, 2, 2), dtype=dtype))
norm = layer(momentum=0.8, fused=fused, synchronized=synchronized)
model.add(norm)
if dtype == "float16":
# TF-Keras models require float32 losses.
model.add(
keras.layers.Lambda(lambda x: keras.backend.cast(x, "float32"))
)
model.compile(
loss="mse",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
run_eagerly=test_utils.should_run_eagerly(),
)
    # Data centered on 5.0 with standard deviation 10.0.
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2)).astype(
dtype
)
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
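    # Undo the learned affine transform; if the layer tracked the input
    # statistics correctly, what remains should be roughly standard normal,
    # hence the mean ~= 0 and std ~= 1 checks below.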
np.testing.assert_allclose(out.mean(), 0.0, atol=2e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=2e-1)
@parameterized.parameters(
[
batch_normalization_v1.BatchNormalization,
batch_normalization.BatchNormalization,
]
)
class NormalizationLayersGraphModeOnlyTest(
tf.test.TestCase, parameterized.TestCase
):
def test_shared_batchnorm(self, layer):
"""Test that a BN layer can be shared across different data streams."""
with self.cached_session():
# Test single layer reuse
bn = layer()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1)
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = keras.models.Model(x2, y2)
model.compile(
tf.compat.v1.train.GradientDescentOptimizer(0.01), "mse"
)
model.train_on_batch(x, x)
# Test model-level reuse
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
new_model = keras.models.Model(x3, y3, name="new_model")
new_model.compile(
tf.compat.v1.train.GradientDescentOptimizer(0.01), "mse"
)
new_model.train_on_batch(x, x)
def test_that_trainable_disables_updates(self, layer):
with self.cached_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = keras.layers.Input(shape=(4,))
layer = layer(input_shape=(4,))
b = layer(a)
model = keras.models.Model(a, b)
model.trainable = False
model.compile(
tf.compat.v1.train.GradientDescentOptimizer(0.01), "mse"
)
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile(
tf.compat.v1.train.GradientDescentOptimizer(0.01), "mse"
)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile(
tf.compat.v1.train.GradientDescentOptimizer(0.01), "mse"
)
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_batchnorm_trainable(self, layer):
"""Tests that batchnorm layer is trainable when learning phase enabled.
Computes mean and std for current inputs then
applies batch normalization using them.
Args:
layer: Either V1 or V2 of BatchNormalization layer.
"""
# TODO(fchollet): enable in all execution modes when issue with
# learning phase setting is resolved.
with tf.Graph().as_default(), self.cached_session():
bn_mean = 0.5
bn_std = 10.0
val_a = np.expand_dims(np.arange(10.0), axis=1)
def get_model(bn_mean, bn_std):
inp = keras.layers.Input(shape=(1,))
x = layer()(inp)
model1 = keras.models.Model(inp, x)
model1.set_weights(
[
np.array([1.0]),
np.array([0.0]),
np.array([bn_mean]),
np.array([bn_std**2]),
]
)
return model1
# Simulates training-mode with trainable layer.
# Should use mini-batch statistics.
with keras.backend.learning_phase_scope(1):
model = get_model(bn_mean, bn_std)
model.compile(loss="mse", optimizer="rmsprop")
out = model.predict(val_a)
self.assertAllClose(
(val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/normalization/batch_normalization_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/normalization/batch_normalization_test.py",
"repo_id": "tf-keras",
"token_count": 12740
} | 174 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Private base class for global pooling 1D layers."""
import tensorflow.compat.v2 as tf
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.utils import conv_utils
class GlobalPooling1D(Layer):
"""Abstract class for different global pooling 1D layers."""
def __init__(self, data_format="channels_last", keepdims=False, **kwargs):
super().__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
self.data_format = conv_utils.normalize_data_format(data_format)
self.keepdims = keepdims
def _validate_reduction_axis(self, input_shape, axes):
for axis in axes:
if input_shape[axis] == 0:
raise ValueError(
f"Incorrect input shape {input_shape} "
f"with dimension 0 at reduction axis {axis}."
)
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_last":
self._validate_reduction_axis(input_shape, [1])
else:
self._validate_reduction_axis(input_shape, [2])
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_first":
if self.keepdims:
return tf.TensorShape([input_shape[0], input_shape[1], 1])
else:
return tf.TensorShape([input_shape[0], input_shape[1]])
else:
if self.keepdims:
return tf.TensorShape([input_shape[0], 1, input_shape[2]])
else:
return tf.TensorShape([input_shape[0], input_shape[2]])
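    # For example, with the default "channels_last" format an input of shape
    # (batch, steps, features) = (8, 10, 4) reduces to (8, 4), or (8, 1, 4)
    # when `keepdims=True`; with "channels_first" the steps axis is the last
    # one, so (batch, channels, steps) reduces to (batch, channels).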
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {"data_format": self.data_format, "keepdims": self.keepdims}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| tf-keras/tf_keras/layers/pooling/base_global_pooling1d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/pooling/base_global_pooling1d.py",
"repo_id": "tf-keras",
"token_count": 1061
} | 175 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for TF-Keras discretization preprocessing layer's adapt method."""
import time
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.preprocessing import discretization
EPSILON = 0.1
def reduce_fn(state, values, epsilon=EPSILON):
"""tf.data.Dataset-friendly implementation of mean and variance."""
(state_,) = state
summary = discretization.summarize(values, epsilon)
if np.sum(state_[:, 0]) == 0:
return (summary,)
return (discretization.merge_summaries(state_, summary, epsilon),)
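# `reduce_fn` is consumed by `tf.data.Dataset.reduce` (see
# `run_dataset_implementation` below): `state` is a one-element tuple holding
# the running summary produced by `discretization.summarize`, and
# `merge_summaries` folds each batch's summary into it before bucket
# boundaries are extracted with `get_bucket_boundaries`.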
class BenchmarkAdapt(tf.test.Benchmark):
"""Benchmark adapt."""
def run_dataset_implementation(self, num_elements, batch_size):
input_t = keras.Input(shape=(1,))
layer = discretization.Discretization()
_ = layer(input_t)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = tf.data.Dataset.range(num_elements)
ds = ds.map(lambda x: tf.expand_dims(tf.cast(x, tf.float32), -1))
ds = ds.batch(batch_size)
starts.append(time.time())
# Benchmarked code begins here.
state = ds.reduce((np.zeros((1, 2)),), reduce_fn)
bins = discretization.get_bucket_boundaries(state, 100)
layer.set_weights([bins])
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts))
return avg_time
def bm_adapt_implementation(self, num_elements, batch_size):
"""Test the KPL adapt implementation."""
input_t = keras.Input(shape=(1,), dtype=tf.float32)
layer = discretization.Discretization()
_ = layer(input_t)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = tf.data.Dataset.range(num_elements)
ds = ds.map(lambda x: tf.expand_dims(tf.cast(x, tf.float32), -1))
ds = ds.batch(batch_size)
starts.append(time.time())
# Benchmarked code begins here.
layer.adapt(ds)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts))
name = "discretization_adapt|%s_elements|batch_%s" % (
num_elements,
batch_size,
)
baseline = self.run_dataset_implementation(num_elements, batch_size)
extras = {
"tf.data implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100,
}
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name
)
def benchmark_vocab_size_by_batch(self):
for vocab_size in [100, 1000, 10000, 100000, 1000000]:
for batch in [64 * 2048]:
self.bm_adapt_implementation(vocab_size, batch)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/benchmarks/discretization_adapt_benchmark.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/discretization_adapt_benchmark.py",
"repo_id": "tf-keras",
"token_count": 1596
} | 176 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras discretization preprocessing layer."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.layers.preprocessing import discretization
from tf_keras.layers.preprocessing import preprocessing_test_utils
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes
class DiscretizationTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_bucketize_with_explicit_buckets_integer(self):
input_array = np.array([[-1.5, 1.0, 3.4, 0.5], [0.0, 3.0, 1.3, 0.0]])
expected_output = [[0, 2, 3, 1], [1, 3, 2, 1]]
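        # Bucket index = number of bin boundaries <= value, so with
        # boundaries [0.0, 1.0, 2.0]: -1.5 -> 0, 0.5 -> 1, 1.0 -> 2,
        # 3.4 -> 3.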
expected_output_shape = [None, 4]
input_data = keras.Input(shape=(4,))
layer = discretization.Discretization(bin_boundaries=[0.0, 1.0, 2.0])
bucket_data = layer(input_data)
self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bucketize_with_explicit_buckets_int_input(self):
input_array = np.array([[-1, 1, 3, 0], [0, 3, 1, 0]], dtype=np.int64)
expected_output = [[0, 2, 3, 1], [1, 3, 2, 1]]
expected_output_shape = [None, 4]
input_data = keras.Input(shape=(4,), dtype=tf.int64)
layer = discretization.Discretization(bin_boundaries=[-0.5, 0.5, 1.5])
bucket_data = layer(input_data)
self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bucketize_with_explicit_buckets_sparse_float_input(self):
indices = [[0, 1], [0, 2], [1, 1]]
input_array = tf.SparseTensor(
indices=indices, values=[-1.5, 1.0, 3.4], dense_shape=[2, 3]
)
expected_output = [0, 2, 3]
input_data = keras.Input(shape=(3,), dtype=tf.float32, sparse=True)
layer = discretization.Discretization(bin_boundaries=[-0.5, 0.5, 1.5])
bucket_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(indices, output_dataset.indices)
self.assertAllEqual(expected_output, output_dataset.values)
def test_bucketize_with_explicit_buckets_ragged_float_input(self):
input_array = tf.ragged.constant(
[[-1.5, 1.0, 3.4, 0.5], [0.0, 3.0, 1.3]]
)
expected_output = [[0, 2, 3, 1], [1, 3, 2]]
expected_output_shape = [None, None]
input_data = keras.Input(shape=(None,), ragged=True)
layer = discretization.Discretization(bin_boundaries=[0.0, 1.0, 2.0])
bucket_data = layer(input_data)
self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bucketize_with_explicit_buckets_ragged_int_input(self):
input_array = tf.ragged.constant(
[[-1, 1, 3, 0], [0, 3, 1]], dtype=tf.int64
)
expected_output = [[0, 2, 3, 1], [1, 3, 2]]
expected_output_shape = [None, None]
input_data = keras.Input(shape=(None,), ragged=True, dtype=tf.int64)
layer = discretization.Discretization(bin_boundaries=[-0.5, 0.5, 1.5])
bucket_data = layer(input_data)
self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bucketize_with_explicit_buckets_sparse_int_input(self):
indices = [[0, 1], [0, 2], [1, 1]]
input_array = tf.SparseTensor(
indices=indices, values=[-1, 1, 3], dense_shape=[2, 3]
)
expected_output = [0, 2, 3]
input_data = keras.Input(shape=(3,), dtype=tf.int32, sparse=True)
layer = discretization.Discretization(bin_boundaries=[-0.5, 0.5, 1.5])
bucket_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(indices, output_dataset.indices)
self.assertAllEqual(expected_output, output_dataset.values)
def test_one_hot_output(self):
input_data = np.array([-1.5, 1.0, 3.4, 3.5])
expected_output = [
[1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0],
]
expected_output_shape = [None, 4]
inputs = keras.Input(shape=(1,))
layer = discretization.Discretization(
bin_boundaries=[0.0, 1.0, 2.0], output_mode="one_hot"
)
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
model = keras.Model(inputs, outputs)
output_data = model(input_data)
self.assertAllEqual(expected_output, output_data)
def test_multi_hot_output(self):
input_data = np.array([-1.5, 1.0, 3.4, 3.5])
expected_output = [1.0, 0.0, 1.0, 1.0]
expected_output_shape = [None, 4]
inputs = keras.Input(shape=(4,))
layer = discretization.Discretization(
bin_boundaries=[0.0, 1.0, 2.0], output_mode="multi_hot"
)
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
model = keras.Model(inputs, outputs)
output_data = model(input_data)
self.assertAllEqual(expected_output, output_data)
def test_count_output(self):
input_data = np.array([-1.5, 1.0, 3.4, 3.5])
expected_output = [1.0, 0.0, 1.0, 2.0]
expected_output_shape = [None, 4]
inputs = keras.Input(shape=(4,))
layer = discretization.Discretization(
bin_boundaries=[0.0, 1.0, 2.0], output_mode="count"
)
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
model = keras.Model(inputs, outputs)
output_data = model(input_data)
self.assertAllEqual(expected_output, output_data)
def test_output_shape(self):
inputs = keras.Input(batch_size=16, shape=(4,), dtype=tf.int64)
layer = discretization.Discretization(bin_boundaries=[-0.5, 0.5, 1.5])
outputs = layer(inputs)
self.assertAllEqual(outputs.shape.as_list(), [16, 4])
@parameterized.named_parameters(
("int32", tf.int32),
("int64", tf.int64),
)
def test_output_dtype(self, dtype):
inputs = keras.Input(batch_size=16, shape=(4,), dtype="float32")
layer = discretization.Discretization(
bin_boundaries=[-0.5, 0.5, 1.5], dtype=dtype
)
outputs = layer(inputs)
self.assertAllEqual(outputs.dtype, dtype)
def test_legacy_dtype_compat(self):
inputs = keras.Input(batch_size=16, shape=(4,), dtype="float32")
layer = discretization.Discretization(
bin_boundaries=[-0.5, 0.5, 1.5], dtype="float32"
)
outputs = layer(inputs)
self.assertAllEqual(outputs.dtype, tf.int64)
# In TF1 we sometimes face an explicit dtype=None in the config.
layer = discretization.Discretization(
bin_boundaries=[-0.5, 0.5, 1.5], dtype=None
)
outputs = layer(inputs)
self.assertAllEqual(outputs.dtype, tf.int64)
@parameterized.named_parameters(
("float32", tf.float32),
("float64", tf.float64),
)
def test_one_hot_output_dtype(self, dtype):
inputs = keras.Input(batch_size=16, shape=(1,), dtype="float32")
layer = discretization.Discretization(
bin_boundaries=[-0.5, 0.5, 1.5], output_mode="one_hot", dtype=dtype
)
outputs = layer(inputs)
self.assertAllEqual(outputs.dtype, dtype)
def test_num_bins_negative_fails(self):
with self.assertRaisesRegex(
ValueError, "`num_bins` must be.*num_bins=-7"
):
_ = discretization.Discretization(num_bins=-7)
def test_num_bins_and_bins_set_fails(self):
with self.assertRaisesRegex(
ValueError,
r"`num_bins` and `bin_boundaries` should not be set.*5.*\[1, 2\]",
):
_ = discretization.Discretization(num_bins=5, bins=[1, 2])
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class DiscretizationAdaptTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
@parameterized.named_parameters(
[
{
"testcase_name": "2d_single_element",
"adapt_data": np.array([[1.0], [2.0], [3.0], [4.0], [5.0]]),
"test_data": np.array([[1.0], [2.0], [3.0]]),
"use_dataset": True,
"expected": np.array([[1], [2], [3]]),
"num_bins": 5,
"epsilon": 0.01,
},
{
"testcase_name": "2d_multi_element",
"adapt_data": np.array(
[
[1.0, 6.0],
[2.0, 7.0],
[3.0, 8.0],
[4.0, 9.0],
[5.0, 10.0],
]
),
"test_data": np.array([[1.0, 10.0], [2.0, 6.0], [3.0, 8.0]]),
"use_dataset": True,
"expected": np.array([[0, 4], [1, 3], [1, 4]]),
"num_bins": 5,
"epsilon": 0.01,
},
{
"testcase_name": "1d_single_element",
"adapt_data": np.array([3.0, 2.0, 1.0, 5.0, 4.0]),
"test_data": np.array([1.0, 2.0, 3.0]),
"use_dataset": True,
"expected": np.array([1, 2, 3]),
"num_bins": 5,
"epsilon": 0.01,
},
{
"testcase_name": "300_batch_1d_single_element_1",
"adapt_data": np.arange(300),
"test_data": np.arange(300),
"use_dataset": True,
"expected": np.concatenate(
[np.zeros(101), np.ones(99), 2 * np.ones(100)]
),
"num_bins": 3,
"epsilon": 0.01,
},
{
"testcase_name": "300_batch_1d_single_element_2",
"adapt_data": np.arange(300) ** 2,
"test_data": np.arange(300) ** 2,
"use_dataset": True,
"expected": np.concatenate(
[np.zeros(101), np.ones(99), 2 * np.ones(100)]
),
"num_bins": 3,
"epsilon": 0.01,
},
{
"testcase_name": "300_batch_1d_single_element_large_epsilon",
"adapt_data": np.arange(300),
"test_data": np.arange(300),
"use_dataset": True,
"expected": np.concatenate([np.zeros(136), np.ones(164)]),
"num_bins": 2,
"epsilon": 0.1,
},
]
)
def test_layer_computation(
self,
adapt_data,
test_data,
use_dataset,
expected,
num_bins=5,
epsilon=0.01,
):
input_shape = tuple(list(test_data.shape)[1:])
np.random.shuffle(adapt_data)
if use_dataset:
# TF-Keras APIs expect batched datasets
adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch(
test_data.shape[0] // 2
)
test_data = tf.data.Dataset.from_tensor_slices(test_data).batch(
test_data.shape[0] // 2
)
layer = discretization.Discretization(
epsilon=epsilon, num_bins=num_bins
)
layer.adapt(adapt_data)
input_data = keras.Input(shape=input_shape)
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = test_utils.should_run_eagerly()
output_data = model.predict(test_data)
self.assertAllClose(expected, output_data)
def test_multiple_adapts(self):
first_adapt = [[1], [2], [3]]
second_adapt = [[4], [5], [6]]
predict_input = [[2], [2]]
expected_first_output = [[2], [2]]
expected_second_output = [[0], [0]]
inputs = keras.Input(shape=(1,), dtype=tf.int32)
layer = discretization.Discretization(num_bins=3)
layer.adapt(first_adapt)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
actual_output = model.predict(predict_input)
self.assertAllClose(actual_output, expected_first_output)
# Re-adapt the layer on new inputs.
layer.adapt(second_adapt)
# Re-compile the model.
model.compile()
# `predict` should now use the new model state.
actual_output = model.predict(predict_input)
self.assertAllClose(actual_output, expected_second_output)
def test_saved_model_tf(self):
input_data = [[1], [2], [3]]
predict_data = [[0.5], [1.5], [2.5]]
expected_output = [[0], [1], [2]]
inputs = keras.Input(shape=(1,), dtype=tf.float32)
layer = discretization.Discretization(num_bins=3)
layer.adapt(input_data)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model.predict(predict_data)
self.assertAllClose(output_data, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_saved_model")
tf.saved_model.save(model, output_path)
loaded_model = tf.saved_model.load(output_path)
f = loaded_model.signatures["serving_default"]
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = f(tf.constant(predict_data))["discretization"]
self.assertAllClose(new_output_data, expected_output)
@parameterized.product(
save_format=["tf", "h5"],
adapt=[True, False],
)
def test_saved_model_keras(self, save_format, adapt):
input_data = [[1], [2], [3]]
predict_data = [[0.5], [1.5], [2.5]]
expected_output = [[0], [1], [2]]
cls = discretization.Discretization
inputs = keras.Input(shape=(1,), dtype=tf.float32)
if adapt:
layer = cls(num_bins=3)
layer.adapt(input_data)
else:
layer = cls(bin_boundaries=[1.0, 2.0])
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model.predict(predict_data)
self.assertAllClose(output_data, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format=save_format)
loaded_model = keras.models.load_model(
output_path, custom_objects={"Discretization": cls}
)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = loaded_model.predict(predict_data)
self.assertAllClose(new_output_data, expected_output)
def test_saved_weights_keras(self):
input_data = [[1], [2], [3]]
predict_data = [[0.5], [1.5], [2.5]]
expected_output = [[0], [1], [2]]
cls = discretization.Discretization
inputs = keras.Input(shape=(1,), dtype=tf.float32)
layer = cls(num_bins=3)
layer.adapt(input_data)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model.predict(predict_data)
self.assertAllClose(output_data, expected_output)
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_saved_weights"
)
model.save_weights(output_path, save_format="tf")
new_model = keras.Model.from_config(
model.get_config(), custom_objects={"Discretization": cls}
)
new_model.load_weights(output_path)
# Validate correctness of the new model.
new_output_data = new_model.predict(predict_data)
self.assertAllClose(new_output_data, expected_output)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/discretization_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/discretization_test.py",
"repo_id": "tf-keras",
"token_count": 8745
} | 177 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.normalization."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.layers.preprocessing import normalization
from tf_keras.layers.preprocessing import preprocessing_test_utils
from tf_keras.mixed_precision import policy
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
def _get_layer_computation_test_cases():
test_cases = (
{
"adapt_data": np.array(
[[1.0], [2.0], [3.0], [4.0], [5.0]], dtype=np.float32
),
"axis": -1,
"test_data": np.array([[1.0], [2.0], [3.0]], np.float32),
"expected": np.array([[-1.414214], [-0.707107], [0]], np.float32),
"testcase_name": "2d_single_element",
},
{
"adapt_data": np.array([[1], [2], [3], [4], [5]], dtype=np.int32),
"axis": -1,
"test_data": np.array([[1], [2], [3]], np.int32),
"expected": np.array([[-1.414214], [-0.707107], [0]], np.float32),
"testcase_name": "2d_int_data",
},
{
"adapt_data": np.array(
[[1.0], [2.0], [3.0], [4.0], [5.0]], dtype=np.float32
),
"axis": None,
"test_data": np.array([[1.0], [2.0], [3.0]], np.float32),
"expected": np.array([[-1.414214], [-0.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis",
},
{
"adapt_data": np.array(
[[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32
),
"axis": None,
"test_data": np.array([[1.0], [2.0], [3.0]], np.float32),
"expected": np.array([[-1.414214], [-0.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis_flat_data",
},
{
"adapt_data": np.array(
[
[[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]],
[[3.0, 4.0, 5.0], [4.0, 5.0, 6.0]],
],
np.float32,
),
"axis": 1,
"test_data": np.array(
[
[[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]],
[[3.0, 4.0, 5.0], [4.0, 5.0, 6.0]],
],
np.float32,
),
"expected": np.array(
[
[[-1.549193, -0.774597, 0.0], [-1.549193, -0.774597, 0.0]],
[[0.0, 0.774597, 1.549193], [0.0, 0.774597, 1.549193]],
],
np.float32,
),
"testcase_name": "3d_internal_axis",
},
{
"adapt_data": np.array(
[
[[1.0, 0.0, 3.0], [2.0, 3.0, 4.0]],
[[3.0, -1.0, 5.0], [4.0, 5.0, 8.0]],
],
np.float32,
),
"axis": (1, 2),
"test_data": np.array(
[
[[3.0, 1.0, -1.0], [2.0, 5.0, 4.0]],
[[3.0, 0.0, 5.0], [2.0, 5.0, 8.0]],
],
np.float32,
),
"expected": np.array(
[
[[1.0, 3.0, -5.0], [-1.0, 1.0, -1.0]],
[[1.0, 1.0, 1.0], [-1.0, 1.0, 1.0]],
],
np.float32,
),
"testcase_name": "3d_multiple_axis",
},
{
"adapt_data": np.zeros((3, 4)),
"axis": -1,
"test_data": np.zeros((3, 4)),
"expected": np.zeros((3, 4)),
"testcase_name": "zero_variance",
},
)
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
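# Illustrative sketch (added for exposition; not part of the original test
# suite). The "2d_single_element" expectations above follow directly from
# (x - mean) / sqrt(variance), using the population statistics of the adapt
# data [1, 2, 3, 4, 5] (mean 3.0, variance 2.0):
def _manual_normalization_reference():
    adapt = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
    test = np.array([[1.0], [2.0], [3.0]], dtype=np.float32)
    expected = (test - adapt.mean()) / np.sqrt(adapt.var())
    # expected is approximately [[-1.414214], [-0.707107], [0.0]], matching
    # the "expected" entries of the single-element test cases above.
    return expected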
@test_combinations.run_all_keras_modes
class NormalizationTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_broadcasting_during_direct_setting(self):
layer = normalization.Normalization(axis=-1, mean=[1.0], variance=[1.0])
output = layer(np.array([[1.0, 2.0]]))
expected_output = [[0.0, 1.0]]
self.assertAllClose(output, expected_output)
self.assertAllClose(layer.get_weights(), [])
def test_broadcasting_during_direct_setting_with_tensors(self):
if not tf.executing_eagerly():
self.skipTest("Only supported in TF2.")
layer = normalization.Normalization(
axis=-1, mean=tf.constant([1.0]), variance=tf.constant([1.0])
)
output = layer(np.array([[1.0, 2.0]]))
expected_output = [[0.0, 1.0]]
self.assertAllClose(output, expected_output)
self.assertAllClose(layer.get_weights(), [])
def test_1d_data(self):
data = np.array([0.0, 2.0, 0.0, 2.0])
layer = normalization.Normalization(mean=1.0, variance=1.0)
output = layer(data)
self.assertListEqual(output.shape.as_list(), [4])
self.assertAllClose(output, [-1, 1, -1, 1])
def test_0d_data(self):
layer = normalization.Normalization(axis=None, mean=1.0, variance=1.0)
output = layer(0.0)
self.assertListEqual(output.shape.as_list(), [])
self.assertAllClose(output, -1)
def test_broadcasting_during_direct_setting_with_variables_fails(self):
with self.assertRaisesRegex(ValueError, "passing a Variable"):
_ = normalization.Normalization(
axis=-1, mean=tf.Variable([1.0]), variance=tf.Variable([2.0])
)
def test_keeping_an_unknown_axis_fails(self):
layer = normalization.Normalization(axis=-1)
with self.assertRaisesRegex(ValueError, "axis.*must have known shape"):
layer.build([None])
@parameterized.parameters(
# Out of bounds
{"axis": 3},
{"axis": -4},
# In a tuple
{"axis": (1, 3)},
{"axis": (1, -4)},
)
def test_bad_axis_fail_build(self, axis):
layer = normalization.Normalization(axis=axis)
with self.assertRaisesRegex(ValueError, "in the range"):
layer.build([None, 2, 3])
def test_list_input(self):
with self.assertRaisesRegex(
ValueError,
"Normalization only accepts a single input. If you are "
"passing a python list or tuple as a single input, "
"please convert to a numpy array or `tf.Tensor`.",
):
normalization.Normalization()([1, 2, 3])
def test_scalar_input(self):
with self.assertRaisesRegex(
ValueError, "axis.*values must be in the range"
):
normalization.Normalization()(1)
def test_output_dtype(self):
if not tf.__internal__.tf2.enabled():
self.skipTest("set_global_policy only supported in TF2.")
# Output should respect an explicit dtype, and default to the global
# policy.
policy.set_global_policy("float64")
input_data = keras.Input(batch_size=16, shape=(1,))
layer = normalization.Normalization(
mean=1.0, variance=1.0, dtype="float16"
)
output = layer(input_data)
self.assertAllEqual(output.dtype, tf.float16)
layer = normalization.Normalization(mean=1.0, variance=1.0)
output = layer(input_data)
self.assertAllEqual(output.dtype, tf.float64)
def test_invert(self):
input_data = np.array([0.0, 4.0, 0.0, 4.0])
norm = normalization.Normalization(mean=2.0, variance=4.0)
inv_norm = normalization.Normalization(
mean=2.0, variance=4.0, invert=True
)
output = norm(input_data)
output2 = inv_norm(output)
self.assertListEqual(output2.shape.as_list(), [4])
self.assertAllClose(input_data, output2)
@test_utils.run_v2_only
def test_invert_adapt(self):
input_data = [[0.0], [4.0], [0.0], [4.0]]
norm = keras.layers.Normalization(axis=-1)
norm.adapt(input_data)
inv_norm = keras.layers.Normalization(axis=-1, invert=True)
inv_norm.adapt(input_data)
output = norm(input_data)
output2 = inv_norm(output)
self.assertListEqual(output2.shape.as_list(), [4, 1])
self.assertAllClose(input_data, output2)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class NormalizationAdaptTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_layer_api_compatibility(self):
cls = normalization.Normalization
output_data = test_utils.layer_test(
cls,
kwargs={"axis": -1},
input_shape=(None, 3),
input_data=np.array([[3, 1, 2], [6, 5, 4]], dtype=np.float32),
validate_training=False,
adapt_data=np.array([[1, 2, 1], [2, 3, 4], [1, 2, 1], [2, 3, 4]]),
)
expected = np.array([[3.0, -3.0, -0.33333333], [9.0, 5.0, 1.0]])
self.assertAllClose(expected, output_data)
@parameterized.named_parameters(*_get_layer_computation_test_cases())
def test_layer_computation(
self, adapt_data, axis, test_data, use_dataset, expected
):
input_shape = tuple(
[test_data.shape[i] for i in range(1, test_data.ndim)]
)
if use_dataset:
# TF-Keras APIs expect batched datasets
adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch(
test_data.shape[0] // 2
)
test_data = tf.data.Dataset.from_tensor_slices(test_data).batch(
test_data.shape[0] // 2
)
layer = normalization.Normalization(axis=axis)
layer.adapt(adapt_data)
input_data = keras.Input(shape=input_shape)
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = test_utils.should_run_eagerly()
output_data = model.predict(test_data)
self.assertAllClose(expected, output_data)
def test_1d_unbatched_adapt(self):
ds = tf.data.Dataset.from_tensor_slices(
[
[2.0, 0.0, 2.0, 0.0],
[0.0, 2.0, 0.0, 2.0],
]
)
layer = normalization.Normalization(axis=-1)
layer.adapt(ds)
output_ds = ds.map(layer)
self.assertAllClose(
list(output_ds.as_numpy_iterator()),
[
[1.0, -1.0, 1.0, -1.0],
[-1.0, 1.0, -1.0, 1.0],
],
)
def test_0d_unbatched_adapt(self):
ds = tf.data.Dataset.from_tensor_slices([2.0, 0.0, 2.0, 0.0])
layer = normalization.Normalization(axis=None)
layer.adapt(ds)
output_ds = ds.map(layer)
self.assertAllClose(
list(output_ds.as_numpy_iterator()), [1.0, -1.0, 1.0, -1.0]
)
@parameterized.parameters(
# Results should be identical no matter how the axes are specified (3d).
{"axis": (1, 2)},
{"axis": (2, 1)},
{"axis": (1, -1)},
{"axis": (-1, 1)},
)
def test_axis_permutations(self, axis):
layer = normalization.Normalization(axis=axis)
# data.shape = [2, 2, 3]
data = np.array(
[
[[0.0, 1.0, 2.0], [0.0, 2.0, 6.0]],
[[2.0, 3.0, 4.0], [3.0, 6.0, 10.0]],
]
)
expect = np.array(
[
[[-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
)
layer.adapt(data)
self.assertAllClose(expect, layer(data))
def test_model_summary_after_layer_adapt(self):
data = np.array(
[
[[0.0, 1.0, 2.0], [0.0, 2.0, 6.0]],
[[2.0, 3.0, 4.0], [3.0, 6.0, 10.0]],
]
)
layer = normalization.Normalization(axis=-1)
layer.adapt(data)
model = keras.Sequential(
[
layer,
keras.layers.Dense(64, activation="relu"),
keras.layers.Dense(1),
]
)
model.summary()
def test_multiple_adapts(self):
first_adapt = [[0], [2], [0], [2]]
second_adapt = [[2], [4], [2], [4]]
predict_input = [[2], [2]]
expected_first_output = [[1], [1]]
expected_second_output = [[-1], [-1]]
inputs = keras.Input(shape=(1,), dtype=tf.int32)
layer = normalization.Normalization(axis=-1)
layer.adapt(first_adapt)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
actual_output = model.predict(predict_input)
self.assertAllClose(actual_output, expected_first_output)
# Re-adapt the layer on new inputs.
layer.adapt(second_adapt)
# Re-compile the model.
model.compile()
# `predict` should now use the new model state.
actual_output = model.predict(predict_input)
self.assertAllClose(actual_output, expected_second_output)
@parameterized.parameters(
{"adapted": True},
{"adapted": False},
)
def test_saving_tf(self, adapted):
input_data = [[0.0], [2.0], [0.0], [2.0]]
expected_output = [[-1.0], [1.0], [-1.0], [1.0]]
inputs = keras.Input(shape=(1,), dtype=tf.float32)
if adapted:
layer = normalization.Normalization(axis=-1)
layer.adapt(input_data)
else:
layer = normalization.Normalization(mean=1.0, variance=1.0)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model.predict(input_data)
self.assertAllClose(output_data, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_saved_model")
tf.saved_model.save(model, output_path)
loaded_model = tf.saved_model.load(output_path)
f = loaded_model.signatures["serving_default"]
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = f(tf.constant(input_data))["normalization"]
self.assertAllClose(new_output_data, expected_output)
@parameterized.product(
save_format=["tf", "h5", "keras_v3"],
adapt=[True, False],
)
def test_saving_keras(self, save_format, adapt):
input_data = [[0.0], [2.0], [0.0], [2.0]]
expected_output = [[-1.0], [1.0], [-1.0], [1.0]]
cls = normalization.Normalization
inputs = keras.Input(shape=(1,), dtype=tf.float32)
if adapt:
layer = cls(axis=-1)
layer.adapt(input_data)
else:
layer = cls(mean=1.0, variance=1.0)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model.predict(input_data)
self.assertAllClose(output_data, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_model")
if save_format == "keras_v3":
if not tf.__internal__.tf2.enabled():
self.skipTest(
"TF2 must be enabled to use the new `.keras` saving."
)
output_path += ".keras"
model.save(output_path, save_format=save_format)
loaded_model = keras.models.load_model(
output_path, custom_objects={"Normalization": cls}
)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = loaded_model.predict(input_data)
self.assertAllClose(new_output_data, expected_output)
@parameterized.product(
save_format=["tf", "h5", "keras_v3"],
adapt=[True, False],
)
def test_saving_keras_invert(self, save_format, adapt):
expected_output = [[0.0], [2.0], [0.0], [2.0]]
input_data = [[-1.0], [1.0], [-1.0], [1.0]]
cls = normalization.Normalization
inputs = keras.Input(shape=(1,), dtype=tf.float32)
if adapt:
layer = cls(axis=-1, invert=True)
layer.adapt(expected_output)
else:
layer = cls(mean=1.0, variance=1.0, invert=True)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model.predict(input_data)
self.assertAllClose(output_data, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_model_invert")
if save_format == "keras_v3":
if not tf.__internal__.tf2.enabled():
self.skipTest(
"TF2 must be enabled to use the new `.keras` saving."
)
output_path += ".keras"
model.save(output_path, save_format=save_format)
loaded_model = keras.models.load_model(
output_path, custom_objects={"Normalization": cls}
)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = loaded_model.predict(input_data)
self.assertAllClose(new_output_data, expected_output)
@parameterized.parameters(
{"adapted": True},
{"adapted": False},
)
def test_saved_weights_keras(self, adapted):
input_data = [[0.0], [2.0], [0.0], [2.0]]
expected_output = [[-1.0], [1.0], [-1.0], [1.0]]
cls = normalization.Normalization
inputs = keras.Input(shape=(1,), dtype=tf.float32)
if adapted:
layer = cls(axis=-1)
layer.adapt(input_data)
else:
layer = cls(mean=1.0, variance=1.0)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model.predict(input_data)
self.assertAllClose(output_data, expected_output)
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_saved_weights"
)
model.save_weights(output_path, save_format="tf")
new_model = keras.Model.from_config(
model.get_config(), custom_objects={"Normalization": cls}
)
new_model.load_weights(output_path)
# Validate correctness of the new model.
new_output_data = new_model.predict(input_data)
self.assertAllClose(new_output_data, expected_output)
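# Illustrative sketch (added for exposition; not part of the original test
# suite). `test_invert` above relies on the inverse affine map: the forward
# layer computes (x - mean) / sqrt(variance), while `invert=True` computes
# x * sqrt(variance) + mean, so chaining the two recovers the input:
def _manual_invert_reference():
    x = np.array([0.0, 4.0, 0.0, 4.0])
    forward = (x - 2.0) / np.sqrt(4.0)  # -> [-1., 1., -1., 1.]
    recovered = forward * np.sqrt(4.0) + 2.0  # -> [0., 4., 0., 4.]
    return forward, recovered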
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/normalization_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/normalization_test.py",
"repo_id": "tf-keras",
"token_count": 10073
} | 178 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the AlphaDropout layer."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.AlphaDropout")
class AlphaDropout(base_layer.BaseRandomLayer):
"""Applies Alpha Dropout to the input.
    Alpha Dropout is a `Dropout` variant that keeps the mean and variance of
    its inputs at their original values, in order to ensure the
    self-normalizing property even after this dropout.
Alpha Dropout fits well to Scaled Exponential Linear Units
by randomly setting activations to the negative saturation value.
Args:
rate: float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
return self.noise_shape if self.noise_shape else tf.shape(inputs)
def call(self, inputs, training=None):
if 0.0 < self.rate < 1.0:
noise_shape = self._get_noise_shape(inputs)
def dropped_inputs(inputs=inputs, rate=self.rate):
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
kept_idx = tf.greater_equal(
self._random_generator.random_uniform(noise_shape), rate
)
kept_idx = tf.cast(kept_idx, inputs.dtype)
# Get affine transformation params
a = ((1 - rate) * (1 + rate * alpha_p**2)) ** -0.5
b = -a * alpha_p * rate
# Apply mask
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
# Do affine transformation
return a * x + b
return backend.in_train_phase(
dropped_inputs, inputs, training=training
)
return inputs
def get_config(self):
config = {"rate": self.rate, "seed": self.seed}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
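# Illustrative sketch (added for exposition; not part of the original module).
# It mirrors the transformation in `call()` above with plain NumPy to show
# that, for zero-mean / unit-variance inputs, Alpha Dropout approximately
# preserves both moments. The constants and formulas are copied from
# `call()`; the helper name and the standard-normal-input assumption in the
# commented usage are illustrative only.
def _alpha_dropout_reference(x, rate, rng):
    """NumPy mirror of the dropped_inputs() branch of AlphaDropout.call."""
    import numpy as np

    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    alpha_p = -alpha * scale
    # Keep each unit with probability (1 - rate); dropped units are set to
    # the SELU negative saturation value alpha_p, then an affine correction
    # restores the original mean and variance.
    kept = (rng.uniform(size=x.shape) >= rate).astype(x.dtype)
    a = ((1 - rate) * (1 + rate * alpha_p**2)) ** -0.5
    b = -a * alpha_p * rate
    return a * (x * kept + alpha_p * (1 - kept)) + b
# Usage (e.g. rate=0.1 on a million standard-normal samples keeps the output
# mean near 0 and the output standard deviation near 1):
#     rng = np.random.default_rng(0)
#     y = _alpha_dropout_reference(rng.standard_normal(1_000_000), 0.1, rng)
#     assert abs(y.mean()) < 1e-2 and abs(y.std() - 1.0) < 1e-2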
| tf-keras/tf_keras/layers/regularization/alpha_dropout.py/0 | {
"file_path": "tf-keras/tf_keras/layers/regularization/alpha_dropout.py",
"repo_id": "tf-keras",
"token_count": 1441
} | 179 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras cropping layer for 3D input."""
import tensorflow.compat.v2 as tf
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.utils import conv_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Cropping3D")
class Cropping3D(Layer):
"""Cropping layer for 3D data (e.g. spatial or spatio-temporal).
Examples:
>>> input_shape = (2, 28, 28, 10, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.Cropping3D(cropping=(2, 4, 2))(x)
>>> print(y.shape)
(2, 24, 20, 6, 3)
Args:
cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to depth, height, and width.
        - If tuple of 3 ints: interpreted as three different
          symmetric cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints: interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
When unspecified, uses
`image_data_format` value found in your TF-Keras config file at
`~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to 'channels_last'.
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop, depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_cropped_axis, second_cropped_axis,
third_cropped_axis, depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
"""
def __init__(
self, cropping=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs
):
super().__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = (
(cropping, cropping),
(cropping, cropping),
(cropping, cropping),
)
elif hasattr(cropping, "__len__"):
if len(cropping) != 3:
raise ValueError(
f"`cropping` should have 3 elements. Received: {cropping}."
)
dim1_cropping = conv_utils.normalize_tuple(
cropping[0], 2, "1st entry of cropping", allow_zero=True
)
dim2_cropping = conv_utils.normalize_tuple(
cropping[1], 2, "2nd entry of cropping", allow_zero=True
)
dim3_cropping = conv_utils.normalize_tuple(
cropping[2], 2, "3rd entry of cropping", allow_zero=True
)
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
"`cropping` should be either an int, "
"a tuple of 3 ints "
"(symmetric_dim1_crop, symmetric_dim2_crop, "
"symmetric_dim3_crop), "
"or a tuple of 3 tuples of 2 ints "
"((left_dim1_crop, right_dim1_crop),"
" (left_dim2_crop, right_dim2_crop),"
" (left_dim3_crop, right_dim2_crop)). "
f"Received: {cropping}."
)
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_first":
if input_shape[2] is not None:
dim1 = (
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
)
else:
dim1 = None
if input_shape[3] is not None:
dim2 = (
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
)
else:
dim2 = None
if input_shape[4] is not None:
dim3 = (
input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
)
else:
dim3 = None
return tf.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3]
)
elif self.data_format == "channels_last":
if input_shape[1] is not None:
dim1 = (
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
)
else:
dim1 = None
if input_shape[2] is not None:
dim2 = (
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
)
else:
dim2 = None
if input_shape[3] is not None:
dim3 = (
input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
)
else:
dim3 = None
return tf.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]]
)
def call(self, inputs):
if self.data_format == "channels_first":
if (
self.cropping[0][1]
== self.cropping[1][1]
== self.cropping[2][1]
== 0
):
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] :,
]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] :,
]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
]
elif self.cropping[0][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
]
elif self.cropping[1][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
]
elif self.cropping[2][1] == 0:
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
]
return inputs[
:,
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
]
else:
if (
self.cropping[0][1]
== self.cropping[1][1]
== self.cropping[2][1]
== 0
):
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] :,
:,
]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
:,
]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] :,
:,
]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
:,
]
elif self.cropping[0][1] == 0:
return inputs[
:,
self.cropping[0][0] :,
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
:,
]
elif self.cropping[1][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] :,
self.cropping[2][0] : -self.cropping[2][1],
:,
]
elif self.cropping[2][1] == 0:
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] :,
:,
]
return inputs[
:,
self.cropping[0][0] : -self.cropping[0][1],
self.cropping[1][0] : -self.cropping[1][1],
self.cropping[2][0] : -self.cropping[2][1],
:,
]
def get_config(self):
config = {"cropping": self.cropping, "data_format": self.data_format}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
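# Illustrative sketch (added for exposition; not part of the original module).
# For `channels_last` inputs the layer is equivalent to plain slicing of the
# three spatial axes, so the docstring example's output shape can be checked
# without building a model (the zero array below is illustrative only):
def _cropping3d_reference_shape_check():
    import numpy as np

    x = np.zeros((2, 28, 28, 10, 3))
    # cropping=(2, 4, 2) removes 2, 4 and 2 entries from both ends of the
    # three spatial dimensions, so each one shrinks by left + right crop.
    y = x[:, 2:-2, 4:-4, 2:-2, :]
    assert y.shape == (2, 24, 20, 6, 3)
    return y.shape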
| tf-keras/tf_keras/layers/reshaping/cropping3d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/reshaping/cropping3d.py",
"repo_id": "tf-keras",
"token_count": 6911
} | 180 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras zero-padding layer for 3D input."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.utils import conv_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.ZeroPadding3D")
class ZeroPadding3D(Layer):
"""Zero-padding layer for 3D data (spatial or spatio-temporal).
Examples:
>>> input_shape = (1, 1, 2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.ZeroPadding3D(padding=2)(x)
>>> print(y.shape)
(1, 5, 6, 6, 3)
Args:
padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
        - If int: the same symmetric padding
          is applied to depth, height, and width.
        - If tuple of 3 ints:
          interpreted as three different
          symmetric padding values for depth, height, and width:
          `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
When unspecified, uses
`image_data_format` value found in your TF-Keras config file at
`~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to 'channels_last'.
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_axis_to_pad, second_axis_to_pad,
third_axis_to_pad, depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_axis_to_pad, second_axis_to_pad,
third_axis_to_pad)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
          `(batch_size, first_padded_axis, second_padded_axis,
          third_padded_axis, depth)`
      - If `data_format` is `"channels_first"`:
          `(batch_size, depth, first_padded_axis, second_padded_axis,
          third_padded_axis)`
"""
def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = (
(padding, padding),
(padding, padding),
(padding, padding),
)
elif hasattr(padding, "__len__"):
if len(padding) != 3:
raise ValueError(
f"`padding` should have 3 elements. Received: {padding}."
)
dim1_padding = conv_utils.normalize_tuple(
padding[0], 2, "1st entry of padding", allow_zero=True
)
dim2_padding = conv_utils.normalize_tuple(
padding[1], 2, "2nd entry of padding", allow_zero=True
)
dim3_padding = conv_utils.normalize_tuple(
padding[2], 2, "3rd entry of padding", allow_zero=True
)
self.padding = (dim1_padding, dim2_padding, dim3_padding)
else:
raise ValueError(
"`padding` should be either an int, "
"a tuple of 3 ints "
"(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), "
"or a tuple of 3 tuples of 2 ints "
"((left_dim1_pad, right_dim1_pad),"
" (left_dim2_pad, right_dim2_pad),"
" (left_dim3_pad, right_dim2_pad)). "
f"Received: {padding}."
)
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_first":
if input_shape[2] is not None:
dim1 = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tf.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3]
)
elif self.data_format == "channels_last":
if input_shape[1] is not None:
dim1 = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tf.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]]
)
def call(self, inputs):
return backend.spatial_3d_padding(
inputs, padding=self.padding, data_format=self.data_format
)
def get_config(self):
config = {"padding": self.padding, "data_format": self.data_format}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
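# Illustrative sketch (added for exposition; not part of the original module).
# For `channels_last` inputs the layer behaves like `np.pad` applied to the
# three spatial axes only, which makes the docstring example's output shape
# easy to verify (the array below is illustrative only):
def _zero_padding3d_reference_shape_check():
    import numpy as np

    x = np.arange(np.prod((1, 1, 2, 2, 3))).reshape((1, 1, 2, 2, 3))
    # padding=2 adds 2 zeros on both sides of each spatial dimension, so
    # every spatial dimension grows by 4 while batch and channels stay put.
    y = np.pad(x, ((0, 0), (2, 2), (2, 2), (2, 2), (0, 0)))
    assert y.shape == (1, 5, 6, 6, 3)
    return y.shape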
| tf-keras/tf_keras/layers/reshaping/zero_padding3d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/reshaping/zero_padding3d.py",
"repo_id": "tf-keras",
"token_count": 3112
} | 181 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""1D Convolutional LSTM layer."""
from tf_keras.layers.rnn.base_conv_lstm import ConvLSTM
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.ConvLSTM1D")
class ConvLSTM1D(ConvLSTM):
"""1D Convolutional LSTM.
Similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers, specifying the strides of
the convolution. Specifying any stride value != 1 is incompatible with
specifying any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
no padding. `"same"` results in padding evenly to the left/right or
up/down of the input such that output has the same height/width
dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch, time, ...,
channels)` while `channels_first` corresponds to inputs with shape
`(batch, time, channels, ...)`. When unspecified, uses
`image_data_format` value found in your TF-Keras config file at
`~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to 'channels_last'.
dilation_rate: An integer or tuple/list of n integers, specifying the
dilation rate to use for dilated convolution. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any `strides`
value != 1.
activation: Activation function to use. By default hyperbolic tangent
activation function is applied (`tanh(x)`).
recurrent_activation: Activation function to use for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
at initialization. Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to the output of the
        layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output in the output
sequence, or the full sequence. (default False)
      return_state: Boolean. Whether to return the last state in addition to
        the output. (default False)
go_backwards: Boolean (default False). If True, process the input sequence
backwards.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state.
Call arguments:
inputs: A 4D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether a
given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` are set.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
    Input shape:
      - If data_format='channels_first':
          4D tensor with shape: `(samples, time, channels, rows)`
      - If data_format='channels_last':
          4D tensor with shape: `(samples, time, rows, channels)`
Output shape:
- If `return_state`: a list of tensors. The first tensor is the output.
The remaining tensors are the last states,
each 3D tensor with shape: `(samples, filters, new_rows)` if
data_format='channels_first'
or shape: `(samples, new_rows, filters)` if data_format='channels_last'.
`rows` values might have changed due to padding.
- If `return_sequences`: 4D tensor with shape: `(samples, timesteps,
filters, new_rows)` if data_format='channels_first'
or shape: `(samples, timesteps, new_rows, filters)` if
data_format='channels_last'.
- Else, 3D tensor with shape: `(samples, filters, new_rows)` if
data_format='channels_first'
or shape: `(samples, new_rows, filters)` if data_format='channels_last'.
Raises:
ValueError: in case of invalid constructor arguments.
References:
- [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
(the current implementation does not include the feedback loop on the
cells output).
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
activation="tanh",
recurrent_activation="hard_sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
dropout=0.0,
recurrent_dropout=0.0,
**kwargs
):
super().__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
**kwargs
)
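# Illustrative sketch (added for exposition; not part of the original module).
# A minimal usage example, assuming `channels_last` 4D inputs of shape
# (batch, time, rows, channels); the concrete sizes below are arbitrary.
# With `padding="same"` the row dimension is preserved and the channel
# dimension becomes `filters`:
def _conv_lstm1d_usage_example():
    import numpy as np
    import tensorflow as tf

    x = np.random.rand(4, 10, 32, 3).astype("float32")
    layer = tf.keras.layers.ConvLSTM1D(
        filters=16, kernel_size=3, padding="same"
    )
    y = layer(x)
    # y.shape == (4, 32, 16); with return_sequences=True it would instead be
    # (4, 10, 32, 16).
    return y.shape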
| tf-keras/tf_keras/layers/rnn/conv_lstm1d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/conv_lstm1d.py",
"repo_id": "tf-keras",
"token_count": 3301
} | 182 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Legacy module implementing RNN Cells.
This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import warnings
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import backend
from tf_keras import initializers
from tf_keras.engine import base_layer_utils
from tf_keras.engine import input_spec
from tf_keras.legacy_tf_layers import base as base_layer
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
def _hasattr(obj, attr_name):
try:
getattr(obj, attr_name)
except AttributeError:
return False
else:
return True
def _concat(prefix, suffix, static=False):
"""Concat that enables int, Tensor, or TensorShape values.
This function takes a size specification, which can be an integer, a
TensorShape, or a Tensor, and converts it into a concatenated Tensor
(if static = False) or a list of integers (if static = True).
Args:
prefix: The prefix; usually the batch size (and/or time step size).
(TensorShape, int, or Tensor.)
suffix: TensorShape, int, or Tensor.
static: If `True`, return a python list with possibly unknown dimensions.
Otherwise return a `Tensor`.
Returns:
shape: the concatenation of prefix and suffix.
Raises:
ValueError: if `suffix` is not a scalar or vector (or TensorShape).
ValueError: if prefix or suffix was `None` and asked for dynamic
Tensors out.
"""
if isinstance(prefix, tf.Tensor):
p = prefix
p_static = tf.get_static_value(prefix)
if p.shape.ndims == 0:
p = tf.compat.v1.expand_dims(p, 0)
elif p.shape.ndims != 1:
raise ValueError(
"Prefix tensor must be either a scalar or vector, "
f"but received tensor: {p}"
)
else:
p = tf.TensorShape(prefix)
p_static = p.as_list() if p.ndims is not None else None
p = (
tf.constant(p.as_list(), dtype=tf.int32)
if p.is_fully_defined()
else None
)
if isinstance(suffix, tf.Tensor):
s = suffix
s_static = tf.get_static_value(suffix)
if s.shape.ndims == 0:
s = tf.compat.v1.expand_dims(s, 0)
elif s.shape.ndims != 1:
raise ValueError(
"suffix tensor must be either a scalar or vector, "
f"but received tensor: {s}"
)
else:
s = tf.TensorShape(suffix)
s_static = s.as_list() if s.ndims is not None else None
s = (
tf.constant(s.as_list(), dtype=tf.int32)
if s.is_fully_defined()
else None
)
if static:
shape = tf.TensorShape(p_static).concatenate(s_static)
shape = shape.as_list() if shape.ndims is not None else None
else:
if p is None or s is None:
raise ValueError(
"Prefix or suffix can't be None. "
f"Received prefix = {prefix} and suffix = {suffix}"
)
shape = tf.concat((p, s), 0)
return shape
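# Illustrative sketch (added for exposition; not part of the original module).
# For a batch size of 32 and a per-cell state size of 5, `_concat` produces
# the combined state shape either statically or dynamically; the dynamic
# form is what `_zero_state_tensors` below feeds to `tf.zeros`:
def _concat_example():
    static_shape = _concat(32, 5, static=True)  # -> [32, 5] (python list)
    dynamic_shape = _concat(32, 5)  # -> int32 Tensor containing [32, 5]
    return static_shape, dynamic_shape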
def _zero_state_tensors(state_size, batch_size, dtype):
"""Create tensors of zeros based on state_size, batch_size, and dtype."""
def get_state_shape(s):
"""Combine s with batch_size to get a proper tensor shape."""
c = _concat(batch_size, s)
size = tf.zeros(c, dtype=dtype)
if not tf.executing_eagerly():
c_static = _concat(batch_size, s, static=True)
size.set_shape(c_static)
return size
return tf.nest.map_structure(get_state_shape, state_size)
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.RNNCell"])
@tf_export(v1=["nn.rnn_cell.RNNCell"])
class RNNCell(base_layer.Layer):
"""Abstract object representing an RNN cell.
Every `RNNCell` must have the properties below and implement `call` with
the signature `(output, next_state) = call(input, state)`. The optional
third input argument, `scope`, is allowed for backwards compatibility
purposes; but should be left off for new subclasses.
This definition of cell differs from the definition used in the literature.
In the literature, 'cell' refers to an object with a single scalar output.
This definition refers to a horizontal array of such units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
(possibly nested tuple of) TensorShape object(s), then it should return a
matching structure of Tensors having shape `[batch_size].concatenate(s)`
    for each `s` in `self.state_size`.
"""
def __init__(self, trainable=True, name=None, dtype=None, **kwargs):
super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)
        # Attribute that indicates whether the cell is a TF RNN cell, due to
        # the slight difference between TF and TF-Keras RNN cells. Notably, a
        # TF cell does not wrap a single-tensor state in a list, whereas a
        # Keras cell wraps the state into a list, so call() has to unwrap it.
self._is_tf_rnn_cell = True
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: if `self.state_size` is an integer, this should be a
`2-D Tensor` with shape `[batch_size, self.state_size]`. Otherwise,
if `self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size, s] for s in self.state_size`.
scope: VariableScope for the created subgraph; None uses class name.
Defaults to `None`.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors
matching the arity and shapes of `state`.
"""
if scope is not None:
with tf.compat.v1.variable_scope(
scope, custom_getter=self._rnn_get_variable
) as scope:
return super().__call__(inputs, state, scope=scope)
else:
scope_attrname = "rnncell_scope"
scope = getattr(self, scope_attrname, None)
if scope is None:
scope = tf.compat.v1.variable_scope(
tf.compat.v1.get_variable_scope(),
custom_getter=self._rnn_get_variable,
)
setattr(self, scope_attrname, scope)
with scope:
return super().__call__(inputs, state)
def _rnn_get_variable(self, getter, *args, **kwargs):
variable = getter(*args, **kwargs)
if tf.compat.v1.executing_eagerly_outside_functions():
trainable = variable.trainable
else:
trainable = variable in tf.compat.v1.trainable_variables() or (
base_layer_utils.is_split_variable(variable)
and list(variable)[0] in tf.compat.v1.trainable_variables()
)
if trainable and all(
variable is not v for v in self._trainable_weights
):
self._trainable_weights.append(variable)
elif not trainable and all(
variable is not v for v in self._non_trainable_weights
):
self._non_trainable_weights.append(variable)
return variable
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of
Integers or TensorShapes.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def build(self, _):
# This tells the parent Layer object that it's OK to call
# self.add_weight() inside the call() method.
pass
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
if inputs is not None:
# Validate the given batch_size and dtype against inputs if
# provided.
inputs = tf.convert_to_tensor(inputs, name="inputs")
if batch_size is not None:
if tf.is_tensor(batch_size):
static_batch_size = tf.get_static_value(
batch_size, partial=True
)
else:
static_batch_size = batch_size
if inputs.shape.dims[0].value != static_batch_size:
raise ValueError(
"batch size from input tensor is different from the "
"input param. Input tensor batch: "
f"{inputs.shape.dims[0].value}, "
f"batch_size: {batch_size}"
)
if dtype is not None and inputs.dtype != dtype:
raise ValueError(
"dtype from input tensor is different from the "
f"input param. Input tensor dtype: {inputs.dtype}, "
f"dtype: {dtype}"
)
batch_size = (
inputs.shape.dims[0].value or tf.compat.v1.shape(inputs)[0]
)
dtype = inputs.dtype
if batch_size is None or dtype is None:
raise ValueError(
"batch_size and dtype cannot be None while constructing "
f"initial state: batch_size={batch_size}, dtype={dtype}"
)
return self.zero_state(batch_size, dtype)
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size, state_size]` filled with zeros.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size, s]` for each s in `state_size`.
"""
# Try to use the last cached zero_state. This is done to avoid
# recreating zeros, especially when eager execution is enabled.
state_size = self.state_size
is_eager = tf.executing_eagerly()
if is_eager and _hasattr(self, "_last_zero_state"):
(
last_state_size,
last_batch_size,
last_dtype,
last_output,
) = getattr(self, "_last_zero_state")
if (
last_batch_size == batch_size
and last_dtype == dtype
and last_state_size == state_size
):
return last_output
with backend.name_scope(type(self).__name__ + "ZeroState"):
output = _zero_state_tensors(state_size, batch_size, dtype)
if is_eager:
self._last_zero_state = (state_size, batch_size, dtype, output)
return output
def get_config(self):
return super().get_config()
@property
def _use_input_spec_as_call_signature(self):
# We do not store the shape information for the state argument in the
# call function for legacy RNN cells, so do not generate an input
# signature.
return False
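# Illustrative sketch (added for exposition; not part of the original module).
# `RNNCell.zero_state` above builds the initial state from `state_size`; for
# a cell whose state is a single integer-sized tensor (such as the
# `BasicRNNCell` defined later in this file) it yields one zero matrix:
def _zero_state_example():
    cell = BasicRNNCell(num_units=4)
    init_state = cell.zero_state(batch_size=2, dtype=tf.float32)
    # init_state is a float32 Tensor of zeros with shape (2, 4); cells whose
    # `state_size` is a (nested) tuple get a matching structure of tensors.
    return init_state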
class LayerRNNCell(RNNCell):
"""Subclass of RNNCells that act like proper `tf.Layer` objects.
For backwards compatibility purposes, most `RNNCell` instances allow their
`call` methods to instantiate variables via `tf.compat.v1.get_variable`.
    The underlying variable scope thus keeps track of any variables and
    returns cached versions. This is atypical of `tf.layer` objects, which
separate this part of layer building into a `build` method that is only
called once.
Here we provide a subclass for `RNNCell` objects that act exactly as
    `Layer` objects do. They must provide a `build` method, and their
    `call` methods must not access variables via `tf.compat.v1.get_variable`.
"""
def __call__(self, inputs, state, scope=None, *args, **kwargs):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D
Tensor` with shape `[batch_size, self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size, s] for s in self.state_size`.
scope: optional cell scope.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors
matching the arity and shapes of `state`.
"""
# Bypass RNNCell's variable capturing semantics for LayerRNNCell.
# Instead, it is up to subclasses to provide a proper build
# method. See the class docstring for more details.
return base_layer.Layer.__call__(
self, inputs, state, scope=scope, *args, **kwargs
)
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.BasicRNNCell"])
@tf_export(v1=["nn.rnn_cell.BasicRNNCell"])
class BasicRNNCell(LayerRNNCell):
"""The most basic RNN cell.
Note that this cell is not optimized for performance.
Args:
num_units: int, The number of units in the RNN cell.
activation: Nonlinearity to use. Default: `tanh`. It could also be string
that is within TF-Keras activation function names.
reuse: (optional) Python boolean describing whether to reuse variables in
an existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
"""
def __init__(
self,
num_units,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs,
):
warnings.warn(
"`tf.nn.rnn_cell.BasicRNNCell` is deprecated and will be "
"removed in a future version. This class "
"is equivalent as `tf.keras.layers.SimpleRNNCell`, "
"and will be replaced by that in Tensorflow 2.0.",
stacklevel=2,
)
super().__init__(_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if tf.executing_eagerly() and tf.config.list_logical_devices("GPU"):
logging.warning(
"%s: Note that this cell is not optimized for performance.",
self,
)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
if activation:
self._activation = activations.get(activation)
else:
self._activation = tf.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, "
f"received shape: {inputs_shape}"
)
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
self._kernel = self.add_weight(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + self._num_units, self._num_units],
)
self._bias = self.add_weight(
_BIAS_VARIABLE_NAME,
shape=[self._num_units],
initializer=tf.compat.v1.zeros_initializer(dtype=self.dtype),
)
self.built = True
def call(self, inputs, state):
"""Most basic RNN: output = new_state = act(W * input + U * state +
B)."""
_check_rnn_cell_input_dtypes([inputs, state])
gate_inputs = tf.matmul(tf.concat([inputs, state], 1), self._kernel)
gate_inputs = tf.nn.bias_add(gate_inputs, self._bias)
output = self._activation(gate_inputs)
return output, output
def get_config(self):
config = {
"num_units": self._num_units,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
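# Illustrative sketch (added for exposition; not part of the original module).
# `BasicRNNCell.call` above concatenates the input with the previous state
# and applies a single dense transformation followed by the activation. A
# NumPy mirror of one step with the default `tanh` activation (the helper
# name and argument layout are illustrative only; `kernel` has shape
# [input_depth + num_units, num_units], `bias` has shape [num_units]):
def _basic_rnn_reference_step(x, h, kernel, bias):
    import numpy as np

    gate_inputs = np.concatenate([x, h], axis=1) @ kernel + bias
    output = np.tanh(gate_inputs)
    # As in `call`, the output doubles as the next state.
    return output, output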
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.GRUCell"])
@tf_export(v1=["nn.rnn_cell.GRUCell"])
class GRUCell(LayerRNNCell):
"""Gated Recurrent Unit cell.
Note that this cell is not optimized for performance. Please use
`tf.compat.v1.keras.layers.CuDNNGRU` for better performance on GPU, or
`tf.raw_ops.GRUBlockCell` for better performance on CPU.
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables in
an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
References: Learning Phrase Representations using RNN Encoder Decoder
for Statistical Machine Translation: [Cho et al., 2014]
(https://aclanthology.coli.uni-saarland.de/papers/D14-1179/d14-1179)
([pdf](http://emnlp2014.org/papers/pdf/EMNLP2014179.pdf))
"""
def __init__(
self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None,
name=None,
dtype=None,
**kwargs,
):
warnings.warn(
"`tf.nn.rnn_cell.GRUCell` is deprecated and will be removed "
"in a future version. This class "
"is equivalent as `tf.keras.layers.GRUCell`, "
"and will be replaced by that in Tensorflow 2.0.",
stacklevel=2,
)
super().__init__(_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if tf.executing_eagerly() and tf.config.list_logical_devices("GPU"):
logging.warning(
"%s: Note that this cell is not optimized for performance. "
"Please use tf.compat.v1.keras.layers.CuDNNGRU for better "
"performance on GPU.",
self,
)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
if activation:
self._activation = activations.get(activation)
else:
self._activation = tf.tanh
self._kernel_initializer = initializers.get(kernel_initializer)
self._bias_initializer = initializers.get(bias_initializer)
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, "
f"received shape: {inputs_shape}"
)
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
self._gate_kernel = self.add_weight(
f"gates/{_WEIGHTS_VARIABLE_NAME}",
shape=[input_depth + self._num_units, 2 * self._num_units],
initializer=self._kernel_initializer,
)
self._gate_bias = self.add_weight(
f"gates/{_BIAS_VARIABLE_NAME}",
shape=[2 * self._num_units],
initializer=(
self._bias_initializer
if self._bias_initializer is not None
else tf.compat.v1.constant_initializer(1.0, dtype=self.dtype)
),
)
self._candidate_kernel = self.add_weight(
f"candidate/{_WEIGHTS_VARIABLE_NAME}",
shape=[input_depth + self._num_units, self._num_units],
initializer=self._kernel_initializer,
)
self._candidate_bias = self.add_weight(
f"candidate/{_BIAS_VARIABLE_NAME}",
shape=[self._num_units],
initializer=(
self._bias_initializer
if self._bias_initializer is not None
else tf.compat.v1.zeros_initializer(dtype=self.dtype)
),
)
self.built = True
def call(self, inputs, state):
"""Gated recurrent unit (GRU) with nunits cells."""
_check_rnn_cell_input_dtypes([inputs, state])
gate_inputs = tf.matmul(
tf.concat([inputs, state], 1), self._gate_kernel
)
gate_inputs = tf.nn.bias_add(gate_inputs, self._gate_bias)
value = tf.sigmoid(gate_inputs)
r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
candidate = tf.matmul(
tf.concat([inputs, r_state], 1), self._candidate_kernel
)
candidate = tf.nn.bias_add(candidate, self._candidate_bias)
c = self._activation(candidate)
new_h = u * state + (1 - u) * c
return new_h, new_h
def get_config(self):
config = {
"num_units": self._num_units,
"kernel_initializer": initializers.serialize(
self._kernel_initializer
),
"bias_initializer": initializers.serialize(self._bias_initializer),
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.LSTMStateTuple"])
@tf_export(v1=["nn.rnn_cell.LSTMStateTuple"])
class LSTMStateTuple(_LSTMStateTuple):
"""Tuple used by LSTM Cells for `state_size`, `zero_state`, & output state.
    Stores two elements, `(c, h)`, in that order, where `c` is the cell state
    and `h` is the output (hidden) state.
Only used when `state_is_tuple=True`.
"""
__slots__ = ()
@property
def dtype(self):
(c, h) = self
if c.dtype != h.dtype:
raise TypeError(
"Inconsistent dtypes for internal state: "
f"{c.dtype} vs {h.dtype}"
)
return c.dtype
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.BasicLSTMCell"])
@tf_export(v1=["nn.rnn_cell.BasicLSTMCell"])
class BasicLSTMCell(LayerRNNCell):
"""DEPRECATED: Please use `tf.compat.v1.nn.rnn_cell.LSTMCell` instead.
Basic LSTM recurrent network cell.
    The implementation is based on: http://arxiv.org/abs/1409.2329.
    We add forget_bias (default: 1) to the biases of the forget gate in order
    to reduce the scale of forgetting at the beginning of training.
    It does not allow cell clipping or a projection layer, and it does not use
    peephole connections: it is the basic baseline.
For advanced models, please use the full `tf.compat.v1.nn.rnn_cell.LSTMCell`
that follows.
Note that this cell is not optimized for performance. Please use
`tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU, or
`tf.raw_ops.LSTMBlockCell` for better performance on CPU.
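
    Example (a minimal usage sketch, assuming the `tf.compat.v1` API):

    ```python
    cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(num_units=64)
    inputs = tf.ones([8, 16])               # [batch, input_depth]
    state = cell.zero_state(8, tf.float32)  # LSTMStateTuple(c, h)
    output, new_state = cell(inputs, state)
    # `output` is the same tensor as `new_state.h`, shape [8, 64].
    ```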
"""
def __init__(
self,
num_units,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs,
):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above). Must
            be set to `0.0` manually when restoring from CudnnLSTM-trained
checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated along
the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
            It can also be a string naming a TF-Keras activation function.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already
has the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the
type of the first input). Required when `build` is called before
`call`.
          **kwargs: Dict, keyword arguments for common layer attributes, such
            as `trainable`, when constructing the cell from the configs
            returned by `get_config()`. When restoring from CudnnLSTM-trained
            checkpoints, `CudnnCompatibleLSTMCell` must be used instead.
"""
warnings.warn(
"`tf.nn.rnn_cell.BasicLSTMCell` is deprecated and will be "
"removed in a future version. This class "
"is equivalent as `tf.keras.layers.LSTMCell`, "
"and will be replaced by that in Tensorflow 2.0.",
stacklevel=2,
)
super().__init__(_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if not state_is_tuple:
logging.warning(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.",
self,
)
if tf.executing_eagerly() and tf.config.list_logical_devices("GPU"):
logging.warning(
"%s: Note that this cell is not optimized for performance. "
"Please use tf.compat.v1.keras.layers.CuDNNLSTM for better "
"performance on GPU.",
self,
)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
if activation:
self._activation = activations.get(activation)
else:
self._activation = tf.tanh
@property
def state_size(self):
return (
LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple
else 2 * self._num_units
)
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, "
f"received shape: {inputs_shape}"
)
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
h_depth = self._num_units
self._kernel = self.add_weight(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + h_depth, 4 * self._num_units],
)
self._bias = self.add_weight(
_BIAS_VARIABLE_NAME,
shape=[4 * self._num_units],
initializer=tf.compat.v1.zeros_initializer(dtype=self.dtype),
)
self.built = True
def call(self, inputs, state):
"""Long short-term memory cell (LSTM).
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: An `LSTMStateTuple` of state tensors, each shaped `[batch_size,
num_units]`, if `state_is_tuple` has been set to `True`. Otherwise,
a `Tensor` shaped `[batch_size, 2 * num_units]`.
Returns:
A pair containing the new hidden state, and the new state (either a
`LSTMStateTuple` or a concatenated state, depending on
`state_is_tuple`).
"""
_check_rnn_cell_input_dtypes([inputs, state])
sigmoid = tf.sigmoid
one = tf.constant(1, dtype=tf.int32)
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = tf.split(value=state, num_or_size_splits=2, axis=one)
gate_inputs = tf.matmul(tf.concat([inputs, h], 1), self._kernel)
gate_inputs = tf.nn.bias_add(gate_inputs, self._bias)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(value=gate_inputs, num_or_size_splits=4, axis=one)
forget_bias_tensor = tf.constant(self._forget_bias, dtype=f.dtype)
# Note that using `add` and `multiply` instead of `+` and `*` gives a
        # performance improvement, so we use them at the cost of readability.
add = tf.add
multiply = tf.multiply
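        # Standard LSTM update (no peepholes, no clipping):
        #   new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * activation(j)
        #   new_h = activation(new_c) * sigmoid(o)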
new_c = add(
multiply(c, sigmoid(add(f, forget_bias_tensor))),
multiply(sigmoid(i), self._activation(j)),
)
new_h = multiply(self._activation(new_c), sigmoid(o))
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = tf.concat([new_c, new_h], 1)
return new_h, new_state
def get_config(self):
config = {
"num_units": self._num_units,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.LSTMCell"])
@tf_export(v1=["nn.rnn_cell.LSTMCell"])
class LSTMCell(LayerRNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on (Gers et al., 1999).
The peephole implementation is based on (Sak et al., 2014).
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Note that this cell is not optimized for performance. Please use
`tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU, or
`tf.raw_ops.LSTMBlockCell` for better performance on CPU.
References:
Long short-term memory recurrent neural network architectures for large
scale acoustic modeling:
[Sak et al., 2014]
(https://www.isca-speech.org/archive/interspeech_2014/i14_0338.html)
([pdf]
(https://www.isca-speech.org/archive/archive_papers/interspeech_2014/i14_0338.pdf))
Learning to forget:
[Gers et al., 1999]
(http://digital-library.theiet.org/content/conferences/10.1049/cp_19991218)
([pdf](https://arxiv.org/pdf/1409.2329.pdf))
Long Short-Term Memory:
[Hochreiter et al., 1997]
(https://www.mitpressjournals.org/doi/abs/10.1162/neco.1997.9.8.1735)
([pdf](http://ml.jku.at/publications/older/3504.pdf))
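
    Example (a minimal usage sketch, assuming the `tf.compat.v1` API; the
    projection makes the output and `h` state `num_proj`-sized):

    ```python
    cell = tf.compat.v1.nn.rnn_cell.LSTMCell(
        num_units=128, num_proj=64, use_peepholes=True, cell_clip=3.0)
    inputs = tf.ones([8, 16])
    state = cell.zero_state(8, tf.float32)   # c: [8, 128], h: [8, 64]
    output, new_state = cell(inputs, state)  # output: [8, 64]
    ```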
"""
def __init__(
self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
num_unit_shards=None,
num_proj_shards=None,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs,
):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is
clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and
`proj_clip` is provided, then the projected values are clipped
elementwise to within `[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of the
            training. It must be set manually to `0.0` when restoring from
            CudnnLSTM-trained checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated along
the column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
            It can also be a string naming a TF-Keras activation function.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already
has the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the
type of the first input). Required when `build` is called before
`call`.
          **kwargs: Dict, keyword arguments for common layer attributes, such
            as `trainable`, when constructing the cell from the configs
            returned by `get_config()`. When restoring from CudnnLSTM-trained
            checkpoints, use `CudnnCompatibleLSTMCell` instead.
"""
warnings.warn(
"`tf.nn.rnn_cell.LSTMCell` is deprecated and will be "
"removed in a future version. This class "
"is equivalent as `tf.keras.layers.LSTMCell`, "
"and will be replaced by that in Tensorflow 2.0.",
stacklevel=2,
)
super().__init__(_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if not state_is_tuple:
logging.warning(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.",
self,
)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warning(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.",
self,
)
if tf.executing_eagerly() and tf.config.list_logical_devices("GPU"):
logging.warning(
"%s: Note that this cell is not optimized for performance. "
"Please use tf.compat.v1.keras.layers.CuDNNLSTM for better "
"performance on GPU.",
self,
)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializers.get(initializer)
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
if activation:
self._activation = activations.get(activation)
else:
self._activation = tf.tanh
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple
else num_units + num_proj
)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple
else 2 * num_units
)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError(
"Expected inputs.shape[-1] to be known, "
f"received shape: {inputs_shape}"
)
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
h_depth = self._num_units if self._num_proj is None else self._num_proj
maybe_partitioner = (
tf.compat.v1.fixed_size_partitioner(self._num_unit_shards)
if self._num_unit_shards is not None
else None
)
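        # The recurrent input is the (possibly projected) output of size
        # h_depth, and the single kernel packs the i, j, f, o blocks of the
        # four gates side by side.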
self._kernel = self.add_weight(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + h_depth, 4 * self._num_units],
initializer=self._initializer,
partitioner=maybe_partitioner,
)
if self.dtype is None:
initializer = tf.compat.v1.zeros_initializer
else:
initializer = tf.compat.v1.zeros_initializer(dtype=self.dtype)
self._bias = self.add_weight(
_BIAS_VARIABLE_NAME,
shape=[4 * self._num_units],
initializer=initializer,
)
if self._use_peepholes:
self._w_f_diag = self.add_weight(
"w_f_diag",
shape=[self._num_units],
initializer=self._initializer,
)
self._w_i_diag = self.add_weight(
"w_i_diag",
shape=[self._num_units],
initializer=self._initializer,
)
self._w_o_diag = self.add_weight(
"w_o_diag",
shape=[self._num_units],
initializer=self._initializer,
)
if self._num_proj is not None:
maybe_proj_partitioner = (
tf.compat.v1.fixed_size_partitioner(self._num_proj_shards)
if self._num_proj_shards is not None
else None
)
self._proj_kernel = self.add_weight(
f"projection/{_WEIGHTS_VARIABLE_NAME}",
shape=[self._num_units, self._num_proj],
initializer=self._initializer,
partitioner=maybe_proj_partitioner,
)
self.built = True
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, must be 2-D, `[batch, input_size]`.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, [batch, state_size]`. If `state_is_tuple` is True, this must
be a tuple of state Tensors, both `2-D`, with column sizes `c_state`
and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch, output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs`
when the previous state was `state`. Same type and shape(s) as
`state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
_check_rnn_cell_input_dtypes([inputs, state])
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = tf.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
m_prev = tf.slice(state, [0, self._num_units], [-1, num_proj])
input_size = inputs.get_shape().with_rank(2).dims[1].value
if input_size is None:
raise ValueError(
"Could not infer input size from inputs.get_shape()[-1]."
f"Received input shape: {inputs.get_shape()}"
)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = tf.matmul(tf.concat([inputs, m_prev], 1), self._kernel)
lstm_matrix = tf.nn.bias_add(lstm_matrix, self._bias)
i, j, f, o = tf.split(value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
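        # With peepholes, the previous cell state also feeds the input and
        # forget gates:
        #   c = sigmoid(f + forget_bias + w_f * c_prev) * c_prev
        #       + sigmoid(i + w_i * c_prev) * activation(j)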
if self._use_peepholes:
c = sigmoid(
f + self._forget_bias + self._w_f_diag * c_prev
) * c_prev + sigmoid(
i + self._w_i_diag * c_prev
) * self._activation(
j
)
else:
c = sigmoid(f + self._forget_bias) * c_prev + sigmoid(
i
) * self._activation(j)
if self._cell_clip is not None:
c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip)
if self._use_peepholes:
m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
m = tf.matmul(m, self._proj_kernel)
if self._proj_clip is not None:
m = tf.clip_by_value(m, -self._proj_clip, self._proj_clip)
new_state = (
LSTMStateTuple(c, m)
if self._state_is_tuple
else tf.concat([c, m], 1)
)
return m, new_state
def get_config(self):
config = {
"num_units": self._num_units,
"use_peepholes": self._use_peepholes,
"cell_clip": self._cell_clip,
"initializer": initializers.serialize(self._initializer),
"num_proj": self._num_proj,
"proj_clip": self._proj_clip,
"num_unit_shards": self._num_unit_shards,
"num_proj_shards": self._num_proj_shards,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export(v1=["keras.__internal__.legacy.rnn_cell.MultiRNNCell"])
@tf_export(v1=["nn.rnn_cell.MultiRNNCell"])
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells.
Example:
```python
num_units = [128, 64]
cells = [BasicLSTMCell(num_units=n) for n in num_units]
stacked_rnn_cell = MultiRNNCell(cells)
```
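
    A step can then be driven with `zero_state` (an illustrative sketch using
    the cells defined above):

    ```python
    inputs = tf.ones([32, 16])
    state = stacked_rnn_cell.zero_state(32, tf.float32)
    output, state = stacked_rnn_cell(inputs, state)
    ```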
"""
def __init__(self, cells, state_is_tuple=True):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples,
where `n = len(cells)`. If False, the states are all concatenated
along the column axis. This latter behavior will soon be
deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the
cells returns a state tuple but the flag `state_is_tuple` is
`False`.
"""
logging.warning(
"`tf.nn.rnn_cell.MultiRNNCell` is deprecated. This class "
"is equivalent as `tf.keras.layers.StackedRNNCells`, "
"and will be replaced by that in Tensorflow 2.0."
)
super().__init__()
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
if not tf.nest.is_nested(cells):
raise TypeError(
f"cells must be a list or tuple, but received: {cells}."
)
if len(set(id(cell) for cell in cells)) < len(cells):
logging.log_first_n(
logging.WARN,
"At least two cells provided to MultiRNNCell "
"are the same object and will share weights.",
1,
)
self._cells = cells
for cell_number, cell in enumerate(self._cells):
# Add Trackable dependencies on these cells so their variables get
# saved with this object when using object-based saving.
if isinstance(cell, tf.__internal__.tracking.Trackable):
# TODO(allenl): Track down non-Trackable callers.
self._track_trackable(cell, name="cell-%d" % (cell_number,))
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(tf.nest.is_nested(c.state_size) for c in self._cells):
raise ValueError(
"Some cells return tuples of states, but the flag "
"state_is_tuple is not set. "
f"State sizes are: {[c.state_size for c in self._cells]}"
)
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum(cell.state_size for cell in self._cells)
@property
def output_size(self):
return self._cells[-1].output_size
def zero_state(self, batch_size, dtype):
with backend.name_scope(type(self).__name__ + "ZeroState"):
if self._state_is_tuple:
return tuple(
cell.zero_state(batch_size, dtype) for cell in self._cells
)
else:
# We know here that state_size of each cell is not a tuple and
# presumably does not contain TensorArrays or anything else
# fancy
return super().zero_state(batch_size, dtype)
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = []
for cell in self._cells:
if isinstance(cell, base_layer.Layer):
weights += cell.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for cell in self._cells:
if isinstance(cell, base_layer.Layer):
weights += cell.non_trainable_weights
if not self.trainable:
trainable_weights = []
for cell in self._cells:
if isinstance(cell, base_layer.Layer):
trainable_weights += cell.trainable_weights
return trainable_weights + weights
return weights
def call(self, inputs, state):
"""Run this multi-layer cell on inputs, starting from state."""
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with tf.compat.v1.variable_scope("cell_%d" % i):
if self._state_is_tuple:
if not tf.nest.is_nested(state):
raise ValueError(
"Expected state to be a tuple of length "
f"{len(self.state_size)}"
f", but received: {state}"
)
cur_state = state[i]
else:
cur_state = tf.slice(
state, [0, cur_state_pos], [-1, cell.state_size]
)
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (
tuple(new_states)
if self._state_is_tuple
else tf.concat(new_states, 1)
)
return cur_inp, new_states
def _check_rnn_cell_input_dtypes(inputs):
"""Check whether the input tensors are with supported dtypes.
    Default RNN cells only support float and complex dtypes, since the
    activation functions (tanh and sigmoid) only allow those types. This
    function raises a clear error message if an input is not of a supported
    type.
Args:
      inputs: tensor or nested structure of tensors that are fed to the RNN
        cell as input or state.
Raises:
      ValueError: if any of the input tensors does not have a float or
        complex dtype.
"""
for t in tf.nest.flatten(inputs):
_check_supported_dtypes(t.dtype)
def _check_supported_dtypes(dtype):
if dtype is None:
return
dtype = tf.as_dtype(dtype)
if not (dtype.is_floating or dtype.is_complex):
raise ValueError(
"RNN cell only supports floating point inputs, "
f"but received dtype: {dtype}"
)
| tf-keras/tf_keras/layers/rnn/legacy_cells.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/legacy_cells.py",
"repo_id": "tf-keras",
"token_count": 24061
} | 183 |
"""Init file."""
from tf_keras.legacy_tf_layers import migration_utils
| tf-keras/tf_keras/legacy_tf_layers/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/legacy_tf_layers/__init__.py",
"repo_id": "tf-keras",
"token_count": 25
} | 184 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras loss functions."""
import warnings
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import activations
from tf_keras import backend
from tf_keras import losses
from tf_keras.testing_infra import test_combinations
from tf_keras.utils import losses_utils
# isort: off
from tensorflow.python.autograph.impl import (
api as autograph,
)
ALL_LOSSES = [
losses.mean_squared_error,
losses.mean_absolute_error,
losses.mean_absolute_percentage_error,
losses.mean_squared_logarithmic_error,
losses.squared_hinge,
losses.hinge,
losses.categorical_crossentropy,
losses.binary_crossentropy,
losses.kl_divergence,
losses.poisson,
losses.cosine_similarity,
losses.log_cosh,
losses.categorical_hinge,
]
class KerasLossesTest(tf.test.TestCase, parameterized.TestCase):
def test_objective_shapes_3d(self):
with self.cached_session():
y_a = backend.variable(np.random.random((5, 6, 7)))
y_b = backend.variable(np.random.random((5, 6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(objective_output.shape.as_list(), [5, 6])
def test_objective_shapes_2d(self):
with self.cached_session():
y_a = backend.variable(np.random.random((6, 7)))
y_b = backend.variable(np.random.random((6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(
objective_output.shape.as_list(),
[
6,
],
)
def test_cce_one_hot(self):
with self.cached_session():
y_a = backend.variable(np.random.randint(0, 7, (5, 6)))
y_b = backend.variable(np.random.random((5, 6, 7)))
objective_output = losses.sparse_categorical_crossentropy(y_a, y_b)
assert backend.eval(objective_output).shape == (5, 6)
y_a = backend.variable(np.random.randint(0, 7, (6,)))
y_b = backend.variable(np.random.random((6, 7)))
objective_output = losses.sparse_categorical_crossentropy(y_a, y_b)
assert backend.eval(objective_output).shape == (6,)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_categorical_crossentropy_loss(self):
target = backend.variable(np.random.randint(0, 1, (5, 1)))
logits = backend.variable(np.random.random((5, 1)))
softmax_output = backend.softmax(logits)
output_from_logit = losses.categorical_crossentropy(
target, logits, from_logits=True
)
output_from_softmax = losses.categorical_crossentropy(
target, softmax_output
)
np.testing.assert_allclose(
backend.eval(output_from_logit),
backend.eval(output_from_softmax),
atol=1e-5,
)
axis = 0
output_from_logit_axis = losses.categorical_crossentropy(
target, logits, from_logits=True, axis=axis
)
output_from_softmax_axis = losses.categorical_crossentropy(
target, softmax_output, axis=axis
)
np.testing.assert_allclose(
backend.eval(output_from_logit_axis),
backend.eval(output_from_softmax_axis),
atol=1e-5,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self):
t = backend.placeholder()
p = backend.placeholder()
o = losses.categorical_crossentropy(t, p)
t_val = tf.convert_to_tensor(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
)
p_val = tf.convert_to_tensor(
[[0.9, 0.05, 0.05], [0.05, 0.89, 0.06], [0.05, 0.01, 0.94]]
)
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [0.105, 0.116, 0.062], 1e-3)
# from logits
p_val = tf.convert_to_tensor(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
o = losses.categorical_crossentropy(t, p, from_logits=True)
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [0.002, 0, 0.17], 1e-3)
def test_categorial_crossentropy_loss_different_axis(self):
target = backend.variable(np.random.randint(0, 1, (5, 2, 3)))
logits = backend.variable(np.random.random((5, 2, 3)))
softmax_output = backend.softmax(logits)
axis = 1
output_from_logit_axis = losses.categorical_crossentropy(
target, logits, from_logits=True, axis=axis
)
output_from_softmax_axis = losses.categorical_crossentropy(
target, softmax_output, axis=axis
)
np.testing.assert_allclose(
backend.eval(output_from_logit_axis),
backend.eval(output_from_softmax_axis),
atol=1e-5,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_sparse_categorical_crossentropy_loss(self):
target = backend.variable(np.random.randint(0, 1, (5, 1)))
logits = backend.variable(np.random.random((5, 1)))
softmax_output = backend.softmax(logits)
output_from_logit = losses.sparse_categorical_crossentropy(
target, logits, from_logits=True
)
output_from_softmax = losses.sparse_categorical_crossentropy(
target, softmax_output
)
np.testing.assert_allclose(
backend.eval(output_from_logit),
backend.eval(output_from_softmax),
atol=1e-5,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_sparse_categorical_crossentropy_loss_with_ignore_class(self):
ignore_class = 255
target = backend.variable(np.random.randint(0, 1, (5, 1)))
logits = backend.variable(np.random.random((5, 1)))
softmax_output = backend.softmax(logits)
_valid = tf.constant([[0], [1], [0], [1], [1]], target.dtype)
target.assign(target * _valid + (1 - _valid) * ignore_class)
output_from_logit = losses.sparse_categorical_crossentropy(
target, logits, ignore_class=ignore_class, from_logits=True
)
output_from_softmax = losses.sparse_categorical_crossentropy(
target, softmax_output, ignore_class=ignore_class
)
# expected_mask = [False, True, False, True, True]
# for o in (output_from_logit, output_from_softmax):
# mask = backend.eval(losses_utils.get_mask(o))
# np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_allclose(
backend.eval(output_from_logit),
backend.eval(output_from_softmax),
atol=1e-5,
)
@test_combinations.generate(test_combinations.combine(mode=["graph"]))
def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(
self,
):
# This test only runs in graph because the TF op layer is not supported
# yet for sparse ops.
t = backend.placeholder()
p = backend.placeholder()
o = losses.sparse_categorical_crossentropy(t, p)
t_val = tf.convert_to_tensor([0, 1, 2])
p_val = tf.convert_to_tensor(
[[0.9, 0.05, 0.05], [0.05, 0.89, 0.06], [0.05, 0.01, 0.94]]
)
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [0.105, 0.116, 0.062], 1e-3)
# from logits
p_val = tf.convert_to_tensor(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
o = losses.sparse_categorical_crossentropy(t, p, from_logits=True)
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [0.002, 0, 0.17], 1e-3)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_sparse_categorical_crossentropy_with_float16(self):
# See https://github.com/keras-team/tf-keras/issues/15012 for more
        # details. We don't cast y_true to the same dtype as y_pred, since
        # y_pred could be float16, which has a small upper bound and limited
        # integer precision, so the cast could lose information (2049 is not
        # exactly representable in float16). y_true is used as int64 anyway.
        # Create 2 observations with labels 0 and 2049, since 2048 is the
        # largest integer float16 represents exactly.
y_true = [0, 2049]
# should result in a loss close to 0 since predicting y_true perfectly
y_pred = np.zeros((2, 2050))
y_pred[0][0] = 1
y_pred[1][2049] = 1
y_pred_16 = tf.convert_to_tensor(y_pred, dtype=tf.float16)
        # If y_true were cast to float16 inside SparseCategoricalCrossentropy,
        # the loss would not be zero.
scce = losses.SparseCategoricalCrossentropy()
self.assertAllClose(scce(y_true, y_pred_16).numpy(), 0.0, atol=1e-3)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_binary_crossentropy_loss(self):
target = backend.variable(np.random.randint(0, 1, (5, 1)))
logits = backend.variable(np.random.random((5, 1)))
sigmoid_output = backend.sigmoid(logits)
output_from_logit = losses.binary_crossentropy(
target, logits, from_logits=True
)
output_from_sigmoid = losses.binary_crossentropy(target, sigmoid_output)
np.testing.assert_allclose(
backend.eval(output_from_logit),
backend.eval(output_from_sigmoid),
atol=1e-5,
)
axis = 0
output_from_logit_axis = losses.binary_crossentropy(
target, logits, from_logits=True, axis=axis
)
output_from_sigmoid_axis = losses.binary_crossentropy(
target, sigmoid_output, axis=axis
)
np.testing.assert_allclose(
backend.eval(output_from_logit_axis),
backend.eval(output_from_sigmoid_axis),
atol=1e-5,
)
def test_get_bce(self):
bce_fn = losses.get("bce")
self.assertEqual(bce_fn, losses.binary_crossentropy)
def test_serialization(self):
fn = losses.get("mse")
config = losses.serialize(fn)
new_fn = losses.deserialize(config)
self.assertEqual(fn, new_fn)
def test_categorical_hinge(self):
y_pred = backend.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
y_true = backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0
loss = backend.eval(losses.categorical_hinge(y_true, y_pred))
self.assertAllClose(expected_loss, np.mean(loss))
def test_loss_wrapper(self):
loss_fn = losses.get("mse")
mse_obj = losses.LossFunctionWrapper(loss_fn, name=loss_fn.__name__)
self.assertEqual(mse_obj.name, "mean_squared_error")
self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.AUTO)
y_true = tf.constant([[1.0, 9.0], [2.0, 5.0]])
y_pred = tf.constant([[4.0, 8.0], [12.0, 3.0]])
sample_weight = tf.constant([1.2, 0.5])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
# mse = [((4 - 1)^2 + (8 - 9)^2) / 2, ((12 - 2)^2 + (3 - 5)^2) / 2]
# mse = [5, 52]
# weighted_mse = [5 * 1.2, 52 * 0.5] = [6, 26]
        # reduced_weighted_mse = (6 + 26) / 2 = 16
self.assertAllClose(self.evaluate(loss), 16, 1e-2)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_loss_wrapper_autograph(self):
# Test that functions with control flow wrapped in a LossFunctionWrapper
# get autographed when in a tf.function
def loss_fn(y_true, y_pred):
mse_loss_fn = losses.get("mse")
if tf.reduce_mean(y_true) > 0:
return mse_loss_fn(y_true, y_pred)
else:
return mse_loss_fn(y_true, y_pred)
mse_obj = losses.LossFunctionWrapper(loss_fn)
y_true = tf.constant([[1.0, 9.0], [2.0, 5.0]])
y_pred = tf.constant([[4.0, 8.0], [12.0, 3.0]])
sample_weight = tf.constant([1.2, 0.5])
@tf.function
def tf_functioned_loss_fn(y_true, y_pred, sample_weight=None):
return mse_obj(y_true, y_pred, sample_weight=sample_weight)
loss = tf_functioned_loss_fn(
y_true, y_pred, sample_weight=sample_weight
)
# mse = [((4 - 1)^2 + (8 - 9)^2) / 2, ((12 - 2)^2 + (3 - 5)^2) / 2]
# mse = [5, 52]
# weighted_mse = [5 * 1.2, 52 * 0.5] = [6, 26]
        # reduced_weighted_mse = (6 + 26) / 2 = 16
self.assertAllClose(self.evaluate(loss), 16, 1e-2)
def test_loss_wrapper_dtype(self):
# Make sure the loss wrapper doesn't cause any numerical precision loss
# during calculation. See
# https://github.com/keras-team/tf-keras/issues/15791
x = tf.convert_to_tensor([[2.1]], dtype=tf.float64)
y_true = tf.square(x)
y_pred = tf.convert_to_tensor([[3.68]], dtype=tf.float64)
# TF loss
loss = losses.MeanSquaredError()
tf_loss = loss(y_pred, y_true)
# manually computed loss in 64-bit
man_loss64 = tf.squeeze(tf.square(y_pred - y_true))
self.assertEqual(tf_loss.dtype, tf.float64)
# Make a smaller atol to ensure the float64 precision is hold.
self.assertAllClose(
self.evaluate(tf_loss), self.evaluate(man_loss64), atol=1e-8
)
def test_invalid_reduction(self):
with self.assertRaisesRegex(ValueError, "Invalid Reduction Key: Foo."):
losses.MeanSquaredError(reduction="Foo")
mse_obj = losses.MeanSquaredError()
y = tf.constant([1])
mse_obj.reduction = "Bar"
with self.assertRaisesRegex(ValueError, "Invalid Reduction Key: Bar."):
mse_obj(y, y)
def test_deserialization_error(self):
with self.assertRaisesRegex(ValueError, "Could not interpret loss"):
losses.get(0)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_binary_crossentropy_uses_cached_logits(self):
logits = tf.constant([[-30.0, 30.0]])
y_pred = activations.sigmoid(logits)
self.assertTrue(hasattr(y_pred, "_keras_logits"))
y_true = tf.constant([[0.0, 1.0]])
loss = losses.binary_crossentropy(y_true, y_pred)[0]
# Check that logits are used. If y_pred is used directly, loss will
# collapse to 0 from underflow.
self.assertNotEqual(self.evaluate(loss), 0.0)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_categorical_crossentropy_uses_cached_logits(self):
logits = tf.constant([[-5.0, 0.0, 5.0]])
y_pred = activations.softmax(logits)
self.assertTrue(hasattr(y_pred, "_keras_logits"))
y_true = tf.constant([[0.0, 0.0, 1.0]])
loss = losses.categorical_crossentropy(
y_true, logits, from_logits=True
)[0]
# Check that logits are used. If y_pred is used directly, loss will
# collapse to 0 from underflow.
self.assertNotEqual(self.evaluate(loss), 0.0)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_sparse_categorical_crossentropy_uses_cached_logits(self):
logits = tf.constant([[-5.0, 0.0, 5.0]])
y_pred = activations.softmax(logits)
self.assertTrue(hasattr(y_pred, "_keras_logits"))
y_true = tf.constant([2])
loss = losses.sparse_categorical_crossentropy(
y_true, logits, from_logits=True
)[0]
# Check that logits are used. If y_pred is used directly, loss will
# collapse to 0 from underflow.
self.assertNotEqual(self.evaluate(loss), 0.0)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_loss_not_autographed_in_eager(self):
class MyLoss(losses.Loss):
def call(self, y_true, y_pred):
return y_true - y_pred
loss = MyLoss()
y_true = tf.constant([[0.0, 0.0, 0.0]])
y_pred = tf.constant([[1.0, 1.0, 1.0]])
def tf_convert(fn, _):
assert False, "Function should not be autographed."
return fn
with tf.compat.v1.test.mock.patch.object(
autograph, "tf_convert", tf_convert
):
loss(y_true, y_pred)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MeanSquaredErrorTest(tf.test.TestCase):
def test_config(self):
mse_obj = losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.SUM, name="mse_1"
)
self.assertEqual(mse_obj.name, "mse_1")
self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
loss = mse_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mse_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 49.5, 3)
def test_scalar_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 113.85, 3)
def test_sample_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 767.8 / 6, 3)
def test_ragged_tensors(self):
mse_obj = losses.MeanSquaredError()
y_true = tf.ragged.constant([[1.0, 1.0, 9.0], [2.0, 5.0]])
y_pred = tf.ragged.constant([[4.0, 1.0, 8.0], [12.0, 3.0]])
sample_weight = tf.constant([1.2, 0.5])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
# mse = [((4 - 1)^2 + (8 - 9)^2) / 3, ((12 - 2)^2 + (3 - 5)^2) / 2]
# mse = [3.(3), 52]
# weighted_mse = [3.(3) * 1.2, 52 * 0.5] = [4, 26]
        # reduced_weighted_mse = (4 + 26) / 2 = 15
self.assertAllClose(self.evaluate(loss), 15, 1e-2)
def test_timestep_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.float32
)
sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 587 / 6, 3)
def test_zero_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mse_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_invalid_sample_weight(self):
mse_obj = losses.MeanSquaredError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
sample_weight = tf.constant([3, 6, 5, 0], shape=(2, 2))
with self.assertRaisesRegex(
(ValueError, tf.errors.InvalidArgumentError),
(
r"Incompatible shapes: \[2,3\] vs. \[2,2\]|"
"Dimensions must be equal"
),
):
mse_obj(y_true, y_pred, sample_weight=sample_weight)
def test_no_reduction(self):
mse_obj = losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.NONE
)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [84.3333, 143.3666], 1e-3)
def test_sum_reduction(self):
mse_obj = losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.SUM
)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 227.69998, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MeanAbsoluteErrorTest(tf.test.TestCase):
def test_config(self):
mae_obj = losses.MeanAbsoluteError(
reduction=losses_utils.ReductionV2.SUM, name="mae_1"
)
self.assertEqual(mae_obj.name, "mae_1")
self.assertEqual(mae_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
loss = mae_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mae_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 5.5, 3)
def test_scalar_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 12.65, 3)
def test_sample_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 81.4 / 6, 3)
def test_timestep_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.float32
)
sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 83 / 6, 3)
def test_zero_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mae_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_invalid_sample_weight(self):
mae_obj = losses.MeanAbsoluteError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
sample_weight = tf.constant([3, 6, 5, 0], shape=(2, 2))
with self.assertRaisesRegex(
(ValueError, tf.errors.InvalidArgumentError),
(
r"Incompatible shapes: \[2,3\] vs. \[2,2\]|"
"Dimensions must be equal"
),
):
mae_obj(y_true, y_pred, sample_weight=sample_weight)
def test_no_reduction(self):
mae_obj = losses.MeanAbsoluteError(
reduction=losses_utils.ReductionV2.NONE
)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [10.7333, 14.5666], 1e-3)
def test_sum_reduction(self):
mae_obj = losses.MeanAbsoluteError(
reduction=losses_utils.ReductionV2.SUM
)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 25.29999, 3)
def test_ragged_tensor(self):
mae_obj = losses.MeanAbsoluteError()
y_true = tf.ragged.constant([[1, 9, 2], [-5, -2]], dtype=tf.float32)
y_pred = tf.ragged.constant([[4, 8, 12], [8, 1]], dtype=tf.float32)
# loss = [14/3, 16/2]
sample_weight = tf.constant([1.2, 1.0], shape=(2, 1))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 6.8, 5)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MeanAbsolutePercentageErrorTest(tf.test.TestCase):
def test_config(self):
mape_obj = losses.MeanAbsolutePercentageError(
reduction=losses_utils.ReductionV2.SUM, name="mape_1"
)
self.assertEqual(mape_obj.name, "mape_1")
self.assertEqual(mape_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mape_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mape_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 211.8518, 3)
def test_scalar_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 487.259, 3)
def test_sample_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 422.8888, 3)
def test_ragged_tensors(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = tf.ragged.constant([[1, 9, 2], [-5, -2]])
y_pred = tf.ragged.constant([[4, 8, 12], [8, 1]], dtype=tf.float32)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 510.7222, 3)
def test_timestep_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.float32
)
sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 694.4445, 3)
def test_zero_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mape_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_no_reduction(self):
mape_obj = losses.MeanAbsolutePercentageError(
reduction=losses_utils.ReductionV2.NONE
)
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [621.8518, 352.6666], 1e-3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MeanSquaredLogarithmicErrorTest(tf.test.TestCase):
def test_config(self):
msle_obj = losses.MeanSquaredLogarithmicError(
reduction=losses_utils.ReductionV2.SUM, name="mape_1"
)
self.assertEqual(msle_obj.name, "mape_1")
self.assertEqual(msle_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = msle_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 1.4370, 3)
def test_scalar_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = msle_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 3.3051, 3)
def test_sample_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 3.7856, 3)
def test_timestep_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.float32
)
sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 2.6473, 3)
def test_zero_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = msle_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_ragged_tensors(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = tf.ragged.constant([[1, 9, 2], [-5, -2]])
# log(max(y_true, 0) + 1): [[0.69314, 2.3025, 1.0986], [0., 0.]]
y_pred = tf.ragged.constant([[4, 8, 12], [8, 1]], dtype=tf.float32)
# log(max(y_pred, 0) + 1): [[1.6094, 2.1972, 2.5649], [2.1972, 0.6932]]
# per batch loss: [1.0002, 2.6541]
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 5.1121, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CosineSimilarityTest(tf.test.TestCase):
def l2_norm(self, x, axis):
epsilon = 1e-12
square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
return np.multiply(x, x_inv_norm)
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
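        # The CosineSimilarity loss is the negative of this normalized dot
        # product; the sign is flipped in the individual tests below.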
self.y_true = tf.constant(self.np_y_true)
self.y_pred = tf.constant(self.np_y_pred)
def test_config(self):
cosine_obj = losses.CosineSimilarity(
axis=2, reduction=losses_utils.ReductionV2.SUM, name="cosine_loss"
)
self.assertEqual(cosine_obj.name, "cosine_loss")
self.assertEqual(cosine_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = -np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
sample_weight = 2.3
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = -np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_sample_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(
self.y_true, self.y_pred, sample_weight=tf.constant(sample_weight)
)
expected_loss = -np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
np_y_true = self.np_y_true.reshape((2, 3, 1))
np_y_pred = self.np_y_pred.reshape((2, 3, 1))
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape((2, 3))
y_true = self.l2_norm(np_y_true, 2)
y_pred = self.l2_norm(np_y_pred, 2)
expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(2,))
y_true = tf.constant(np_y_true)
y_pred = tf.constant(np_y_pred)
loss = cosine_obj(
y_true, y_pred, sample_weight=tf.constant(sample_weight)
)
expected_loss = -np.mean(expected_loss * sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = losses.CosineSimilarity(axis=1)
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = -np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class BinaryCrossentropyTest(tf.test.TestCase):
def test_config(self):
bce_obj = losses.BinaryCrossentropy(
reduction=losses_utils.ReductionV2.SUM, name="bce_1"
)
self.assertEqual(bce_obj.name, "bce_1")
self.assertEqual(bce_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = tf.constant(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=tf.float32
)
bce_obj = losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = tf.constant(
[
[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0],
]
)
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
bce_obj = losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [0, 15.33, 0, 0]
# Reduced loss = 15.33 / 4
self.assertAlmostEqual(self.evaluate(loss), 3.833, 3)
# Test with logits.
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
logits = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [((100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))),
# ((100 - 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 1 + log(1 + exp(-100))))]
# = [(0 + 0 + 0) / 3, 200 / 3]
# Reduced loss = (0 + 66.666) / 2
self.assertAlmostEqual(self.evaluate(loss), 33.333, 3)
def test_scalar_weighted(self):
bce_obj = losses.BinaryCrossentropy()
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
loss = bce_obj(y_true, y_pred, sample_weight=2.3)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [0, 15.33, 0, 0]
# Weighted loss = [0, 15.33 * 2.3, 0, 0]
# Reduced loss = 15.33 * 2.3 / 4
self.assertAlmostEqual(self.evaluate(loss), 8.817, 3)
# Test with logits.
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
logits = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=2.3)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0) / 3, 200 / 3]
# Weighted loss = [0 * 2.3, 66.666 * 2.3]
# Reduced loss = (0 + 66.666 * 2.3) / 2
self.assertAlmostEqual(self.evaluate(loss), 76.667, 3)
def test_sample_weighted(self):
bce_obj = losses.BinaryCrossentropy()
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [0, 15.33, 0, 0]
# Reduced loss = 15.33 * 1.2 / 4
self.assertAlmostEqual(self.evaluate(loss), 4.6, 3)
# Test with logits.
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
logits = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
weights = tf.constant([4, 3])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=weights)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0)/3, 200 / 3]
# Weighted loss = [0 * 4, 66.666 * 3]
# Reduced loss = (0 + 66.666 * 3) / 2
self.assertAlmostEqual(self.evaluate(loss), 100, 3)
def test_no_reduction(self):
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
logits = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
bce_obj = losses.BinaryCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE
)
loss = bce_obj(y_true, logits)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0)/3, (200)/3]
self.assertAllClose((0.0, 66.6666), self.evaluate(loss), 3)
def test_label_smoothing(self):
logits = tf.constant([[100.0, -100.0, -100.0]])
y_true = tf.constant([[1, 0, 1]])
label_smoothing = 0.1
# Loss: max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Label smoothing: z' = z * (1 - L) + 0.5L
        #   so label 1 becomes 1 - 0.5L and label 0 becomes 0.5L
# Applying the above two fns to the given input:
# (100 - 100 * (1 - 0.5 L) + 0 +
# 0 + 100 * (0.5 L) + 0 +
# 0 + 100 * (1 - 0.5 L) + 0) * (1/3)
# = (100 + 50L) * 1/3
bce_obj = losses.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = bce_obj(y_true, logits)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
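    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # applies z' = z * (1 - L) + 0.5 * L and re-derives the expected value
    # (100 + 50 * L) / 3 used by the assertion above.
    def _bce_label_smoothing_reference_sketch(self, label_smoothing=0.1):
        z = np.asarray([1.0, 0.0, 1.0])
        x = np.asarray([100.0, -100.0, -100.0])
        z_smooth = z * (1.0 - label_smoothing) + 0.5 * label_smoothing
        # Stable sigmoid cross-entropy on the smoothed targets.
        per_element = (
            np.maximum(x, 0.0) - x * z_smooth + np.log1p(np.exp(-np.abs(x)))
        )
        return per_element.mean()  # == (100 + 50 * label_smoothing) / 3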
def test_label_smoothing_ndarray(self):
logits = np.asarray([[100.0, -100.0, -100.0]])
y_true = np.asarray([[1, 0, 1]])
label_smoothing = 0.1
# Loss: max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Label smoothing: z' = z * (1 - L) + 0.5L
        #   so label 1 becomes 1 - 0.5L and label 0 becomes 0.5L
# Applying the above two fns to the given input:
# (100 - 100 * (1 - 0.5 L) + 0 +
# 0 + 100 * (0.5 L) + 0 +
# 0 + 100 * (1 - 0.5 L) + 0) * (1/3)
# = (100 + 50L) * 1/3
bce_obj = losses.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = bce_obj(y_true, logits)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
def test_ragged_tensors(self):
bce_obj = losses.BinaryCrossentropy()
y_true = tf.ragged.constant([[1, 0, 1], [0]])
y_pred = tf.ragged.constant([[1, 1, 1], [0]], dtype=tf.float32)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# per batch loss = [ sum([0, 15.33, 0]) / 3, 0. ]
# = [ 5.11, 0]
# Reduced loss = 5.11 * 1.2 / 2
self.assertAlmostEqual(self.evaluate(loss), 3.0666, 3)
# Test with logits.
y_true = tf.ragged.constant([[1, 0, 1], [0, 1]])
logits = tf.ragged.constant([[100.0, -100.0, 100.0], [100.0, 100.0]])
weights = tf.constant([4, 3])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=weights)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0)/3, 100 / 2]
# Weighted loss = [0 * 4, 50 * 3]
# Reduced loss = (0 + 50 * 3) / 2
self.assertAlmostEqual(self.evaluate(loss), 75.0, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class BinaryFocalCrossentropyTest(tf.test.TestCase):
def test_config(self):
obj = losses.BinaryFocalCrossentropy(gamma=1.5, name="bfce_0")
self.assertEqual(obj.name, "bfce_0")
self.assertAlmostEqual(obj.gamma, 1.5)
obj_2 = losses.BinaryFocalCrossentropy.from_config(obj.get_config())
self.assertEqual(obj_2.name, "bfce_0")
self.assertAlmostEqual(obj_2.gamma, 1.5)
def test_all_correct_unweighted(self):
y_true = tf.constant(
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
],
dtype=tf.float32,
)
obj = losses.BinaryFocalCrossentropy(gamma=1.5)
loss = obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = tf.constant(
[
[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0],
]
)
obj = losses.BinaryFocalCrossentropy(gamma=2.0, from_logits=True)
loss = obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
obj = losses.BinaryFocalCrossentropy(gamma=2.0)
loss = obj(y_true, y_pred)
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2],
# [0.7, 0.8]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
# bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]]
# focalLoss = focal bceLoss = [[0.001, 1.03], [0.032, 0.009]]
# Reduced loss = (0.001 + 1.03 + 0.032 + 0.009) / 4 = 0.268
self.assertAlmostEqual(self.evaluate(loss), 0.268, 3)
# Test with logits.
y_true = tf.constant([[1, 1, 0], [0, 1, 0]], dtype=tf.float32)
logits = tf.constant([[1.5, -2.7, 2.9], [-3.8, 1.2, -4.5]])
obj = losses.BinaryFocalCrossentropy(gamma=3.0, from_logits=True)
loss = obj(y_true, logits)
# sigmoidal = sigmoid(logits)
# = [[0.8176, 0.063, 0.9478], [0.0219, 0.7685, 0.011]]
# p_t = y_true sigmoidal + (1 - y_true) (1 - sigmoidal)
# = [[0.8176, 0.063, 0.0522], [0.9781, 0.7685, 0.989]]
# focal = (1 - p_t) ** gamma
# = [[0.006, 0.823, 0.851], [0.00001, 0.0124, 0.000001]]
# bceLoss = -log(p_t)
# = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]]
# focalLoss = focal bceLoss
# = [[0.0012, 2.2743, 2.514], [0.0000002, 0.0033, 0.00000001]]
# Reduced loss = 0.799
self.assertAlmostEqual(self.evaluate(loss), 0.799, 3)
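    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # the focal term (1 - p_t) ** gamma down-weights well-classified examples
    # on top of the plain BCE term -log(p_t), as in the comments above.
    def _binary_focal_reference_sketch(self, gamma=2.0):
        y_true = np.asarray([[1.0, 0.0], [1.0, 0.0]])
        y_pred = np.asarray([[0.9, 0.8], [0.7, 0.2]])
        # Probability assigned to the correct class for each entry.
        p_t = y_true * y_pred + (1.0 - y_true) * (1.0 - y_pred)
        focal_bce = (1.0 - p_t) ** gamma * -np.log(p_t)
        return focal_bce.mean()  # ~0.268, matching the assertion above.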
def test_scalar_weighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
obj = losses.BinaryFocalCrossentropy(gamma=2.0)
loss = obj(y_true, y_pred, sample_weight=1.23)
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2],
# [0.7, 0.8]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
# bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]] * sample_weight
# focalLoss = focal bceLoss
# = [[0.001, 1.03], [0.032, 0.009]] * sample_weight
# Reduced loss = (0.001 + 1.03 + 0.032 + 0.009) * 1.23 / 4 = 0.3296
self.assertAlmostEqual(self.evaluate(loss), 0.3296, 3)
# Test with logits.
y_true = tf.constant([[1, 1, 0], [0, 1, 0]], dtype=tf.float32)
logits = tf.constant([[1.5, -2.7, 2.9], [-3.8, 1.2, -4.5]])
obj = losses.BinaryFocalCrossentropy(gamma=3.0, from_logits=True)
loss = obj(y_true, logits, sample_weight=3.21)
# sigmoidal = sigmoid(logits)
# = [[0.8176, 0.063, 0.9478], [0.0219, 0.7685, 0.011]]
# p_t = y_true sigmoidal + (1 - y_true) (1 - sigmoidal)
# = [[0.8176, 0.063, 0.0522], [0.9781, 0.7685, 0.989]]
# focal = (1 - p_t) ** gamma
# = [[0.006, 0.823, 0.851], [0.00001, 0.0124, 0.000001]]
# bceLoss = -log(p_t) * sample_weight
# = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] *
# sample_weight
# focalLoss = focal * bceLoss =
# [[0.0012, 2.2743, 2.514], [0.0000002, 0.0033, 0.00000001]] *
# sample_weight
# Reduced loss = 0.799 * 3.21 = 2.565
self.assertAlmostEqual(self.evaluate(loss), 2.565, 3)
def test_sample_weighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
obj = losses.BinaryFocalCrossentropy(gamma=2.0)
loss = obj(y_true, y_pred, sample_weight=sample_weight)
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
# 0.8]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
# bceLoss = -log(p_t) * sample_weight
# = [[0.105, 1.609] ,[0.357, 0.223]] * sample_weight
# focalLoss = focal * bceLoss
# = [[0.001, 1.03], [0.032, 0.009]] * sample_weight
# = [[0.0012, 1.236], [0.1088, 0.0306]]
# Reduced loss = (0.0012 + 1.236 + 0.1088 + 0.0306) / 4 = 0.34415
self.assertAlmostEqual(self.evaluate(loss), 0.34415, 3)
# Test with logits.
y_true = tf.constant([[1, 1, 0], [0, 1, 0]], dtype=tf.float32)
logits = tf.constant([[1.5, -2.7, 2.9], [-3.8, 1.2, -4.5]])
obj = losses.BinaryFocalCrossentropy(gamma=3.0, from_logits=True)
loss = obj(y_true, logits, sample_weight=sample_weight)
# sigmoidal = sigmoid(logits)
# = [[0.8176, 0.063, 0.9478], [0.0219, 0.7685, 0.011]]
# p_t = y_true sigmoidal + (1 - y_true) (1 - sigmoidal)
# = [[0.8176, 0.063, 0.0522], [0.9781, 0.7685, 0.989]]
# focal = (1 - p_t) ** gamma
# = [[0.006, 0.823, 0.851], [0.00001, 0.0124, 0.000001]]
# bceLoss = -log(p_t) * sample_weight
# = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] *
# sample_weight
# focalLoss = focal * bceLoss =
# [[0.0012, 2.2743, 2.514], [0.0000002, 0.0033, 0.00000001]] *
# sample_weight
# focalLoss = [[0.00144, 2.72916, 3.0168], [6.8e-7, 0.01122, 3.4e-8]]
        # Reduced loss = (1.9158 + 0.00374) / 2 = 0.95977
self.assertAlmostEqual(self.evaluate(loss), 0.95977, 3)
def test_no_reduction(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
obj = losses.BinaryFocalCrossentropy(
gamma=2.0,
reduction=losses_utils.ReductionV2.NONE,
)
loss = obj(y_true, y_pred)
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
# 0.8]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
# bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]]
# focalLoss = focal bceLoss = [[0.001, 1.03], [0.032, 0.009]]
# Reduced loss = [(0.001 + 1.03) / 2, (0.032 + 0.009) / 2]
self.assertAllClose(self.evaluate(loss), (0.5155, 0.0205), 3)
def test_ragged_tensors(self):
y_true = tf.ragged.constant([[1, 0, 1], [0]])
y_pred = tf.ragged.constant([[0.9, 0.8, 0.7], [0.2]])
obj = losses.BinaryFocalCrossentropy(gamma=2.0)
loss = obj(y_true, y_pred)
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2, 0.7],
# [0.8]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64, 0.09], [0.04]]
# bceLoss = -log(p_t) = [[0.105, 1.609, 0.357], [0.223]]
# focalLoss = focal bceLoss = [[0.001, 1.03, 0.032], [0.009]]
# Reduced loss = ((0.001 + 1.03 + 0.032) / 3 + 0.009) / 2 = 0.18166
self.assertAlmostEqual(self.evaluate(loss), 0.18166, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class BinaryWeightedFocalCrossentropyTest(tf.test.TestCase):
def test_config(self):
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.1,
gamma=1.5,
name="bfce_0",
)
self.assertTrue(obj.apply_class_balancing)
self.assertEqual(obj.name, "bfce_0")
self.assertAlmostEqual(obj.alpha, 0.1)
self.assertAlmostEqual(obj.gamma, 1.5)
obj_2 = losses.BinaryFocalCrossentropy.from_config(obj.get_config())
self.assertTrue(obj_2.apply_class_balancing)
self.assertEqual(obj_2.name, "bfce_0")
self.assertAlmostEqual(obj_2.alpha, 0.1)
self.assertAlmostEqual(obj_2.gamma, 1.5)
def test_all_correct_unweighted(self):
y_true = tf.constant(
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
],
dtype=tf.float32,
)
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True, gamma=1.5
)
loss = obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = tf.constant(
[
[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0],
]
)
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.3,
gamma=2.0,
from_logits=True,
)
loss = obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.4,
gamma=2.0,
)
loss = obj(y_true, y_pred)
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
# 0.8]]
# alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
# = [[0.4, 0.6], [0.4, 0.6]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
# bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]]
# weightedfocalLoss = alpha_weight focal bceLoss
# = [[0.0004, 0.618], [0.0128, 0.0054]]
# Reduced loss = (0.0004 + 0.618 + 0.0128 + 0.0054) / 4 = 0.15915
self.assertAlmostEqual(self.evaluate(loss), 0.15915, 3)
# Test with logits.
y_true = tf.constant([[1, 1, 0], [0, 1, 0]], dtype=tf.float32)
logits = tf.constant([[1.5, -2.7, 2.9], [-3.8, 1.2, -4.5]])
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.3,
gamma=3.0,
from_logits=True,
)
loss = obj(y_true, logits)
# alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
# = [[0.3, 0.3, 0.7], [0.7, 0.3, 0.7]]
# sigmoidal = sigmoid(logits)
# = [[0.8176, 0.063, 0.9478], [0.0219, 0.7685, 0.011]]
# p_t = y_true sigmoidal + (1 - y_true) (1 - sigmoidal)
# = [[0.8176, 0.063, 0.0522], [0.9781, 0.7685, 0.989]]
# focal = (1 - p_t) ** gamma
# = [[0.006, 0.823, 0.851], [0.00001, 0.0124, 0.000001]]
# bceLoss = -log(p_t)
# = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]]
# weightedfocalLoss = alpha_weight focal bceLoss
# = [[0.00036, 0.68229, 1.7598], [0.00000014, 0.00099, 0.000000007]]
# Reduced loss = 0.40724
self.assertAlmostEqual(self.evaluate(loss), 0.40724, 3)
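    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # adds the class-balancing weight alpha * y_true + (1 - alpha) *
    # (1 - y_true) on top of the focal term from the unweighted case above.
    def _alpha_weighted_focal_reference_sketch(self, alpha=0.4, gamma=2.0):
        y_true = np.asarray([[1.0, 0.0], [1.0, 0.0]])
        y_pred = np.asarray([[0.9, 0.8], [0.7, 0.2]])
        p_t = y_true * y_pred + (1.0 - y_true) * (1.0 - y_pred)
        alpha_weight = alpha * y_true + (1.0 - alpha) * (1.0 - y_true)
        loss = alpha_weight * (1.0 - p_t) ** gamma * -np.log(p_t)
        return loss.mean()  # ~0.159, matching the assertion above.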
def test_scalar_weighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.6,
gamma=2.0,
)
loss = obj(y_true, y_pred, sample_weight=1.23)
# alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
# = [[0.6, 0.4], [0.6, 0.4]]
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
# 0.8]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
# bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]] * sample_weight
# weightedfocalLoss = alpha_weight focal bceLoss
# = [[0.0006, 0.412], [0.0192, 0.0036]] * sample_weight
# Reduced loss = (0.0006 + 0.412 + 0.0192 + 0.0036) * 1.23 / 4 = 0.13388
self.assertAlmostEqual(self.evaluate(loss), 0.13388, 3)
# Test with logits.
y_true = tf.constant([[1, 1, 0], [0, 1, 0]], dtype=tf.float32)
logits = tf.constant([[1.5, -2.7, 2.9], [-3.8, 1.2, -4.5]])
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.2,
gamma=3.0,
from_logits=True,
)
loss = obj(y_true, logits, sample_weight=3.21)
# alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
# = [[0.2, 0.2, 0.8], [0.8, 0.2, 0.8]]
# sigmoidal = sigmoid(logits)
# = [[0.8176, 0.063, 0.9478], [0.0219, 0.7685, 0.011]]
# p_t = y_true sigmoidal + (1 - y_true) (1 - sigmoidal)
# = [[0.8176, 0.063, 0.0522], [0.9781, 0.7685, 0.989]]
# focal = (1 - p_t) ** gamma
# = [[0.006, 0.823, 0.851], [0.00001, 0.0124, 0.000001]]
# bceLoss = -log(p_t) * sample_weight
# = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] *
# sample_weight
# weightedfocalLoss = alpha_weight * focal * bceLoss =
# [[0.00024, 0.45486, 2.0112], [0.00000016, 0.00066, 0.000000008]] *
# 3.21
# Reduced loss = 0.41116 * 3.21 = 1.32
self.assertAlmostEqual(self.evaluate(loss), 1.32, 3)
def test_sample_weighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.1,
gamma=2.0,
)
loss = obj(y_true, y_pred, sample_weight=sample_weight)
# alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
# = [[0.1, 0.9], [0.1, 0.9]]
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
# 0.8]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
# bceLoss = -log(p_t) * sample_weight
# = [[0.105, 1.609] ,[0.357, 0.223]] * sample_weight
# focalLoss = alpha_weight * focal * bceLoss
# = [[0.0001, 0.927], [0.0032, 0.0081]] * sample_weight
# = [[0.00012, 1.1124], [0.01088, 0.02754]]
# Reduced loss = (0.00012 + 1.1124 + 0.01088 + 0.02754) / 4 = 0.2877
self.assertAlmostEqual(self.evaluate(loss), 0.2877, 3)
# Test with logits.
y_true = tf.constant([[1, 1, 0], [0, 1, 0]], dtype=tf.float32)
logits = tf.constant([[1.5, -2.7, 2.9], [-3.8, 1.2, -4.5]])
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.2,
gamma=3.0,
from_logits=True,
)
loss = obj(y_true, logits, sample_weight=sample_weight)
# sigmoidal = sigmoid(logits)
# = [[0.8176, 0.063, 0.9478], [0.0219, 0.7685, 0.011]]
# p_t = y_true sigmoidal + (1 - y_true) (1 - sigmoidal)
# = [[0.8176, 0.063, 0.0522], [0.9781, 0.7685, 0.989]]
# focal = (1 - p_t) ** gamma
# = [[0.006, 0.823, 0.851], [0.00001, 0.0124, 0.000001]]
# alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
# = [[0.2, 0.2, 0.8], [0.8, 0.2, 0.8]]
# bceLoss = -log(p_t) * sample_weight
# = [[0.2014, 2.7646 , 2.9527], [0.0221, 0.2633, 0.01106]] *
# sample_weight
# focalLoss = alpha_weight * focal * bceLoss =
# [[0.00024, 0.45486, 2.0112], [1.6e-7, 6.6e-4, 8e-9]] * sample_weight
# focalLoss = [[0.000288, 0.5458, 2.41344], [5.44e-7, 2.444e-3,
# 2.72e-8]]
# Reduced loss = 0.49366
self.assertAlmostEqual(self.evaluate(loss), 0.49366, 3)
def test_no_reduction(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.6,
gamma=2.0,
reduction=losses_utils.ReductionV2.NONE,
)
loss = obj(y_true, y_pred)
# alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
# = [[0.6, 0.4], [0.6, 0.4]]
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2], [0.7,
# 0.8]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64], [0.09, 0.04]]
# bceLoss = -log(p_t) = [[0.105, 1.609] ,[0.357, 0.223]]
# focalLoss = alpha_weight focal bceLoss
# = [[0.0006, 0.412], [0.0192, 0.0036]]
# Reduced loss = [(0.0006 + 0.412) / 2, (0.0192 + 0.0036) / 2]
self.assertAllClose(self.evaluate(loss), (0.2063, 0.0114), 3)
def test_ragged_tensors(self):
y_true = tf.ragged.constant([[1, 0, 1], [0]])
y_pred = tf.ragged.constant([[0.9, 0.8, 0.7], [0.2]])
obj = losses.BinaryFocalCrossentropy(
apply_class_balancing=True,
alpha=0.1,
gamma=2.0,
)
loss = obj(y_true, y_pred)
# alpha_weight = alpha y_true + (1 - alpha) (1 - y_true)
# = [[0.1, 0.9, 0.1], [0.9]]
# p_t = y_true y_pred + (1 - y_true) (1 - y_pred) = [[0.9, 0.2, 0.7],
# [0.8]]
# focal = (1 - p_t) ** gamma = [[0.01, 0.64, 0.09], [0.04]]
# bceLoss = -log(p_t) = [[0.105, 1.609, 0.357], [0.223]]
# focalLoss = alpha_weight focal bceLoss
# = [[0.0001, 0.927, 0.0032], [0.0081]]
# Reduced loss = ((0.0001 + 0.927 + 0.0032) / 3 + 0.0081) / 2 = 0.1591
self.assertAlmostEqual(self.evaluate(loss), 0.1591, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CategoricalCrossentropyTest(tf.test.TestCase):
def test_config(self):
cce_obj = losses.CategoricalCrossentropy(
reduction=losses_utils.ReductionV2.SUM, name="bce_1"
)
self.assertEqual(cce_obj.name, "bce_1")
self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=tf.int64)
y_pred = tf.constant(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
dtype=tf.float32,
)
cce_obj = losses.CategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = tf.constant(
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]
)
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype=tf.float32,
)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.3239, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0573, 3)
def test_scalar_weighted(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype=tf.float32,
)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 0.7449, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 0.1317, 3)
def test_sample_weighted(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype=tf.float32,
)
sample_weight = tf.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
def test_no_reduction(self):
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.CategoricalCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE
)
loss = cce_obj(y_true, logits)
self.assertAllClose(
(0.001822, 0.000459, 0.169846), self.evaluate(loss), 3
)
def test_label_smoothing(self):
logits = tf.constant([[100.0, -100.0, -100.0]])
y_true = tf.constant([[1, 0, 0]])
label_smoothing = 0.1
# Softmax Cross Entropy Loss: -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
# For our activations, [100, -100, -100]
# \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# Label smoothing: z' = z * (1 - L) + L/n
        #   so label 1 becomes 1 - L + L/n and label 0 becomes L/n
# Applying the above two fns to the given input:
# -0 * (1 - L + L/n) + 200 * L/n + 200 * L/n = 400 L/n
cce_obj = losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
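    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # smooths the one-hot target with z' = z * (1 - L) + L / n and evaluates
    # the cross-entropy against a NumPy log-softmax of the logits above.
    def _cce_label_smoothing_reference_sketch(self, label_smoothing=0.1):
        logits = np.asarray([100.0, -100.0, -100.0])
        y_true = np.asarray([1.0, 0.0, 0.0])
        n = y_true.size
        y_smooth = y_true * (1.0 - label_smoothing) + label_smoothing / n
        shifted = logits - logits.max()
        log_softmax = shifted - np.log(np.sum(np.exp(shifted)))
        return -np.sum(y_smooth * log_softmax)  # == 400 * L / 3 here.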
def test_label_smoothing_ndarray(self):
logits = np.asarray([[100.0, -100.0, -100.0]])
y_true = np.asarray([[1, 0, 0]])
label_smoothing = 0.1
# Softmax Cross Entropy Loss: -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
# For our activations, [100, -100, -100]
# \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# Label smoothing: z' = z * (1 - L) + L/n
        #   so label 1 becomes 1 - L + L/n and label 0 becomes L/n
# Applying the above two fns to the given input:
# -0 * (1 - L + L/n) + 200 * L/n + 200 * L/n = 400 L/n
cce_obj = losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
def test_shape_mismatch(self):
y_true = tf.constant([[0], [1], [2]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]]
)
cce_obj = losses.CategoricalCrossentropy()
with self.assertRaisesRegex(ValueError, "Shapes .+ are incompatible"):
cce_obj(y_true, y_pred)
def test_ragged_tensors(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = tf.ragged.constant([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1]]])
y_pred = tf.ragged.constant(
[[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6]], [[0.05, 0.01, 0.94]]],
dtype=tf.float32,
)
# batch losses [[0.1054, 0.8047], [0.0619]]
sample_weight = tf.constant([[1.2], [3.4]], shape=(2, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# sum([0.1054, 0.8047, 0.0619]) / 3
self.assertAlmostEqual(self.evaluate(loss), 0.4341, 3)
# Test with logits.
logits = tf.ragged.constant(
[[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0]], [[2.0, 3.0, 5.0]]]
)
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
# batch losses [[0.0018, 0.0004], [0.1698]]
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.1934, 3)
def test_ragged_tensors_ragged_sample_weights(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = tf.ragged.constant([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1]]])
y_pred = tf.ragged.constant(
[[[0.9, 0.05, 0.05], [0.05, 0.89, 0.06]], [[0.05, 0.01, 0.94]]],
dtype=tf.float32,
)
# batch losses [[0.1054, 0.1165], [0.0619]]
# Use independent weights for each batch element
sample_weight = tf.ragged.constant(
[[1.2, 3.4], [5.6]], dtype=tf.float32
)
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# sum([0.1054*1.2, 0.1165*3.4, 0.0619*5.6])/3
self.assertAlmostEqual(self.evaluate(loss), 0.2897, 3)
# Test with logits.
logits = tf.ragged.constant(
[[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0]], [[2.0, 3.0, 5.0]]]
)
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
# batch losses [[0.0018, 0.0004], [0.1698]]
# sum([0.0018*1.2, 0.0004*3.4, 0.1698*5.6]) / 3
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.3181, 3)
def test_binary_labels(self):
        # A warning should be raised when y_true and y_pred both have shape
        # (None, 1): categorical_crossentropy shouldn't be used with binary
        # labels.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cce_obj = losses.CategoricalCrossentropy()
cce_obj(tf.constant([[1.0], [0.0]]), tf.constant([[1.0], [1.0]]))
self.assertIs(w[-1].category, SyntaxWarning)
self.assertIn(
"In loss categorical_crossentropy, expected ",
str(w[-1].message),
)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CategoricalFocalCrossentropyTest(tf.test.TestCase):
def test_config(self):
cce_obj = losses.CategoricalFocalCrossentropy(
name="focal_cce",
reduction=losses_utils.ReductionV2.SUM,
alpha=0.25,
gamma=2.0,
)
self.assertEqual(cce_obj.name, "focal_cce")
self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)
self.assertEqual(cce_obj.alpha, 0.25)
self.assertEqual(cce_obj.gamma, 2.0)
# Test alpha as a list
cce_obj = losses.CategoricalFocalCrossentropy(alpha=[0.25, 0.5, 0.75])
self.assertEqual(cce_obj.alpha, [0.25, 0.5, 0.75])
def test_all_correct_unweighted(self):
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=tf.int64)
y_pred = tf.constant(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
dtype=tf.float32,
)
cce_obj = losses.CategoricalFocalCrossentropy(alpha=0.25, gamma=2.0)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = tf.constant(
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]
)
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype=tf.float32,
)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.02059, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.000345, 3)
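    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # y_pred rows are renormalized to sum to 1, then each class term
    # -y_true * log(p) is scaled by alpha * (1 - p) ** gamma and summed over
    # classes, reproducing the unweighted value asserted above.
    def _categorical_focal_reference_sketch(self, alpha=0.25, gamma=2.0):
        y_true = np.eye(3)
        y_pred = np.asarray(
            [[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]]
        )
        y_pred = y_pred / y_pred.sum(axis=-1, keepdims=True)
        per_class = (
            alpha * (1.0 - y_pred) ** gamma * y_true * -np.log(y_pred)
        )
        return per_class.sum(axis=-1).mean()  # ~0.0206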
def test_scalar_weighted(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype=tf.float32,
)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 0.047368, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 0.000794, 4)
def test_sample_weighted(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype=tf.float32,
)
sample_weight = tf.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.06987, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.001933, 3)
def test_no_reduction(self):
y_true = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.CategoricalFocalCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE
)
loss = cce_obj(y_true, logits)
self.assertAllClose(
(1.5096224e-09, 2.4136547e-11, 1.0360638e-03),
self.evaluate(loss),
3,
)
def test_label_smoothing(self):
logits = tf.constant([[4.9, -0.5, 2.05]])
y_true = tf.constant([[1, 0, 0]])
label_smoothing = 0.1
cce_obj = losses.CategoricalFocalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
expected_value = 0.06685
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
def test_label_smoothing_ndarray(self):
logits = np.asarray([[4.9, -0.5, 2.05]])
y_true = np.asarray([[1, 0, 0]])
label_smoothing = 0.1
cce_obj = losses.CategoricalFocalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
expected_value = 0.06685
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
def test_shape_mismatch(self):
y_true = tf.constant([[0], [1], [2]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]]
)
cce_obj = losses.CategoricalFocalCrossentropy()
with self.assertRaisesRegex(ValueError, "Shapes .+ are incompatible"):
cce_obj(y_true, y_pred)
def test_ragged_tensors(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = tf.ragged.constant([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1]]])
y_pred = tf.ragged.constant(
[[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6]], [[0.05, 0.01, 0.94]]],
dtype=tf.float32,
)
# batch losses [[0.1054, 0.8047], [0.0619]]
sample_weight = tf.constant([[1.2], [3.4]], shape=(2, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.024754, 3)
# Test with logits.
logits = tf.ragged.constant(
[[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0]], [[2.0, 3.0, 5.0]]]
)
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.00117, 3)
def test_ragged_tensors_ragged_sample_weights(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = tf.ragged.constant([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1]]])
y_pred = tf.ragged.constant(
[[[0.9, 0.05, 0.05], [0.05, 0.89, 0.06]], [[0.05, 0.01, 0.94]]],
dtype=tf.float32,
)
sample_weight = tf.ragged.constant(
[[1.2, 3.4], [5.6]], dtype=tf.float32
)
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.0006088, 4)
# Test with logits.
logits = tf.ragged.constant(
[[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0]], [[2.0, 3.0, 5.0]]]
)
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.001933, 3)
def test_binary_labels(self):
        # A warning should be raised when y_true and y_pred both have shape
        # (None, 1): categorical_focal_crossentropy shouldn't be used with
        # binary labels.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cce_obj = losses.CategoricalFocalCrossentropy()
cce_obj(tf.constant([[1.0], [0.0]]), tf.constant([[1.0], [1.0]]))
self.assertIs(w[-1].category, SyntaxWarning)
self.assertIn(
"In loss categorical_focal_crossentropy, expected ",
str(w[-1].message),
)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class SparseCategoricalCrossentropyTest(tf.test.TestCase):
def test_config(self):
cce_obj = losses.SparseCategoricalCrossentropy(
reduction=losses_utils.ReductionV2.SUM, name="scc"
)
self.assertEqual(cce_obj.name, "scc")
self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = tf.constant([[0], [1], [2]], dtype=tf.int64)
y_pred = tf.constant(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
dtype=tf.float32,
)
cce_obj = losses.SparseCategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = tf.constant(
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]
)
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = tf.constant([0, 1, 2])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype=tf.float32,
)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.3239, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0573, 3)
def test_unweighted_ignore_class(self):
cce_obj = losses.SparseCategoricalCrossentropy(ignore_class=-1)
y_true = tf.constant([0, 1, 2, -1])
y_pred = tf.constant(
[
[0.9, 0.05, 0.05],
[0.5, 0.89, 0.6],
[0.05, 0.01, 0.94],
[0.85, 0.14, 0.01],
],
dtype=tf.float32,
)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.3239, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0], [7.8, 2.0, 1.0]]
)
cce_obj = losses.SparseCategoricalCrossentropy(
ignore_class=-1, from_logits=True
)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0573, 3)
def test_unweighted_ignore_class_for_segmentation(self):
cce_obj = losses.SparseCategoricalCrossentropy(ignore_class=-1)
y_true = tf.constant(
[[[0, 2], [-1, -1]], [[0, 2], [-1, -1]], [[0, 0], [0, 0]]]
)
y_pred = tf.constant(
[
[
[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
[[0.2, 0.5, 0.3], [0.0, 1.0, 0.0]],
],
[
[[1.0, 0.0, 0.0], [0.0, 0.5, 0.5]],
[[0.2, 0.5, 0.3], [0.0, 1.0, 0.0]],
],
[
[[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
[[0.1, 0.9, 0.0], [0.2, 0.8, 0.0]],
],
],
dtype=tf.float32,
)
# Expected loss values:
# [[0.0, 0.0], [0.0, 0.0]],
# [[0.0, 0.693148], [0.0, 0.0]],
# [[0.0, 0.0], [2.302585, 1.609438]],
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.575646375, 3)
# # Test with logits.
# logits = tf.constant(
# [[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
# )
# cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
# loss = cce_obj(y_true, logits)
# self.assertAlmostEqual(self.evaluate(loss), 0.0573, 3)
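    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # how ignore_class masks entries out of the average. The per-pixel losses
    # are the ones listed in the comment above; pixels labelled -1 are dropped
    # from both the numerator and the denominator.
    def _ignore_class_reference_sketch(self, ignore_class=-1):
        labels = np.asarray(
            [[[0, 2], [-1, -1]], [[0, 2], [-1, -1]], [[0, 0], [0, 0]]]
        )
        per_pixel = np.asarray(
            [
                [[0.0, 0.0], [0.0, 0.0]],
                [[0.0, 0.693148], [0.0, 0.0]],
                [[0.0, 0.0], [2.302585, 1.609438]],
            ]
        )
        valid = labels != ignore_class
        return per_pixel[valid].sum() / valid.sum()  # ~0.5756, as asserted.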
def test_scalar_weighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = tf.constant([[0], [1], [2]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype=tf.float32,
)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 0.7449, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 0.1317, 3)
def test_sample_weighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = tf.constant([[0], [1], [2]])
y_pred = tf.constant(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype=tf.float32,
)
sample_weight = tf.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
def test_sample_weighted_ignore_class(self):
cce_obj = losses.SparseCategoricalCrossentropy(ignore_class=-1)
y_true = tf.constant([[0], [1], [2], [-1]])
y_pred = tf.constant(
[
[0.9, 0.05, 0.05],
[0.5, 0.89, 0.6],
[0.05, 0.01, 0.94],
[0.85, 0.14, 0.01],
],
dtype=tf.float32,
)
sample_weight = tf.constant([[1.2], [3.4], [5.6], [10.4]], shape=(4, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
# Test with logits.
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0], [7.8, 2.0, 1.0]]
)
cce_obj = losses.SparseCategoricalCrossentropy(
ignore_class=-1, from_logits=True
)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
def test_no_reduction(self):
y_true = tf.constant([[0], [1], [2]])
logits = tf.constant(
[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]]
)
cce_obj = losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE
)
loss = cce_obj(y_true, logits)
self.assertAllClose(
(0.001822, 0.000459, 0.169846), self.evaluate(loss), 3
)
def test_non_tensor(self):
# Test case for GitHub issue 33394.
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = [[0], [1], [2]]
y_pred = [[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]]
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 0.7449, 3)
def test_ragged_tensors(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = tf.ragged.constant([[0, 1], [2]])
y_pred = tf.ragged.constant(
[[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6]], [[0.05, 0.01, 0.94]]],
dtype=tf.float32,
)
# batch losses [[0.1054, 0.8047], [0.0619]]
sample_weight = tf.constant([[1.2], [3.4]], shape=(2, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# sum([0.1054, 0.8047, 0.0619]) / 3
self.assertAlmostEqual(self.evaluate(loss), 0.4341, 3)
# Test with logits.
logits = tf.ragged.constant(
[[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0]], [[2.0, 3.0, 5.0]]]
)
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
# batch losses [[0.0018, 0.0004], [0.1698]]
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.1934, 3)
def test_ragged_tensors_rank_1(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = tf.ragged.constant([[0, 1], [2]])
y_pred = tf.ragged.constant(
[[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6]], [[0.05, 0.01, 0.94]]],
ragged_rank=1,
dtype=tf.float32,
)
# batch losses [[0.1054, 0.8047], [0.0619]]
sample_weight = tf.constant([[1.2], [3.4]], shape=(2, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# sum([0.1054, 0.8047, 0.0619]) / 3
self.assertAlmostEqual(self.evaluate(loss), 0.4341, 3)
# Test with logits.
logits = tf.ragged.constant(
[[[8.0, 1.0, 1.0], [0.0, 9.0, 1.0]], [[2.0, 3.0, 5.0]]],
ragged_rank=1,
)
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
# batch losses [[0.0018, 0.0004], [0.1698]]
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.1934, 3)
def test_ragged_tensors_3d(self):
# shape [2, 1, None]
y_true = tf.ragged.constant([[[1, 1]], [[0]]])
# shape [2, 1, None, 2]
y_pred = tf.ragged.constant(
[[[[0.1, 0.9], [0.1, 0.9]]], [[[0.9, 0.1]]]]
)
cce_obj = losses.SparseCategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.1054, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class HingeTest(tf.test.TestCase):
def test_config(self):
hinge_obj = losses.Hinge(
reduction=losses_utils.ReductionV2.SUM, name="hinge_loss"
)
self.assertEqual(hinge_obj.name, "hinge_loss")
self.assertEqual(hinge_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
hinge_obj = losses.Hinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# reduced loss = (0.6 + 0.4125) / 2
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(0.506, self.evaluate(loss), atol=1e-3)
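    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # 0/1 labels are mapped to -1/1, then each sample's loss is the mean over
    # the last axis of max(0, 1 - y_true * y_pred).
    def _hinge_reference_sketch(self):
        y_true = 2.0 * np.asarray([[0, 1, 0, 1], [0, 0, 1, 1]]) - 1.0
        y_pred = np.asarray(
            [[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]]
        )
        per_sample = np.maximum(0.0, 1.0 - y_true * y_pred).mean(axis=-1)
        return per_sample.mean()  # ~0.506, matching the assertion above.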
def test_scalar_weighted(self):
hinge_obj = losses.Hinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# weighted_loss = [0.6 * 2.3, 0.4125 * 2.3]
# reduced loss = (0.6 + 0.4125) * 2.3 / 2
loss = hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 1.164, 3)
# Verify we get the same output when the same input is given
loss_2 = hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAllClose(self.evaluate(loss), self.evaluate(loss_2), 1e-3)
def test_sample_weighted(self):
hinge_obj = losses.Hinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# weighted loss = [0.6 * 1.2, 0.4125 * 3.4]
# reduced loss = (0.6 * 1.2 + 0.4125 * 3.4) / 2
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 1.061, 1e-3)
def test_timestep_weighted(self):
hinge_obj = losses.Hinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
y_pred = tf.constant(
[[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]], shape=(2, 4, 1)
)
sample_weight = tf.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
# y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
# [[0.25], [1], [0.5], [0.6]]]
# 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
# [[0.75], [0], [0.5], [0.4]]]
# loss = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# weighted loss = [[2.1, 4.8, 4.5, 0], [3, 0, 0.5, 1.2]]
# reduced loss = (2.1 + 4.8 + 4.5 + 0 + 3 + 0 + 0.5 + 1.2) / 8
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 2.012, 1e-3)
def test_zero_weighted(self):
hinge_obj = losses.Hinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
loss = hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAllClose(self.evaluate(loss), 0.0, 1e-3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class SquaredHingeTest(tf.test.TestCase):
def test_config(self):
sq_hinge_obj = losses.SquaredHinge(
reduction=losses_utils.ReductionV2.SUM, name="sq_hinge_loss"
)
self.assertEqual(sq_hinge_obj.name, "sq_hinge_loss")
self.assertEqual(sq_hinge_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5,
# 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# reduced loss = (0.485 + 0.2431) / 2
loss = sq_hinge_obj(y_true, y_pred)
self.assertAllClose(self.evaluate(loss), 0.364, 1e-3)
def test_scalar_weighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5,
# 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# weighted loss = [0.485 * 2.3, 0.2431 * 2.3]
# reduced loss = (0.485 + 0.2431) * 2.3 / 2
loss = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAllClose(self.evaluate(loss), 0.837, 1e-3)
# Verify we get the same output when the same input is given
loss_2 = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5,
# 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# weighted loss = [0.485 * 1.2, 0.2431 * 3.4]
# reduced loss = (0.485 * 1.2 + 0.2431 * 3.4) / 2
sample_weight = tf.constant([1.2, 3.4])
loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 0.704, 1e-3)
def test_timestep_weighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
y_pred = tf.constant(
[[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]], shape=(2, 4, 1)
)
sample_weight = tf.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
# y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
# [[0.25], [1], [0.5], [0.6]]]
# 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
# [[0.75], [0], [0.5], [0.4]]]
# loss = [[0.49, 0.64, 0.81, 0], [0.5625, 0, 0.25, 0.16]]
# weighted loss = [[1.47, 3.84, 4.05, 0], [2.25, 0, 0.25, 0.48]]
# reduced loss = (1.47 + 3.84 + 4.05 + 0 + 2.25 + 0 + 0.25 + 0.48) / 8
loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 1.542, 1e-3)
def test_zero_weighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
loss = sq_hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAllClose(self.evaluate(loss), 0.0, 1e-3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CategoricalHingeTest(tf.test.TestCase):
def test_config(self):
cat_hinge_obj = losses.CategoricalHinge(
reduction=losses_utils.ReductionV2.SUM, name="cat_hinge_loss"
)
self.assertEqual(cat_hinge_obj.name, "cat_hinge_loss")
self.assertEqual(cat_hinge_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = tf.constant([1, 9, 2, -5], shape=(2, 2))
y_pred = tf.constant([4, 8, 12, 8], shape=(2, 2), dtype=tf.float32)
loss = cat_hinge_obj(y_true, y_pred)
# pos = reduce_sum(y_true * y_pred) = [1*4+8*9, 12*2+8*-5] = [76, -16]
# neg = reduce_max((1. - y_true) * y_pred) = [[0, -64], [-12, 48]] = [0,
# 48]
# cat_hinge = max(0., neg - pos + 1.) = [0, 65]
# reduced_loss = (0 + 65)/2 = 32.5
self.assertAlmostEqual(self.evaluate(loss), 32.5, 3)
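    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # re-derives the unweighted value above with pos = sum(y_true * y_pred)
    # and neg = max((1 - y_true) * y_pred), both taken over the last axis.
    def _categorical_hinge_reference_sketch(self):
        y_true = np.asarray([[1.0, 9.0], [2.0, -5.0]])
        y_pred = np.asarray([[4.0, 8.0], [12.0, 8.0]])
        pos = np.sum(y_true * y_pred, axis=-1)
        neg = np.max((1.0 - y_true) * y_pred, axis=-1)
        return np.maximum(0.0, neg - pos + 1.0).mean()  # == 32.5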
def test_scalar_weighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 83.95, 3)
# Verify we get the same output when the same input is given
loss_2 = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 124.1, 3)
def test_timestep_weighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3, 1), dtype=tf.float32
)
sample_weight = tf.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 4.0, 3)
def test_zero_weighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = tf.constant(
[4, 8, 12, 8, 1, 3], shape=(2, 3), dtype=tf.float32
)
loss = cat_hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class LogCoshTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
error = y_pred - y_true
self.expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
logcosh_obj = losses.LogCosh(
reduction=losses_utils.ReductionV2.SUM, name="logcosh_loss"
)
self.assertEqual(logcosh_obj.name, "logcosh_loss")
self.assertEqual(logcosh_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
loss = logcosh_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
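    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # setup() evaluates log((exp(e) + exp(-e)) / 2) directly, which overflows
    # for large errors; the algebraically equivalent form below stays finite
    # and yields the same mean.
    def _stable_logcosh_reference_sketch(self):
        y_pred = np.asarray([1.0, 9.0, 2.0, -5.0, -2.0, 6.0])
        y_true = np.asarray([4.0, 8.0, 12.0, 8.0, 1.0, 3.0])
        abs_error = np.abs(y_pred - y_true)
        stable = (
            abs_error + np.log1p(np.exp(-2.0 * abs_error)) - np.log(2.0)
        )
        return stable.mean()  # Same value as mean(self.expected_losses).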
def test_scalar_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 2.3
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
)
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
error = y_pred - y_true
expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
y_pred = tf.constant(y_pred, dtype=tf.float32)
y_true = tf.constant(y_true)
loss = logcosh_obj(
y_true,
y_pred,
sample_weight=tf.constant(sample_weight, shape=(2, 3)),
)
expected_loss = (
np.sum(expected_losses * sample_weight) / self.batch_size
)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 0
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class PoissonTest(tf.test.TestCase):
def setup(self):
self.np_y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
self.np_y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
self.expected_losses = self.np_y_pred - np.multiply(
self.np_y_true, np.log(self.np_y_pred)
)
self.y_pred = tf.constant(self.np_y_pred, dtype=tf.float32)
self.y_true = tf.constant(self.np_y_true)
def test_config(self):
poisson_obj = losses.Poisson(
reduction=losses_utils.ReductionV2.SUM, name="poisson"
)
self.assertEqual(poisson_obj.name, "poisson")
self.assertEqual(poisson_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
poisson_obj = losses.Poisson()
loss = poisson_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
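    # A minimal NumPy sketch (illustrative only, not part of the test suite):
    # the element-wise Poisson loss y_pred - y_true * log(y_pred), averaged
    # over all six entries, matching batch_size in setup() above.
    def _poisson_reference_sketch(self):
        y_pred = np.asarray([1.0, 9.0, 2.0, 5.0, 2.0, 6.0])
        y_true = np.asarray([4.0, 8.0, 12.0, 8.0, 1.0, 3.0])
        return np.mean(y_pred - y_true * np.log(y_pred))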
def test_scalar_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
sample_weight = 2.3
loss = poisson_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = poisson_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = poisson_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
)
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
y_true = self.np_y_true.reshape(2, 3, 1)
y_pred = self.np_y_pred.reshape(2, 3, 1)
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3, 1)
expected_losses = y_pred - np.multiply(y_true, np.log(y_pred))
y_pred = tf.constant(y_pred, dtype=tf.float32)
y_true = tf.constant(y_true)
loss = poisson_obj(
y_true,
y_pred,
sample_weight=tf.constant(sample_weight, shape=(2, 3)),
)
expected_loss = (
np.sum(expected_losses * sample_weight) / self.batch_size
)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
loss = poisson_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class KLDivergenceTest(tf.test.TestCase):
def setup(self):
self.np_y_pred = np.asarray([0.4, 0.9, 0.12, 0.36, 0.3, 0.4]).reshape(
(2, 3)
)
self.np_y_true = np.asarray([0.5, 0.8, 0.12, 0.7, 0.43, 0.8]).reshape(
(2, 3)
)
self.batch_size = 2
self.expected_losses = np.multiply(
self.np_y_true, np.log(self.np_y_true / self.np_y_pred)
)
self.y_pred = tf.constant(self.np_y_pred, dtype=tf.float32)
self.y_true = tf.constant(self.np_y_true)
def test_config(self):
k_obj = losses.KLDivergence(
reduction=losses_utils.ReductionV2.SUM, name="kld"
)
self.assertEqual(k_obj.name, "kld")
self.assertEqual(k_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
k_obj = losses.KLDivergence()
loss = k_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
sample_weight = 2.3
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(2, 3),
)
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
y_true = self.np_y_true.reshape(2, 3, 1)
y_pred = self.np_y_pred.reshape(2, 3, 1)
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3)
expected_losses = np.sum(
np.multiply(y_true, np.log(y_true / y_pred)), axis=-1
)
y_pred = tf.constant(y_pred, dtype=tf.float32)
y_true = tf.constant(y_true)
loss = k_obj(y_true, y_pred, sample_weight=tf.constant(sample_weight))
num_timesteps = 3
expected_loss = np.sum(expected_losses * sample_weight) / (
self.batch_size * num_timesteps
)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
loss = k_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
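# HuberLossTest compares losses.Huber against a NumPy reference: with error
# e = y_pred - y_true, the per-element loss is 0.5 * e**2 when |e| <= delta
# and 0.5 * delta**2 + delta * (|e| - delta) otherwise, i.e. the
# quadratic/linear split computed by `huber_loss` below.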
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class HuberLossTest(tf.test.TestCase):
def huber_loss(self, y_true, y_pred, delta=1.0):
error = y_pred - y_true
abs_error = np.abs(error)
quadratic = np.minimum(abs_error, delta)
linear = np.subtract(abs_error, quadratic)
return np.add(
np.multiply(0.5, np.multiply(quadratic, quadratic)),
np.multiply(delta, linear),
)
def setup(self, delta=1.0):
self.np_y_pred = np.asarray([0.9, 0.2, 0.2, 0.8, 0.4, 0.6]).reshape(
(2, 3)
)
self.np_y_true = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape(
(2, 3)
)
self.batch_size = 6
self.expected_losses = self.huber_loss(
self.np_y_true, self.np_y_pred, delta
)
self.y_pred = tf.constant(self.np_y_pred)
self.y_true = tf.constant(self.np_y_true)
def test_config(self):
h_obj = losses.Huber(
reduction=losses_utils.ReductionV2.SUM, name="huber"
)
self.assertEqual(h_obj.name, "huber")
self.assertEqual(h_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct(self):
self.setup()
h_obj = losses.Huber()
loss = h_obj(self.y_true, self.y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
self.setup()
h_obj = losses.Huber()
loss = h_obj(self.y_true, self.y_pred)
actual_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_scalar_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = tf.constant((1.2, 3.4), shape=(2, 1))
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
)
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_timestep_weighted(self):
self.setup()
h_obj = losses.Huber()
y_pred = self.np_y_pred.reshape((2, 3, 1))
y_true = self.np_y_true.reshape((2, 3, 1))
expected_losses = self.huber_loss(y_true, y_pred)
y_pred = tf.constant(y_pred)
y_true = tf.constant(y_true)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
loss = h_obj(
y_true,
y_pred,
sample_weight=tf.constant(sample_weight, shape=(2, 3)),
)
actual_loss = np.multiply(expected_losses, sample_weight)
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_zero_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = 0
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_non_default_delta(self):
self.setup(delta=0.8)
h_obj = losses.Huber(delta=0.8)
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_loss_with_non_default_dtype(self):
# Test case for GitHub issue:
# https://github.com/tensorflow/tensorflow/issues/39004
self.setup()
h_obj = losses.Huber()
try:
backend.set_floatx("float64")
loss = h_obj(self.y_true, self.y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
finally:
backend.set_floatx("float32")
class BinaryTruePositivesViaControlFlow(losses.Loss):
def __init__(self, reduction=losses_utils.ReductionV2.AUTO):
super().__init__(reduction=reduction)
def call(self, y_true, y_pred):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
result = tf.constant(0.0)
for i in range(len(y_true)):
for j in range(len(y_true[i])):
if y_true[i][j] and y_pred[i][j]:
result = result + 1
return result
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CustomLossTest(tf.test.TestCase):
def test_autograph(self):
y_true = tf.constant(
[
[0, 0.9, 0, 1, 0],
[0, 0, 1, 1, 1],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 1.5],
]
)
y_pred = tf.constant(
[
[0, 0, 1, 5, 0],
[1, 1, 1, 1, 1],
[0, 1, 0, 1, 0],
[1, 10, 1, 1, 1],
]
)
@tf.function
def loss_fn(y_true, y_pred):
loss_obj = BinaryTruePositivesViaControlFlow()
return loss_obj(y_true, y_pred)
loss = loss_fn(y_true, y_pred)
self.assertAllEqual(
self.evaluate(loss),
7.0,
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/losses_test.py/0 | {
"file_path": "tf-keras/tf_keras/losses_test.py",
"repo_id": "tf-keras",
"token_count": 64117
} | 185 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras metrics functions."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import backend
from tf_keras import metrics
from tf_keras.testing_infra import test_combinations
class KerasFunctionalMetricsTest(tf.test.TestCase, parameterized.TestCase):
def test_metrics(self):
with self.cached_session():
y_a = backend.variable(np.random.random((6, 7)))
y_b = backend.variable(np.random.random((6, 7)))
for metric in [
metrics.binary_accuracy,
metrics.categorical_accuracy,
]:
output = metric(y_a, y_b)
self.assertEqual(backend.eval(output).shape, (6,))
def test_sparse_categorical_accuracy_int(self):
with self.cached_session():
metric = metrics.sparse_categorical_accuracy
y_true = backend.variable(np.random.randint(0, 7, (6,)))
y_pred = backend.variable(np.random.random((6, 7)))
self.assertEqual(backend.eval(metric(y_true, y_pred)).shape, (6,))
# Test correctness if the shape of y_true is (num_samples,)
y_true = backend.variable([1.0, 0.0, 0.0, 0.0])
y_pred = backend.variable(
[[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]]
)
self.assertAllEqual(
backend.eval(metric(y_true, y_pred)), [0.0, 1.0, 1.0, 1.0]
)
# Test correctness if the shape of y_true is (num_samples, 1)
y_true = backend.variable([[1.0], [0.0], [0.0], [0.0]])
y_pred = backend.variable(
[[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]]
)
self.assertAllEqual(
backend.eval(metric(y_true, y_pred)), [0.0, 1.0, 1.0, 1.0]
)
# Test correctness if the shape of y_true is (batch_size,
# seq_length) and y_pred is (batch_size, seq_length, num_classes)
y_pred = backend.variable(
np.array(
[
[[0.2, 0.3, 0.1], [0.1, 0.2, 0.7]],
[[0.3, 0.2, 0.1], [0.7, 0.2, 0.1]],
]
)
)
y_true = backend.variable(np.array([[1, 0], [1, 0]]))
self.assertAllEqual(
backend.eval(metric(y_true, y_pred)), [[1.0, 0.0], [0.0, 1.0]]
)
def test_sparse_categorical_accuracy_float(self):
with self.cached_session():
metric = metrics.sparse_categorical_accuracy
y_true = backend.variable(np.random.random((6,)))
y_pred = backend.variable(np.random.random((6, 7)))
self.assertEqual(backend.eval(metric(y_true, y_pred)).shape, (6,))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_sparse_categorical_accuracy_eager(self):
"""Tests that ints passed in via Eager return results. See
b/113504761."""
metric = metrics.sparse_categorical_accuracy
y_true = np.arange(6).reshape([6, 1])
y_pred = np.arange(36).reshape([6, 6])
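        # Every row of y_pred has its argmax in the last column, so only the
        # sample whose label is 5 (the last one) is scored as correct.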
self.assertAllEqual(
metric(y_true, y_pred), [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_sparse_categorical_accuracy_float_eager(self):
"""Tests that floats passed in via Eager return results. See
b/113504761."""
metric = metrics.sparse_categorical_accuracy
y_true = np.arange(6, dtype=np.float32).reshape([6, 1])
y_pred = np.arange(36).reshape([6, 6])
self.assertAllEqual(
metric(y_true, y_pred), [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
)
def test_sparse_top_k_categorical_accuracy(self):
with self.cached_session():
# Test correctness if the shape of y_true is (num_samples, 1)
y_pred = backend.variable(
np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]])
)
y_true = backend.variable(np.array([[1], [0]]))
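            # Sample 1's label (1) is in the top 2 but not the top 1, and
            # sample 2's label (0) only enters at the top 3, giving mean
            # accuracies of 1.0, 0.5 and 0.0 for k=3, 2 and 1.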
result = backend.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3)
)
self.assertEqual(np.mean(result), 1)
result = backend.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2)
)
self.assertEqual(np.mean(result), 0.5)
result = backend.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1)
)
self.assertEqual(np.mean(result), 0.0)
# Test correctness if the shape of y_true is (num_samples,)
y_pred = backend.variable(
np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]])
)
y_true = backend.variable(np.array([1, 0]))
result = backend.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3)
)
self.assertEqual(np.mean(result), 1)
result = backend.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2)
)
self.assertEqual(np.mean(result), 0.5)
result = backend.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1)
)
self.assertEqual(np.mean(result), 0.0)
# Test correctness if the shape of y_true is (batch_size,
# seq_length) and y_pred is (batch_size, seq_length, num_classes)
y_pred = backend.variable(
np.array(
[
[[0.3, 0.2, 0.1], [0.1, 0.2, 0.7], [0.1, 0.2, 0.7]],
[[0.3, 0.2, 0.1], [0.1, 0.2, 0.7], [0.3, 0.2, 0.1]],
]
)
)
y_true = backend.variable(np.array([[1, 0, 0], [1, 0, 1]]))
result = backend.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3)
)
self.assertEqual(np.mean(result), 1)
result = backend.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2)
)
self.assertEqual(np.mean(result), 0.5)
result = backend.eval(
metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1)
)
self.assertEqual(np.mean(result), 0.0)
def test_top_k_categorical_accuracy(self):
with self.cached_session():
y_pred = backend.variable(
np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]])
)
y_true = backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
result = backend.eval(
metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
)
self.assertEqual(np.mean(result), 1)
result = backend.eval(
metrics.top_k_categorical_accuracy(y_true, y_pred, k=2)
)
self.assertEqual(np.mean(result), 0.5)
result = backend.eval(
metrics.top_k_categorical_accuracy(y_true, y_pred, k=1)
)
self.assertEqual(np.mean(result), 0.0)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/metrics/metrics_functional_test.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/metrics_functional_test.py",
"repo_id": "tf-keras",
"token_count": 4210
} | 186 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleOptimizer."""
import os
from unittest import mock
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import optimizers
from tf_keras.mixed_precision import loss_scale_optimizer
from tf_keras.mixed_precision import test_util as mp_test_util
from tf_keras.optimizers import adam as adam_experimental
from tf_keras.optimizers import optimizer as optimizer_experimental
from tf_keras.optimizers import sgd as sgd_experimental
from tf_keras.optimizers.legacy import adam
from tf_keras.optimizers.legacy import gradient_descent
from tf_keras.optimizers.legacy import optimizer_v2
from tf_keras.optimizers.schedules import learning_rate_schedule
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# isort: off
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
from tensorflow.python.platform import tf_logging
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = tf.distribute.get_strategy
def create_mirrored_strategy():
if tf.config.list_logical_devices("GPU"):
return tf.distribute.MirroredStrategy(["cpu:0", "gpu:0"])
else:
return tf.distribute.MirroredStrategy(["cpu:0"])
STRATEGY_FNS = [default_strategy_fn, create_mirrored_strategy]
def create_sgd(base_optimizer_cls, *args, **kwargs):
"""Creates an SGD optimizer.
Will return either the new experimental SGD optimizer subclassing from
`optimizer_experimental.Optimizer` or the old SGD optimizer subclassing from
`optimizer_v2.OptimizerV2`, depending on `base_optimizer_cls`.
Args:
base_optimizer_cls: What the superclass of the returned SGD optimizer will
be. Either `optimizer_experimental.Optimizer` or
`optimizer_v2.OptimizerV2`.
*args: Arguments to pass to the SGD constructor
**kwargs: Keyword arguments to pass to the SGD constructor.
Returns:
An SGD optimizer.
"""
if base_optimizer_cls == optimizer_v2.OptimizerV2:
return gradient_descent.SGD(*args, **kwargs)
else:
assert (
base_optimizer_cls == optimizer_experimental.Optimizer
), f"Got invalid base_optimizer_cls: {base_optimizer_cls}"
return sgd_experimental.SGD(*args, **kwargs)
# TODO(b/215568552): Remove this as the delegation is handled by metaclass.
def create_lso(
inner_optimizer, dynamic=True, initial_scale=None, dynamic_growth_steps=None
):
"""Creates a LossScaleOptimizer.
Creates either the new LossScaleOptimizerV3 subclassing from
`optimizer_experimental.Optimizer` or the old LossScaleOptimizer subclassing
from `optimizer_v2.OptimizerV2`, depending on the type of `inner_optimizer`.
Args:
inner_optimizer: The optimizer to wrap. Either an
`optimizer_experimental.Optimizer` or an `optimizer_v2.OptimizerV2`.
dynamic: Whether dynamic loss scaling is used.
initial_scale: The initial loss scale.
dynamic_growth_steps: How frequently to increase the dynamic loss scale.
Returns:
Returns a LossScaleOptimizerV3 or a LossScaleOptimizer, depending on the
type of `inner_optimizer`.
"""
return loss_scale_optimizer.BaseLossScaleOptimizer(
inner_optimizer,
dynamic=dynamic,
initial_scale=initial_scale,
dynamic_growth_steps=dynamic_growth_steps,
)
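# `create_lso(gradient_descent.SGD(), dynamic=False, initial_scale=8)`, for
# example, yields a fixed-scale `LossScaleOptimizer`, while wrapping
# `sgd_experimental.SGD()` the same way yields a `LossScaleOptimizerV3`.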
def opt_and_strategy_and_mode_combinations():
"""Returns combinations for running with multiple optimizers and strategies.
Returns:
Combinations that run with both OptimizerV2 and the experimental
optimizer; and with the default strategy and mirrored strategy; and in
both graph and eager mode.
"""
# For the experimental optimizer, don't use graph mode directly since it's
# unsupported. Instead, run both without and with a tf.function, in order to
# test both graph and eager mode.
experimental_opt_combinations = test_combinations.combine(
opt_cls=optimizer_experimental.Optimizer,
strategy_fn=STRATEGY_FNS,
mode="eager",
use_tf_function=[False, True],
)
orig_opt_combinations = test_combinations.combine(
opt_cls=optimizer_v2.OptimizerV2,
strategy_fn=STRATEGY_FNS,
mode=["graph", "eager"],
use_tf_function=False,
)
return experimental_opt_combinations + orig_opt_combinations
def opt_combinations_only():
"""Returns two combinations for running with the two base optimizers."""
experimental_opt_combinations = test_combinations.combine(
mode="eager", opt_cls=optimizer_experimental.Optimizer
)
orig_opt_combination = test_combinations.combine(
opt_cls=optimizer_v2.OptimizerV2
)
return experimental_opt_combinations + orig_opt_combination
@tf_test_utils.with_control_flow_v2
class LossScaleOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def _run_if_in_graph_mode(self, val):
# Running only in graph mode is useful, because optimizers sometimes
# return a value that, in Graph mode, is runnable with self.evaluate.
# But in Eager mode, the optimizer already does the computations and the
# return value cannot be run.
if not tf.executing_eagerly():
self.evaluate(val)
def _eval_if_tensor(self, val):
# Calls self.evaluate on val if val is a Tensor or Variable. This is
# useful, since hyperparameters are tf.Variables on OptimizerV2 and are
# Python floats on the experimental optimizer.
return (
self.evaluate(val)
if isinstance(val, (tf.Tensor, tf.Variable))
else val
)
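    # Builds a run function whose loss is an identity of `var` with a
    # gradient check attached: create_identity_with_grad_check_fn inserts an
    # assertion op verifying the incoming gradient equals `expected_grad`.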
def _run_fn_with_grad_check(self, strategy, var, opt, expected_grad):
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(
expected_grad
)
loss = lambda: grad_check_fn(var) / strategy.num_replicas_in_sync
return lambda: opt.minimize(loss, var_list=[var])
def testIsInstance(self):
optimizer = create_lso(sgd_experimental.SGD())
self.assertIsInstance(
optimizer, loss_scale_optimizer.BaseLossScaleOptimizer
)
optimizer = create_lso(gradient_descent.SGD())
self.assertIsInstance(
optimizer, loss_scale_optimizer.BaseLossScaleOptimizer
)
@test_combinations.generate(opt_and_strategy_and_mode_combinations())
def testFixedLossScaleAppliedToLossWithMinimize(
self, opt_cls, strategy_fn, use_tf_function
):
with strategy_fn().scope() as strategy:
var = tf.Variable([5.0])
opt = create_sgd(opt_cls, 2.0)
loss_scale = 10.0
opt = create_lso(opt, dynamic=False, initial_scale=loss_scale)
self.assertEqual(self.evaluate(opt.loss_scale), loss_scale)
self.assertIsInstance(opt.loss_scale, tf.Tensor)
# We need num_replicas_in_sync to divide loss_scale, otherwise
# loss_scale / strategy.num_replicas_in_sync will not be exact,
# which could lead to assertion failures due to rounding issues.
self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, loss_scale / strategy.num_replicas_in_sync
)
if use_tf_function:
run_fn = tf.function(run_fn)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient
# is 1, and so the variable will be init_val - grad * lr == 5 - 1 *
# 2 == 3
self.assertAllClose([3.0], self.evaluate(var))
def testFixedLossScaleAppliedToLossWithGetGradients(self):
with tf.Graph().as_default():
var = tf.Variable([2.0])
opt = gradient_descent.SGD(1.0)
loss_scale = 10.0
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, dynamic=False, initial_scale=loss_scale
)
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(
loss_scale
)
loss = grad_check_fn(var)
run_op = opt.get_gradients(loss, [var])
self.evaluate(tf.compat.v1.global_variables_initializer())
# This will cause an assertion to run, as
# mp_test_util.create_identity_with_grad_check_fn added an assertion
# op.
self.evaluate(run_op)
@test_combinations.generate(opt_combinations_only())
def testDynamicAttrsWithFixedLossScale(self, opt_cls):
opt = create_sgd(opt_cls)
opt = create_lso(opt, dynamic=False, initial_scale=2.0)
self.assertFalse(opt.dynamic)
self.assertIsNone(opt.dynamic_counter)
self.assertIsNone(opt.dynamic_growth_steps)
@test_combinations.generate(opt_combinations_only())
def testGetScaledLoss(self, opt_cls):
opt = create_sgd(opt_cls)
opt = create_lso(opt, dynamic=False, initial_scale=2.0)
loss = tf.convert_to_tensor(5.0)
self.assertEqual(10.0, self.evaluate(opt.get_scaled_loss(loss)))
self.assertEqual(
10.0, self.evaluate(opt.get_scaled_loss(lambda: loss)())
)
loss = tf.convert_to_tensor(5.0, dtype="float16")
self.assertEqual(10.0, self.evaluate(opt.get_scaled_loss(loss)))
self.assertEqual(
10.0, self.evaluate(opt.get_scaled_loss(lambda: loss)())
)
@test_combinations.generate(opt_combinations_only())
def testGetUnscaledGradients(self, opt_cls):
opt = create_sgd(opt_cls)
opt = create_lso(opt, dynamic=False, initial_scale=2)
scaled_grads = [
tf.convert_to_tensor(3.0),
None,
tf.convert_to_tensor(-4.0, dtype="float16"),
]
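        # With a loss scale of 2, unscaling divides each gradient by 2, and
        # `None` gradients are passed through unchanged.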
grads = opt.get_unscaled_gradients(scaled_grads)
grads = [self.evaluate(g) if g is not None else g for g in grads]
self.assertEqual([1.5, None, -2.0], grads)
@test_combinations.generate(opt_combinations_only())
def testGetUnscaledSparseGradients(self, opt_cls):
opt = create_sgd(opt_cls)
opt = create_lso(opt, dynamic=False, initial_scale=2)
sparse_scaled_grad = tf.IndexedSlices(
tf.convert_to_tensor([[4.0, 2.0], [8.0, 5.0]]),
tf.convert_to_tensor([1, 3], dtype="int32"),
dense_shape=tf.convert_to_tensor([5, 2], dtype="int32"),
)
sparse_grad = opt.get_unscaled_gradients([sparse_scaled_grad])[0]
self.assertIsInstance(sparse_grad, tf.IndexedSlices)
self.assertAllEqual(
[[2.0, 1.0], [4.0, 2.5]], self.evaluate(sparse_grad.values)
)
@test_combinations.generate(opt_and_strategy_and_mode_combinations())
def testDynamicLossScale(self, opt_cls, strategy_fn, use_tf_function):
strategy = strategy_fn()
learning_rate = 2.0
expected_gradient = tf.Variable(
learning_rate / strategy.num_replicas_in_sync
)
with strategy.scope():
var = tf.Variable([5.0])
opt = create_sgd(opt_cls, learning_rate)
opt = create_lso(opt, initial_scale=2, dynamic_growth_steps=1)
self.assertEqual(opt.initial_scale, 2.0)
self.assertIsInstance(opt.initial_scale, float)
self.assertEqual(opt.dynamic_growth_steps, 1)
self.assertIsInstance(opt.dynamic_growth_steps, int)
self.assertEqual(
opt.initial_scale % strategy.num_replicas_in_sync, 0
)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, expected_gradient
)
if use_tf_function:
run_fn = tf.function(run_fn)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient
# is 1, and so the variable will be init_val - grad * lr == 5 - 1 *
# 2 == 3
self.assertAllClose([3.0], self.evaluate(var))
            # The loss scale has now doubled, so the expected gradient is also
# doubled.
self.evaluate(
expected_gradient.assign(
2 * learning_rate / strategy.num_replicas_in_sync
)
)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
            # As before, the 2 is subtracted from the variable, making its new
# value 1.
self.assertAllClose([1.0], self.evaluate(var))
@test_combinations.generate(opt_combinations_only())
def testDynamicLossScaleDefaultValues(self, opt_cls):
opt = create_sgd(opt_cls)
opt = create_lso(opt)
self.assertEqual(opt.initial_scale, 2**15)
self.assertEqual(opt.dynamic_growth_steps, 2000)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.loss_scale), 2**15)
@test_combinations.generate(opt_and_strategy_and_mode_combinations())
def testClipping(self, opt_cls, strategy_fn, use_tf_function):
strategy = strategy_fn()
learning_rate = 2.0
for clip_type in ("clipnorm", "global_clipnorm", "clipvalue"):
with strategy.scope(), self.subTest(clip_type=clip_type):
var = tf.Variable([5.0])
opt = create_sgd(opt_cls, learning_rate, **{clip_type: 2.0})
opt = create_lso(opt, initial_scale=2, dynamic_growth_steps=1)
if isinstance(opt, loss_scale_optimizer.LossScaleOptimizer):
# Only OptimizerV2 exposes the clipping attributes
self.assertEqual(getattr(opt, clip_type), 2.0)
self.assertEqual(
opt.initial_scale % strategy.num_replicas_in_sync, 0
)
loss = lambda: var * 4 / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
if use_tf_function:
run_fn = tf.function(run_fn)
# Test running with clipped gradients
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The gradient is 4 but is clipped to 2, so the variable will be
# init_val - clipped_grad * lr == 5 - 2 * 2 == 1
self.assertAllClose([1.0], self.evaluate(var))
self.assertEqual(self.evaluate(opt.loss_scale), 4)
if isinstance(opt, loss_scale_optimizer.LossScaleOptimizerV3):
# Only OptimizerV2 exposes the clipping attributes, so we
# cannot set them on the new optimizer
return
# Test changing the clip amount and running again
setattr(opt, clip_type, 3.0)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# The gradient is 4 but is clipped to 3, so the variable will be
# prev_var - clipped_grad * lr == 1 - 3 * 2 == -5
self.assertAllClose([-5.0], self.evaluate(var))
self.assertEqual(self.evaluate(opt.loss_scale), 8)
# Test Inf gradients are still skipped instead of being clipped
loss = lambda: var * float("Inf")
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
self.assertAllClose(
[-5.0], self.evaluate(var)
) # Var does not change
self.assertEqual(self.evaluate(opt.loss_scale), 4)
@test_combinations.generate(opt_and_strategy_and_mode_combinations())
def testDynamicUpdate(self, opt_cls, strategy_fn, use_tf_function):
with strategy_fn().scope() as strategy:
var = tf.Variable([1.0, 2.0])
opt = create_sgd(opt_cls, 1.0)
opt = create_lso(opt, initial_scale=2, dynamic_growth_steps=1)
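            # With dynamic_growth_steps=1, the loss scale doubles after every
            # step with finite gradients and is halved (with the update
            # skipped) whenever non-finite gradients are encountered.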
# Test optimizer with finite gradients
loss = lambda: var * 2.0 / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
if use_tf_function:
run_fn = tf.function(run_fn)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# Gradient is 2, so variable will have 2 subtracted from it
self.assertAllClose([-1.0, 0.0], self.evaluate(var))
# Loss scale has doubled from 2 to 4
self.assertEqual(4.0, self.evaluate(opt.loss_scale))
# Test optimizer with NaN gradients
loss = lambda: var * float("NaN")
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
self.assertAllClose(self.evaluate(var), [-1.0, 0.0])
            # Loss scale should halve due to NaN gradients.
self.assertEqual(2.0, self.evaluate(opt.loss_scale))
@test_combinations.generate(opt_and_strategy_and_mode_combinations())
def testDynamicLossScaleWithFloat16Loss(
self, opt_cls, strategy_fn, use_tf_function
):
strategy = strategy_fn()
learning_rate = 2.0
with strategy.scope():
var = tf.Variable([5.0])
opt = create_sgd(opt_cls, learning_rate)
opt = create_lso(opt, initial_scale=2, dynamic_growth_steps=1)
def loss():
return tf.cast(var / strategy.num_replicas_in_sync, "float16")
run_fn = lambda: opt.minimize(loss, var_list=[var])
if use_tf_function:
run_fn = tf.function(run_fn)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient
# is 1, and so the variable will be init_val - grad * lr == 5 - 1 *
# 2 == 3
self.assertAllClose([3.0], self.evaluate(var))
@test_combinations.generate(opt_and_strategy_and_mode_combinations())
def testNanOnOneReplicaOnly(self, opt_cls, strategy_fn, use_tf_function):
if strategy_fn == default_strategy_fn:
self.skipTest("The test is only useful for non-default strategies")
if not tf.test.is_gpu_available():
self.skipTest("Test requires GPU")
if (
not tf.executing_eagerly()
and not tf.compat.v1.control_flow_v2_enabled()
):
self.skipTest(
"b/181283011: GradientTape does not work properly with "
"V1 control flow, and opt.minimize uses GradientTape"
)
with strategy_fn().scope() as strategy:
var = tf.Variable([1.0, 2.0])
opt = create_sgd(opt_cls, 1.0)
opt = create_lso(opt, initial_scale=2, dynamic_growth_steps=2)
def loss():
rep_id = (
tf.distribute.get_replica_context().replica_id_in_sync_group
)
                # The last element of the last replica's gradient is NaN.
return tf.cond(
tf.equal(rep_id, 0),
lambda: var * 2.0,
lambda: var * tf.constant([1.0, float("NaN")]),
)
run_fn = lambda: opt.minimize(loss, var_list=[var])
if use_tf_function:
run_fn = tf.function(run_fn)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
self.assertAllClose(self.evaluate(var), [1.0, 2.0])
            # Loss scale should halve due to NaN gradients.
self.assertEqual(1.0, self.evaluate(opt.loss_scale))
    def testCustomAggregator(self):
def gradient_aggregator(grads_and_vars):
# Simulate an all-reduce where a replica has a NaN gradient by
# setting the last gradient to NaN
grads_and_vars = list(grads_and_vars)
last_grad, last_var = grads_and_vars[-1]
grads_and_vars[-1] = (last_grad * float("NaN"), last_var)
return grads_and_vars
var = tf.Variable([1.0, 2.0])
opt = gradient_descent.SGD(1.0, gradient_aggregator=gradient_aggregator)
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, initial_scale=2, dynamic_growth_steps=2
)
loss = lambda: var * 2
run_op = opt.minimize(loss, var_list=[var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
self.assertAllClose(self.evaluate(var), [1.0, 2.0])
        # Loss scale should halve due to NaN gradients.
self.assertEqual(1.0, self.evaluate(opt.loss_scale))
@test_combinations.generate(opt_and_strategy_and_mode_combinations())
def testDynamicLossScaleWithSlots(
self, opt_cls, strategy_fn, use_tf_function
):
strategy_obj = strategy_fn()
if (
isinstance(strategy_obj, tf.distribute.MirroredStrategy)
and tf.compat.v1.control_flow_v2_enabled()
and not tf.executing_eagerly()
):
self.skipTest("b/138667997")
with strategy_obj.scope() as strategy:
var = tf.Variable([1.0, 2.0])
# An SGD optimizer with momentum has slot variables.
opt = create_sgd(opt_cls, 1.0, momentum=1.0)
initial_scale = 2.0
opt = create_lso(
opt, initial_scale=initial_scale, dynamic_growth_steps=1
)
loss = lambda: var / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
if use_tf_function:
run_fn = tf.function(run_fn)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
            # The momentum accumulator starts at 0 and the gradient is 1. The
            # accumulator is incremented by the gradient, so it is now 1. Then
            # the accumulator is subtracted from the variable, so the variable
            # decreases by 1.
self.assertAllClose([0.0, 1.0], self.evaluate(var))
self.assertEqual(self.evaluate(opt.loss_scale), initial_scale * 2)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
            # The momentum accumulator was 1 before this step and the gradient
            # is 1. The accumulator is incremented by the gradient, so it is
            # now 2. Then the accumulator is subtracted from the variable, so
            # the variable decreases by 2.
self.assertAllClose([-2.0, -1.0], self.evaluate(var))
self.assertEqual(self.evaluate(opt.loss_scale), initial_scale * 4)
if isinstance(opt, loss_scale_optimizer.LossScaleOptimizer):
self.assertEqual(opt.get_slot_names(), ["momentum"])
def testIterations(self):
opt = gradient_descent.SGD(2.0)
lso = loss_scale_optimizer.LossScaleOptimizer(
opt, dynamic=False, initial_scale=10.0
)
lso.iterations = 7
self.assertEqual(lso.iterations, 7)
self.assertEqual(opt.iterations, 7)
@test_combinations.generate(opt_and_strategy_and_mode_combinations())
def testIterationsIncremented(self, opt_cls, strategy_fn, use_tf_function):
with strategy_fn().scope() as strategy:
# Test iterations is incremented in opt.minimize.
opt = create_sgd(opt_cls, 1.0)
opt = create_lso(opt)
var = tf.Variable([5.0])
loss = lambda: var * 2.0 / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, [var])
if use_tf_function:
run_fn = tf.function(run_fn)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertEqual(
self.evaluate(var), 3.0
) # Grad is 2, so var is 5 - 2
self.assertEqual(self.evaluate(opt.iterations), 1)
# Test iterations is incremented in opt.minimize even if gradients
# aren't applied to variables due to NaN gradients.
loss = lambda: var * float("NaN")
run_fn = lambda: opt.minimize(loss, [var])
if use_tf_function:
run_fn = tf.function(run_fn)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), 3.0)
self.assertEqual(self.evaluate(opt.iterations), 2)
def testWeightMethods(self):
with self.test_session():
var = tf.Variable([1.0])
opt = gradient_descent.SGD(1.0)
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, initial_scale=2.0, dynamic_growth_steps=1
)
run_op = opt.minimize(lambda: var * 2, [var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertLen(opt.weights, 1) # The 'iterations' weight
self.assertEqual(self.evaluate(opt.weights[0]), 1)
self.assertEqual(opt.get_weights()[0], 1)
self.assertEqual(self.evaluate(opt.variables()[0]), 1)
opt.set_weights([np.array(2.0)])
self.assertEqual(self.evaluate(opt.variables()[0]), 2)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def testHyperParametersExposedLSOV3(self):
opt = adam_experimental.Adam(learning_rate=1.0, beta_1=0.5, beta_2=0.9)
lso = loss_scale_optimizer.BaseLossScaleOptimizer(opt)
lso.learning_rate = tf.Variable(0.005)
self.assertAllClose(self.evaluate(lso.learning_rate), 0.005)
self.assertIs(lso.learning_rate, opt.learning_rate)
lso.use_ema = True
self.assertEqual(lso.use_ema, True)
self.assertEqual(opt.use_ema, True)
lso.ema_momentum = 0.88
self.assertEqual(lso.ema_momentum, 0.88)
self.assertEqual(opt.ema_momentum, 0.88)
def testHyperParametersExposed(self):
with self.cached_session():
opt = adam.Adam(learning_rate=1.0, beta_1=0.5, beta_2=0.9)
lso = loss_scale_optimizer.LossScaleOptimizer(opt)
# Force hyperparameters to be created
opt.lr
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(lso.beta_1), 0.5)
self.assertIsInstance(lso.beta_1, tf.Variable)
self.assertEqual(self.evaluate(lso.lr), 1.0)
self.assertIs(lso.lr, opt.lr)
self.assertIs(lso.lr, lso.learning_rate)
lso.beta_1 = 0.25
self.assertEqual(self.evaluate(lso.beta_1), 0.25)
self.assertEqual(self.evaluate(opt.beta_1), 0.25)
self.assertIs(lso.beta_1, opt.beta_1)
opt.beta_1 = 0.75
self.assertEqual(self.evaluate(lso.beta_1), 0.75)
self.assertEqual(self.evaluate(opt.beta_1), 0.75)
self.assertIs(lso.beta_1, opt.beta_1)
lso.lr = 2.0
self.assertEqual(self.evaluate(lso.lr), 2.0)
self.assertEqual(self.evaluate(lso.learning_rate), 2.0)
self.assertEqual(self.evaluate(opt.lr), 2.0)
self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
self.assertIs(lso.lr, opt.lr)
# Test setting attribute that is both attribute on
# LossScaleOptimizer and hyperparameter on wrapped optimizer.
class MyOpt(gradient_descent.SGD):
def __init__(self):
super().__init__()
self._set_hyper("loss_scale", 123.0)
opt = MyOpt()
lso = loss_scale_optimizer.LossScaleOptimizer(opt)
with self.assertRaises(AttributeError):
lso.loss_scale = 2.0
@test_combinations.generate(opt_combinations_only())
def testArbitraryAttributesNotExposed(self, opt_cls):
opt = create_sgd(opt_cls)
lso = create_lso(opt)
self.assertFalse(opt.nesterov)
with self.assertRaisesRegex(
AttributeError,
"'LossScaleOptimizer(V3)?' object has no attribute 'nesterov'",
):
lso.nesterov
lso.nesterov = True
self.assertTrue(lso.nesterov)
self.assertFalse(opt.nesterov)
def testDir(self):
lso = loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD())
dir_result = dir(lso)
self.assertIn("learning_rate", dir_result) # Hyperparameter
self.assertIn("lr", dir_result) # Hyperparameter
self.assertIn("minimize", dir_result) # Attribute
self.assertIn("loss_scale", dir_result) # Attribute
self.assertNotIn("nesterov", dir_result) # Attribute on inner optimizer
self.assertIn("nesterov", dir(lso.inner_optimizer))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testApplyGradientsGetsUnwrappedTensors(self):
# Tests that gradients passed to apply_gradients are not wrapped in a
# DistributionStrategy wrapper, such as PerReplica, but instead are raw
# Tensors. Optimizer subclasses that override apply_gradients() expect
# raw Tensors, even though the base Optimizer can handle PerReplica
# gradients.
outer_self = self
class MyOptimizer(gradient_descent.SGD):
def apply_gradients(
self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True,
):
for grad, _ in grads_and_vars:
outer_self.assertIsInstance(grad, tf.Tensor)
return super().apply_gradients(
grads_and_vars, name, experimental_aggregate_gradients
)
with create_mirrored_strategy().scope() as strategy:
var = tf.Variable([5.0])
opt = MyOptimizer(learning_rate=1.0)
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, dynamic=False, initial_scale=1
)
loss = lambda: var * 2.0
run_fn = lambda: opt.minimize(loss, [var])
strategy.experimental_run(run_fn)
@test_combinations.generate(
test_combinations.combine(mode="eager", use_tf_function=[False, True])
)
def testApplyGradientsGetsUnwrappedTensorsWithNewOptimizer(
self, use_tf_function
):
outer_self = self
class MyOptimizer(sgd_experimental.SGD):
def apply_gradients(
self,
grads_and_vars,
skip_gradients_aggregation=False,
experimental_aggregate_gradients=True,
):
for grad, _ in grads_and_vars:
outer_self.assertIsInstance(grad, tf.Tensor)
return super().apply_gradients(
grads_and_vars,
skip_gradients_aggregation=skip_gradients_aggregation,
)
with create_mirrored_strategy().scope() as strategy:
var = tf.Variable([5.0])
opt = MyOptimizer(learning_rate=1.0)
opt = loss_scale_optimizer.LossScaleOptimizerV3(
opt, dynamic=False, initial_scale=1
)
loss = lambda: var * 2.0
run_fn = lambda: opt.minimize(loss, [var])
if use_tf_function:
run_fn = tf.function(run_fn)
strategy.experimental_run(run_fn)
@test_combinations.generate(opt_combinations_only())
def testLossScaleDelegationWithWrapper(self, opt_cls):
# Test learning_rate is exposed when LossScaleOptimizer wraps another
# wrapper.
class MyOptimizer(opt_cls):
def __init__(self):
super().__init__("MyOptimizer")
self.inner_optimizer = create_sgd(opt_cls, learning_rate=1.0)
@property
def learning_rate(self):
return self.inner_optimizer.learning_rate
@learning_rate.setter
def learning_rate(self, value):
self.inner_optimizer.learning_rate = value
def get_config(self):
return {}
with self.cached_session():
opt = MyOptimizer()
opt = create_lso(opt)
# Force hyperparameters to be created
opt.learning_rate
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.learning_rate), 1.0)
self.assertEqual(
self.evaluate(
opt.inner_optimizer.inner_optimizer.learning_rate
),
1.0,
)
opt.learning_rate = 2.0
self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
self.assertEqual(
self.evaluate(
opt.inner_optimizer.inner_optimizer.learning_rate
),
2.0,
)
@test_combinations.generate(
test_combinations.combine(
opt_cls=optimizer_v2.OptimizerV2,
strategy_fn=STRATEGY_FNS,
mode=["graph", "eager"],
use_tf_function=False,
save_with_ls=[False, True],
restore_with_ls=[False, True],
)
+ test_combinations.combine(
opt_cls=optimizer_experimental.Optimizer,
strategy_fn=STRATEGY_FNS,
mode="eager",
use_tf_function=[False, True],
save_with_ls=[False, True],
restore_with_ls=[False, True],
)
)
def testCheckpoint(
self,
opt_cls,
strategy_fn,
use_tf_function,
save_with_ls,
restore_with_ls,
):
if not save_with_ls and not restore_with_ls:
self.skipTest(
"Skipping because save_with_ls=False and "
"restore_with_ls=False, which means loss scaling is not "
"used"
)
sgd_cls = type(create_sgd(opt_cls))
class MySGD(sgd_cls):
"""A custom optimizer that tracks an extra variable."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.my_var = tf.Variable(0.0)
self._track_trackable(self.my_var, "my_var")
strategy = strategy_fn()
replicas = strategy.num_replicas_in_sync
if (
isinstance(strategy, tf.distribute.MirroredStrategy)
and not tf.executing_eagerly()
):
# TODO(b/121381184): Enable running the test in this case.
return
with self.test_session(), strategy.scope():
# Build and run a simple model.
var = tf.Variable([2.0])
opt = inner_opt = MySGD(1.0, momentum=1.0)
if save_with_ls:
opt = create_lso(
opt, initial_scale=1.0, dynamic_growth_steps=2.0
)
run_fn = lambda: opt.minimize(
lambda: var / replicas + 1.0, var_list=[var]
)
if use_tf_function:
run_fn = tf.function(run_fn)
opt_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(strategy.experimental_local_results(opt_op))
# Assert values.
self.assertEqual(self.evaluate(var), 1.0)
if save_with_ls:
self.assertEqual(self.evaluate(opt.loss_scale), 1.0)
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
if opt_cls == optimizer_v2.OptimizerV2:
slot_var = opt.get_slot(var, "momentum")
self.assertEqual(self.evaluate(slot_var).item(), -1)
self.assertEqual(self.evaluate(opt.iterations), 1)
# Set optimizer variable to check arbitrary optimizer attributes can
# be saved/restored
self.evaluate(inner_opt.my_var.assign(1.0))
# Save a checkpoint.
checkpoint = tf.train.Checkpoint(optimizer=opt, var=var)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = checkpoint.save(prefix)
# Create new model
var = tf.Variable([2.0])
opt = inner_opt = MySGD(1.0, momentum=1.0)
if restore_with_ls:
opt = create_lso(
opt, initial_scale=1.0, dynamic_growth_steps=2.0
)
# Restore new model.
checkpoint = tf.train.Checkpoint(optimizer=opt, var=var)
status = checkpoint.restore(save_path)
if save_with_ls:
status.assert_existing_objects_matched()
else:
status.assert_nontrivial_match()
# Assert restored values. We can only assert in eager mode since the
# variables are uninitialized in graph mode
if tf.executing_eagerly():
self.assertEqual(self.evaluate(var), 1.0)
if save_with_ls and restore_with_ls:
self.assertEqual(self.evaluate(opt.loss_scale), 1.0)
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
elif restore_with_ls:
self.assertEqual(self.evaluate(opt.loss_scale), 1.0)
self.assertEqual(self.evaluate(opt.dynamic_counter), 0)
self.assertEqual(self.evaluate(opt.iterations), 1)
# Run the model again.
run_fn = lambda: opt.minimize(
lambda: var / replicas + 1.0, var_list=[var]
)
if use_tf_function:
run_fn = tf.function(run_fn)
opt_op = strategy.experimental_run(run_fn)
# Assert new values.
self.evaluate(tf.compat.v1.global_variables_initializer())
status.run_restore_ops()
self.evaluate(strategy.experimental_local_results(opt_op))
self.assertEqual(self.evaluate(var), -1)
if opt_cls == optimizer_v2.OptimizerV2:
slot_var = opt.get_slot(var, "momentum")
self.assertEqual(self.evaluate(slot_var).item(), -2)
self.assertEqual(self.evaluate(opt.iterations), 2)
self.assertEqual(self.evaluate(inner_opt.my_var), 1)
# Restore model again to test restoring after slots are created
status = checkpoint.restore(save_path)
if save_with_ls and restore_with_ls:
status.assert_consumed()
elif save_with_ls:
status.assert_existing_objects_matched()
elif restore_with_ls:
status.assert_nontrivial_match()
status.run_restore_ops()
self.assertEqual(self.evaluate(var), 1)
if opt_cls == optimizer_v2.OptimizerV2:
self.assertEqual(self.evaluate(slot_var).item(), -1)
@test_combinations.generate(
test_combinations.combine(config_version=["v2", "tf2_3"])
+ test_combinations.combine(config_version="v3", mode="eager")
)
def testGetConfigFixed(self, config_version):
# Get a config from LossScaleOptimizer, LossScaleOptimizerV3, or the
# LossScaleOptimizer from TF 2.3. Then restore the config into a
# LossScaleOptimizer or LossScaleOptimizerV3
if config_version == "v2":
opt = gradient_descent.SGD(2.0, momentum=0.5)
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, dynamic=False, initial_scale=2
)
config = opt.get_config()
opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
elif config_version == "v3":
opt = sgd_experimental.SGD(2.0, momentum=0.5)
opt = loss_scale_optimizer.LossScaleOptimizerV3(
opt, dynamic=False, initial_scale=2
)
config = opt.get_config()
opt = loss_scale_optimizer.LossScaleOptimizerV3.from_config(config)
else:
self.assertEqual(config_version, "tf2_3")
config = {
"optimizer": {
"class_name": "SGD",
"config": {
"learning_rate": 2.0,
"momentum": 0.5,
"decay": 0.0,
"nesterov": False,
"name": "SGD",
},
},
"loss_scale": {
"class_name": "FixedLossScale",
"config": {"loss_scale_value": 2.0},
},
}
opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
# Force hyperparameters to be created
opt.learning_rate
self.evaluate(tf.compat.v1.global_variables_initializer())
# Test attributes on the optimizer
self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
self.assertEqual(self.evaluate(opt.inner_optimizer.learning_rate), 2.0)
self.assertEqual(
self._eval_if_tensor(opt.inner_optimizer.momentum), 0.5
)
self.assertEqual(self.evaluate(opt.loss_scale), 2.0)
self.assertEqual(opt.initial_scale, 2.0)
self.assertIsNone(opt.dynamic_growth_steps)
self.assertIsNone(opt.dynamic_counter)
self.assertFalse(opt.dynamic)
# Ensure the optimizer can be used
var = tf.Variable([5.0])
run_op = self._run_fn_with_grad_check(
tf.distribute.get_strategy(), var, opt, 2
)()
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), [3.0])
@test_combinations.generate(
test_combinations.combine(config_version=["v2", "tf2_3"])
+ test_combinations.combine(config_version="v3", mode="eager")
)
def testGetConfigDynamic(self, config_version):
# Get a config from LossScaleOptimizer, LossScaleOptimizerV3, or the
# LossScaleOptimizer from TF 2.3. Then restore the config into a
# LossScaleOptimizer or LossScaleOptimizerV3
if config_version == "v2":
opt = gradient_descent.SGD(2.0, momentum=0.5)
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, initial_scale=2, dynamic_growth_steps=3
)
config = opt.get_config()
opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
elif config_version == "v3":
opt = sgd_experimental.SGD(2.0, momentum=0.5)
opt = loss_scale_optimizer.LossScaleOptimizerV3(
opt, initial_scale=2, dynamic_growth_steps=3
)
config = opt.get_config()
opt = loss_scale_optimizer.LossScaleOptimizerV3.from_config(config)
else:
self.assertEqual(config_version, "tf2_3")
config = {
"optimizer": {
"class_name": "SGD",
"config": {
"learning_rate": 2.0,
"momentum": 0.5,
"decay": 0.0,
"nesterov": False,
"name": "SGD",
},
},
"loss_scale": {
"class_name": "DynamicLossScale",
"config": {
"initial_loss_scale": 2.0,
"increment_period": 3,
"multiplier": 2.0,
},
},
}
opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
# Force hyperparameters to be created
opt.learning_rate
self.evaluate(tf.compat.v1.global_variables_initializer())
# Test attributes on the optimizer
self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
self.assertEqual(self.evaluate(opt.inner_optimizer.learning_rate), 2.0)
self.assertEqual(
self._eval_if_tensor(opt.inner_optimizer.momentum), 0.5
)
self.assertEqual(self.evaluate(opt.loss_scale), 2.0)
self.assertEqual(opt.initial_scale, 2.0)
self.assertEqual(opt.dynamic_growth_steps, 3.0)
self.assertTrue(opt.dynamic)
# Ensure the optimizer can be used
var = tf.Variable([5.0])
run_op = self._run_fn_with_grad_check(
tf.distribute.get_strategy(), var, opt, 2
)()
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), [3.0])
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
def test_from_config_with_invalid_multiplier(self):
config = {
"optimizer": {
"class_name": "SGD",
"config": {
"learning_rate": 2.0,
"momentum": 0.5,
"decay": 0.0,
"nesterov": False,
"name": "SGD",
},
},
"loss_scale": {
"class_name": "DynamicLossScale",
"config": {
"initial_loss_scale": 2.0,
"increment_period": 3,
"multiplier": 4.0,
},
},
}
expected_error = (
"Cannot deserialize LossScaleOptimizer with a "
"DynamicLossScale whose multiplier is not 2. Got "
"DynamicLossScale: DynamicLossScale\\("
)
with self.assertRaisesRegex(ValueError, expected_error):
loss_scale_optimizer.LossScaleOptimizer.from_config(config)
@test_combinations.generate(
test_combinations.combine(lso_type=["v1", "v2"])
+ test_combinations.combine(lso_type="v3", mode="eager")
)
def testSerializationWithBuiltInOptimizer(self, lso_type):
if lso_type in ("v1", "v2"):
opt = gradient_descent.SGD(2.0, momentum=0.5)
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, initial_scale=2.0, dynamic_growth_steps=3.0
)
config = optimizers.serialize(opt)
if lso_type == "v1":
# LossScaleOptimizerV1 was an older experimental version of LSO
# that is now deleted. The config had the same format as LSO but
                # the class name was different. This tests that LSO V1 configs
                # can still be deserialized; they are deserialized as a
                # (non-V1) LSO.
config["class_name"] = "LossScaleOptimizerV1"
else:
opt = sgd_experimental.SGD(2.0, momentum=0.5)
opt = loss_scale_optimizer.LossScaleOptimizerV3(
opt, initial_scale=2.0, dynamic_growth_steps=3
)
config = optimizers.serialize(opt)
opt = optimizers.deserialize(config)
# Force hyperparameters to be created
opt.learning_rate
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
self.assertEqual(
self._eval_if_tensor(opt.inner_optimizer.momentum), 0.5
)
self.assertEqual(self.evaluate(opt.loss_scale), 2.0)
self.assertEqual(opt.dynamic_growth_steps, 3.0)
self.assertTrue(opt.dynamic)
if lso_type in ("v1", "v2"):
self.assertEqual(type(opt), loss_scale_optimizer.LossScaleOptimizer)
else:
self.assertEqual(
type(opt), loss_scale_optimizer.LossScaleOptimizerV3
)
# Ensure the optimizer can be used
var = tf.Variable([5.0])
run_op = self._run_fn_with_grad_check(
tf.distribute.get_strategy(), var, opt, 2
)()
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), [3.0])
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
@test_combinations.generate(opt_combinations_only())
def testSerializationWithCustomOptimizer(self, opt_cls):
sgd_cls = type(create_sgd(opt_cls))
class MySGD(sgd_cls):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.my_attribute = 123
opt = MySGD(2.0, momentum=0.5)
opt = create_lso(opt, initial_scale=2.0, dynamic_growth_steps=3.0)
config = optimizers.serialize(opt)
custom_objects = {"MySGD": MySGD}
opt = optimizers.deserialize(config, custom_objects=custom_objects)
# Force hyperparameters to be created
opt.learning_rate
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
self.assertEqual(
self._eval_if_tensor(opt.inner_optimizer.momentum), 0.5
)
self.assertEqual(self.evaluate(opt.loss_scale), 2.0)
self.assertEqual(opt.dynamic_growth_steps, 3.0)
self.assertEqual(opt.inner_optimizer.my_attribute, 123)
@test_utils.run_v2_only
def testConvertToLegacyOptimizer(self):
opt = sgd_experimental.SGD(1.0)
opt = loss_scale_optimizer.BaseLossScaleOptimizer(opt)
converted_opt = optimizers.convert_to_legacy_optimizer(opt)
self.assertEqual(
type(converted_opt), loss_scale_optimizer.LossScaleOptimizer
)
reference_opt = gradient_descent.SGD(1.0)
reference_opt = loss_scale_optimizer.BaseLossScaleOptimizer(
reference_opt
)
self.assertEqual(converted_opt.get_config(), reference_opt.get_config())
# Test with a custom learning rate schedule
class CustomLRSchedule(learning_rate_schedule.LearningRateSchedule):
def __init__(self, initial_learning_rate):
self.initial_learning_rate = initial_learning_rate
def __call__(self, step):
step = tf.cast(step, tf.float32)
return self.initial_learning_rate / (step + 1)
def get_config(self):
return {"initial_learning_rate": self.initial_learning_rate}
opt = sgd_experimental.SGD(CustomLRSchedule(1.0))
opt = loss_scale_optimizer.BaseLossScaleOptimizer(opt)
converted_opt = optimizers.convert_to_legacy_optimizer(opt)
self.assertEqual(
type(converted_opt), loss_scale_optimizer.LossScaleOptimizer
)
reference_opt = gradient_descent.SGD(CustomLRSchedule(1.0))
reference_opt = loss_scale_optimizer.BaseLossScaleOptimizer(
reference_opt
)
self.assertEqual(converted_opt.get_config(), reference_opt.get_config())
@test_combinations.generate(opt_combinations_only())
def testUnsupportedStrategy(self, opt_cls):
strategy = tf.distribute.experimental.CentralStorageStrategy()
expected_error = (
"Loss scaling is not supported with the tf.distribute.Strategy: "
"CentralStorageStrategy. Try using a different Strategy, e.g. a "
"MirroredStrategy"
)
with strategy.scope(), self.assertRaisesRegex(
ValueError, expected_error
):
create_lso(create_sgd(opt_cls))
opt = create_lso(create_sgd(opt_cls))
with strategy.scope():
var = tf.Variable(1.0)
loss = lambda: var * 2.0
run_fn = lambda: opt.minimize(loss, [var])
with self.assertRaisesRegex(ValueError, expected_error):
strategy.experimental_run(run_fn)
@test_combinations.generate(opt_combinations_only())
def testInvalidArgsWithFixedLossScale(self, opt_cls):
opt = create_sgd(opt_cls)
with self.assertRaisesRegex(
ValueError,
'"initial_scale" must be specified if "dynamic" is False',
):
create_lso(opt, dynamic=False)
opt = create_sgd(opt_cls)
with self.assertRaisesRegex(
ValueError,
'"dynamic_growth_steps" must be None if "dynamic" is '
"False, but got: 2",
):
create_lso(
opt, dynamic=False, initial_scale=1, dynamic_growth_steps=2
)
@test_combinations.generate(opt_combinations_only())
def testDynamicMustBeBool(self, opt_cls):
opt = create_sgd(opt_cls)
with self.assertRaisesRegex(
TypeError,
'"dynamic" argument to LossScaleOptimizer.__init__ must be '
"a bool, but got: 'dynamic'",
):
create_lso(opt, "dynamic")
@test_combinations.generate(opt_combinations_only())
def testScalingWarning(self, opt_cls):
var = tf.Variable(1.0)
lso = create_lso(create_sgd(opt_cls))
with mock.patch.object(tf_logging, "warning") as mock_warn:
lso.apply_gradients([(tf.constant(1.0), var)])
self.assertIn(
"You forgot to call LossScaleOptimizer.get_scaled_loss() and "
"LossScaleOptimizer.get_unscaled_gradients() before",
mock_warn.call_args_list[0][0][0],
)
lso = create_lso(create_sgd(opt_cls))
with mock.patch.object(tf_logging, "warning") as mock_warn:
lso.get_scaled_loss(tf.constant(1.0))
lso.apply_gradients([(tf.constant(1.0), var)])
self.assertIn(
"You forgot to call "
"LossScaleOptimizer.get_unscaled_gradients() before",
mock_warn.call_args_list[0][0][0],
)
lso = create_lso(create_sgd(opt_cls))
with mock.patch.object(tf_logging, "warning") as mock_warn:
lso.get_unscaled_gradients([tf.constant(1.0)])
lso.apply_gradients([(tf.constant(1.0), var)])
self.assertIn(
"You forgot to call LossScaleOptimizer.get_scaled_loss() "
"before",
mock_warn.call_args_list[0][0][0],
)
@test_combinations.generate(opt_combinations_only())
def testScalingNoWarning(self, opt_cls):
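        # The expected usage pattern: scale the loss, compute gradients,
        # unscale them, then call apply_gradients; no warning should fire.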
var = tf.Variable(1.0)
lso = create_lso(create_sgd(opt_cls))
with mock.patch.object(tf_logging, "warning") as mock_warn:
lso.get_scaled_loss(tf.constant(1.0))
lso.get_unscaled_gradients([tf.constant(1.0)])
lso.apply_gradients([(tf.constant(1.0), var)])
mock_warn.assert_not_called()
@test_combinations.generate(opt_combinations_only())
def testErrorWhenNesting(self, opt_cls):
opt = create_sgd(opt_cls)
opt = create_lso(opt)
with self.assertRaisesRegex(
TypeError,
"LossScaleOptimizer cannot wrap another LossScaleOptimizer",
):
create_lso(opt)
@test_combinations.generate(opt_combinations_only())
def testErrorWrappingSameOptimizerMultipleTimes(self, opt_cls):
inner_opt = create_sgd(opt_cls)
create_lso(inner_opt)
with self.assertRaisesRegex(
ValueError,
'"inner_optimizer" is already wrapped by a LossScaleOptimizer.',
):
create_lso(inner_opt)
def testErrorWhenWrappingNonOptimizer(self):
with self.assertRaisesRegex(
TypeError,
'"inner_optimizer" must be an instance of '
"`tf.keras.optimizers.Optimizer` or "
"`tf.keras.optimizers.experimental.Optimizer`, but got: 1",
):
loss_scale_optimizer.BaseLossScaleOptimizer(1)
def testErrorWhenV3LsoWrapsV2Optimizer(self):
sgd = gradient_descent.SGD()
with self.assertRaisesRegex(
TypeError, "only the new experimental optimizer"
):
loss_scale_optimizer.LossScaleOptimizerV3(sgd)
def testErrorWhenV2LsoWrapsV3Optimizer(self):
sgd = sgd_experimental.SGD()
with self.assertRaisesRegex(
TypeError,
"only the classic optimizers subclassing from "
"`tf.keras.optimizers.Optimizer` can be passed",
):
loss_scale_optimizer.LossScaleOptimizer(sgd)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/mixed_precision/loss_scale_optimizer_test.py/0 | {
"file_path": "tf-keras/tf_keras/mixed_precision/loss_scale_optimizer_test.py",
"repo_id": "tf-keras",
"token_count": 28364
} | 187 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras models API."""
from tf_keras.engine.functional import Functional
from tf_keras.engine.sequential import Sequential
from tf_keras.engine.training import Model
# Private symbols that are used in tests.
# TODO(b/221261361): Clean up private symbols usage and remove these imports.
from tf_keras.models.cloning import _clone_functional_model
from tf_keras.models.cloning import _clone_layer
from tf_keras.models.cloning import _clone_layers_and_model_config
from tf_keras.models.cloning import _clone_sequential_model
from tf_keras.models.cloning import clone_and_build_model
from tf_keras.models.cloning import clone_model
from tf_keras.models.cloning import share_weights
from tf_keras.models.sharpness_aware_minimization import (
SharpnessAwareMinimization,
)
from tf_keras.saving.legacy.model_config import model_from_config
from tf_keras.saving.legacy.model_config import model_from_json
from tf_keras.saving.legacy.model_config import model_from_yaml
from tf_keras.saving.saving_api import load_model
from tf_keras.saving.saving_api import save_model
| tf-keras/tf_keras/models/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/models/__init__.py",
"repo_id": "tf-keras",
"token_count": 496
} | 188 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for OptimizerV2."""
import collections
import os
from copy import deepcopy
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import backend
from tf_keras import callbacks
from tf_keras import losses
from tf_keras.engine import input_layer
from tf_keras.engine import sequential
from tf_keras.engine import training
from tf_keras.layers import core
from tf_keras.layers import regularization
from tf_keras.optimizers import optimizer_v1
from tf_keras.optimizers.legacy import adadelta
from tf_keras.optimizers.legacy import adagrad
from tf_keras.optimizers.legacy import adam
from tf_keras.optimizers.legacy import adamax
from tf_keras.optimizers.legacy import ftrl
from tf_keras.optimizers.legacy import gradient_descent
from tf_keras.optimizers.legacy import nadam
from tf_keras.optimizers.legacy import optimizer_v2
from tf_keras.optimizers.legacy import rmsprop
from tf_keras.optimizers.schedules import learning_rate_schedule
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import np_utils
# isort: off
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
_DATA_TYPES = [tf.half, tf.float32, tf.float64]
# TODO(b/141710709): complex support in NVCC and ROCM.
if not tf_test_utils.IsBuiltWithNvcc() and not tf.test.is_built_with_rocm():
_DATA_TYPES += [tf.complex64, tf.complex128]
class OptimizerTest(tf.test.TestCase, parameterized.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testBasic(self):
for dtype in _DATA_TYPES:
with test_utils.use_gpu():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1
sgd = gradient_descent.SGD(3.0)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
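                # var0 = [1., 2.] - 3.0 * [5, 5]
                # var1 = [3., 4.] - 3.0 * [3, 3]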
self.assertAllClose([-14.0, -13.0], self.evaluate(var0))
self.assertAllClose([-6.0, -5.0], self.evaluate(var1))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testAdaptiveLearningRate(self):
for dtype in _DATA_TYPES:
with self.test_session():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
def loss():
return 5 * var0 + 3 * var1
sgd = gradient_descent.SGD(1.0)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, [var0, var1])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
# var0 = [1., 2.] - 1.0 * [5, 5]
self.assertAllClose([-4.0, -3.0], self.evaluate(var0))
# var1 = [3., 4.] - 1.0 * [3, 3]
self.assertAllClose([0.0, 1.0], self.evaluate(var1))
sgd.learning_rate = 0.5
if tf.executing_eagerly():
sgd.minimize(loss, [var0, var1])
else:
self.evaluate(opt_op)
# Validate updated params
# var0 = [-4., -3.] - 0.5 * [5, 5]
self.assertAllClose([-6.5, -5.5], self.evaluate(var0))
# var1 = [0., 1.] - 0.5 * [3, 3]
self.assertAllClose([-1.5, -0.5], self.evaluate(var1))
sgd.learning_rate = learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.5
)
if tf.executing_eagerly():
sgd.minimize(loss, [var0, var1])
else:
self.evaluate(opt_op)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testPrecomputedGradient(self):
for dtype in _DATA_TYPES:
with test_utils.use_gpu():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1
grad_loss = tf.constant([42, -42], dtype=dtype)
sgd = gradient_descent.SGD(3.0)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(
loss, var_list=[var0, var1], grad_loss=grad_loss
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose(
[1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
self.evaluate(var0),
)
self.assertAllClose(
[3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
self.evaluate(var1),
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testNoGradients(self):
for dtype in _DATA_TYPES:
with test_utils.use_gpu():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegex(ValueError, "No gradients"):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testNoGradientsForAnyVariables_Minimize(self):
for dtype in _DATA_TYPES:
with test_utils.use_gpu():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: tf.constant(5.0)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegex(
ValueError, "No gradients provided for any variable"
):
sgd_op.minimize(loss, var_list=[var0, var1])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testNoGradientsForAnyVariables_ApplyGradients(self):
for dtype in _DATA_TYPES:
with test_utils.use_gpu():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegex(
ValueError, "No gradients provided for any variable"
):
sgd_op.apply_gradients([(None, var0), (None, var1)])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testGradientsAsVariables(self):
for i, dtype in enumerate(_DATA_TYPES):
with test_utils.use_gpu():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 + 3 * var1
sgd = gradient_descent.SGD(3.0)
grads_and_vars = sgd._compute_gradients(loss, [var0, var1])
# Convert gradients to tf.Variables
converted_grads = [
tf.Variable(tf.zeros([2], dtype), name="c_%d_%d" % (i, j))
for j, gv in enumerate(grads_and_vars)
]
convert_ops = [
tf.compat.v1.assign(converted_grads[j], gv[0])
for j, gv in enumerate(grads_and_vars)
]
                # Run convert_ops to copy the gradients into the Variables.
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(convert_ops)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
converted_grads_and_vars = list(
zip(converted_grads, [var0, var1])
)
opt_op = sgd.apply_gradients(converted_grads_and_vars)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(convert_ops)
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14.0, -13.0], self.evaluate(var0))
self.assertAllClose([-6.0, -5.0], self.evaluate(var1))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testComputeGradientsWithTensors(self):
with test_utils.use_gpu():
x = tf.convert_to_tensor(1.0)
def f():
return x * x
sgd = gradient_descent.SGD(3.0)
grads_and_vars = sgd._compute_gradients(f, [x])
self.assertLen(grads_and_vars, 1)
grad, x_as_var = grads_and_vars[0]
self.assertIs(x, x_as_var)
self.assertEqual(2.0, self.evaluate(grad))
with self.assertRaises(NotImplementedError):
sgd.apply_gradients(grads_and_vars)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testConstraint(self):
constraint_01 = lambda x: tf.clip_by_value(x, -0.1, 0.0)
constraint_0 = lambda x: tf.clip_by_value(x, 0.0, 1.0)
with test_utils.use_gpu():
var0 = tf.Variable([1.0, 2.0], constraint=constraint_01)
var1 = tf.Variable([3.0, 4.0], constraint=constraint_0)
loss = lambda: 5 * var0 + 3 * var1
sgd = gradient_descent.SGD(3.0)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd.minimize(loss, var_list=[var0, var1])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testIterationWithoutMinimize(self):
with test_utils.use_gpu():
sgd = gradient_descent.SGD(3.0)
self.evaluate(sgd.iterations.initializer)
self.assertEqual(0, self.evaluate(sgd.iterations))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testConfig(self):
with test_utils.use_gpu():
opt = gradient_descent.SGD(learning_rate=1.0)
config = opt.get_config()
opt2 = gradient_descent.SGD.from_config(config)
lr = opt._get_hyper("learning_rate")
lr2 = opt2._get_hyper("learning_rate")
self.evaluate(tf.compat.v1.global_variables_initializer())
# assert both are equal float values.
self.assertEqual(self.evaluate(lr), self.evaluate(lr2))
var0 = tf.Variable([[1.0], [2.0]], dtype=tf.float32)
loss = lambda: 3 * var0
# learning rate variable created when calling minimize.
opt.minimize(loss, [var0])
opt3 = gradient_descent.SGD.from_config(config)
lr3 = opt3._get_hyper("learning_rate")
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(lr), self.evaluate(lr3))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testConfigWithLearningRateDecay(self):
with test_utils.use_gpu():
var0 = tf.Variable([[1.0], [2.0]], dtype=tf.float32)
for decay_schedule in [
learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.1
),
learning_rate_schedule.PiecewiseConstantDecay([5], [1.0, 0.5]),
]:
step = 10
opt = gradient_descent.SGD(decay_schedule)
config = opt.get_config()
opt2 = gradient_descent.SGD.from_config(config)
# assert both are equal float values.
self.assertAllEqual(
decay_schedule(step), opt._get_hyper("learning_rate")(step)
)
self.assertAllEqual(
decay_schedule(step), opt2._get_hyper("learning_rate")(step)
)
loss = lambda: 3 * var0
# learning rate variable is created when calling minimize.
opt.minimize(loss, [var0])
self.evaluate(tf.compat.v1.global_variables_initializer())
config = opt.get_config()
opt3 = gradient_descent.SGD.from_config(config)
self.assertAllEqual(
self.evaluate(opt._get_hyper("learning_rate")(step)),
opt3._get_hyper("learning_rate")(step),
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testGradClipValue(self):
with test_utils.use_gpu():
var = tf.Variable([1.0, 2.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.0, 1.0], self.evaluate(var))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testGradClipNorm(self):
with test_utils.use_gpu():
var = tf.Variable([1.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.0], self.evaluate(var))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testGradGlobalClipNorm(self):
with test_utils.use_gpu():
# l2 norm is 5.0
var1 = tf.Variable([1.0])
var2 = tf.Variable([2.0])
loss = lambda: 3 * var1 + 4 * var2
opt = gradient_descent.SGD(learning_rate=1.0, global_clipnorm=2.0)
opt_op = opt.minimize(loss, [var1, var2])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
# grad1 = 3.0 * 2.0 / 5.0 = 1.2
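            # var1 = [1.] - 1.0 * 1.2 = [-0.2]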
self.assertAllClose([-0.2], self.evaluate(var1))
# grad2 = 4.0 * 2.0 / 5.0 = 1.6
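            # var2 = [2.] - 1.0 * 1.6 = [0.4]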
self.assertAllClose([0.4], self.evaluate(var2))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testInvalidClipNorm(self):
with self.assertRaisesRegex(ValueError, ">= 0"):
gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
@test_combinations.generate(
test_combinations.combine(
mode=["graph", "eager"],
clip_type=["clipnorm", "global_clipnorm", "clipvalue"],
)
)
def testConfigWithCliping(self, clip_type):
opt = gradient_descent.SGD(learning_rate=1.0, **{clip_type: 2.0})
config = opt.get_config()
opt = gradient_descent.SGD.from_config(config)
self.assertEqual(getattr(opt, clip_type), 2.0)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testInvalidKwargs(self):
with self.assertRaisesRegex(TypeError, "Unexpected keyword argument"):
gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testWeights(self):
with test_utils.use_gpu():
opt1 = adam.Adam(learning_rate=1.0)
var1 = tf.Variable([1.0, 2.0], dtype=tf.float32)
loss1 = lambda: 3 * var1
opt_op_1 = opt1.minimize(loss1, [var1])
self.evaluate(tf.compat.v1.global_variables_initializer())
config = opt1.get_config()
opt2 = adam.Adam.from_config(config)
var2 = tf.Variable([1.0, 2.0], dtype=tf.float32)
loss2 = lambda: 3 * var2
opt_op_2 = opt2.minimize(loss2, [var2])
weights = opt1.get_weights()
            # Assert that after set_weights, both variables get updated
            # to the same value.
self.evaluate(tf.compat.v1.global_variables_initializer())
opt2.set_weights(weights)
self.evaluate([opt_op_1, opt_op_2])
self.assertAllClose(self.evaluate(var1), self.evaluate(var2))
self.assertEqual(1, self.evaluate(opt1.iterations))
self.assertEqual(1, self.evaluate(opt2.iterations))
var3 = tf.Variable([1.0, 2.0, 3.0], dtype=tf.float32)
var4 = tf.Variable([4.0, 5.0, 6.0], dtype=tf.float32)
loss3 = lambda: 3 * var3 + 5 * var4
opt_op_3 = opt1.minimize(loss3, [var3, var4])
            # Assert that set_weights raises a ValueError since the
            # weight list does not match.
self.evaluate(tf.compat.v1.global_variables_initializer())
weights = opt1.get_weights()
with self.assertRaisesRegex(ValueError, "but the optimizer was"):
opt2.set_weights(weights)
            # Assert that after set_weights, the variables get updated to
            # the same value.
var5 = tf.Variable([1.0, 2.0, 3.0], dtype=tf.float32)
var6 = tf.Variable([4.0, 5.0, 6.0], dtype=tf.float32)
loss4 = lambda: 3 * var5 + 5 * var6
opt_op_4 = opt2.minimize(loss4, [var5, var6])
self.evaluate(tf.compat.v1.global_variables_initializer())
opt2.set_weights(weights)
self.evaluate([opt_op_3, opt_op_4])
self.assertAllClose(
self.evaluate([var3, var4]), self.evaluate([var5, var6])
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testGettingHyperParameters(self):
with self.test_session():
opt = adam.Adam(learning_rate=1.0)
var = tf.Variable([1.0, 2.0], dtype=tf.float32)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
lr = self.evaluate(opt.lr)
self.assertEqual(1.0, lr)
opt.lr = 2.0
lr = self.evaluate(opt.lr)
self.assertEqual(2.0, lr)
self.evaluate(opt.lr.assign(3.0))
lr = self.evaluate(opt.lr)
self.assertEqual(3.0, lr)
with self.assertRaises(AttributeError):
opt.not_an_attr += 3
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testGettingHyperParametersWithLrInConstructor(self):
with self.test_session():
opt = gradient_descent.SGD(lr=3.0)
var = tf.Variable([1.0, 2.0], dtype=tf.float32)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
self.assertIsInstance(opt.lr, tf.Variable)
self.assertIsInstance(opt.learning_rate, tf.Variable)
lr = self.evaluate(opt.lr)
self.assertEqual(3.0, lr)
opt.lr = 2.0
lr = self.evaluate(opt.lr)
self.assertEqual(2.0, lr)
self.evaluate(opt.lr.assign(4.0))
lr = self.evaluate(opt.lr)
self.assertEqual(4.0, lr)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testDir(self):
opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.1)
dir_result = set(dir(opt))
self.assertIn("learning_rate", dir_result) # Hyperparameter
self.assertIn("lr", dir_result) # Hyperparameter
self.assertIn("momentum", dir_result) # Hyperparameter
self.assertIn("nesterov", dir_result) # Attribute
self.assertIn("minimize", dir_result) # Attribute
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testOptimizerWithKerasModel(self):
a = input_layer.Input(shape=(3,), name="input_a")
b = input_layer.Input(shape=(3,), name="input_b")
dense = core.Dense(4, name="dense")
c = dense(a)
d = dense(b)
e = regularization.Dropout(0.5, name="dropout")(c)
model = training.Model([a, b], [d, e])
optimizer = gradient_descent.SGD(learning_rate=0.001)
loss = "mse"
model.compile(optimizer, loss, metrics=["mae"])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit(
[input_a_np, input_b_np],
[output_d_np, output_e_np],
epochs=1,
batch_size=5,
)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testOptimizerSaving(self):
np.random.seed(1331)
input_np = np.random.random((10, 3))
output_np = np.random.random((10, 4))
a = input_layer.Input(shape=(3,), name="input_a")
model = sequential.Sequential()
model.add(core.Dense(4, kernel_initializer="zeros", name="dense"))
model.add(regularization.Dropout(0.5, name="dropout"))
model(a)
optimizer = gradient_descent.SGD(learning_rate=0.1)
model.compile(optimizer, loss="mse", metrics=["mae"])
model.fit(
input_np,
output_np,
batch_size=10,
validation_data=(input_np, output_np),
epochs=2,
verbose=0,
)
temp_filepath = os.path.join(self.get_temp_dir(), "optv2_model.keras")
model.save(temp_filepath)
loaded_model = keras.models.load_model(temp_filepath)
self.assertAllClose(model(input_np), loaded_model(input_np), atol=1e-6)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testOptimizerWithCallbacks(self):
np.random.seed(1331)
input_np = np.random.random((10, 3))
output_np = np.random.random((10, 4))
a = input_layer.Input(shape=(3,), name="input_a")
model = sequential.Sequential()
model.add(core.Dense(4, kernel_initializer="zeros", name="dense"))
model.add(regularization.Dropout(0.5, name="dropout"))
model(a)
optimizer = gradient_descent.SGD(learning_rate=0.1)
model.compile(optimizer, loss="mse", metrics=["mae"])
# This does not reduce the LR after the first epoch (due to low delta).
cbks = [
callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.1,
min_delta=0,
patience=1,
cooldown=5,
)
]
model.fit(
input_np,
output_np,
batch_size=10,
validation_data=(input_np, output_np),
callbacks=cbks,
epochs=2,
verbose=0,
)
self.assertAllClose(
float(backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4
)
# This should reduce the LR after the first epoch (due to high delta).
cbks = [
callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.1,
min_delta=10,
patience=1,
cooldown=5,
)
]
model.fit(
input_np,
output_np,
batch_size=10,
validation_data=(input_np, output_np),
callbacks=cbks,
epochs=2,
verbose=2,
)
self.assertAllClose(
float(backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4
)
def testOptimizerSetIterations(self):
global_step = tf.compat.v1.train.get_or_create_global_step()
opt = adam.Adam(learning_rate=1.0)
opt.iterations = global_step
var = tf.Variable([1.0, 2.0], dtype=tf.float32)
self.evaluate(tf.compat.v1.global_variables_initializer())
init_step_value = self.evaluate(global_step)
loss = lambda: 3 * var
opt_op = opt.minimize(loss, [var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
new_step_value = self.evaluate(global_step)
self.assertEqual(new_step_value, init_step_value + 1)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testOptimizerWithCallableVarList(self):
train_samples = 20
input_dim = 1
num_classes = 2
(x, y), _ = test_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes,
)
y = np_utils.to_categorical(y)
num_hidden = 1
model = test_utils.get_small_sequential_mlp(
num_hidden=num_hidden, num_classes=num_classes
)
opt = adam.Adam()
loss = lambda: losses.mean_squared_error(model(x), y)
var_list = lambda: model.trainable_weights
with self.assertRaisesRegex(
ValueError, "Weights for model .* have not yet been created"
):
var_list()
train_op = opt.minimize(loss, var_list)
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(
[[0.0]], self.evaluate(opt.get_slot(var_list()[0], "m"))
)
self.evaluate(train_op)
self.assertNotEqual(
[[0.0]], self.evaluate(opt.get_slot(var_list()[0], "m"))
)
self.assertLen(var_list(), 4)
def testVarKey(self):
with tf.compat.v1.get_default_graph().as_default():
a = tf.Variable([1.0, 2.0], name="var")
b = tf.Variable([1.0], name="var")
self.assertTrue(a._in_graph_mode)
self.assertTrue(b._in_graph_mode)
var_key = optimizer_v2._var_key(a)
self.assertEqual("var", var_key)
var_key = optimizer_v2._var_key(b)
self.assertEqual("var_1", var_key)
def testVarName(self):
with tf.compat.v1.get_default_graph().as_default():
var = tf.Variable([1.0, 2.0], name="var")
loss = var + 1.0
opt = adam.Adam()
opt.get_updates(loss, [var])
opt_vars = opt.variables()
self.assertLen(opt_vars, 3)
self.assertEqual("Adam/iter:0", opt_vars[0].name)
self.assertEqual("Adam/var/m:0", opt_vars[1].name)
var_2 = tf.Variable([1.0, 2.0], name="var_2")
loss = var_2 + 1.0
with backend.name_scope("outter"):
opt.get_updates(loss, [var_2])
opt_vars = opt.variables()
self.assertLen(opt_vars, 5)
self.assertEqual("outter/Adam/var_2/m:0", opt_vars[3].name)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testEmptyVarList(self):
opt = gradient_descent.SGD(1.0)
opt.minimize(lambda: tf.constant(1.0), [])
opt.apply_gradients([])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testAggregationTrue(self):
# Test that experimental_aggregate_gradients=True works without
# distributed strategy.
var = tf.Variable([1.0, 2.0])
opt = gradient_descent.SGD(3.0)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose([1.0, 2.0], self.evaluate(var))
opt_op = opt.apply_gradients(
[([0.1, 0.1], var)], experimental_aggregate_gradients=True
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.7, 1.7], self.evaluate(var))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testAggregationFalse(self):
# Test that experimental_aggregate_gradients=False works without
# distributed strategy.
var = tf.Variable([1.0, 2.0])
opt = gradient_descent.SGD(3.0)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose([1.0, 2.0], self.evaluate(var))
opt_op = opt.apply_gradients(
[([0.1, 0.1], var)], experimental_aggregate_gradients=False
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.7, 1.7], self.evaluate(var))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testRestoringIterationsWithoutAnOptimizer(self):
opt = gradient_descent.SGD(3.0)
opt.iterations.assign(5)
checkpoint = tf.train.Checkpoint(optimizer=opt)
path = checkpoint.save(self.get_temp_dir())
        # The following verifies that `iterations` can be restored in the
        # absence of an `Optimizer` object (using a `Checkpoint` as a
        # placeholder).
iterations_var = tf.Variable(0, dtype=tf.int64)
optimizer_checkpoint = tf.train.Checkpoint(iter=iterations_var)
checkpoint_to_restore = tf.train.Checkpoint(
optimizer=optimizer_checkpoint
)
checkpoint_to_restore.restore(path)
self.assertEqual(5, self.evaluate(iterations_var))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testSlotWithNonstandardShapeRestoresBasedOnCheckpoint(self):
# First create an optimizer and a slot variable with a non-standard
# shape.
x = tf.Variable([[1.0, 2.0], [3.0, 4.0]], dtype=tf.float32)
slot_shape = [2, 1]
optimizer_1 = optimizer_v2.OptimizerV2(name="test")
optimizer_1.add_slot(x, "test_slot", "ones", shape=slot_shape)
# Then save the variable and optimizer to a checkpoint.
checkpoint_1 = tf.train.Checkpoint(var=x, optimizer=optimizer_1)
checkpoint_path = checkpoint_1.save(self.get_temp_dir())
# Create a new optimizer and call restore on it (and x)
optimizer_2 = optimizer_v2.OptimizerV2(name="test")
checkpoint_2 = tf.train.Checkpoint(var=x, optimizer=optimizer_2)
checkpoint_2.restore(checkpoint_path)
self.assertEqual(
slot_shape, optimizer_2.get_slot(x, "test_slot").shape.as_list()
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_gradient_aggregator(self):
def gradient_aggregator(grads_and_vars):
# Simulate an all-reduce where the other replica has zeros for
# gradients, by dividing each gradient by 2.
grads = [g for g, _ in grads_and_vars]
vars = [v for _, v in grads_and_vars]
all_reduced_grads = [g / 2 for g in grads]
return list(zip(all_reduced_grads, vars))
var = tf.Variable(2.0)
        sgd = gradient_descent.SGD(
            1.0, gradient_aggregator=gradient_aggregator
        )
loss = lambda: 2 * var
opt_op = sgd.minimize(loss, var_list=[var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
self.assertEqual(self.evaluate(var), 1.0)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_override_aggregate_gradients(self):
class MyOptimizer(gradient_descent.SGD):
def _aggregate_gradients(self, grads_and_vars):
# Simulate an all-reduce where the other replica has zeros for
# gradients, by dividing each gradient by 2.
grads = [g for g, _ in grads_and_vars]
vars = [v for _, v in grads_and_vars]
all_reduced_grads = [g / 2 for g in grads]
return list(zip(all_reduced_grads, vars))
var = tf.Variable(2.0)
sgd = MyOptimizer(1.0)
loss = lambda: 2 * var
opt_op = sgd.minimize(loss, var_list=[var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
self.assertEqual(self.evaluate(var), 1.0)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_create_slots_for_sharded_variables(self):
        # Set names so that the ShardedVariable is well-named for slot
        # variable keying.
var_a = tf.Variable([1.0], name="part_0")
var_b = tf.Variable([2.0], name="part_1")
        sharded_var = tf.__internal__.distribute.ShardedVariable(
            [var_a, var_b]
        )
opt = adagrad.Adagrad()
opt._create_slots(sharded_var.variables)
opt._create_slots_for_sharded_variables(sharded_var.variables)
sharded_slot = opt.get_slot(sharded_var, "accumulator")
self.assertIsInstance(
sharded_slot, tf.__internal__.distribute.ShardedVariable
)
slot_a = opt.get_slot(var_a, "accumulator")
self.assertAllClose(sharded_slot.variables[0], slot_a)
slot_b = opt.get_slot(var_b, "accumulator")
self.assertAllClose(sharded_slot.variables[1], slot_b)
@test_combinations.run_all_keras_modes
class OptimizersCompatibilityTest(test_combinations.TestCase):
def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
if tf.executing_eagerly():
self.skipTest("v1 optimizer does not run in eager mode")
np.random.seed(1331)
with test_utils.use_gpu():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = test_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes,
)
y = np_utils.to_categorical(y)
num_hidden = 5
model_v1 = test_utils.get_small_sequential_mlp(
num_hidden=num_hidden,
num_classes=num_classes,
input_dim=input_dim,
)
model_v1.compile(
opt_v1,
loss="categorical_crossentropy",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
model_v1.fit(x, y, batch_size=5, epochs=1)
model_v2 = test_utils.get_small_sequential_mlp(
num_hidden=num_hidden,
num_classes=num_classes,
input_dim=input_dim,
)
model_v2.set_weights(model_v1.get_weights())
model_v2.compile(
opt_v2,
loss="categorical_crossentropy",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
if not tf.compat.v1.executing_eagerly_outside_functions():
model_v2._make_train_function()
if test_weights:
opt_v2.set_weights(opt_v1.get_weights())
hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
self.assertAllClose(
model_v1.get_weights(),
model_v2.get_weights(),
rtol=1e-5,
atol=1e-5,
)
self.assertAllClose(
hist_1.history["loss"],
hist_2.history["loss"],
rtol=1e-5,
atol=1e-5,
)
def testAdadeltaCompatibility(self):
opt_v1 = optimizer_v1.Adadelta(lr=0.01)
opt_v2 = adadelta.Adadelta(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdagradCompatibility(self):
opt_v1 = optimizer_v1.Adagrad(lr=0.01)
opt_v2 = adagrad.Adagrad(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdamCompatibility(self):
opt_v1 = optimizer_v1.Adam()
opt_v2 = adam.Adam()
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testAdamaxCompatibility(self):
opt_v1 = optimizer_v1.Adamax(lr=0.01)
opt_v2 = adamax.Adamax(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testNadamCompatibility(self):
opt_v1 = optimizer_v1.Nadam(lr=0.001)
opt_v2 = nadam.Nadam(learning_rate=0.001)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testMomentumCompatibility(self):
opt_v1 = optimizer_v1.SGD(lr=0.01, momentum=0.9)
opt_v2 = gradient_descent.SGD(learning_rate=0.01, momentum=0.9)
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testRMSpropCompatibility(self):
opt_v1 = optimizer_v1.RMSprop()
opt_v2 = rmsprop.RMSprop()
self._testOptimizersCompatibility(opt_v1, opt_v2)
def testSGDCompatibility(self):
opt_v1 = optimizer_v1.SGD(lr=0.01)
opt_v2 = gradient_descent.SGD(learning_rate=0.01)
self._testOptimizersCompatibility(opt_v1, opt_v2, False)
def testNumericEquivalenceForNesterovMomentum(self):
if tf.executing_eagerly():
self.skipTest("v1 optimizer does not run in eager mode")
np.random.seed(1331)
with test_utils.use_gpu():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = test_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes,
)
y = np_utils.to_categorical(y)
num_hidden = 5
model_k_v1 = test_utils.get_small_sequential_mlp(
num_hidden=num_hidden,
num_classes=num_classes,
input_dim=input_dim,
)
model_k_v2 = test_utils.get_small_sequential_mlp(
num_hidden=num_hidden,
num_classes=num_classes,
input_dim=input_dim,
)
model_k_v2.set_weights(model_k_v1.get_weights())
model_tf = test_utils.get_small_sequential_mlp(
num_hidden=num_hidden,
num_classes=num_classes,
input_dim=input_dim,
)
model_tf.set_weights(model_k_v2.get_weights())
opt_k_v1 = optimizer_v1.SGD(momentum=0.9, nesterov=True)
opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
opt_tf = tf.compat.v1.train.MomentumOptimizer(
learning_rate=0.01, momentum=0.9, use_nesterov=True
)
model_k_v1.compile(
opt_k_v1,
loss="categorical_crossentropy",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
model_k_v2.compile(
opt_k_v2,
loss="categorical_crossentropy",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
model_tf.compile(
opt_tf,
loss="categorical_crossentropy",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
hist_k_v1 = model_k_v1.fit(
x, y, batch_size=5, epochs=10, shuffle=False
)
hist_k_v2 = model_k_v2.fit(
x, y, batch_size=5, epochs=10, shuffle=False
)
hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False)
self.assertAllClose(
model_k_v1.get_weights(), model_tf.get_weights()
)
self.assertAllClose(
model_k_v1.get_weights(), model_k_v2.get_weights()
)
self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
self.assertAllClose(
hist_k_v1.history["loss"], hist_tf.history["loss"]
)
self.assertAllClose(
hist_k_v1.history["loss"], hist_k_v2.history["loss"]
)
def testNumericEquivalenceForAmsgrad(self):
if tf.executing_eagerly():
self.skipTest("v1 optimizer does not run in eager mode")
np.random.seed(1331)
with test_utils.use_gpu():
train_samples = 20
input_dim = 3
num_classes = 2
(x, y), _ = test_utils.get_test_data(
train_samples=train_samples,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes,
)
y = np_utils.to_categorical(y)
num_hidden = 5
model_k_v1 = test_utils.get_small_sequential_mlp(
num_hidden=num_hidden,
num_classes=num_classes,
input_dim=input_dim,
)
model_k_v2 = test_utils.get_small_sequential_mlp(
num_hidden=num_hidden,
num_classes=num_classes,
input_dim=input_dim,
)
model_k_v2.set_weights(model_k_v1.get_weights())
opt_k_v1 = optimizer_v1.Adam(amsgrad=True)
opt_k_v2 = adam.Adam(amsgrad=True)
model_k_v1.compile(
opt_k_v1,
loss="categorical_crossentropy",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
model_k_v2.compile(
opt_k_v2,
loss="categorical_crossentropy",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
hist_k_v1 = model_k_v1.fit(
x, y, batch_size=5, epochs=10, shuffle=False
)
hist_k_v2 = model_k_v2.fit(
x, y, batch_size=5, epochs=10, shuffle=False
)
self.assertAllClose(
model_k_v1.get_weights(), model_k_v2.get_weights()
)
self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
self.assertAllClose(
hist_k_v1.history["loss"], hist_k_v2.history["loss"]
)
# Note: These tests are kept in a separate class to avoid bugs in some
# distributions of Python that break AutoGraph, which is used by tf.function.
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
class OptimizerWithFunctionTest(tf.test.TestCase, parameterized.TestCase):
def testBasic(self):
var = tf.Variable([1.0, 2.0], dtype=tf.float32)
loss = lambda: 3 * var
opt = adam.Adam(learning_rate=1.0)
@tf.function
def fn():
opt.minimize(loss, [var])
return var
self.assertAllClose([0.0, 1.0], fn(), atol=1e-4)
self.assertAllClose([-1, 0.0], fn(), atol=1e-4)
def testBasicWithConstantDecay(self):
var = tf.Variable([1.0, 2.0], dtype=tf.float32)
loss = lambda: 3 * var
opt = adam.Adam(learning_rate=1.0)
@tf.function
def fn():
opt.minimize(loss, [var])
return var
self.assertAllClose([0.0, 1.0], fn(), atol=1e-4)
self.assertAllClose([-1, 0.0], fn(), atol=1e-4)
def testVarKeyWithVarCreatedInEager(self):
a = tf.Variable([1.0, 2.0], name="var")
b = tf.Variable([1.0], name="var")
@tf_test_utils.also_run_as_tf_function
def var_key_test():
self.assertFalse(a._in_graph_mode)
self.assertFalse(b._in_graph_mode)
var_key_a = optimizer_v2._var_key(a)
self.assertStartsWith(var_key_a, "var_")
var_key_b = optimizer_v2._var_key(b)
self.assertStartsWith(var_key_b, "var_")
self.assertNotEqual(var_key_a, var_key_b)
var_key_test()
def testLearningRateDecayUsedInTwoFunctions(self):
a = tf.Variable([1.0, 2.0], name="var")
b = tf.Variable([1.0], name="var")
learning_rate_decay = learning_rate_schedule.InverseTimeDecay(
0.5, decay_steps=1.0, decay_rate=0.5
)
opt = adam.Adam(learning_rate=learning_rate_decay)
loss_a = lambda: 3 * a
loss_b = lambda: 2 * b
@tf.function
def fn_a():
opt.minimize(loss_a, [a])
return a
@tf.function
def fn_b():
opt.minimize(loss_b, [b])
return b
fn_a()
fn_b()
_NUM_LEARNERS = 50
APPLY_SCOPE = "debug_apply"
ALLOWLIST = [
# optimizer_v2._deduplicate_indexed_slices contains an indexed slice:
# array_ops.shape(unique_indices)[0]
# which winds up expanding to [0:1:1] thereby creating three constants
# to represent the indices.
("embeddings/strided_slice/stack", "Const"),
]
def get_inputs(op):
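    """Returns `op`'s data and control input ops and the input names."""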
op_inputs = list(op.inputs) + op.control_inputs
names = [i.name for i in op_inputs]
op_inputs = [getattr(i, "op", i) for i in op_inputs]
return op_inputs, names
def strip_name(node):
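    """Clears `node.name` (placeholders excepted) to ease node comparison."""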
if "Placeholder" in node.op:
return
node.name = ""
def topological_sort(graph):
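    """Topologically sorts the ops in `graph` (Kahn's algorithm)."""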
graph_ops = graph.get_operations()
sources = []
result = []
inputs = {}
outputs = collections.defaultdict(set)
for op in graph_ops:
op_inputs = get_inputs(op)[0]
if not op_inputs:
sources.append(op)
inputs[op] = set(op_inputs)
for i in op_inputs:
outputs[i].add(op)
while sources:
op = sources.pop()
for op_output in outputs[op]:
inputs[op_output].remove(op)
if not inputs[op_output]:
sources.append(op_output)
result.append(op)
# Check correctness.
if len(result) != len(graph_ops):
raise ValueError(
f"Sort result has {len(result)} ops, "
f"source graph has {len(graph_ops)}."
)
sort_check_seen = set()
for op in result:
sort_check_seen.add(op)
for i in get_inputs(op)[0]:
assert i in sort_check_seen
return result
def identify_redundant_ops(graph):
"""Implements basic common subexpression elimination.
This is not intended to replicate the graph semantics of TensorFlow Graphs
(for instance it does not handle stateful op ordering), nor is it intended
to replace the common subexpression elimination Grappler pass. Rather, it
provides a high level sanity check that clearly redundant ops are not being
created.
Args:
graph: The graph to be analyzed.
Returns:
A count of the duplicate ops and a description of the structure of each.
"""
sorted_ops = topological_sort(graph)
duplicates = collections.defaultdict(list)
unified_node_defs = {}
name_map = {}
for op in sorted_ops:
input_names = []
for op_input, name in zip(*get_inputs(op)):
input_def = op_input.node_def
# Operations can have multiple outputs. We track which is used to
# prevent overzealous elimination.
input_def.name = name
input_def.input[:] = [name_map.get(i, i) for i in input_def.input]
strip_name(input_def)
# NodeDef.SerializeToString() does not provide identical serialized
# representations for identical NodeDefs, so we instead use string
# representation as a dict key.
key = repr(input_def)
if key in unified_node_defs:
input_names.append(unified_node_defs[key])
else:
unified_node_defs[key] = op_input.name
input_names.append(name)
node_def = op.node_def
node_def.input[:] = input_names
strip_name(node_def)
key = repr(node_def)
duplicates[key].append(op)
name_map[op.name] = duplicates[key][0].name
num_duplicates = 0
duplicate_types = []
for standard_def, op_defs in duplicates.items():
# We are only interested in testing the apply method of the optimizer
op_defs = [i for i in op_defs if APPLY_SCOPE in i.name]
# We only check for per-apply redundant ops.
if len(op_defs) < _NUM_LEARNERS:
continue
        # Certain ops are not worth eliminating and are instead simply
        # ignored.
name, op_type = op_defs[0].name, op_defs[0].type
if any(
allowlisted_scope in name and op_type == allowlisted_type
for allowlisted_scope, allowlisted_type in ALLOWLIST
):
continue
num_duplicates += len(op_defs)
traceback = []
for level in op_defs[0].traceback:
traceback.append(f" {level[0]} {level[2]}:{level[1]}")
duplicate_types.append(
"# Example name: {}\n# Op creation stack:\n{}\n{}".format(
op_defs[0].name, "\n".join(traceback), standard_def
)
)
return num_duplicates, duplicate_types
def make_model():
r"""Constructs a simple ensemble of weak learners model.
--------- --------- --------- ---------
| Input | | Input | ... | Input | | Input |
--------- --------- --------- ---------
| | | |
V V V V
--------- --------- --------- ---------
| Embed | | Embed | ... | Embed | | Embed |
--------- --------- --------- ---------
| | | |
V V V V
--------- --------- --------- ---------
| Dense | | Dense | ... | Dense | | Dense |
--------- --------- --------- ---------
\ | | /
\ | | /
---------------------------------------------
|
---------
| Dense |
---------
This topology is chosen because it exercises both dense and sparse update
paths.
Returns:
A model for testing optimizer coefficient reuse.
"""
inputs = []
intermediates = []
for _ in range(_NUM_LEARNERS):
inp = keras.layers.Input(shape=(1,), dtype=tf.int32)
layer = keras.layers.Embedding(1, 4)(inp)
layer = keras.layers.Dense(1)(layer)
inputs.append(inp)
intermediates.append(layer)
layer = keras.layers.Concatenate(axis=-1)(intermediates)
layer = keras.layers.Dense(1)(layer)
return keras.models.Model(inputs, layer)
COEFFICIENT_PARAMS = (
("Adadelta", adadelta.Adadelta, None),
("Adagrad", adagrad.Adagrad, None),
("Adam", adam.Adam, None),
("Adam_amdgrad", adam.Adam, dict(amsgrad=True)),
("Adamax", adamax.Adamax, None),
("Ftrl", ftrl.Ftrl, None),
(
"Ftrl_l2_shrinkage",
ftrl.Ftrl,
dict(l2_shrinkage_regularization_strength=0.1),
),
("SGD", gradient_descent.SGD, None),
("SGD_momentum", gradient_descent.SGD, dict(momentum=0.5)),
("Nadam", nadam.Nadam, None),
("RMSprop", rmsprop.RMSprop, None),
("RMSprop_centered", rmsprop.RMSprop, dict(centered=True)),
("RMSprop_momentum", rmsprop.RMSprop, dict(momentum=0.5)),
(
"RMSprop_momentum_centered",
rmsprop.RMSprop,
dict(momentum=0.5, centered=True),
),
)
class OptimizerCoefficientTest(test_combinations.TestCase):
@parameterized.named_parameters(*COEFFICIENT_PARAMS)
def test_duplicate_ops(self, optimizer_class, init_kwargs=None):
init_kwargs = init_kwargs or {}
optimizer = optimizer_class(**init_kwargs)
graph = tf.Graph()
with graph.as_default():
model = make_model()
trainable_variables = model.trainable_variables
grads = optimizer.get_gradients(
model.outputs[0], trainable_variables
)
with backend.name_scope(APPLY_SCOPE):
optimizer.apply_gradients(zip(grads, trainable_variables))
num_duplicates, duplicate_types = identify_redundant_ops(graph)
if num_duplicates:
# Avoid spamming logs.
if len(duplicate_types) > 3:
duplicate_types = duplicate_types[:3] + ["..."]
num_total = len(graph.get_operations())
raise ValueError(
"{} of {} ({:.1f}%) ops were duplicates:\n\n{}".format(
num_duplicates,
num_total,
num_duplicates / num_total * 100,
"\n".join(duplicate_types),
)
)
@parameterized.named_parameters(*COEFFICIENT_PARAMS)
def test_subclass_compat(self, optimizer_class, init_kwargs=None):
"""Ensure that subclassed optimizers without apply_state still work."""
class SubclassedOptimizer(optimizer_class):
def _resource_apply_dense(self, grad, var):
return super()._resource_apply_dense(grad, var)
def _resource_apply_sparse(self, grad, var, indices):
return super()._resource_apply_sparse(grad, var, indices)
init_kwargs = init_kwargs or {}
optimizer = SubclassedOptimizer(**init_kwargs)
graph = tf.Graph()
with graph.as_default():
model = make_model()
trainable_variables = model.trainable_variables
grads = optimizer.get_gradients(
model.outputs[0], trainable_variables
)
with backend.name_scope(APPLY_SCOPE):
optimizer.apply_gradients(zip(grads, trainable_variables))
class DeepcopyTests(tf.test.TestCase):
def setUp(self):
self.optimizer = adam.Adam(0.42)
super().setUp()
def test_deepcopy(self):
clone = deepcopy(self.optimizer)
assert clone.get_config()["learning_rate"] == 0.42, "wrong lr"
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/optimizers/legacy/optimizer_v2_test.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/optimizer_v2_test.py",
"repo_id": "tf-keras",
"token_count": 29114
} | 189 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate schedule functions."""
import abc
import math
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.saving import serialization_lib
from tf_keras.saving.legacy import serialization as legacy_serialization
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.optimizers.schedules.LearningRateSchedule")
class LearningRateSchedule:
"""The learning rate schedule base class.
You can use a learning rate schedule to modulate how the learning rate
of your optimizer changes over time.
Several built-in learning rate schedules are available, such as
`tf.keras.optimizers.schedules.ExponentialDecay` or
`tf.keras.optimizers.schedules.PiecewiseConstantDecay`:
```python
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-2,
decay_steps=10000,
decay_rate=0.9)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)
```
A `LearningRateSchedule` instance can be passed in as the `learning_rate`
argument of any optimizer.
To implement your own schedule object, you should implement the `__call__`
method, which takes a `step` argument (scalar integer tensor, the
current training step count).
Like for any other TF-Keras object, you can also optionally
make your object serializable by implementing the `get_config`
and `from_config` methods.
Example:
```python
class MyLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, initial_learning_rate):
self.initial_learning_rate = initial_learning_rate
def __call__(self, step):
return self.initial_learning_rate / (step + 1)
optimizer = tf.keras.optimizers.SGD(learning_rate=MyLRSchedule(0.1))
```
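    As a minimal, illustrative sketch, the `MyLRSchedule` example above can
    be made serializable by also defining `get_config`:
    ```python
    class MyLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
        def __init__(self, initial_learning_rate):
            self.initial_learning_rate = initial_learning_rate
        def __call__(self, step):
            return self.initial_learning_rate / (step + 1)
        def get_config(self):
            return {"initial_learning_rate": self.initial_learning_rate}
    ```
    The inherited `from_config` then rebuilds the schedule from that dict.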
"""
@abc.abstractmethod
def __call__(self, step):
raise NotImplementedError(
f"Learning rate schedule '{self.__class__.__name__}' "
"must override `__call__(self, step)`."
)
@abc.abstractmethod
def get_config(self):
raise NotImplementedError(
f"Learning rate schedule '{self.__class__.__name__}' "
"must override `get_config()` in order to be serializable."
)
@classmethod
def from_config(cls, config):
"""Instantiates a `LearningRateSchedule` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `LearningRateSchedule` instance.
"""
return cls(**config)
@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an exponential decay schedule.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies an exponential decay function
to an optimizer step, given a provided initial learning rate.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate * decay_rate ^ (step / decay_steps)
```
If the argument `staircase` is `True`, then `step / decay_steps` is
an integer division and the decayed learning rate follows a
staircase function.
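    For illustration, with `initial_learning_rate=1.0`, `decay_rate=0.5` and
    `decay_steps=10`, the formula above gives
    `decayed_learning_rate(20) = 1.0 * 0.5 ** (20 / 10) = 0.25`.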
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: When fitting a TF-Keras model, decay every 100000 steps with a base
of 0.96:
```python
initial_learning_rate = 0.1
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None,
):
"""Applies exponential decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
          staircase: Boolean. If `True`, decay the learning rate at
            discrete intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
"""
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or "ExponentialDecay") as name:
initial_learning_rate = tf.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate"
)
dtype = initial_learning_rate.dtype
decay_steps = tf.cast(self.decay_steps, dtype)
decay_rate = tf.cast(self.decay_rate, dtype)
global_step_recomp = tf.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = tf.floor(p)
return tf.multiply(
initial_learning_rate, tf.pow(decay_rate, p), name=name
)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name,
}
@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay")
class PiecewiseConstantDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a piecewise constant decay schedule.
The function returns a 1-arg callable to compute the piecewise constant
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries, values)
# Later, whenever we perform an optimization step, we pass in the step.
learning_rate = learning_rate_fn(step)
```
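    For instance, in the example above, `learning_rate_fn(100000)` evaluates
    to `1.0`, `learning_rate_fn(100001)` to `0.5`, and
    `learning_rate_fn(110001)` to `0.1` (as scalar tensors with those values).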
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as the boundary tensors.
The output of the 1-arg function that takes the `step`
is `values[0]` when `step <= boundaries[0]`,
`values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ...,
    and `values[-1]` when `step > boundaries[-1]`.
"""
def __init__(self, boundaries, values, name=None):
"""Piecewise constant from boundaries and interval values.
Args:
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as
the optimizer step.
values: A list of `Tensor`s or `float`s or `int`s that specifies the
values for the intervals defined by `boundaries`. It should have one
more element than `boundaries`, and all elements should have the
same type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Raises:
ValueError: if the number of elements in the lists do not match.
"""
super().__init__()
if len(boundaries) != len(values) - 1:
raise ValueError(
"The length of boundaries should be 1 less than the length of "
f"values. Received: boundaries={boundaries} of length "
f"{len(boundaries)}, and values={values} "
f"of length {len(values)}."
)
self.boundaries = boundaries
self.values = values
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or "PiecewiseConstant"):
boundaries = tf.nest.map_structure(
tf.convert_to_tensor, tf.nest.flatten(self.boundaries)
)
values = tf.nest.map_structure(
tf.convert_to_tensor, tf.nest.flatten(self.values)
)
x_recomp = tf.convert_to_tensor(step)
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We cast the boundaries to have the same type as the step
b = tf.cast(b, x_recomp.dtype.base_dtype)
boundaries[i] = b
pred_fn_pairs = []
pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
pred_fn_pairs.append(
(x_recomp > boundaries[-1], lambda: values[-1])
)
for low, high, v in zip(
boundaries[:-1], boundaries[1:], values[1:-1]
):
# Need to bind v here; can do this with lambda v=v: ...
pred = (x_recomp > low) & (x_recomp <= high)
pred_fn_pairs.append((pred, lambda v=v: v))
# The default isn't needed here because our conditions are mutually
# exclusive and exhaustive, but tf.case requires it.
default = lambda: values[0]
return tf.case(pred_fn_pairs, default, exclusive=True)
def get_config(self):
return {
"boundaries": self.boundaries,
"values": self.values,
"name": self.name,
}
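# Illustrative usage sketch; `_piecewise_constant_example` is a hypothetical
# helper, not part of the original module. A boundary step belongs to the
# earlier interval because the predicates above use `<=`.
def _piecewise_constant_example():
    schedule = PiecewiseConstantDecay(
        boundaries=[100000, 110000], values=[1.0, 0.5, 0.1]
    )
    print(float(schedule(100000)))  # 1.0 (step <= boundaries[0])
    print(float(schedule(100001)))  # 0.5
    print(float(schedule(120000)))  # 0.1 (step > boundaries[-1])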
@keras_export("keras.optimizers.schedules.PolynomialDecay")
class PolynomialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a polynomial decay schedule.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This schedule applies a polynomial decay function to an optimizer step,
given a provided `initial_learning_rate`, to reach an `end_learning_rate`
in the given `decay_steps`.
It requires a `step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training
step.
The schedule is a 1-arg callable that produces a decayed learning rate
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
    If `cycle` is True, then a multiple of `decay_steps` is used: the first
    one that is bigger than `step`.
```python
def decayed_learning_rate(step):
decay_steps = decay_steps * ceil(step / decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using
sqrt (i.e. power=0.5):
```python
...
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
starter_learning_rate,
decay_steps,
end_learning_rate,
power=0.5)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None,
):
"""Applies a polynomial decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a
Python number. The power of the polynomial. Defaults to `1.0`.
cycle: A boolean, whether it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
"""
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.cycle = cycle
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or "PolynomialDecay") as name:
initial_learning_rate = tf.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate"
)
dtype = initial_learning_rate.dtype
end_learning_rate = tf.cast(self.end_learning_rate, dtype)
power = tf.cast(self.power, dtype)
global_step_recomp = tf.cast(step, dtype)
decay_steps_recomp = tf.cast(self.decay_steps, dtype)
if self.cycle:
# Find the first multiple of decay_steps that is bigger than
# global_step. If global_step is zero set the multiplier to 1
multiplier = tf.where(
tf.equal(global_step_recomp, 0),
1.0,
tf.math.ceil(global_step_recomp / self.decay_steps),
)
decay_steps_recomp = tf.multiply(decay_steps_recomp, multiplier)
else:
# Make sure that the global_step used is not bigger than
# decay_steps.
global_step_recomp = tf.minimum(
global_step_recomp, decay_steps_recomp
)
p = tf.divide(global_step_recomp, decay_steps_recomp)
return tf.add(
tf.multiply(
initial_learning_rate - end_learning_rate,
tf.pow(1 - p, power),
),
end_learning_rate,
name=name,
)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"end_learning_rate": self.end_learning_rate,
"power": self.power,
"cycle": self.cycle,
"name": self.name,
}
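# Illustrative usage sketch; `_polynomial_decay_example` is a hypothetical
# helper, not part of the original module. It checks the endpoints of the
# sqrt-shaped decay from the docstring example above.
def _polynomial_decay_example():
    schedule = PolynomialDecay(
        initial_learning_rate=0.1,
        decay_steps=10000,
        end_learning_rate=0.01,
        power=0.5,
    )
    print(float(schedule(0)))  # 0.1
    print(float(schedule(10000)))  # 0.01
    print(float(schedule(20000)))  # still 0.01; step is clamped (cycle=False)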
@keras_export("keras.optimizers.schedules.InverseTimeDecay")
class InverseTimeDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an inverse time decay schedule.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies the inverse decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * step / decay_step)
```
or, if `staircase` is `True`, as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * floor(step / decay_step))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a TF-Keras model when decaying 1/t with a rate of 0.5:
```python
...
initial_learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate, decay_steps, decay_rate)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None,
):
"""Applies inverse time decay to the initial learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed
to continuous, fashion.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
"""
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or "InverseTimeDecay") as name:
initial_learning_rate = tf.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate"
)
dtype = initial_learning_rate.dtype
decay_steps = tf.cast(self.decay_steps, dtype)
decay_rate = tf.cast(self.decay_rate, dtype)
global_step_recomp = tf.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = tf.floor(p)
const = tf.cast(tf.constant(1), dtype)
denom = tf.add(const, tf.multiply(decay_rate, p))
return tf.divide(initial_learning_rate, denom, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name,
}
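# Illustrative usage sketch; `_inverse_time_decay_example` is a hypothetical
# helper, not part of the original module.
def _inverse_time_decay_example():
    schedule = InverseTimeDecay(
        initial_learning_rate=0.1, decay_steps=1.0, decay_rate=0.5
    )
    # lr(step) = 0.1 / (1 + 0.5 * step)
    print(float(schedule(0)))  # 0.1
    print(float(schedule(1)))  # ~0.0667
    print(float(schedule(4)))  # ~0.0333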
@keras_export(
"keras.optimizers.schedules.CosineDecay", "keras.experimental.CosineDecay"
)
class CosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay with optional warmup.
See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
SGDR: Stochastic Gradient Descent with Warm Restarts.
For the idea of a linear warmup of our learning rate,
see [Goyal et al.](https://arxiv.org/pdf/1706.02677.pdf).
When we begin training a model, we often want an initial increase in our
learning rate followed by a decay. If `warmup_target` is an int, this
schedule applies a linear increase per optimizer step to our learning rate
from `initial_learning_rate` to `warmup_target` for a duration of
`warmup_steps`. Afterwards, it applies a cosine decay function taking our
learning rate from `warmup_target` to `alpha` for a duration of
`decay_steps`. If `warmup_target` is None we skip warmup and our decay
will take our learning rate from `initial_learning_rate` to
`alpha x initial_learning_rate`. It requires a `step` value to compute
the learning rate. You can just pass a TensorFlow variable that you
increment at each training step.
The schedule is a 1-arg callable that produces a warmup followed by a
decayed learning rate when passed the current optimizer step. This can be
useful for changing the learning rate value across different invocations of
optimizer functions.
Our warmup is computed as:
```python
def warmup_learning_rate(step):
completed_fraction = step / warmup_steps
        total_delta = warmup_target - initial_learning_rate
return completed_fraction * total_delta + initial_learning_rate
```
And our decay is computed as:
```python
if warmup_target is None:
initial_decay_lr = initial_learning_rate
else:
initial_decay_lr = warmup_target
def decayed_learning_rate(step):
step = min(step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
return initial_decay_lr * decayed
```
Example usage without warmup:
```python
decay_steps = 1000
initial_learning_rate = 0.1
lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate, decay_steps)
```
Example usage with warmup:
```python
decay_steps = 1000
initial_learning_rate = 0
warmup_steps = 1000
target_learning_rate = 0.1
lr_warmup_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate, decay_steps, warmup_target=target_learning_rate,
warmup_steps=warmup_steps
)
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
alpha=0.0,
name=None,
warmup_target=None,
warmup_steps=0,
):
"""Applies cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python int. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python int.
Number of steps to decay over.
alpha: A scalar `float32` or `float64` `Tensor` or a Python int.
Minimum learning rate value for decay as a fraction of
`initial_learning_rate`.
name: String. Optional name of the operation. Defaults to
'CosineDecay'.
warmup_target: None or a scalar `float32` or `float64` `Tensor` or a
                Python int. The target learning rate for our warmup phase. It
                will be cast to the `initial_learning_rate` datatype. Setting
                it to None will skip warmup and begin the decay phase from
                `initial_learning_rate`. Otherwise the scheduler will warm up
                from `initial_learning_rate` to `warmup_target`.
warmup_steps: A scalar `int32` or `int64` `Tensor` or a Python int.
Number of steps to warmup over.
"""
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.alpha = alpha
self.name = name
self.warmup_steps = warmup_steps
self.warmup_target = warmup_target
def _decay_function(self, step, decay_steps, decay_from_lr, dtype):
with tf.name_scope(self.name or "CosineDecay"):
completed_fraction = step / decay_steps
tf_pi = tf.constant(math.pi, dtype=dtype)
cosine_decayed = 0.5 * (1.0 + tf.cos(tf_pi * completed_fraction))
decayed = (1 - self.alpha) * cosine_decayed + self.alpha
return tf.multiply(decay_from_lr, decayed)
def _warmup_function(
self, step, warmup_steps, warmup_target, initial_learning_rate
):
with tf.name_scope(self.name or "CosineDecay"):
completed_fraction = step / warmup_steps
total_step_delta = warmup_target - initial_learning_rate
return total_step_delta * completed_fraction + initial_learning_rate
def __call__(self, step):
with tf.name_scope(self.name or "CosineDecay"):
initial_learning_rate = tf.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate"
)
dtype = initial_learning_rate.dtype
decay_steps = tf.cast(self.decay_steps, dtype)
global_step_recomp = tf.cast(step, dtype)
if self.warmup_target is None:
global_step_recomp = tf.minimum(global_step_recomp, decay_steps)
return self._decay_function(
global_step_recomp,
decay_steps,
initial_learning_rate,
dtype,
)
warmup_target = tf.cast(self.warmup_target, dtype)
warmup_steps = tf.cast(self.warmup_steps, dtype)
global_step_recomp = tf.minimum(
global_step_recomp, decay_steps + warmup_steps
)
return tf.cond(
global_step_recomp < warmup_steps,
lambda: self._warmup_function(
global_step_recomp,
warmup_steps,
warmup_target,
initial_learning_rate,
),
lambda: self._decay_function(
global_step_recomp - warmup_steps,
decay_steps,
warmup_target,
dtype,
),
)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"alpha": self.alpha,
"name": self.name,
"warmup_target": self.warmup_target,
"warmup_steps": self.warmup_steps,
}
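# Illustrative usage sketch; `_cosine_decay_with_warmup_example` is a
# hypothetical helper, not part of the original module: a linear warmup from
# 0.0 to 0.1 over 1000 steps, then a cosine decay back towards 0 over the
# next 1000 steps.
def _cosine_decay_with_warmup_example():
    schedule = CosineDecay(
        initial_learning_rate=0.0,
        decay_steps=1000,
        warmup_target=0.1,
        warmup_steps=1000,
    )
    print(float(schedule(0)))  # 0.0
    print(float(schedule(500)))  # ~0.05, halfway through the warmup
    print(float(schedule(1000)))  # 0.1, warmup target reached; decay begins
    print(float(schedule(2000)))  # ~0.0, cosine decay finished (alpha=0)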
@keras_export(
"keras.optimizers.schedules.CosineDecayRestarts",
"keras.experimental.CosineDecayRestarts",
)
class CosineDecayRestarts(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay schedule with restarts.
See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
SGDR: Stochastic Gradient Descent with Warm Restarts.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies a cosine decay function with
restarts to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
    The learning rate multiplier first decays
    from 1 to `alpha` over `first_decay_steps` steps. Then, a warm
    restart is performed. Each new warm restart runs for `t_mul` times more
    steps and starts with `m_mul` times the previous period's initial
    learning rate.
Example usage:
```python
first_decay_steps = 1000
lr_decayed_fn = (
tf.keras.optimizers.schedules.CosineDecayRestarts(
initial_learning_rate,
first_decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name=None,
):
"""Applies cosine decay with restarts to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a
Python number. The initial learning rate.
first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
number. Number of steps to decay over.
t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the number of iterations in the i-th period.
m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the initial learning rate of the i-th period.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of the
initial_learning_rate.
name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
"""
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.first_decay_steps = first_decay_steps
self._t_mul = t_mul
self._m_mul = m_mul
self.alpha = alpha
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or "SGDRDecay") as name:
initial_learning_rate = tf.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate"
)
dtype = initial_learning_rate.dtype
first_decay_steps = tf.cast(self.first_decay_steps, dtype)
alpha = tf.cast(self.alpha, dtype)
t_mul = tf.cast(self._t_mul, dtype)
m_mul = tf.cast(self._m_mul, dtype)
global_step_recomp = tf.cast(step, dtype)
completed_fraction = global_step_recomp / first_decay_steps
def compute_step(completed_fraction, geometric=False):
"""Helper for `cond` operation."""
if geometric:
i_restart = tf.floor(
tf.math.log(1.0 - completed_fraction * (1.0 - t_mul))
/ tf.math.log(t_mul)
)
sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
completed_fraction = (
completed_fraction - sum_r
) / t_mul**i_restart
else:
i_restart = tf.floor(completed_fraction)
completed_fraction -= i_restart
return i_restart, completed_fraction
i_restart, completed_fraction = tf.cond(
tf.equal(t_mul, 1.0),
lambda: compute_step(completed_fraction, geometric=False),
lambda: compute_step(completed_fraction, geometric=True),
)
m_fac = m_mul**i_restart
cosine_decayed = (
0.5
* m_fac
* (
1.0
+ tf.cos(
tf.constant(math.pi, dtype=dtype) * completed_fraction
)
)
)
decayed = (1 - alpha) * cosine_decayed + alpha
return tf.multiply(initial_learning_rate, decayed, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"first_decay_steps": self.first_decay_steps,
"t_mul": self._t_mul,
"m_mul": self._m_mul,
"alpha": self.alpha,
"name": self.name,
}
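# Illustrative usage sketch; `_cosine_decay_restarts_example` is a
# hypothetical helper, not part of the original module. With the defaults
# (t_mul=2.0, m_mul=1.0) the rate decays to ~0 over the first 1000 steps and
# then restarts at the full initial rate for a period twice as long.
def _cosine_decay_restarts_example():
    schedule = CosineDecayRestarts(
        initial_learning_rate=0.1, first_decay_steps=1000
    )
    print(float(schedule(0)))  # 0.1
    print(float(schedule(999)))  # close to 0.0, end of the first period
    print(float(schedule(1000)))  # 0.1 again, right after the warm restart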
# Note: this code is still used by V1 APIs.
class LinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a linear cosine decay schedule.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
return initial_learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.LinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None,
):
"""Applies linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
"""
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or "LinearCosineDecay") as name:
initial_learning_rate = tf.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate"
)
dtype = initial_learning_rate.dtype
decay_steps = tf.cast(self.decay_steps, dtype)
num_periods = tf.cast(self.num_periods, dtype)
alpha = tf.cast(self.alpha, dtype)
beta = tf.cast(self.beta, dtype)
global_step_recomp = tf.cast(step, dtype)
global_step_recomp = tf.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + tf.cos(tf.constant(math.pi, dtype=dtype) * fraction)
)
linear_cosine_decayed = (
alpha + linear_decayed
) * cosine_decayed + beta
return tf.multiply(
initial_learning_rate, linear_cosine_decayed, name=name
)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name,
}
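# Illustrative usage sketch; `_linear_cosine_decay_example` is a hypothetical
# helper, not part of the original module. With the defaults the multiplier
# starts at (alpha + 1) * 1 + beta and ends at beta, so the final learning
# rate is initial_learning_rate * beta.
def _linear_cosine_decay_example():
    schedule = LinearCosineDecay(initial_learning_rate=0.1, decay_steps=1000)
    print(float(schedule(0)))  # ~0.1001 (0.1 * (1.0 + beta))
    print(float(schedule(1000)))  # 0.0001 (0.1 * beta)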
# Note: this code is still used by V1 APIs.
class NoisyLinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a noisy linear cosine decay schedule.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a noisy linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
        linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
return initial_learning_rate * decayed
```
    where `eps_t` is 0-centered Gaussian noise with variance
    `initial_variance / (1 + global_step) ** variance_decay`.
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.NoisyLinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
initial_variance=1.0,
variance_decay=0.55,
num_periods=0.5,
alpha=0.0,
beta=0.001,
seed=None,
name=None,
):
"""Applies noisy linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
initial_variance: initial variance for the noise. See computation
above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
seed: Integer, optional random seed to enable deterministic behavior.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
"""
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.initial_variance = initial_variance
self.variance_decay = variance_decay
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.seed = seed
self.name = name
self._random_generator = backend.RandomGenerator(seed)
def __call__(self, step):
with tf.name_scope(self.name or "NoisyLinearCosineDecay") as name:
initial_learning_rate = tf.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate"
)
dtype = initial_learning_rate.dtype
decay_steps = tf.cast(self.decay_steps, dtype)
initial_variance = tf.cast(self.initial_variance, dtype)
variance_decay = tf.cast(self.variance_decay, dtype)
num_periods = tf.cast(self.num_periods, dtype)
alpha = tf.cast(self.alpha, dtype)
beta = tf.cast(self.beta, dtype)
global_step_recomp = tf.cast(step, dtype)
global_step_recomp = tf.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
variance = initial_variance / (
tf.pow(1.0 + global_step_recomp, variance_decay)
)
std = tf.sqrt(variance)
noisy_linear_decayed = (
linear_decayed
+ self._random_generator.random_normal(
linear_decayed.shape, stddev=std
)
)
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + tf.cos(tf.constant(math.pi, dtype=dtype) * fraction)
)
noisy_linear_cosine_decayed = (
alpha + noisy_linear_decayed
) * cosine_decayed + beta
return tf.multiply(
initial_learning_rate, noisy_linear_cosine_decayed, name=name
)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"initial_variance": self.initial_variance,
"variance_decay": self.variance_decay,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"seed": self.seed,
"name": self.name,
}
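# Illustrative usage sketch; `_noisy_linear_cosine_decay_example` is a
# hypothetical helper, not part of the original module. The schedule follows
# the same shape as LinearCosineDecay, with Gaussian noise added to the
# linear term; a fixed seed keeps the noise reproducible across runs.
def _noisy_linear_cosine_decay_example():
    schedule = NoisyLinearCosineDecay(
        initial_learning_rate=0.1, decay_steps=1000, seed=42
    )
    # At the midpoint the cosine term is 0.5, so the value is ~0.025 plus a
    # noise contribution whose variance shrinks as the step grows.
    print(float(schedule(500)))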
@keras_export("keras.optimizers.schedules.serialize")
def serialize(learning_rate_schedule, use_legacy_format=False):
"""Serializes a `LearningRateSchedule` into a JSON-compatible dict.
Args:
        learning_rate_schedule: The `LearningRateSchedule` object to serialize.
        use_legacy_format: Boolean, whether to serialize with the legacy
            `legacy_serialization` format instead of `serialization_lib`.
            Defaults to `False`.
Returns:
A JSON-serializable dict representing the object's config.
Example:
>>> lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
... 0.1, decay_steps=100000, decay_rate=0.96, staircase=True)
>>> tf.keras.optimizers.schedules.serialize(lr_schedule)
{'module': 'keras.optimizers.schedules',
'class_name': 'ExponentialDecay', 'config': {...},
'registered_name': None}
"""
if use_legacy_format:
return legacy_serialization.serialize_keras_object(
learning_rate_schedule
)
return serialization_lib.serialize_keras_object(learning_rate_schedule)
@keras_export("keras.optimizers.schedules.deserialize")
def deserialize(config, custom_objects=None, use_legacy_format=False):
"""Instantiates a `LearningRateSchedule` object from a serialized form.
Args:
config: The serialized form of the `LearningRateSchedule`.
Dictionary of the form {'class_name': str, 'config': dict}.
        custom_objects: A dictionary mapping class names (or function names)
            of custom (non-Keras) objects to classes/functions.
        use_legacy_format: Boolean, whether to deserialize with the legacy
            `legacy_serialization` format instead of `serialization_lib`.
            Defaults to `False`.
Returns:
A `LearningRateSchedule` object.
Example:
```python
# Configuration for PolynomialDecay
config = {
'class_name': 'PolynomialDecay',
'config': {'cycle': False,
'decay_steps': 10000,
'end_learning_rate': 0.01,
'initial_learning_rate': 0.1,
'name': None,
'power': 0.5}}
lr_schedule = tf.keras.optimizers.schedules.deserialize(config)
```
"""
if use_legacy_format:
return legacy_serialization.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="decay",
)
return serialization_lib.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="decay",
)
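# Illustrative usage sketch; `_schedule_round_trip_example` is a hypothetical
# helper, not part of the original module: a serialize / deserialize round
# trip, which is how optimizers persist their schedules in configs.
def _schedule_round_trip_example():
    schedule = ExponentialDecay(
        initial_learning_rate=0.1, decay_steps=1000, decay_rate=0.96
    )
    config = serialize(schedule)
    restored = deserialize(config)
    print(float(restored(500)) == float(schedule(500)))  # True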
| tf-keras/tf_keras/optimizers/schedules/learning_rate_schedule.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/schedules/learning_rate_schedule.py",
"repo_id": "tf-keras",
"token_count": 20387
} | 190 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for text input preprocessing.
Deprecated: `tf.keras.preprocessing.text` APIs are not recommended for new code.
Prefer `tf.keras.utils.text_dataset_from_directory` and
`tf.keras.layers.TextVectorization` which provide a more efficient approach
for preprocessing text input. For an introduction to these APIs, see
the [text loading tutorial]
(https://www.tensorflow.org/tutorials/load_data/text)
and [preprocessing layer guide]
(https://www.tensorflow.org/guide/keras/preprocessing_layers).
"""
import collections
import hashlib
import json
import warnings
import numpy as np
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.preprocessing.text.text_to_word_sequence")
def text_to_word_sequence(
input_text,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=" ",
):
r"""Converts a text to a sequence of words (or tokens).
Deprecated: `tf.keras.preprocessing.text.text_to_word_sequence` does not
operate on tensors and is not recommended for new code. Prefer
`tf.strings.regex_replace` and `tf.strings.split` which provide equivalent
functionality and accept `tf.Tensor` input. For an overview of text handling
in Tensorflow, see the [text loading tutorial]
(https://www.tensorflow.org/tutorials/load_data/text).
This function transforms a string of text into a list of words
while ignoring `filters` which include punctuations by default.
>>> sample_text = 'This is a sample sentence.'
>>> tf.keras.preprocessing.text.text_to_word_sequence(sample_text)
['this', 'is', 'a', 'sample', 'sentence']
Args:
input_text: Input text (string).
filters: list (or concatenation) of characters to filter out, such as
punctuation. Default: ``'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\t\\n'``,
includes basic punctuation, tabs, and newlines.
lower: boolean. Whether to convert the input to lowercase.
split: str. Separator for word splitting.
Returns:
A list of words (or tokens).
"""
if lower:
input_text = input_text.lower()
translate_dict = {c: split for c in filters}
translate_map = str.maketrans(translate_dict)
input_text = input_text.translate(translate_map)
seq = input_text.split(split)
return [i for i in seq if i]
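# Illustrative usage sketch; `_text_to_word_sequence_example` is a
# hypothetical helper, not part of the original module. The default filters
# strip punctuation but keep apostrophes, and splitting is done on spaces.
def _text_to_word_sequence_example():
    tokens = text_to_word_sequence("Hello, world! It's sunny.")
    print(tokens)  # ['hello', 'world', "it's", 'sunny']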
@keras_export("keras.preprocessing.text.one_hot")
def one_hot(
input_text,
n,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=" ",
analyzer=None,
):
r"""One-hot encodes a text into a list of word indexes of size `n`.
    Deprecated: `tf.keras.preprocessing.text.one_hot` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.Hashing` with `output_mode='one_hot'` which provides
equivalent functionality through a layer which accepts `tf.Tensor` input.
See the [preprocessing layer guide]
(https://www.tensorflow.org/guide/keras/preprocessing_layers) for an
overview of preprocessing layers.
This function receives as input a string of text and returns a
list of encoded integers each corresponding to a word (or token)
in the given input string.
Args:
input_text: Input text (string).
n: int. Size of vocabulary.
filters: list (or concatenation) of characters to filter out, such as
            punctuation. Default:
            ``'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\t\\n'``,
            includes basic punctuation, tabs, and newlines.
lower: boolean. Whether to set the text to lowercase.
split: str. Separator for word splitting.
analyzer: function. Custom analyzer to split the text
Returns:
        List of integers in `[1, n]`. Each integer encodes a word
        (uniqueness not guaranteed).
"""
return hashing_trick(
input_text,
n,
hash_function=hash,
filters=filters,
lower=lower,
split=split,
analyzer=analyzer,
)
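# Illustrative usage sketch; `_one_hot_example` is a hypothetical helper, not
# part of the original module. `one_hot` is `hashing_trick` with the built-in
# `hash`, so the indices are only stable within a single Python process.
def _one_hot_example():
    encoded = one_hot("the cat sat on the mat", n=50)
    print(len(encoded))  # 6, one index per word
    print(encoded[0] == encoded[4])  # True, both indices come from "the"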
@keras_export("keras.preprocessing.text.hashing_trick")
def hashing_trick(
text,
n,
hash_function=None,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=" ",
analyzer=None,
):
r"""Converts a text to a sequence of indexes in a fixed-size hashing space.
    Deprecated: `tf.keras.preprocessing.text.hashing_trick` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.Hashing` which provides equivalent functionality through a
layer which accepts `tf.Tensor` input. See the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers) for an
overview of preprocessing layers.
Args:
text: Input text (string).
n: Dimension of the hashing space.
        hash_function: When `None`, uses the Python `hash` function. Can be
            'md5' or any function that takes a string as input and returns an
            int. Note that 'hash' is not a stable hashing function, so it is
            not consistent across different runs, while 'md5' is a stable
            hashing function. Defaults to `None`.
filters: list (or concatenation) of characters to filter out, such as
punctuation. Default: ``!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\\t\\n``,
includes basic punctuation, tabs, and newlines.
lower: boolean. Whether to set the text to lowercase.
split: str. Separator for word splitting.
analyzer: function. Custom analyzer to split the text
Returns:
A list of integer word indices (unicity non-guaranteed).
`0` is a reserved index that won't be assigned to any word.
Two or more words may be assigned to the same index, due to possible
collisions by the hashing function.
The [probability](
https://en.wikipedia.org/wiki/Birthday_problem#Probability_table)
of a collision is in relation to the dimension of the hashing space and
the number of distinct objects.
"""
if hash_function is None:
hash_function = hash
elif hash_function == "md5":
hash_function = lambda w: int(hashlib.md5(w.encode()).hexdigest(), 16)
if analyzer is None:
seq = text_to_word_sequence(
text, filters=filters, lower=lower, split=split
)
else:
seq = analyzer(text)
return [(hash_function(w) % (n - 1) + 1) for w in seq]
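# Illustrative usage sketch; `_hashing_trick_example` is a hypothetical
# helper, not part of the original module. With the 'md5' hash the mapping is
# deterministic across runs, unlike the default `hash`.
def _hashing_trick_example():
    a = hashing_trick("cats and dogs", n=20, hash_function="md5")
    b = hashing_trick("cats and dogs", n=20, hash_function="md5")
    print(a == b)  # True, md5 is a stable hash
    print(all(1 <= i <= 19 for i in a))  # True, index 0 is reserved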
@keras_export("keras.preprocessing.text.Tokenizer")
class Tokenizer(object):
"""Text tokenization utility class.
Deprecated: `tf.keras.preprocessing.text.Tokenizer` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.TextVectorization` which provides equivalent functionality
through a layer which accepts `tf.Tensor` input. See the
[text loading tutorial](https://www.tensorflow.org/tutorials/load_data/text)
for an overview of the layer and text handling in tensorflow.
    This class allows vectorizing a text corpus, by turning each
    text into either a sequence of integers (each integer being the index
    of a token in a dictionary) or into a vector where the coefficient
    for each token could be binary, based on word count, or based on tf-idf.
By default, all punctuation is removed, turning the texts into
space-separated sequences of words
(words may include the `'` character). These sequences are then
split into lists of tokens. They will then be indexed or vectorized.
`0` is a reserved index that won't be assigned to any word.
Args:
num_words: the maximum number of words to keep, based
on word frequency. Only the most common `num_words-1` words will
be kept.
filters: a string where each element is a character that will be
filtered from the texts. The default is all punctuation, plus
tabs and line breaks, minus the `'` character.
lower: boolean. Whether to convert the texts to lowercase.
split: str. Separator for word splitting.
char_level: if True, every character will be treated as a token.
        oov_token: if given, it will be added to word_index and used to
            replace out-of-vocabulary words during texts_to_sequences calls.
analyzer: function. Custom analyzer to split the text.
The default analyzer is text_to_word_sequence
"""
def __init__(
self,
num_words=None,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=" ",
char_level=False,
oov_token=None,
analyzer=None,
**kwargs
):
# Legacy support
if "nb_words" in kwargs:
warnings.warn(
"The `nb_words` argument in `Tokenizer` "
"has been renamed `num_words`."
)
num_words = kwargs.pop("nb_words")
document_count = kwargs.pop("document_count", 0)
if kwargs:
raise TypeError("Unrecognized keyword arguments: " + str(kwargs))
self.word_counts = collections.OrderedDict()
self.word_docs = collections.defaultdict(int)
self.filters = filters
self.split = split
self.lower = lower
self.num_words = num_words
self.document_count = document_count
self.char_level = char_level
self.oov_token = oov_token
self.index_docs = collections.defaultdict(int)
self.word_index = {}
self.index_word = {}
self.analyzer = analyzer
def fit_on_texts(self, texts):
"""Updates internal vocabulary based on a list of texts.
In the case where texts contains lists,
we assume each entry of the lists to be a token.
Required before using `texts_to_sequences` or `texts_to_matrix`.
Args:
texts: can be a list of strings,
a generator of strings (for memory-efficiency),
or a list of list of strings.
"""
for text in texts:
self.document_count += 1
if self.char_level or isinstance(text, list):
if self.lower:
if isinstance(text, list):
text = [text_elem.lower() for text_elem in text]
else:
text = text.lower()
seq = text
else:
if self.analyzer is None:
seq = text_to_word_sequence(
text,
filters=self.filters,
lower=self.lower,
split=self.split,
)
else:
seq = self.analyzer(text)
for w in seq:
if w in self.word_counts:
self.word_counts[w] += 1
else:
self.word_counts[w] = 1
for w in set(seq):
# In how many documents each word occurs
self.word_docs[w] += 1
wcounts = list(self.word_counts.items())
wcounts.sort(key=lambda x: x[1], reverse=True)
# forcing the oov_token to index 1 if it exists
if self.oov_token is None:
sorted_voc = []
else:
sorted_voc = [self.oov_token]
sorted_voc.extend(wc[0] for wc in wcounts)
# note that index 0 is reserved, never assigned to an existing word
self.word_index = dict(
zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))
)
self.index_word = {c: w for w, c in self.word_index.items()}
for w, c in list(self.word_docs.items()):
self.index_docs[self.word_index[w]] = c
def fit_on_sequences(self, sequences):
"""Updates internal vocabulary based on a list of sequences.
Required before using `sequences_to_matrix`
(if `fit_on_texts` was never called).
Args:
sequences: A list of sequence.
A "sequence" is a list of integer word indices.
"""
self.document_count += len(sequences)
for seq in sequences:
seq = set(seq)
for i in seq:
self.index_docs[i] += 1
def texts_to_sequences(self, texts):
"""Transforms each text in texts to a sequence of integers.
Only top `num_words-1` most frequent words will be taken into account.
Only words known by the tokenizer will be taken into account.
Args:
texts: A list of texts (strings).
Returns:
A list of sequences.
"""
return list(self.texts_to_sequences_generator(texts))
def texts_to_sequences_generator(self, texts):
"""Transforms each text in `texts` to a sequence of integers.
Each item in texts can also be a list,
in which case we assume each item of that list to be a token.
Only top `num_words-1` most frequent words will be taken into account.
Only words known by the tokenizer will be taken into account.
Args:
texts: A list of texts (strings).
Yields:
Yields individual sequences.
"""
num_words = self.num_words
oov_token_index = self.word_index.get(self.oov_token)
for text in texts:
if self.char_level or isinstance(text, list):
if self.lower:
if isinstance(text, list):
text = [text_elem.lower() for text_elem in text]
else:
text = text.lower()
seq = text
else:
if self.analyzer is None:
seq = text_to_word_sequence(
text,
filters=self.filters,
lower=self.lower,
split=self.split,
)
else:
seq = self.analyzer(text)
vect = []
for w in seq:
i = self.word_index.get(w)
if i is not None:
if num_words and i >= num_words:
if oov_token_index is not None:
vect.append(oov_token_index)
else:
vect.append(i)
elif self.oov_token is not None:
vect.append(oov_token_index)
yield vect
def sequences_to_texts(self, sequences):
"""Transforms each sequence into a list of text.
Only top `num_words-1` most frequent words will be taken into account.
Only words known by the tokenizer will be taken into account.
Args:
sequences: A list of sequences (list of integers).
Returns:
A list of texts (strings)
"""
return list(self.sequences_to_texts_generator(sequences))
def sequences_to_texts_generator(self, sequences):
"""Transforms each sequence in `sequences` to a list of texts(strings).
Each sequence has to a list of integers.
In other words, sequences should be a list of sequences
Only top `num_words-1` most frequent words will be taken into account.
Only words known by the tokenizer will be taken into account.
Args:
sequences: A list of sequences.
Yields:
Yields individual texts.
"""
num_words = self.num_words
oov_token_index = self.word_index.get(self.oov_token)
for seq in sequences:
vect = []
for num in seq:
word = self.index_word.get(num)
if word is not None:
if num_words and num >= num_words:
if oov_token_index is not None:
vect.append(self.index_word[oov_token_index])
else:
vect.append(word)
elif self.oov_token is not None:
vect.append(self.index_word[oov_token_index])
vect = " ".join(vect)
yield vect
def texts_to_matrix(self, texts, mode="binary"):
"""Convert a list of texts to a Numpy matrix.
Args:
texts: list of strings.
mode: one of "binary", "count", "tfidf", "freq".
Returns:
A Numpy matrix.
"""
sequences = self.texts_to_sequences(texts)
return self.sequences_to_matrix(sequences, mode=mode)
def sequences_to_matrix(self, sequences, mode="binary"):
"""Converts a list of sequences into a Numpy matrix.
Args:
sequences: list of sequences
(a sequence is a list of integer word indices).
mode: one of "binary", "count", "tfidf", "freq"
Returns:
A Numpy matrix.
Raises:
            ValueError: In case of invalid `mode` argument,
                or if the Tokenizer has not yet been fit on sample data.
"""
if not self.num_words:
if self.word_index:
num_words = len(self.word_index) + 1
else:
raise ValueError(
"Specify a dimension (`num_words` argument), "
"or fit on some text data first."
)
else:
num_words = self.num_words
if mode == "tfidf" and not self.document_count:
raise ValueError(
"Fit the Tokenizer on some data before using tfidf mode."
)
x = np.zeros((len(sequences), num_words))
for i, seq in enumerate(sequences):
if not seq:
continue
counts = collections.defaultdict(int)
for j in seq:
if j >= num_words:
continue
counts[j] += 1
for j, c in list(counts.items()):
if mode == "count":
x[i][j] = c
elif mode == "freq":
x[i][j] = c / len(seq)
elif mode == "binary":
x[i][j] = 1
elif mode == "tfidf":
# Use weighting scheme 2 in
# https://en.wikipedia.org/wiki/Tf%E2%80%93idf
tf = 1 + np.log(c)
idf = np.log(
1
+ self.document_count / (1 + self.index_docs.get(j, 0))
)
x[i][j] = tf * idf
else:
raise ValueError("Unknown vectorization mode:", mode)
return x
def get_config(self):
"""Returns the tokenizer configuration as Python dictionary.
The word count dictionaries used by the tokenizer get serialized
into plain JSON, so that the configuration can be read by other
projects.
Returns:
A Python dictionary with the tokenizer configuration.
"""
json_word_counts = json.dumps(self.word_counts)
json_word_docs = json.dumps(self.word_docs)
json_index_docs = json.dumps(self.index_docs)
json_word_index = json.dumps(self.word_index)
json_index_word = json.dumps(self.index_word)
return {
"num_words": self.num_words,
"filters": self.filters,
"lower": self.lower,
"split": self.split,
"char_level": self.char_level,
"oov_token": self.oov_token,
"document_count": self.document_count,
"word_counts": json_word_counts,
"word_docs": json_word_docs,
"index_docs": json_index_docs,
"index_word": json_index_word,
"word_index": json_word_index,
}
def to_json(self, **kwargs):
"""Returns a JSON string containing the tokenizer configuration.
To load a tokenizer from a JSON string, use
`keras.preprocessing.text.tokenizer_from_json(json_string)`.
Args:
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
Returns:
A JSON string containing the tokenizer configuration.
"""
config = self.get_config()
tokenizer_config = {
"class_name": self.__class__.__name__,
"config": config,
}
return json.dumps(tokenizer_config, **kwargs)
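# Illustrative usage sketch; `_tokenizer_example` is a hypothetical helper,
# not part of the original module: the basic fit / transform workflow.
def _tokenizer_example():
    tokenizer = Tokenizer(num_words=10)
    tokenizer.fit_on_texts(["the cat sat", "the dog sat"])
    # Indices are assigned by descending frequency, starting at 1
    # (0 is reserved), e.g. {'the': 1, 'sat': 2, 'cat': 3, 'dog': 4}.
    print(tokenizer.word_index)
    print(tokenizer.texts_to_sequences(["the cat sat"]))  # e.g. [[1, 3, 2]]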
@keras_export("keras.preprocessing.text.tokenizer_from_json")
def tokenizer_from_json(json_string):
"""Parses a JSON tokenizer configuration and returns a tokenizer instance.
Deprecated: `tf.keras.preprocessing.text.Tokenizer` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.TextVectorization` which provides equivalent functionality
through a layer which accepts `tf.Tensor` input. See the
[text loading tutorial](https://www.tensorflow.org/tutorials/load_data/text)
for an overview of the layer and text handling in tensorflow.
Args:
json_string: JSON string encoding a tokenizer configuration.
Returns:
A TF-Keras Tokenizer instance
"""
tokenizer_config = json.loads(json_string)
config = tokenizer_config.get("config")
word_counts = json.loads(config.pop("word_counts"))
word_docs = json.loads(config.pop("word_docs"))
index_docs = json.loads(config.pop("index_docs"))
# Integer indexing gets converted to strings with json.dumps()
index_docs = {int(k): v for k, v in index_docs.items()}
index_word = json.loads(config.pop("index_word"))
index_word = {int(k): v for k, v in index_word.items()}
word_index = json.loads(config.pop("word_index"))
tokenizer = Tokenizer(**config)
tokenizer.word_counts = word_counts
tokenizer.word_docs = word_docs
tokenizer.index_docs = index_docs
tokenizer.word_index = word_index
tokenizer.index_word = index_word
return tokenizer
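# Illustrative usage sketch; `_tokenizer_json_round_trip_example` is a
# hypothetical helper, not part of the original module: a to_json /
# tokenizer_from_json round trip preserves the learned vocabulary.
def _tokenizer_json_round_trip_example():
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(["the cat sat on the mat"])
    restored = tokenizer_from_json(tokenizer.to_json())
    print(restored.word_index == tokenizer.word_index)  # True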
| tf-keras/tf_keras/preprocessing/text.py/0 | {
"file_path": "tf-keras/tf_keras/preprocessing/text.py",
"repo_id": "tf-keras",
"token_count": 10038
} | 191 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras model saving code."""
import collections
import os
import pathlib
import shutil
import tempfile
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import losses
from tf_keras import optimizers
from tf_keras.engine import functional
from tf_keras.engine import sequential
from tf_keras.feature_column import dense_features
from tf_keras.feature_column import sequence_feature_column as ksfc
from tf_keras.layers import core
from tf_keras.optimizers import optimizer_v1
from tf_keras.premade_models.linear import LinearModel
from tf_keras.saving import object_registration
from tf_keras.saving.legacy import model_config
from tf_keras.saving.legacy import save
from tf_keras.saving.legacy import serialization
from tf_keras.saving.legacy.saved_model import utils as saved_model_utils
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
try:
import h5py
except ImportError:
h5py = None
class TestSaveModel(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self.model = test_utils.get_small_sequential_mlp(1, 2, 3)
self.subclassed_model = test_utils.get_small_subclass_mlp(1, 2)
def assert_h5_format(self, path):
if h5py is not None:
self.assertTrue(
h5py.is_hdf5(path),
f"Model saved at path {path} is not a valid hdf5 file.",
)
def assert_saved_model(self, path):
tf.__internal__.saved_model.parse_saved_model(path)
@test_utils.run_v2_only
def test_load_file_not_found(self):
path = pathlib.Path(self.get_temp_dir()) / "does_not_exist"
with self.assertRaisesRegex(IOError, "No file or directory found at"):
save.load_model(path)
@test_utils.run_v2_only
def test_save_format_defaults(self):
path = os.path.join(self.get_temp_dir(), "model_path")
save.save_model(self.model, path)
self.assert_saved_model(path)
@test_utils.run_v2_only
def test_save_format_defaults_pathlib(self):
path = pathlib.Path(self.get_temp_dir()) / "model_path"
save.save_model(self.model, path)
self.assert_saved_model(path)
@test_utils.run_v2_only
def test_save_hdf5(self):
path = os.path.join(self.get_temp_dir(), "model")
save.save_model(self.model, path, save_format="h5")
self.assert_h5_format(path)
with self.assertRaisesRegex(
NotImplementedError,
"requires the model to be a Functional model "
"or a Sequential model.",
):
save.save_model(self.subclassed_model, path, save_format="h5")
@test_utils.run_v2_only
def test_save_load_hdf5_pathlib(self):
path = pathlib.Path(self.get_temp_dir()) / "model"
save.save_model(self.model, path, save_format="h5")
save.load_model(path)
@test_utils.run_v2_only
def test_save_tf(self):
path = os.path.join(self.get_temp_dir(), "model")
save.save_model(self.model, path, save_format="tf")
self.assert_saved_model(path)
with self.assertRaisesRegex(
ValueError,
r"Model.*cannot be saved.*as opposed to `model.call\(\).*",
):
save.save_model(self.subclassed_model, path, save_format="tf")
self.subclassed_model.predict(np.random.random((3, 5)))
save.save_model(self.subclassed_model, path, save_format="tf")
self.assert_saved_model(path)
@test_utils.run_v2_only
def test_save_load_tf_string(self):
path = os.path.join(self.get_temp_dir(), "model")
save.save_model(self.model, path, save_format="tf")
save.load_model(path)
@test_utils.run_v2_only
def test_save_load_tf_pathlib(self):
path = pathlib.Path(self.get_temp_dir()) / "model"
save.save_model(self.model, path, save_format="tf")
save.load_model(path)
@test_utils.run_v2_only
def test_save_load_weights_tf_pathlib(self):
path = pathlib.Path(self.get_temp_dir()) / "model"
self.model.save_weights(path, save_format="tf")
self.model.load_weights(path)
@test_utils.run_v2_only
def test_save_load_weights_hdf5_pathlib(self):
path = pathlib.Path(self.get_temp_dir()) / "model"
self.model.save_weights(path, save_format="h5")
self.model.load_weights(path)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_saving_h5_for_rnn_layers(self):
# See https://github.com/tensorflow/tensorflow/issues/35731 for details.
inputs = keras.Input([10, 91], name="train_input")
rnn_layers = [
keras.layers.LSTMCell(
size, recurrent_dropout=0, name="rnn_cell%d" % i
)
for i, size in enumerate([512, 512])
]
rnn_output = keras.layers.RNN(
rnn_layers, return_sequences=True, name="rnn_layer"
)(inputs)
pred_feat = keras.layers.Dense(91, name="prediction_features")(
rnn_output
)
pred = keras.layers.Softmax()(pred_feat)
model = keras.Model(inputs=[inputs], outputs=[pred, pred_feat])
path = os.path.join(self.get_temp_dir(), "model_path.h5")
model.save(path)
# Make sure the variable name is unique.
self.assertNotEqual(
rnn_layers[0].kernel.name, rnn_layers[1].kernel.name
)
self.assertIn("rnn_cell1", rnn_layers[1].kernel.name)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_saving_optimizer_weights(self):
class MyModel(keras.Model):
def __init__(self):
super().__init__()
self.layer = keras.layers.Dense(1)
def call(self, x):
return self.layer(x)
path = os.path.join(self.get_temp_dir(), "weights_path")
x, y = np.ones((10, 10)), np.ones((10, 1))
model = MyModel()
model.compile("rmsprop", loss="bce")
model.train_on_batch(x, y)
model.reset_metrics()
model.save_weights(path, save_format="tf")
batch_loss = model.train_on_batch(x, y)
new_model = MyModel()
new_model.compile("rmsprop", loss="bce")
new_model.train_on_batch(x, y)
new_model.reset_metrics()
new_model.load_weights(path)
new_batch_loss = new_model.train_on_batch(x, y)
self.assertAllClose(batch_loss, new_batch_loss)
@test_combinations.generate(
test_combinations.combine(mode=["eager", "graph"])
)
def test_save_include_optimizer_false(self):
def get_variables(file_name):
reader = tf.train.load_checkpoint(
os.path.join(file_name, "variables/variables")
)
shape_from_key = reader.get_variable_to_shape_map()
return sorted(shape_from_key.keys())
path = os.path.join(self.get_temp_dir(), "no_optimizer")
x, y = np.ones((10, 10)), np.ones((10, 1))
model = keras.models.Sequential()
model.add(keras.layers.Dense(1))
model.compile("adam", loss="mse")
model.train_on_batch(x, y)
model.save(path, save_format="tf", include_optimizer=False)
variables = get_variables(path)
for v in variables:
self.assertNotIn("optimizer", v)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_saving_model_with_custom_object(self):
with object_registration.custom_object_scope(), self.cached_session():
@object_registration.register_keras_serializable()
class CustomLoss(losses.MeanSquaredError):
pass
model = sequential.Sequential(
[core.Dense(units=1, input_shape=(1,))]
)
model.compile(optimizer="sgd", loss=CustomLoss())
model.fit(np.zeros([10, 1]), np.zeros([10, 1]))
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, "saving")
model.save(filepath)
# Make sure the model can be correctly load back.
_ = save.load_model(filepath, compile=True)
def test_saving_model_with_name_conflict(self):
class Sequential(keras.Model):
def __init__(self):
super().__init__()
self.layer = keras.layers.Dense(1)
def call(self, x):
return self.layer(x)
model = Sequential()
model(tf.ones((10, 10)))
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, "Sequential")
with self.assertLogs() as logs:
model.save(filepath, save_format="tf")
expected_substring = (
"has the same name 'Sequential' as a built-in TF-Keras"
)
matched = [log for log in logs.output if expected_substring in log]
self.assertNotEmpty(matched)
def test_saving_built_in_model(self):
model = LinearModel()
model(tf.constant([[5.0]]))
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, "LinearModel")
with self.assertLogs() as logs:
model.save(filepath, save_format="tf")
expected_substring = (
"has the same name 'LinearModel' as a built-in TF-Keras"
)
matched = [log for log in logs.output if expected_substring in log]
# Check that a warning is *not* logged for a premade model.
self.assertEmpty(matched)
@object_registration.register_keras_serializable(package="Foo")
class RegisteredSubLayer(keras.layers.Layer):
pass
class TestJson(test_combinations.TestCase):
"""Tests to_json()/from_json()."""
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_saving_with_dense_features(self):
cols = [
tf.feature_column.numeric_column("a"),
tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
"b", ["one", "two"]
)
),
]
input_layers = {
"a": keras.layers.Input(shape=(1,), name="a"),
"b": keras.layers.Input(shape=(1,), name="b", dtype="string"),
}
fc_layer = dense_features.DenseFeatures(cols)(input_layers)
output = keras.layers.Dense(10)(fc_layer)
model = keras.models.Model(input_layers, output)
model.compile(
loss=keras.losses.MSE,
optimizer="rmsprop",
metrics=[keras.metrics.categorical_accuracy],
)
config = model.to_json()
loaded_model = model_config.model_from_json(config)
inputs_a = np.arange(10).reshape(10, 1)
inputs_b = np.arange(10).reshape(10, 1).astype("str")
with self.cached_session():
# Initialize tables for V1 lookup.
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.tables_initializer())
self.assertLen(
loaded_model.predict({"a": inputs_a, "b": inputs_b}), 10
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_saving_with_sequence_features(self):
cols = [
tf.feature_column.sequence_numeric_column("a"),
tf.feature_column.indicator_column(
tf.feature_column.sequence_categorical_column_with_vocabulary_list( # noqa: E501
"b", ["one", "two"]
)
),
]
input_layers = {
"a": keras.layers.Input(shape=(None, 1), sparse=True, name="a"),
"b": keras.layers.Input(
shape=(None, 1), sparse=True, name="b", dtype="string"
),
}
fc_layer, _ = ksfc.SequenceFeatures(cols)(input_layers)
# TODO(tibell): Figure out the right dtype and apply masking.
# sequence_length_mask = array_ops.sequence_mask(sequence_length)
# x = keras.layers.GRU(32)(fc_layer, mask=sequence_length_mask)
x = keras.layers.GRU(32)(fc_layer)
output = keras.layers.Dense(10)(x)
model = keras.models.Model(input_layers, output)
model.compile(
loss=keras.losses.MSE,
optimizer="rmsprop",
metrics=[keras.metrics.categorical_accuracy],
)
config = model.to_json()
loaded_model = model_config.model_from_json(config)
batch_size = 10
timesteps = 1
values_a = np.arange(10, dtype=np.float32)
indices_a = np.zeros((10, 3), dtype=np.int64)
indices_a[:, 0] = np.arange(10)
inputs_a = tf.SparseTensor(
indices_a, values_a, (batch_size, timesteps, 1)
)
values_b = np.zeros(10, dtype=str)
indices_b = np.zeros((10, 3), dtype=np.int64)
indices_b[:, 0] = np.arange(10)
inputs_b = tf.SparseTensor(
indices_b, values_b, (batch_size, timesteps, 1)
)
with self.cached_session():
# Initialize tables for V1 lookup.
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.tables_initializer())
self.assertLen(
loaded_model.predict({"a": inputs_a, "b": inputs_b}, steps=1),
batch_size,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_nested_layers(self):
class MyLayer(keras.layers.Layer):
def __init__(self, sublayers, **kwargs):
super().__init__(**kwargs)
self.sublayers = sublayers
def get_config(self):
config = super().get_config()
config["sublayers"] = self.sublayers
return config
layer = MyLayer(
[
keras.layers.Dense(2, name="MyDense"),
RegisteredSubLayer(name="MySubLayer"),
]
)
model = keras.Sequential([keras.Input([None]), layer])
model_json = model.to_json()
self.assertIn("Foo>RegisteredSubLayer", model_json)
loaded_model = model_config.model_from_json(
model_json, custom_objects={"MyLayer": MyLayer}
)
loaded_layer = loaded_model.layers[0]
self.assertIsInstance(loaded_layer.sublayers[0], keras.layers.Dense)
self.assertEqual(loaded_layer.sublayers[0].name, "MyDense")
self.assertIsInstance(loaded_layer.sublayers[1], RegisteredSubLayer)
self.assertEqual(loaded_layer.sublayers[1].name, "MySubLayer")
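# ExtensionType used by `test_save_inputs_spec_with_composite_tensor_names`
# below to exercise saving of composite-tensor input specs.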
class MaskedTensor(tf.experimental.ExtensionType):
__name__ = "MaskedTensor_save_test"
values: tf.Tensor
mask: tf.Tensor
class Spec(tf.TypeSpec):
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
def with_shape(self, shape):
values_spec = tf.TensorSpec(
shape, dtype=self.values.dtype, name=self.values.name
)
mask_spec = tf.TensorSpec(
shape, dtype=self.mask.dtype, name=self.mask.name
)
return MaskedTensor.Spec(values_spec, mask_spec)
@test_combinations.run_with_all_saved_model_formats
class TestWholeModelSaving(test_combinations.TestCase):
def _save_model_dir(self, dirname="saved_model"):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
return os.path.join(temp_dir, dirname)
def _assert_same_weights_and_metrics(self, model, loaded_model):
"""Checks that loaded weights & metrics are the same as the original.
Args:
model: original model
loaded_model: loaded model
"""
self.assertAllClose(model.weights, loaded_model.weights)
if loaded_model.optimizer:
if test_utils.get_save_format() == "tf":
# TODO(b/153110928): TF-Keras TF format doesn't restore
# optimizer weights currently.
return
if isinstance(
loaded_model.optimizer,
keras.optimizers.optimizer.Optimizer,
):
loaded_model.optimizer.build(loaded_model.trainable_variables)
self.assertAllClose(
model.optimizer.variables,
loaded_model.optimizer.variables,
)
else:
self.assertAllClose(
model.optimizer.weights, loaded_model.optimizer.weights
)
# In V1/Graph mode, the model isn't built, so the metrics are not loaded
# immediately (requires model to be called on some data before building
# metrics).
check_metrics = tf.__internal__.tf2.enabled() and tf.executing_eagerly()
if check_metrics:
self.assertAllEqual(
[m.name for m in model.metrics],
[m.name for m in loaded_model.metrics],
)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_save_and_load(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
save_kwargs = test_utils.get_save_kwargs()
if (
save_format == "h5" or not save_kwargs.get("save_traces", True)
) and test_utils.get_model_type() == "subclass":
# HDF5 format currently does not allow saving subclassed models.
# When saving with `save_traces=False`, the subclassed model must
# have a get_config/from_config, which the autogenerated model does
# not have.
return
with self.cached_session():
model = test_utils.get_model_from_layers(
[
keras.layers.Dense(2),
keras.layers.RepeatVector(3),
keras.layers.TimeDistributed(keras.layers.Dense(3)),
],
input_shape=(3,),
)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.legacy.rmsprop.RMSprop(lr=0.0001),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalCrossentropy(
name="cce", label_smoothing=tf.constant(0.2)
),
],
weighted_metrics=[
keras.metrics.categorical_crossentropy,
keras.metrics.CategoricalCrossentropy(
name="cce", label_smoothing=tf.constant(0.2)
),
],
sample_weight_mode="temporal",
)
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(
model, saved_model_dir, save_format=save_format, **save_kwargs
)
loaded_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, loaded_model)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
eval_out = model.evaluate(x, y)
eval_out2 = loaded_model.evaluate(x, y)
self.assertArrayNear(eval_out, eval_out2, 0.001)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_sequential_model_saving_without_input_shape(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer="rmsprop",
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy(name="cat_acc"),
],
weighted_metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy(name="cat_acc2"),
],
sample_weight_mode="temporal",
)
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
model.save(saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_sequential_model_saving_without_compile(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
out = model.predict(x)
# Save the model without any compilation or training.
keras.models.save_model(
model, saved_model_dir, save_format=save_format
)
new_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
with tf.Graph().as_default(), self.cached_session():
# test with custom optimizer, loss
class CustomOp(optimizer_v1.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(
loss=custom_loss, optimizer=CustomOp(), metrics=["acc"]
)
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(
model, saved_model_dir, save_format=save_format
)
new_model = keras.models.load_model(
saved_model_dir,
custom_objects={
"CustomOp": CustomOp,
"custom_loss": custom_loss,
},
)
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss="mse", optimizer="sgd", metrics=["acc"])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_with_tf_optimizer(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(
loss="mse",
optimizer=tf.compat.v1.train.AdadeltaOptimizer(0.1),
metrics=["acc"],
)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_right_after_compilation(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss="mse", optimizer="sgd", metrics=["acc"])
if not tf.compat.v1.executing_eagerly_outside_functions():
model._make_train_function()
keras.models.save_model(
model, saved_model_dir, save_format=save_format
)
model = keras.models.load_model(saved_model_dir)
def test_saving_lambda_numpy_array_arguments(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
if h5py is None:
self.skipTest("h5py required to run this test")
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(
lambda image, mu, std: (image - mu) / std,
arguments={"mu": mean, "std": std},
)(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss="mse", optimizer="sgd", metrics=["acc"])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
self.assertAllClose(mean, model.layers[1].arguments["mu"])
self.assertAllClose(std, model.layers[1].arguments["std"])
def test_saving_model_with_long_layer_names(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
with self.cached_session():
            # This layer name will make the `layer_names` HDF5 attribute blow
            # out of proportion. Note that it fits into the internal HDF5
            # attribute memory limit on its own, but because h5py converts
            # the list of layer names into a numpy array, which uses the same
            # amount of memory for every item, it increases the memory
            # requirements substantially.
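            # Illustrative aside (not part of the original test): numpy string
            # arrays use a fixed-width dtype, so a single long entry inflates
            # the storage of every entry, e.g.
            #   np.array(["a", "b" * 1000]).dtype  # -> dtype('<U1000')
            # which is why one oversized layer name pushes the serialized
            # `layer_names` attribute past the HDF5 attribute limit and
            # forces chunking.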
x = keras.Input(shape=(2,), name="input_" + ("x" * (2**15)))
f = x
for i in range(4):
f = keras.layers.Dense(2, name="dense_%d" % (i,))(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(
"adam", loss=keras.losses.MeanSquaredError(), metrics=["acc"]
)
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(
model, saved_model_dir, save_format=save_format
)
model = keras.models.load_model(saved_model_dir)
if save_format in ["tf", "tensorflow"]:
return
            # Check that the HDF5 file contains a chunked array
            # of layer names.
with h5py.File(saved_model_dir, "r") as h5file:
num_names_arrays = len(
[
attr
for attr in h5file["model_weights"].attrs
if attr.startswith("layer_names")
]
)
# The chunking of layer names array should have happened.
self.assertGreater(num_names_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_model_with_long_weights_names(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
with self.cached_session():
x = keras.Input(shape=(2,), name="nested_model_input")
f = x
for i in range(4):
f = keras.layers.Dense(2, name="nested_model_dense_%d" % (i,))(
f
)
            # This layer name will make the `weight_names`
            # HDF5 attribute blow out of proportion.
f = keras.layers.Dense(
2, name="nested_model_output" + ("x" * (2**14))
)(f)
nested_model = keras.Model(
inputs=[x], outputs=[f], name="nested_model"
)
x = keras.Input(shape=(2,), name="outer_model_input")
f = nested_model(x)
f = keras.layers.Dense(2, name="outer_model_output")(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss="mse", optimizer="adam", metrics=["acc"])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(
model, saved_model_dir, save_format=save_format
)
model = keras.models.load_model(saved_model_dir)
if save_format in ["h5", "hdf5", "keras"]:
                # Check that the HDF5 file contains a chunked array
                # of weight names.
with h5py.File(saved_model_dir, "r") as h5file:
num_weight_arrays = len(
[
attr
for attr in h5file["model_weights"][
"nested_model"
].attrs
if attr.startswith("weight_names")
]
)
                    # The chunking of the weight names array should have happened.
self.assertGreater(num_weight_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_model_saving_to_pre_created_h5py_file(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
with tf.Graph().as_default(), self.cached_session():
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
outputs = keras.layers.Dense(3)(x)
model = keras.Model(inputs, outputs)
model.compile(
loss=keras.losses.MSE,
optimizer=optimizer_v1.Adam(),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy(),
],
)
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(
model, saved_model_dir, save_format=save_format
)
loaded_model = keras.models.load_model(saved_model_dir)
out1 = loaded_model.predict(x)
self.assertAllClose(out, out1, atol=1e-05)
if save_format in ["tf", "tensorflow"]:
return
# Test h5 format specifically
fd, fname = tempfile.mkstemp(".h5")
with h5py.File(fname, mode="r+") as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Test non-default options in h5
with h5py.File(
"_", driver="core", mode="w", backing_store=False
) as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_model_saving_to_new_dir_path(self):
saved_model_dir = os.path.join(
self._save_model_dir(), "newdir", "saved_model"
)
save_format = test_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
out = model.predict(x)
keras.models.save_model(
model, saved_model_dir, save_format=save_format
)
new_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_model_raise_exception_with_failed_saving(self):
if h5py is None:
self.skipTest("h5py required to run this test")
saved_model_dir = self._save_model_dir()
saved_model_path = os.path.join(saved_model_dir, "saved_model.h5")
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
with self.assertRaisesRegex(OSError, "Unable to.* create file"):
with h5py.File(saved_model_path, "w"):
keras.models.save_model(model, saved_model_path)
def test_saving_constant_initializer_with_numpy(self):
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
2,
input_shape=(3,),
kernel_initializer=keras.initializers.Constant(np.ones((3, 2))),
)
)
model.add(keras.layers.Dense(3))
model.compile(loss="mse", optimizer="sgd", metrics=["acc"])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_group_naming_h5py(self):
# Test saving model with layer which name is prefix to a previous layer
# name.
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, "test.h5")
input_layer = keras.layers.Input((None, None, 3), name="test_input")
x = keras.layers.Conv2D(1, 1, name="conv1/conv")(input_layer)
x = keras.layers.Activation("relu", name="conv1")(x)
model = keras.models.Model(inputs=input_layer, outputs=x)
model.save_weights(h5_path)
model.load_weights(h5_path)
def test_primitive_attrs_contain_no_extraneous_strings(self):
if h5py is None:
self.skipTest("h5py required to run this test")
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_shape=[2]))
model.save(saved_model_dir, save_format=save_format)
if save_format in ["tf", "tensorflow"]:
return
h5file = h5py.File(saved_model_dir, "r")
self.assertRegex(
h5file.attrs["keras_version"], r"^[\d]+\.[\d]+\.[\S]+$"
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_functional_model_with_custom_loss_and_metric(self):
def _make_model():
inputs = keras.Input(shape=(4,))
x = keras.layers.Dense(8, activation="relu")(inputs)
outputs = keras.layers.Dense(3, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
custom_loss = keras.layers.Lambda(
lambda x: keras.backend.sum(x * x)
)(x)
model.add_loss(custom_loss)
model.add_metric(
custom_loss, aggregation="mean", name="custom_loss"
)
return model
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
with self.cached_session():
model = _make_model()
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=optimizers.gradient_descent_legacy.SGD(),
metrics=[keras.metrics.SparseCategoricalCrossentropy()],
)
x = np.random.normal(size=(32, 4))
y = np.random.randint(0, 3, size=32)
model.train_on_batch(x, y)
evaluation_results = model.evaluate(x, y)
# Save and reload model.
model.save(saved_model_dir, save_format=save_format)
del model # Prevent misuse.
loaded_model = keras.models.load_model(saved_model_dir)
loaded_model_eval_results = loaded_model.evaluate(x, y)
# Assert all evaluation results are the same.
self.assertAllClose(
evaluation_results, loaded_model_eval_results, 1e-9
)
# Check correctness of the loss calculation.
self.assertAllGreater(evaluation_results, 0.0)
evaluation_results = dict(
zip(loaded_model.metrics_names, evaluation_results)
)
self.assertNear(
evaluation_results["sparse_categorical_crossentropy"]
+ evaluation_results["custom_loss"],
evaluation_results["loss"],
1e-6,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_save_uncompiled_model_with_optimizer(self):
with self.cached_session() as session:
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
model = keras.models.Sequential(
[keras.layers.Dense(1, input_shape=(3,))]
)
# Set the model's optimizer but don't compile. This can happen if
# the model is trained with a custom training loop.
model.optimizer = keras.optimizers.legacy.rmsprop.RMSprop(lr=0.0001)
if not tf.executing_eagerly():
session.run([v.initializer for v in model.variables])
model.save(saved_model_dir, save_format=save_format)
if save_format in ["tf", "tensorflow"]:
loaded = keras.models.load_model(saved_model_dir)
self.assertIsInstance(
loaded.optimizer,
keras.optimizers.legacy.optimizer_v2.OptimizerV2,
)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_functional_model_with_getitem_op_layer(self):
inp = keras.Input(shape=(8))
out = inp[:]
model = keras.Model(inputs=[inp], outputs=out)
batch_size = 7
x = tf.stack([tf.range(8) for _ in range(batch_size)])
args = [x]
expected = x[:]
self.assertAllEqual(model(args), expected)
self.assertAllEqual(
model.predict(args, batch_size=batch_size), expected
)
# Make sure it can be successfully saved and loaded.
save_format = test_utils.get_save_format()
saved_model_dir = self._save_model_dir()
keras.models.save_model(model, saved_model_dir, save_format=save_format)
loaded_model = keras.models.load_model(saved_model_dir)
self.assertAllEqual(loaded_model(args), expected)
self.assertAllEqual(
loaded_model.predict(args, batch_size=batch_size), expected
)
@test_combinations.generate(
test_combinations.combine(mode=["eager", "graph"])
)
def test_custom_functional_registered(self):
def _get_cls_definition():
class CustomModel(keras.Model):
def c(self):
return "c"
return CustomModel
cls = _get_cls_definition()
self.assertEqual(cls.__bases__[0], keras.Model)
with self.cached_session() as sess:
input_ = keras.layers.Input(shape=(1,))
output = keras.layers.Dense(1)(input_)
model = cls(input_, output)
# `cls` now inherits from `Functional` class.
self.assertEqual(cls.__bases__[0], functional.Functional)
if not tf.executing_eagerly():
sess.run([v.initializer for v in model.variables])
save_format = test_utils.get_save_format()
saved_model_dir = self._save_model_dir()
keras.models.save_model(
model, saved_model_dir, save_format=save_format
)
loaded_model = keras.models.load_model(
saved_model_dir, custom_objects={"CustomModel": cls}
)
self.assertIsInstance(loaded_model, cls)
# Check with "new" `CustomModel` class definition.
new_cls = _get_cls_definition()
# The new `CustomModel` class is *not* derived from `Functional`.
self.assertEqual(new_cls.__bases__[0], keras.Model)
reloaded_model = keras.models.load_model(
saved_model_dir, custom_objects={"CustomModel": new_cls}
)
self.assertIsInstance(reloaded_model, new_cls)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_custom_sequential_registered_no_scope(self):
@object_registration.register_keras_serializable(package="my_package")
class MyDense(keras.layers.Dense):
def __init__(self, units, **kwargs):
super().__init__(units, **kwargs)
input_shape = [1]
inputs = keras.Input(shape=input_shape)
custom_layer = MyDense(1)
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
model = keras.Sequential(layers=[inputs, custom_layer])
model.save(saved_model_dir, save_format=save_format)
loaded_model = keras.models.load_model(saved_model_dir)
x = tf.constant([5])
self.assertAllEqual(model(x), loaded_model(x))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_custom_functional_registered_no_scope(self):
@object_registration.register_keras_serializable(package="my_package")
class MyDense(keras.layers.Dense):
def __init__(self, units, **kwargs):
super().__init__(units, **kwargs)
saved_model_dir = self._save_model_dir()
save_format = test_utils.get_save_format()
input_shape = [1]
inputs = keras.Input(shape=input_shape)
outputs = MyDense(1)(inputs)
model = keras.Model(inputs, outputs)
model.save(saved_model_dir, save_format=save_format)
loaded_model = keras.models.load_model(saved_model_dir)
x = tf.constant([5])
self.assertAllEqual(model(x), loaded_model(x))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_shared_objects(self):
class OuterLayer(keras.layers.Layer):
def __init__(self, inner_layer):
super().__init__()
self.inner_layer = inner_layer
def call(self, inputs):
return self.inner_layer(inputs)
def get_config(self):
return {
"inner_layer": serialization.serialize_keras_object(
self.inner_layer
)
}
@classmethod
def from_config(cls, config):
return cls(
serialization.deserialize_keras_object(
config["inner_layer"]
)
)
class InnerLayer(keras.layers.Layer):
def __init__(self):
super().__init__()
self.v = self.add_weight(name="v", shape=[], dtype=tf.float32)
def call(self, inputs):
return self.v + inputs
@classmethod
def from_config(cls, config):
return cls()
# Create a model with 2 output layers that share the same inner layer.
inner_layer = InnerLayer()
outer_layer_1 = OuterLayer(inner_layer)
outer_layer_2 = OuterLayer(inner_layer)
input_ = keras.Input(shape=(1,))
model = keras.Model(
inputs=input_,
outputs=[outer_layer_1(input_), outer_layer_2(input_)],
)
# Changes to the shared layer should affect both outputs.
model.layers[1].inner_layer.v.assign(5)
self.assertAllEqual(model(1), [6.0, 6.0])
model.layers[1].inner_layer.v.assign(3)
self.assertAllEqual(model(1), [4.0, 4.0])
# After loading, changes to the shared layer should still affect both
# outputs.
def _do_assertions(loaded):
loaded.layers[1].inner_layer.v.assign(5)
self.assertAllEqual(loaded(1), [6.0, 6.0])
loaded.layers[1].inner_layer.v.assign(3)
self.assertAllEqual(loaded(1), [4.0, 4.0])
loaded.layers[2].inner_layer.v.assign(5)
self.assertAllEqual(loaded(1), [6.0, 6.0])
loaded.layers[2].inner_layer.v.assign(3)
self.assertAllEqual(loaded(1), [4.0, 4.0])
# We'd like to make sure we only attach shared object IDs when strictly
# necessary, so we'll recursively traverse the generated config to count
# whether we have the exact number we expect.
def _get_all_keys_recursive(dict_or_iterable):
if isinstance(dict_or_iterable, dict):
for key in dict_or_iterable.keys():
yield key
for key in _get_all_keys_recursive(dict_or_iterable.values()):
yield key
elif isinstance(dict_or_iterable, str):
return
else:
try:
for item in dict_or_iterable:
for key in _get_all_keys_recursive(item):
yield key
# Not an iterable or dictionary
except TypeError:
return
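        # Illustrative aside (not part of the original test): on a toy config
        # such as {"layers": [{"class_name": "Dense", "shared_object_id": 1}]}
        # the generator above yields "layers", "class_name" and
        # "shared_object_id", so counting occurrences of
        # serialization.SHARED_OBJECT_KEY (assumed here to be the
        # "shared_object_id" string) gives the number of shared-object
        # markers in the config.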
with object_registration.CustomObjectScope(
{"OuterLayer": OuterLayer, "InnerLayer": InnerLayer}
):
# Test saving and loading to disk
save_format = test_utils.get_save_format()
saved_model_dir = self._save_model_dir()
keras.models.save_model(
model, saved_model_dir, save_format=save_format
)
loaded = keras.models.load_model(saved_model_dir)
_do_assertions(loaded)
# Test recreating directly from config
config = model.get_config()
key_count = collections.Counter(_get_all_keys_recursive(config))
self.assertEqual(key_count[serialization.SHARED_OBJECT_KEY], 2)
loaded = keras.Model.from_config(config)
_do_assertions(loaded)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_shared_objects_wrapper(self):
"""Tests that shared layers wrapped with `Wrapper` restore correctly."""
input_ = keras.Input(shape=(1,))
unwrapped = keras.layers.Layer(name="unwrapped")
wrapped = keras.layers.Wrapper(unwrapped, name="wrapped")
model = keras.Model(
inputs=input_, outputs=[unwrapped(input_), wrapped(input_)]
)
# Test recreating directly from config
config = model.get_config()
loaded = keras.Model.from_config(config)
self.assertIs(loaded.layers[1], loaded.layers[2].layer)
# Test saving and loading to disk
save_format = test_utils.get_save_format()
saved_model_dir = self._save_model_dir()
keras.models.save_model(model, saved_model_dir, save_format=save_format)
loaded = keras.models.load_model(saved_model_dir)
self.assertIs(loaded.layers[1], loaded.layers[2].layer)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"], fit=[True, False])
)
def test_multi_output_metrics_name_stay_same(self, fit):
"""Tests that metric names don't change with each save/load cycle.
e.g. "head_0_accuracy" should not become "head_0_head_0_accuracy" after
saving and loading a model.
Arguments:
fit: Whether the model should be fit before saving.
"""
# This doesn't work at all, so we can't check whether metric names are
# correct.
if not tf.executing_eagerly() and not fit:
self.skipTest("b/181767784")
input_ = keras.Input((4,))
model = keras.Model(
input_,
[
keras.layers.Softmax(name="head_0")(
keras.layers.Dense(3)(input_)
),
keras.layers.Softmax(name="head_1")(
keras.layers.Dense(5)(input_)
),
],
)
metric = keras.metrics.BinaryAccuracy()
model.compile(
optimizer="rmsprop",
loss="mse",
metrics={"head_0": [metric, "accuracy"]},
)
x = np.random.rand(2, 4)
y = {
"head_0": np.random.randint(2, size=(2, 3)),
"head_1": np.random.randint(2, size=(2, 5)),
}
        # Make sure metric prefixing works the same regardless of whether the
        # user has fit the model before saving.
if fit:
model.fit(x, y, verbose=0)
# Save and reload.
save_format = test_utils.get_save_format()
saved_model_dir = self._save_model_dir()
keras.models.save_model(model, saved_model_dir, save_format=save_format)
loaded = keras.models.load_model(saved_model_dir)
# Make sure the metrics names from the model before saving match the
# loaded model.
self.assertSequenceEqual(model.metrics_names, loaded.metrics_names)
# Test only in eager mode because ragged tensor inputs
# cannot be used in graph mode.
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
@test_utils.run_v2_only
def test_save_functional_with_ragged_constant_input(self):
input1 = keras.Input(shape=[])
input2 = tf.ragged.constant([[1.0, 2.0], [3.0]])
outputs = keras.layers.Add()([input1, input2])
model = keras.Model(input1, outputs)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir)
keras.models.load_model(saved_model_dir)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
@test_utils.run_v2_only
def test_save_functional_with_constant_input(self):
input1 = keras.Input(shape=[2])
input2 = tf.constant([[1.0, 2.0]])
outputs = keras.layers.Add()([input1, input2])
model = keras.Model(input1, outputs)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir)
keras.models.load_model(saved_model_dir)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
@test_utils.run_v2_only
def test_save_functional_with_constant_string_input(self):
input1 = keras.Input(shape=[2], dtype=tf.string)
input2 = tf.constant([["単", "に"]])
outputs = keras.layers.Concatenate()([input1, input2])
model = keras.Model(input1, outputs)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir)
loaded_model = keras.models.load_model(saved_model_dir)
x = tf.constant([["a", "b"]])
self.assertAllEqual(model(x), loaded_model(x))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
@test_utils.run_v2_only
def test_save_functional_with_ragged_constant_string_input(self):
input1 = keras.Input(shape=[1], dtype=tf.string)
input2 = tf.ragged.constant([["単", "に"], ["単"]])
outputs = keras.layers.Concatenate(axis=0)([input1, input2])
model = keras.Model(input1, outputs)
saved_model_dir = self._save_model_dir()
model.save(saved_model_dir)
loaded_model = keras.models.load_model(saved_model_dir)
x = tf.constant([["a"]])
self.assertAllEqual(model(x), loaded_model(x))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
@test_utils.run_v2_only
def test_save_inputs_spec_with_composite_tensor_names(self):
class KerasModel(keras.Model):
def call(self, inputs):
return inputs
spec = MaskedTensor.Spec(
tf.TensorSpec([None], name="x__values"),
tf.TensorSpec([None], dtype=tf.bool, name="x__mask"),
)
km1 = KerasModel()
inputs = keras.Input(type_spec=spec)
km1(inputs)
self.assertEqual(km1.save_spec()[0][0].mask.name, "x__mask")
# Factory functions to create models that will be serialized inside a Network.
def _make_graph_network(input_size, output_size):
inputs = keras.Input(input_size)
x = keras.layers.Dense(8, activation="relu")(inputs)
y = keras.layers.Dense(output_size)(x)
return keras.Model(inputs=inputs, outputs=y)
def _make_sequential(input_size, output_size):
del input_size
return keras.Sequential(
[
keras.layers.Dense(8, activation="relu"),
keras.layers.Dense(output_size),
]
)
def _make_sequential_built(input_size, output_size):
model = _make_sequential(input_size, output_size)
model.build((None, input_size))
return model
def _make_sequential_graph_network(input_size, output_size):
return keras.Sequential(
[
keras.layers.InputLayer(input_size),
keras.layers.Dense(8, activation="relu"),
keras.layers.Dense(output_size),
]
)
def _make_sequential_input_shape(input_size, output_size):
return keras.Sequential(
[
keras.layers.Dense(8, activation="relu", input_shape=(input_size,)),
keras.layers.Dense(output_size),
]
)
class _make_subclassed(keras.Model):
def __init__(self, input_size, output_size):
super().__init__()
self._config = {"input_size": input_size, "output_size": output_size}
self._hidden_layer = keras.layers.Dense(
8, activation="relu", name="hidden"
)
self._logits_layer = keras.layers.Dense(output_size, name="logits")
def call(self, inputs):
x = self._hidden_layer(inputs)
return self._logits_layer(x)
def get_config(self):
return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
class _make_subclassed_built(_make_subclassed):
def __init__(self, input_size, output_size):
super().__init__(input_size, output_size)
self.build((None, input_size))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class TestWholeModelSavingWithNesting(tf.test.TestCase, parameterized.TestCase):
"""Tests saving a whole model that contains other models."""
@parameterized.named_parameters(
[
("graph_network", _make_graph_network),
("sequential", _make_sequential),
("sequential_built", _make_sequential_built),
("sequential_graph_network", _make_sequential_graph_network),
("sequential_input_shape", _make_sequential_input_shape),
("subclassed", _make_subclassed),
("subclassed_built", _make_subclassed_built),
]
)
def test_functional(self, model_fn):
"""Tests serializing a model that uses a nested model to share
weights."""
if h5py is None:
self.skipTest("h5py required to run this test")
def _make_model():
inputs = (
keras.Input(shape=(4,), name="examples"),
keras.Input(shape=(4,), name="neighbors"),
)
base_model = model_fn(inputs[0].shape.as_list()[-1], 2)
outputs = keras.layers.add(
[base_model(inputs[0]), base_model(inputs[1])]
)
return keras.Model(inputs=inputs, outputs=outputs)
with self.cached_session():
x = (
np.random.normal(size=(16, 4)).astype(np.float32),
np.random.normal(size=(16, 4)).astype(np.float32),
)
model = _make_model()
predictions = model(x)
# Save and reload.
model_path = os.path.join(self.get_temp_dir(), "model.h5")
model.save(model_path)
del model
loaded_model = keras.models.load_model(
model_path,
custom_objects={
"_make_subclassed": _make_subclassed,
"_make_subclassed_built": _make_subclassed_built,
},
compile=False,
)
self.assertAllClose(loaded_model(x), predictions, 1e-9)
if __name__ == "__main__":
with saved_model_utils.keras_option_scope(
save_traces=False, in_tf_saved_model_scope=True
):
tf.test.main()
| tf-keras/tf_keras/saving/legacy/save_test.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/save_test.py",
"repo_id": "tf-keras",
"token_count": 28971
} | 192 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set based on dict so that it preserves key insertion order.
Python Dicts are order-preserving since 3.6
(https://mail.python.org/pipermail/python-dev/2017-December/151283.html),
but sets are not. This class implements a set on top of a dict so that we get
deterministic iteration order across runs.
"""
import collections.abc
class OrderPreservingSet(collections.abc.MutableSet):
"""A set based on dict so that it preserves key insertion order."""
def __init__(self, iterable=None):
self._dict = {item: None for item in (iterable or [])}
# abstract from collections.MutableSet
def __len__(self):
return len(self._dict)
# abstract from collections.MutableSet
def __contains__(self, value):
return value in self._dict
# override from collections.MutableSet
def __iter__(self):
return iter(self._dict)
# abstract from collections.MutableSet
def add(self, item):
self._dict[item] = None
# abstract from collections.MutableSet
def discard(self, value):
del self._dict[value]
# override from collections.MutableSet
def clear(self):
self._dict = {}
# override from collections.Set
def __eq__(self, other):
if not isinstance(other, OrderPreservingSet):
return NotImplemented
return self._dict.keys() == other._dict.keys()
# override from collections.Set
def __le__(self, other):
if not isinstance(other, OrderPreservingSet):
return NotImplemented
return self._dict.keys() <= other._dict.keys()
# override from collections.Set
def __ge__(self, other):
if not isinstance(other, OrderPreservingSet):
return NotImplemented
return self._dict.keys() >= other._dict.keys()
# override from collections.Set
def __and__(self, other):
        # collections.abc.Set defaults to the ordering of `other`; we want
        # to preserve the ordering of `self`.
return self._from_iterable(value for value in self if value in other)
# override from collections.Set
def __or__(self, other):
# ensure that other is ordered before performing __or__
if not isinstance(other, OrderPreservingSet):
raise TypeError(
"cannot union an 'OrderPreservingSet' with an "
"unordered iterable."
)
result = self._from_iterable(value for value in self)
for value in other:
result._dict[value] = None
return result
def union(self, other):
return self | other
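# Illustrative usage (a minimal sketch, not part of the original module):
#   s = OrderPreservingSet(["b", "a", "c"])
#   s.add("d")
#   s.discard("a")
#   list(s)  # -> ["b", "c", "d"]; iteration follows insertion order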
| tf-keras/tf_keras/saving/legacy/saved_model/order_preserving_set.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/order_preserving_set.py",
"repo_id": "tf-keras",
"token_count": 1111
} | 193 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras python-based idempotent saving functions."""
import os
import sys
import zipfile
from pathlib import Path
from unittest import mock
import h5py
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tensorflow.python.platform import tf_logging as logging
import tf_keras as keras
from tf_keras import backend
from tf_keras.optimizers import adam
from tf_keras.saving import object_registration
from tf_keras.saving import saving_lib
from tf_keras.saving.legacy.saved_model import json_utils
from tf_keras.testing_infra import test_utils
from tf_keras.utils import io_utils
train_step_message = "This is my training step"
assets_data = "These are my assets"
variables_data = np.random.random((10,))
@keras.utils.register_keras_serializable(package="my_custom_package")
class MyDense(keras.layers.Dense):
def build(self, input_shape):
self.additional_weights = [
self.add_weight(
"my_additional_weight",
initializer="ones",
trainable=True,
),
self.add_weight(
"my_additional_weight_2",
initializer="ones",
trainable=True,
),
]
self.weights_in_dict = {
"my_weight": self.add_weight(
"my_dict_weight",
initializer="ones",
trainable=True,
),
}
self.nested_layer = keras.layers.Dense(1)
return super().build(input_shape)
def call(self, inputs):
call_result = super().call(inputs)
return self.nested_layer(call_result)
def two(self):
return 2
@keras.utils.register_keras_serializable(package="my_custom_package")
class LayerWithCustomSaving(MyDense):
def build(self, input_shape):
self.assets = assets_data
self.stored_variables = variables_data
return super().build(input_shape)
def save_assets(self, inner_path):
with open(os.path.join(inner_path, "assets.txt"), "w") as f:
f.write(self.assets)
def save_own_variables(self, store):
store["variables"] = self.stored_variables
def load_assets(self, inner_path):
with open(os.path.join(inner_path, "assets.txt"), "r") as f:
text = f.read()
self.assets = text
def load_own_variables(self, store):
self.stored_variables = np.array(store["variables"])
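# Note (added for clarity, not part of the original test): `save_assets`/
# `load_assets` and `save_own_variables`/`load_own_variables` are the
# per-layer hooks invoked by the Keras v3 saving path; assets are written
# into a directory inside the `.keras` archive and variables into a
# key/value store, which is what `test_saving_custom_assets_and_variables`
# below relies on.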
@keras.utils.register_keras_serializable(package="my_custom_package")
class CustomModelX(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dense1 = MyDense(1)
self.dense2 = MyDense(1)
def call(self, inputs):
out = self.dense1(inputs)
return self.dense2(out)
def train_step(self, data):
tf.print(train_step_message)
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x)
loss = self.compiled_loss(y, y_pred)
gradients = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
return {}
def one(self):
return 1
@keras.utils.register_keras_serializable(package="my_custom_package")
class ModelWithCustomSaving(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.custom_dense = LayerWithCustomSaving(1)
def call(self, inputs):
return self.custom_dense(inputs)
@keras.utils.register_keras_serializable(package="my_custom_package")
class CompileOverridingModel(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dense1 = MyDense(1)
def compile(self, *args, **kwargs):
super().compile(*args, **kwargs)
def call(self, inputs):
return self.dense1(inputs)
@keras.utils.register_keras_serializable(package="my_custom_package")
class CompileOverridingSequential(keras.Sequential):
def compile(self, *args, **kwargs):
super().compile(*args, **kwargs)
@keras.utils.register_keras_serializable(package="my_custom_package")
def my_mean_squared_error(y_true, y_pred):
"""Identical to built-in `mean_squared_error`, added here as a custom
func.
"""
return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1)
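# Illustrative check (not part of the original test): for y_true=[[0., 0.]]
# and y_pred=[[1., 3.]] this returns [(1 + 9) / 2] = [5.0], matching
# keras.losses.mean_squared_error.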
module_my_mean_squared_error = my_mean_squared_error
@test_utils.run_v2_only
class SavingV3Test(tf.test.TestCase, parameterized.TestCase):
def _get_subclassed_model(self):
subclassed_model = CustomModelX()
subclassed_model.compile(
optimizer=adam.Adam(),
loss=[
"mse",
keras.losses.mean_squared_error,
keras.losses.MeanSquaredError(),
my_mean_squared_error,
],
)
return subclassed_model
def _get_sequential_model(self):
sequential_model = keras.Sequential([MyDense(1), MyDense(1)])
sequential_model.compile(
optimizer="adam", loss=["mse", keras.losses.mean_squared_error]
)
return sequential_model
def _get_functional_model(self):
inputs = keras.Input(shape=(32,))
x = MyDense(1, name="first_dense")(inputs)
outputs = MyDense(1, name="second_dense")(x)
functional_model = keras.Model(inputs, outputs)
functional_model.compile(
optimizer="adam", loss=["mse", keras.losses.mean_squared_error]
)
return functional_model
def test_saving_after_compile_but_before_fit(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
subclassed_model = self._get_subclassed_model()
subclassed_model.save(temp_filepath, save_format="keras_v3")
# This is so that we can register another function with the same custom
# object key, and make sure the newly registered function is used while
# loading.
del object_registration._GLOBAL_CUSTOM_OBJECTS[
"my_custom_package>my_mean_squared_error"
]
@keras.utils.register_keras_serializable(package="my_custom_package")
def my_mean_squared_error(y_true, y_pred):
"""Function-local `mean_squared_error`."""
return backend.mean(
tf.math.squared_difference(y_pred, y_true), axis=-1
)
loaded_model = saving_lib.load_model(temp_filepath)
self.assertEqual(
subclassed_model._is_compiled, loaded_model._is_compiled
)
# Everything should be the same class or function for the original model
# and the loaded model.
for model in [subclassed_model, loaded_model]:
self.assertIs(
model.optimizer.__class__,
adam.Adam,
)
self.assertIs(
model.compiled_loss.__class__,
keras.engine.compile_utils.LossesContainer,
)
self.assertEqual(model.compiled_loss._losses[0], "mse")
self.assertIs(
model.compiled_loss._losses[1], keras.losses.mean_squared_error
)
self.assertIs(
model.compiled_loss._losses[2].__class__,
keras.losses.MeanSquaredError,
)
self.assertIs(
model.compiled_loss._total_loss_mean.__class__,
keras.metrics.base_metric.Mean,
)
        # The custom loss function is the exception: the loaded model is
        # supposed to use the newly registered custom function.
self.assertIs(
subclassed_model.compiled_loss._losses[3],
module_my_mean_squared_error,
)
self.assertIs(
loaded_model.compiled_loss._losses[3], my_mean_squared_error
)
self.assertIsNot(module_my_mean_squared_error, my_mean_squared_error)
def test_saving_after_fit(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
subclassed_model = self._get_subclassed_model()
x = np.random.random((100, 32))
y = np.random.random((100, 1))
subclassed_model.fit(x, y, epochs=1)
subclassed_model.save(temp_filepath, save_format="keras_v3")
loaded_model = saving_lib.load_model(temp_filepath)
self.assertEqual(
subclassed_model._is_compiled, loaded_model._is_compiled
)
io_utils.enable_interactive_logging()
# `tf.print` writes to stderr. This is to make sure the custom training
# step is used.
with self.captureWritesToStream(sys.stderr) as printed:
loaded_model.fit(x, y, epochs=1)
self.assertRegex(printed.contents(), train_step_message)
# Check that the custom classes do get used.
self.assertIsInstance(loaded_model, CustomModelX)
self.assertIsInstance(loaded_model.dense1, MyDense)
# Check that the custom method is available.
self.assertEqual(loaded_model.one(), 1)
self.assertEqual(loaded_model.dense1.two(), 2)
# Everything should be the same class or function for the original model
# and the loaded model.
for model in [subclassed_model, loaded_model]:
self.assertIs(
model.optimizer.__class__,
adam.Adam,
)
self.assertIs(
model.compiled_loss.__class__,
keras.engine.compile_utils.LossesContainer,
)
self.assertIs(
model.compiled_loss._losses[0].__class__,
keras.losses.LossFunctionWrapper,
)
self.assertIs(
model.compiled_loss._losses[1].__class__,
keras.losses.LossFunctionWrapper,
)
self.assertIs(
model.compiled_loss._losses[2].__class__,
keras.losses.MeanSquaredError,
)
self.assertIs(
model.compiled_loss._losses[3].__class__,
keras.losses.LossFunctionWrapper,
)
self.assertIs(
model.compiled_loss._total_loss_mean.__class__,
keras.metrics.base_metric.Mean,
)
def test_saving_preserve_unbuilt_state(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
subclassed_model = CustomModelX()
subclassed_model.save(temp_filepath, save_format="keras_v3")
loaded_model = saving_lib.load_model(temp_filepath)
self.assertEqual(
subclassed_model._is_compiled, loaded_model._is_compiled
)
self.assertFalse(subclassed_model.built)
self.assertFalse(loaded_model.built)
def test_saving_preserve_built_state(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = self._get_subclassed_model()
x = np.random.random((100, 32))
y = np.random.random((100, 1))
model.fit(x, y, epochs=1)
model.save(temp_filepath, save_format="keras_v3")
loaded_model = saving_lib.load_model(temp_filepath)
self.assertEqual(model._is_compiled, loaded_model._is_compiled)
self.assertTrue(model.built)
self.assertTrue(loaded_model.built)
self.assertEqual(
model._build_input_shape, loaded_model._build_input_shape
)
self.assertEqual(
tf.TensorShape([None, 32]), loaded_model._build_input_shape
)
def test_saved_module_paths_and_class_names(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
subclassed_model = self._get_subclassed_model()
x = np.random.random((100, 32))
y = np.random.random((100, 1))
subclassed_model.fit(x, y, epochs=1)
subclassed_model.save(temp_filepath, save_format="keras_v3")
with zipfile.ZipFile(temp_filepath, "r") as z:
with z.open(saving_lib._CONFIG_FILENAME, "r") as c:
config_json = c.read()
config_dict = json_utils.decode(config_json)
self.assertEqual(
config_dict["registered_name"], "my_custom_package>CustomModelX"
)
self.assertEqual(
config_dict["compile_config"]["optimizer"]["config"][
"is_legacy_optimizer"
],
False,
)
self.assertEqual(
config_dict["compile_config"]["optimizer"]["class_name"],
"Adam",
)
self.assertLen(config_dict["compile_config"]["loss"], 4)
self.assertEqual(
config_dict["compile_config"]["loss"][0],
"mse",
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
layer=["tf_op_lambda", "lambda"],
)
)
def test_functional_model_with_tf_op_lambda_layer(self, layer):
class ToString:
def __init__(self):
self.contents = ""
def __call__(self, msg):
self.contents += msg + "\n"
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
if layer == "lambda":
func = tf.function(lambda x: tf.math.cos(x) + tf.math.sin(x))
inputs = keras.layers.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
outputs = keras.layers.Lambda(func._python_function)(outputs)
elif layer == "tf_op_lambda":
inputs = keras.layers.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
outputs = outputs + inputs
functional_model = keras.Model(inputs, outputs)
functional_to_string = ToString()
functional_model.summary(print_fn=functional_to_string)
functional_model.compile(optimizer="adam", loss="mse", metrics=["mae"])
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
functional_model.fit(x, y, epochs=3)
functional_model.save(temp_filepath, save_format="keras_v3")
loaded_model = saving_lib.load_model(temp_filepath, safe_mode=False)
self.assertEqual(
functional_model._is_compiled, loaded_model._is_compiled
)
loaded_model.fit(x, y, epochs=3)
loaded_to_string = ToString()
loaded_model.summary(print_fn=loaded_to_string)
        # Confirm that the original and the saved/loaded model have the
        # same structure.
self.assertEqual(
functional_to_string.contents, loaded_to_string.contents
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
model_type=["sequential", "functional", "subclassed"],
)
)
def test_saving_model_state(self, model_type):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = getattr(self, f"_get_{model_type}_model")()
x = np.random.random((100, 32))
y = np.random.random((100, 1))
model.fit(x, y, epochs=1)
# Assert that the archive has not been saved.
self.assertFalse(os.path.exists(temp_filepath))
# Mutate the `Dense` layer custom weights to ensure that list and
# dict-contained weights get restored.
model.layers[1].additional_weights[0].assign(2)
model.layers[1].weights_in_dict["my_weight"].assign(2)
model.layers[1].nested_layer.kernel.assign([[1]])
model.save(temp_filepath, save_format="keras_v3")
# Assert that the archive has been saved.
self.assertTrue(os.path.exists(temp_filepath))
loaded_model = saving_lib.load_model(temp_filepath)
self.assertEqual(model._is_compiled, loaded_model._is_compiled)
# The weights are supposed to be the same (between original and loaded
# models).
for original_weights, loaded_weights in zip(
model.get_weights(), loaded_model.get_weights()
):
np.testing.assert_allclose(original_weights, loaded_weights)
# The optimizer variables are supposed to be the same (between original
# and loaded models).
for original_weights, loaded_weights in zip(
model.optimizer.variables, loaded_model.optimizer.variables
):
np.testing.assert_allclose(original_weights, loaded_weights)
def test_saving_custom_assets_and_variables(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = ModelWithCustomSaving()
model.compile(
optimizer=adam.Adam(),
loss=[
"mse",
keras.losses.mean_squared_error,
keras.losses.MeanSquaredError(),
my_mean_squared_error,
],
)
x = np.random.random((100, 32))
y = np.random.random((100, 1))
model.fit(x, y, epochs=1)
# Assert that the archive has not been saved.
self.assertFalse(os.path.exists(temp_filepath))
model.save(temp_filepath, save_format="keras_v3")
loaded_model = saving_lib.load_model(temp_filepath)
self.assertEqual(loaded_model.custom_dense.assets, assets_data)
self.assertEqual(
loaded_model.custom_dense.stored_variables.tolist(),
variables_data.tolist(),
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
model_type=["subclassed", "sequential"],
)
)
def test_compile_overridden_model_raises_if_no_from_config_overridden(
self, model_type
):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = (
CompileOverridingModel()
if model_type == "subclassed"
else CompileOverridingSequential(
[keras.layers.Embedding(4, 1), MyDense(1), MyDense(1)]
)
)
model.compile("rmsprop", "mse")
model.save(temp_filepath, save_format="keras_v3")
with mock.patch.object(logging, "warning") as mock_warn:
saving_lib.load_model(temp_filepath)
if not mock_warn.call_args_list:
raise AssertionError("Did not warn.")
self.assertIn(
"`compile()` was not called as part of model loading "
"because the model's `compile()` method is custom. ",
mock_warn.call_args_list[0][0][0],
)
def test_metadata(self):
temp_filepath = Path(
os.path.join(self.get_temp_dir(), "my_model.keras")
)
model = CompileOverridingModel()
model.save(temp_filepath, save_format="keras_v3")
with zipfile.ZipFile(temp_filepath, "r") as z:
with z.open(saving_lib._METADATA_FILENAME, "r") as c:
metadata_json = c.read()
metadata = json_utils.decode(metadata_json)
self.assertIn("keras_version", metadata)
self.assertIn("date_saved", metadata)
def test_gfile_local_called(self):
temp_filepath = Path(
os.path.join(self.get_temp_dir(), "my_model.keras")
)
model = CompileOverridingModel()
with mock.patch(
"re.match", autospec=True
) as mock_re_match, mock.patch.object(
tf.io.gfile, "GFile"
) as mock_gfile:
# Check regex matching
mock_re_match.return_value = True
model.save(temp_filepath, save_format="keras_v3")
mock_re_match.assert_called()
self.assertIn(str(temp_filepath), mock_re_match.call_args.args)
# Check gfile opened with filepath specified
self.assertIn(str(temp_filepath), mock_gfile.call_args.args)
def test_load_model_api_endpoint(self):
temp_filepath = Path(os.path.join(self.get_temp_dir(), "mymodel.keras"))
model = self._get_functional_model()
ref_input = np.random.random((10, 32))
ref_output = model.predict(ref_input)
model.save(temp_filepath, save_format="keras_v3")
model = keras.models.load_model(temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
def test_save_load_weights_only(self):
temp_filepath = Path(
os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
)
model = self._get_functional_model()
ref_input = np.random.random((10, 32))
ref_output = model.predict(ref_input)
saving_lib.save_weights_only(model, temp_filepath)
model = self._get_functional_model()
saving_lib.load_weights_only(model, temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
# Test with Model method
model = self._get_functional_model()
model.load_weights(temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
def test_load_weights_only_with_keras_file(self):
        # Test loading weights from a whole saved model
temp_filepath = Path(os.path.join(self.get_temp_dir(), "mymodel.keras"))
model = self._get_functional_model()
ref_input = np.random.random((10, 32))
ref_output = model.predict(ref_input)
saving_lib.save_model(model, temp_filepath)
model = self._get_functional_model()
saving_lib.load_weights_only(model, temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
# Test with Model method
model = self._get_functional_model()
model.load_weights(temp_filepath)
self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
def test_compile_arg(self):
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
model = self._get_functional_model()
model.compile("rmsprop", "mse")
model.fit(np.random.random((10, 32)), np.random.random((10, 1)))
saving_lib.save_model(model, temp_filepath)
model = saving_lib.load_model(temp_filepath)
self.assertEqual(model._is_compiled, True)
model = saving_lib.load_model(temp_filepath, compile=False)
self.assertEqual(model._is_compiled, False)
def test_overwrite(self):
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
model = self._get_functional_model()
model.save(temp_filepath, save_format="keras_v3")
model.save(temp_filepath, save_format="keras_v3", overwrite=True)
with self.assertRaises(EOFError):
model.save(temp_filepath, save_format="keras_v3", overwrite=False)
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
model = self._get_functional_model()
model.save_weights(temp_filepath)
model.save_weights(temp_filepath, overwrite=True)
with self.assertRaises(EOFError):
model.save_weights(temp_filepath, overwrite=False)
def test_partial_load(self):
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
original_model = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Dense(4),
keras.layers.Dense(5),
]
)
original_model.save(temp_filepath, save_format="keras_v3")
# Test with a model that has a differently shaped layer
new_model = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Dense(4),
keras.layers.Dense(6),
]
)
new_layer_kernel_value = new_model.layers[1].kernel.numpy()
with self.assertRaisesRegex(ValueError, "Shape mismatch"):
# Doesn't work by default
new_model.load_weights(temp_filepath)
# Now it works
new_model.load_weights(temp_filepath, skip_mismatch=True)
self.assertAllClose(
original_model.layers[0].get_weights(),
new_model.layers[0].get_weights(),
)
self.assertAllClose(
new_model.layers[1].kernel.numpy(), new_layer_kernel_value
)
# Test with a model that has a new layer
new_model = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Dense(4),
keras.layers.Dense(5),
keras.layers.Dense(5),
]
)
new_layer_kernel_value = new_model.layers[2].kernel.numpy()
with self.assertRaisesRegex(ValueError, "received 0 variables"):
# Doesn't work by default
new_model.load_weights(temp_filepath)
# Now it works
new_model.load_weights(temp_filepath, skip_mismatch=True)
self.assertAllClose(
original_model.layers[0].get_weights(),
new_model.layers[0].get_weights(),
)
self.assertAllClose(
original_model.layers[1].get_weights(),
new_model.layers[1].get_weights(),
)
self.assertAllClose(
new_model.layers[2].kernel.numpy(), new_layer_kernel_value
)
def test_api_errors(self):
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.notkeras")
model = self._get_functional_model()
with self.assertRaisesRegex(ValueError, "Unknown `save_format`"):
model.save(temp_filepath, save_format="invalid")
with self.assertRaisesRegex(ValueError, "Invalid `filepath` argument"):
model.save(temp_filepath, save_format="keras_v3")
temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
with self.assertRaisesRegex(ValueError, "not supported"):
model.save(
temp_filepath, include_optimizer=False, save_format="keras_v3"
)
def test_safe_mode(self):
temp_filepath = os.path.join(self.get_temp_dir(), "unsafe_model.keras")
model = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Lambda(lambda x: x * 2),
]
)
model.save(temp_filepath, save_format="keras_v3")
with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
model = saving_lib.load_model(temp_filepath)
model = saving_lib.load_model(temp_filepath, safe_mode=False)
def test_normalization_kpl(self):
# With adapt
temp_filepath = os.path.join(self.get_temp_dir(), "norm_model.keras")
model = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Normalization(),
]
)
data = np.random.random((3, 3))
model.layers[0].adapt(data)
ref_out = model(data)
model.save(temp_filepath, save_format="keras_v3")
model = saving_lib.load_model(temp_filepath)
out = model(data)
self.assertAllClose(ref_out, out, atol=1e-6)
# Without adapt
model = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Normalization(
mean=np.random.random((3,)), variance=np.random.random((3,))
),
]
)
ref_out = model(data)
model.save(temp_filepath, save_format="keras_v3")
model = saving_lib.load_model(temp_filepath)
out = model(data)
self.assertAllClose(ref_out, out, atol=1e-6)
def test_layer_index_naming(self):
weights_filepath = os.path.join(self.get_temp_dir(), "model.weights.h5")
model = keras.Sequential(
[
keras.layers.Dense(10),
keras.layers.Dense(10),
keras.layers.Dense(10),
keras.layers.Dense(10),
]
)
model.build([1, 20])
model.save_weights(weights_filepath)
with h5py.File(weights_filepath, "r") as f:
self.assertAllEqual(
list(f["layers"].keys()),
["dense", "dense_1", "dense_2", "dense_3"],
)
# This custom class lacks custom object registration (an illustrative sketch
# of the registration pattern follows the class definition below).
class CustomRNN(keras.layers.Layer):
def __init__(self, units):
        super().__init__()
self.units = units
self.projection_1 = keras.layers.Dense(units=units, activation="tanh")
self.projection_2 = keras.layers.Dense(units=units, activation="tanh")
self.classifier = keras.layers.Dense(1)
def call(self, inputs):
outputs = []
state = tf.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = tf.stack(outputs, axis=1)
return self.classifier(features)
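# Illustrative sketch (not part of the original test file): the registration
# pattern that `CustomRNN` is missing. A layer generally becomes saveable once
# it is registered and round-trips its constructor arguments through
# `get_config()`; the package name "illustration" is an arbitrary choice.
@keras.utils.register_keras_serializable(package="illustration")
class RegisteredProjection(keras.layers.Layer):
    def __init__(self, units, **kwargs):
        # Forward **kwargs so `name`/`dtype` from the saved config reach the
        # base class when the default `from_config` calls `cls(**config)`.
        super().__init__(**kwargs)
        self.units = units
        self.projection = keras.layers.Dense(units=units, activation="tanh")
    def call(self, inputs):
        return self.projection(inputs)
    def get_config(self):
        config = super().get_config()
        config.update({"units": self.units})
        return config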
# This class is properly registered and has a `get_config()` method.
# However, since it does not subclass keras.layers.Layer, it lacks a
# `from_config()` method for deserialization (a sketch of such a method
# follows the class definition below).
@keras.utils.register_keras_serializable()
class GrowthFactor:
def __init__(self, factor):
self.factor = factor
def __call__(self, inputs):
return inputs * self.factor
def get_config(self):
return {"factor": self.factor}
@keras.utils.register_keras_serializable(package="Complex")
class FactorLayer(keras.layers.Layer):
def __init__(self, factor):
super().__init__()
self.factor = factor
def call(self, x):
return x * self.factor
def get_config(self):
return {"factor": self.factor}
# This custom model does not explicitly deserialize the layers it includes
# in its `get_config`. Explicit deserialization in a `from_config` override
# or in `__init__` is needed here; otherwise an error is raised at loading
# time (a sketch of such an override follows the class definition below).
@keras.utils.register_keras_serializable(package="Complex")
class ComplexModel(keras.layers.Layer):
def __init__(self, first_layer, second_layer=None, **kwargs):
super().__init__(**kwargs)
self.first_layer = first_layer
if second_layer is not None:
self.second_layer = second_layer
else:
self.second_layer = keras.layers.Dense(8)
def get_config(self):
config = super().get_config()
config.update(
{
"first_layer": self.first_layer,
"second_layer": self.second_layer,
}
)
return config
def call(self, inputs):
return self.first_layer(self.second_layer(inputs))
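# Illustrative sketch (not part of the original file): one way to avoid the
# loading error is a `from_config` override that rebuilds the nested layers
# from their serialized configs before calling the constructor. The exact
# helper used here (`keras.layers.deserialize`) is an assumption made for
# illustration.
@keras.utils.register_keras_serializable(package="illustration")
class ExplicitlyDeserializedComplexModel(ComplexModel):
    @classmethod
    def from_config(cls, config):
        for key in ("first_layer", "second_layer"):
            if key in config:
                config[key] = keras.layers.deserialize(config[key])
        return cls(**config)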
@test_utils.run_v2_only
class SavingV3BattleTest(tf.test.TestCase, parameterized.TestCase):
def test_custom_model_without_registration_error(self):
temp_filepath = os.path.join(
self.get_temp_dir(), "my_custom_model.keras"
)
timesteps = 10
input_dim = 5
batch_size = 16
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = keras.layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN(32)(x)
model = keras.Model(inputs, outputs)
with self.assertRaisesRegex(
TypeError, "is a custom class, please register it"
):
model.save(temp_filepath)
_ = keras.models.load_model(temp_filepath)
def test_custom_object_without_from_config(self):
temp_filepath = os.path.join(
self.get_temp_dir(), "custom_fn_model.keras"
)
inputs = keras.Input(shape=(4, 4))
outputs = keras.layers.Dense(1, activation=GrowthFactor(0.5))(inputs)
model = keras.Model(inputs, outputs)
model.save(temp_filepath)
with self.assertRaisesRegex(
TypeError, "Unable to reconstruct an instance"
):
_ = keras.models.load_model(temp_filepath)
def test_complex_model_without_explicit_deserialization(self):
temp_filepath = os.path.join(self.get_temp_dir(), "complex_model.keras")
inputs = keras.Input((32,))
outputs = ComplexModel(first_layer=FactorLayer(0.5))(inputs)
model = keras.Model(inputs, outputs)
model.save(temp_filepath)
with self.assertRaisesRegex(TypeError, "are explicitly deserialized"):
_ = keras.models.load_model(temp_filepath)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/saving/saving_lib_test.py/0 | {
"file_path": "tf-keras/tf_keras/saving/saving_lib_test.py",
"repo_id": "tf-keras",
"token_count": 15442
} | 194 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sample `get_config` results for testing backwards compatibility."""
# inputs = tf.keras.Input(10)
# x = tf.keras.layers.Dense(10, activation='relu')(inputs)
# outputs = tf.keras.layers.Dense(1)(x)
# model = tf.keras.Model(inputs, outputs)
FUNCTIONAL_DNN = {
"input_layers": [["input_1", 0, 0]],
"layers": [
{
"class_name": "InputLayer",
"config": {
"batch_input_shape": (None, 10),
"dtype": "float32",
"name": "input_1",
"ragged": False,
"sparse": False,
},
"inbound_nodes": [],
"name": "input_1",
},
{
"class_name": "Dense",
"config": {
"activation": "relu",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dtype": "float32",
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "dense",
"trainable": True,
"units": 10,
"use_bias": True,
},
"inbound_nodes": [[["input_1", 0, 0, {}]]],
"name": "dense",
},
{
"class_name": "Dense",
"config": {
"activation": "linear",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dtype": "float32",
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "dense_1",
"trainable": True,
"units": 1,
"use_bias": True,
},
"inbound_nodes": [[["dense", 0, 0, {}]]],
"name": "dense_1",
},
],
"name": "model",
"output_layers": [["dense_1", 0, 0]],
}
# inputs = tf.keras.Input((256, 256, 3))
# x = tf.keras.layers.Conv2D(filters=3, kernel_size=(3, 3))(inputs)
# x = tf.keras.layers.Flatten()(x)
# outputs = tf.keras.layers.Dense(1)(x)
# model = tf.keras.Model(inputs, outputs)
FUNCTIONAL_CNN = {
"input_layers": [["input_2", 0, 0]],
"layers": [
{
"class_name": "InputLayer",
"config": {
"batch_input_shape": (None, 256, 256, 3),
"dtype": "float32",
"name": "input_2",
"ragged": False,
"sparse": False,
},
"inbound_nodes": [],
"name": "input_2",
},
{
"class_name": "Conv2D",
"config": {
"activation": "linear",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"data_format": "channels_last",
"dilation_rate": (1, 1),
"dtype": "float32",
"filters": 3,
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"kernel_size": (3, 3),
"name": "conv2d",
"padding": "valid",
"strides": (1, 1),
"trainable": True,
"use_bias": True,
},
"inbound_nodes": [[["input_2", 0, 0, {}]]],
"name": "conv2d",
},
{
"class_name": "Flatten",
"config": {
"data_format": "channels_last",
"dtype": "float32",
"name": "flatten",
"trainable": True,
},
"inbound_nodes": [[["conv2d", 0, 0, {}]]],
"name": "flatten",
},
{
"class_name": "Dense",
"config": {
"activation": "linear",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dtype": "float32",
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "dense_2",
"trainable": True,
"units": 1,
"use_bias": True,
},
"inbound_nodes": [[["flatten", 0, 0, {}]]],
"name": "dense_2",
},
],
"name": "model_1",
"output_layers": [["dense_2", 0, 0]],
}
# inputs = tf.keras.Input((10, 3))
# x = tf.keras.layers.LSTM(10)(inputs)
# outputs = tf.keras.layers.Dense(1)(x)
# model = tf.keras.Model(inputs, outputs)
FUNCTIONAL_LSTM = {
"input_layers": [["input_5", 0, 0]],
"layers": [
{
"class_name": "InputLayer",
"config": {
"batch_input_shape": (None, 10, 3),
"dtype": "float32",
"name": "input_5",
"ragged": False,
"sparse": False,
},
"inbound_nodes": [],
"name": "input_5",
},
{
"class_name": "LSTM",
"config": {
"activation": "tanh",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dropout": 0.0,
"dtype": "float32",
"go_backwards": False,
"implementation": 2,
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "lstm_2",
"recurrent_activation": "sigmoid",
"recurrent_constraint": None,
"recurrent_dropout": 0.0,
"recurrent_initializer": {
"class_name": "Orthogonal",
"config": {"gain": 1.0, "seed": None},
},
"recurrent_regularizer": None,
"return_sequences": False,
"return_state": False,
"stateful": False,
"time_major": False,
"trainable": True,
"unit_forget_bias": True,
"units": 10,
"unroll": False,
"use_bias": True,
},
"inbound_nodes": [[["input_5", 0, 0, {}]]],
"name": "lstm_2",
},
{
"class_name": "Dense",
"config": {
"activation": "linear",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dtype": "float32",
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "dense_4",
"trainable": True,
"units": 1,
"use_bias": True,
},
"inbound_nodes": [[["lstm_2", 0, 0, {}]]],
"name": "dense_4",
},
],
"name": "model_3",
"output_layers": [["dense_4", 0, 0]],
}
# model = tf.keras.Sequential()
# model.add(tf.keras.layers.Dense(10))
# model.add(tf.keras.layers.Dense(1))
SEQUENTIAL_DNN = {
"layers": [
{
"class_name": "Dense",
"config": {
"activation": "linear",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dtype": "float32",
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "dense_2",
"trainable": True,
"units": 10,
"use_bias": True,
},
},
{
"class_name": "Dense",
"config": {
"activation": "linear",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dtype": "float32",
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "dense_3",
"trainable": True,
"units": 1,
"use_bias": True,
},
},
],
"name": "sequential_1",
}
# model = tf.keras.Sequential()
# model.add(tf.keras.layers.Conv2D(32, (3, 3)))
# model.add(tf.keras.layers.Flatten())
# model.add(tf.keras.layers.Dense(1))
SEQUENTIAL_CNN = {
"layers": [
{
"class_name": "Conv2D",
"config": {
"activation": "linear",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"data_format": "channels_last",
"dilation_rate": (1, 1),
"dtype": "float32",
"filters": 32,
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"kernel_size": (3, 3),
"name": "conv2d_1",
"padding": "valid",
"strides": (1, 1),
"trainable": True,
"use_bias": True,
},
},
{
"class_name": "Flatten",
"config": {
"data_format": "channels_last",
"dtype": "float32",
"name": "flatten_1",
"trainable": True,
},
},
{
"class_name": "Dense",
"config": {
"activation": "linear",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dtype": "float32",
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "dense_6",
"trainable": True,
"units": 1,
"use_bias": True,
},
},
],
"name": "sequential_4",
}
# model = tf.keras.Sequential()
# model.add(tf.keras.layers.LSTM(10))
# model.add(tf.keras.layers.Dense(1))
SEQUENTIAL_LSTM = {
"layers": [
{
"class_name": "LSTM",
"config": {
"activation": "tanh",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dropout": 0.0,
"dtype": "float32",
"go_backwards": False,
"implementation": 2,
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "lstm",
"recurrent_activation": "sigmoid",
"recurrent_constraint": None,
"recurrent_dropout": 0.0,
"recurrent_initializer": {
"class_name": "Orthogonal",
"config": {"gain": 1.0, "seed": None},
},
"recurrent_regularizer": None,
"return_sequences": False,
"return_state": False,
"stateful": False,
"time_major": False,
"trainable": True,
"unit_forget_bias": True,
"units": 10,
"unroll": False,
"use_bias": True,
},
},
{
"class_name": "Dense",
"config": {
"activation": "linear",
"activity_regularizer": None,
"bias_constraint": None,
"bias_initializer": {"class_name": "Zeros", "config": {}},
"bias_regularizer": None,
"dtype": "float32",
"kernel_constraint": None,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {"seed": None},
},
"kernel_regularizer": None,
"name": "dense_4",
"trainable": True,
"units": 1,
"use_bias": True,
},
},
],
"name": "sequential_2",
}
| tf-keras/tf_keras/tests/get_config_samples.py/0 | {
"file_path": "tf-keras/tf_keras/tests/get_config_samples.py",
"repo_id": "tf-keras",
"token_count": 9068
} | 195 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.engine import sequential
from tf_keras.engine import training
from tf_keras.layers import core
from tf_keras.layers.normalization import batch_normalization_v1
from tf_keras.testing_infra import test_combinations
# isort: off
from tensorflow.python.trackable import data_structures
from tensorflow.python.checkpoint import checkpoint as util
class HasList(training.Model):
def __init__(self):
super().__init__()
self.layer_list = tf.__internal__.tracking.wrap([core.Dense(3)])
self.layer_list.append(core.Dense(4))
self.layer_list.extend(
[core.Dense(5), core.Dense(6, kernel_regularizer=tf.reduce_sum)]
)
self.layer_list += [
core.Dense(7, bias_regularizer=tf.reduce_sum),
core.Dense(8),
]
self.layer_list += tf.__internal__.tracking.wrap(
[core.Dense(9)]
) + tf.__internal__.tracking.wrap([core.Dense(10)])
self.layer_list.extend(
tf.__internal__.tracking.wrap(
list([core.Dense(11)]) + [core.Dense(12)]
)
)
self.layers_with_updates = tf.__internal__.tracking.wrap(
[batch_normalization_v1.BatchNormalization()]
)
def call(self, x):
aggregation = 0.0
for l in self.layer_list:
x = l(x)
aggregation += tf.reduce_sum(x)
(bn,) = self.layers_with_updates
return bn(x) / aggregation
class ListTests(test_combinations.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testTracking(self):
with self.test_session():
model = HasList()
output = model(tf.ones([32, 2]))
self.assertAllEqual([32, 12], output.shape)
self.assertEqual(11, len(model.layers))
self.assertEqual(10, len(model.layer_list.layers))
self.assertEqual(
len(model.layers),
len(model.layer_list.layers + model.layers_with_updates),
)
for index in range(10):
self.assertEqual(
3 + index, model.layer_list.layers[index].units
)
children = model._trackable_children()
self.assertLen(children, 2)
self.assertIs(model.layer_list, children["layer_list"])
self.assertIs(
model.layers_with_updates, children["layers_with_updates"]
)
self.assertLen(children["layer_list"]._trackable_children(), 10)
self.evaluate([v.initializer for v in model.variables])
self.evaluate(
model.variables[0].assign([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
)
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(model.variables[0].assign(tf.zeros([2, 3])))
model.load_weights(save_path)
self.assertAllEqual(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
self.evaluate(model.variables[0]),
)
v = tf.Variable(1.0)
model.var_list = [v]
self.assertTrue(any(v is t for t in model.variables))
self.assertTrue(any(v is t for t in model.trainable_variables))
self.assertFalse(any(v is t for t in model.non_trainable_variables))
self.assertTrue(
any(
model.layer_list[0].trainable_weights[0] is t
for t in model.trainable_weights
)
)
def testSubModelTracking(self):
model = training.Model()
model.v = tf.Variable(1.0)
self.assertIn(model.v, model.trainable_weights)
model2 = training.Model()
model2.m = [model]
self.assertIn(model.v, model2.trainable_weights)
def testSubSequentialTracking(self):
class _Subclassed(training.Model):
def __init__(self, wrapped):
super().__init__()
self._wrapped = wrapped
def call(self, x):
return self._wrapped(x)
model = sequential.Sequential()
layer = core.Dense(1)
model.add(layer)
model2 = _Subclassed(model)
model2(tf.ones([1, 2]))
model2.m = [model]
self.assertIn(layer.kernel, model2.trainable_weights)
def testLayerTrackedThroughSequential(self):
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
def ffnet(layer_sizes, name):
ff = sequential.Sequential(name=name)
for i, width in enumerate(layer_sizes):
ff.add(
core.Dense(
width,
activation=(
"relu" if i < len(layer_sizes) - 1 else None
),
)
)
return ff
class MyModel2(training.Model):
def __init__(self, config, name="my_model_2"):
super().__init__(name=name)
self._num_tokens = config.num_tokens
# list of sub-models
self._ffnet = [
ffnet(config.module_layers + (self._num_tokens,), "ff")
]
def null_input(self):
return tf.zeros([1, self._num_tokens], dtype=tf.float32)
def call(self, input_, module_index=None):
return self._ffnet[0](input_)
m2 = MyModel2(AttrDict(num_tokens=5, module_layers=(50, 30)))
# Construct
m2(m2.null_input())
self.assertLen(m2.trainable_variables, 6)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testUpdatesForwarded(self):
model = HasList()
model_input = tf.ones([32, 2])
model(model_input)
if tf.executing_eagerly():
self.assertEqual(0, len(model.updates))
else:
self.assertGreater(len(model.layers_with_updates[0].updates), 0)
self.assertEqual(
set(model.layers_with_updates[0].updates), set(model.updates)
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testLossesForwarded(self):
model = HasList()
model_input = tf.ones([32, 2])
model(model_input)
self.assertEqual(2, len(model.losses))
def testModelContainersCompareEqual(self):
class HasEqualContainers(training.Model):
def __init__(self):
super().__init__()
self.l1 = []
self.l2 = []
model = HasEqualContainers()
first_layer = HasEqualContainers()
model.l1.append(first_layer)
second_layer = HasEqualContainers()
model.l2.append(second_layer)
self.assertEqual([first_layer, second_layer], model.layers)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testTensorConversion(self):
class ListToTensor(training.Model):
def __init__(self):
super().__init__()
self.l = [1.0, 2.0, 3.0]
self.assertAllEqual(
[1.0, 2.0, 3.0], self.evaluate(tf.constant(ListToTensor().l))
)
self.assertAllEqual(
[1.0, 2.0, 3.0],
self.evaluate(tf.raw_ops.Pack(values=ListToTensor().l)),
)
class ListWrapperTest(tf.test.TestCase):
def testLayerCollectionWithExternalMutation(self):
l = []
l_wrapper = tf.__internal__.tracking.wrap(l)
layer = core.Dense(1)
l.append(layer)
self.assertEqual([layer], l_wrapper.layers)
class HasMapping(training.Model):
def __init__(self):
super().__init__()
self.layer_dict = tf.__internal__.tracking.wrap(
dict(output=core.Dense(7))
)
self.layer_dict["norm"] = tf.__internal__.tracking.wrap([])
self.layer_dict["dense"] = tf.__internal__.tracking.wrap([])
self.layer_dict["dense"].extend(
[core.Dense(5), core.Dense(6, kernel_regularizer=tf.reduce_sum)]
)
self.layer_dict["norm"].append(
batch_normalization_v1.BatchNormalization()
)
self.layer_dict["norm"].append(
batch_normalization_v1.BatchNormalization()
)
def call(self, x):
aggregation = 0.0
for norm, dense in zip(
self.layer_dict["norm"], self.layer_dict["dense"]
):
x = norm(dense(x))
aggregation += tf.reduce_sum(x)
return self.layer_dict["output"](x) / aggregation
class MappingTests(test_combinations.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testTracking(self):
with self.test_session():
model = HasMapping()
output = model(tf.ones([32, 2]))
self.assertAllEqual([32, 7], output.shape.as_list())
self.assertEqual(5, len(model.layers))
self.assertEqual(len(model.layers), len(model.layer_dict.layers))
self.assertLen(model._trackable_children(), 1)
self.assertIs(
model.layer_dict, model._trackable_children()["layer_dict"]
)
self.evaluate([v.initializer for v in model.variables])
test_var = model.layer_dict["output"].kernel
self.evaluate(test_var.assign(tf.ones([6, 7])))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(test_var.assign(tf.zeros([6, 7])))
model.load_weights(save_path)
self.assertAllEqual(numpy.ones([6, 7]), self.evaluate(test_var))
def testLayerCollectionWithExternalMutation(self):
d = {}
root = tf.Module()
root.wrapper = d
self.assertEqual([], root.wrapper.layers)
self.assertEqual([], root.wrapper.trainable_weights)
layer1 = core.Dense(1)
layer2 = core.Dense(1)
d["a"] = layer1
d["b"] = layer2
self.assertEqual([layer1, layer2], root.wrapper.layers)
# The layers have still not created variables
self.assertEqual([], root.wrapper.trainable_weights)
def testDictWrapperBadKeys(self):
a = tf.Module()
a.d = {}
a.d[1] = tf.__internal__.tracking.wrap([])
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegex(ValueError, "non-string key"):
model.save_weights(save_path)
def testDictWrapperNoDependency(self):
a = tf.Module()
a.d = data_structures.NoDependency({})
a.d[1] = [3]
self.assertEqual([a], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonStringKeyNotTrackableValue(self):
a = tf.Module()
a.d = {}
a.d["a"] = [3]
a.d[1] = data_structures.NoDependency([3])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonAppendNotTrackable(self):
# Non-append mutations (deleting or overwriting values) are OK when the
# values aren't tracked.
a = tf.Module()
a.d = {}
a.d["a"] = [3]
a.d[1] = 3
a.d[1] = 2
self.assertEqual(2, a.d[1])
del a.d[1]
a.d[2] = data_structures.NoDependency(tf.Module())
second = tf.Module()
a.d[2] = data_structures.NoDependency(second)
self.assertIs(second, a.d[2])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testPopNoSave(self):
model = training.Model()
model.d = {}
model.d["a"] = []
model.d.pop("a")
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegex(ValueError, "Unable to save"):
model.save_weights(save_path)
def testExternalModificationNoSave(self):
model = training.Model()
external_reference = {}
model.d = external_reference
external_reference["a"] = []
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegex(ValueError, "modified outside the wrapper"):
model.save_weights(save_path)
def testOverwriteCanStillSave(self):
model = training.Model()
model.d = {}
model.d["a"] = {}
model.d["a"] = {}
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
def testIter(self):
model = training.Model()
model.d = {1: 3}
model.d[1] = 3
self.assertEqual([1], list(model.d))
new_dict = {}
        # This update() is super tricky. If the dict wrapper subclasses dict,
        # CPython will access its storage directly instead of calling any
        # methods/properties on the object. So the options are either not to
        # subclass dict (in which case update() will call the normal iteration
        # methods, but the object won't pass isinstance checks) or to subclass
        # dict and keep that concrete storage updated (rather than shadowing
        # all of its methods, as ListWrapper does). A standalone illustration
        # of this fast path follows this test class.
new_dict.update(model.d)
self.assertEqual({1: 3}, new_dict)
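# Illustrative sketch (not part of the original test suite): the fast path
# described in `testIter` above. CPython's `dict.update()` copies the concrete
# storage of a real dict argument directly, ignoring an overridden `keys()` on
# a dict subclass. The helper is never called by the tests; it only documents
# that behavior.
def _illustrate_dict_update_fast_path():
    class ShadowingDict(dict):
        def keys(self):
            # Pretend the "real" data lives somewhere other than the concrete
            # dict storage.
            return ["shadow"]
    target = {}
    target.update(ShadowingDict(actual=1))
    # The concrete storage wins: target == {"actual": 1}, not {"shadow": ...}.
    return target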
class HasTuple(training.Model):
def __init__(self):
super().__init__()
self.layer_list = (
core.Dense(3),
core.Dense(4),
core.Dense(5, kernel_regularizer=tf.reduce_sum),
)
self.layers_with_updates = (
batch_normalization_v1.BatchNormalization(),
)
def call(self, x):
aggregation = 0.0
for l in self.layer_list:
x = l(x)
aggregation += tf.reduce_sum(x)
(bn,) = self.layers_with_updates
return bn(x) / aggregation
class TupleTests(test_combinations.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testTracking(self):
with self.test_session():
model = HasTuple()
output = model(tf.ones([32, 2]))
self.assertAllEqual([32, 5], output.shape.as_list())
self.assertLen(model.layers, 4)
self.assertLen(model.layer_list.layers, 3)
self.assertEqual(
len(model.layers),
len(tuple(model.layer_list.layers) + model.layers_with_updates),
)
self.assertEqual(3, model.layer_list.layers[0].units)
self.assertEqual(4, model.layer_list.layers[1].units)
self.assertEqual(5, model.layer_list.layers[2].units)
self.assertLen(model._trackable_children(), 2)
self.assertIs(
model.layer_list, model._trackable_children()["layer_list"]
)
self.assertIs(
model.layers_with_updates,
model._trackable_children()["layers_with_updates"],
)
self.assertLen(model.layer_list._trackable_children(), 3)
self.evaluate([v.initializer for v in model.variables])
self.evaluate(
model.variables[0].assign([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
)
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(model.variables[0].assign(tf.zeros([2, 3])))
model.load_weights(save_path)
self.assertAllEqual(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
self.evaluate(model.variables[0]),
)
v = tf.Variable(1.0)
model.var_list = (v,)
self.assertIn(id(v), [id(obj) for obj in model.variables])
self.assertIn(id(v), [id(obj) for obj in model.trainable_variables])
self.assertNotIn(
id(v), [id(obj) for obj in model.non_trainable_variables]
)
self.assertIn(
id(model.layer_list[0].trainable_weights[0]),
[id(obj) for obj in model.trainable_weights],
)
@parameterized.named_parameters(
("Module", tf.Module),
("Model", training.Model),
)
def testSubModelTracking(self, module_subclass):
model = module_subclass()
model.v = tf.Variable(1.0)
self.assertIn(model.v, model.trainable_variables)
model2 = module_subclass()
model2.m = (model,)
self.assertIn(model.v, model2.trainable_variables)
def testSubSequentialTracking(self):
class _Subclassed(training.Model):
def __init__(self, wrapped):
super().__init__()
self._wrapped = wrapped
def call(self, x):
return self._wrapped(x)
model = sequential.Sequential()
layer = core.Dense(1)
model.add(layer)
model2 = _Subclassed(model)
model2(tf.ones([1, 2]))
model2.m = (model,)
self.assertIn(layer.kernel, model2.trainable_weights)
def testUpdatesForwarded(self):
with tf.Graph().as_default():
model = HasTuple()
model_input = tf.ones([32, 2])
model(model_input)
self.assertNotEmpty(model.layers_with_updates[0].updates)
self.assertEqual(
set(model.layers_with_updates[0].updates), set(model.updates)
)
model = HasTuple()
model_input = tf.ones([32, 2])
model(model_input)
self.assertEmpty(model.updates)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testLossesForwarded(self):
model = HasTuple()
model_input = tf.ones([32, 2])
model(model_input)
self.assertLen(model.losses, 1)
def testModelContainersCompareEqual(self):
class HasEqualContainers(training.Model):
def __init__(self):
super().__init__()
self.l1 = ()
self.l2 = ()
model = HasEqualContainers()
first_layer = HasEqualContainers()
model.l1 = (first_layer,)
second_layer = HasEqualContainers()
model.l2 = (second_layer,)
self.assertEqual((first_layer,), model.l1)
d = {model.l1: 1, model.l2: 2}
self.assertEqual(1, d[model.l1])
self.assertEqual(1, d[(first_layer,)])
self.assertEqual(2, d[model.l2])
self.assertEqual(2, d[(second_layer,)])
self.assertEqual([first_layer, second_layer], model.layers)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testTensorConversion(self):
class TupleToTensor(training.Model):
def __init__(self):
super().__init__()
self.l = (1.0, 2.0, 3.0)
self.assertAllEqual(
(1.0, 2.0, 3.0), self.evaluate(tf.constant(TupleToTensor().l))
)
self.assertAllEqual(
(1.0, 2.0, 3.0),
self.evaluate(tf.raw_ops.Pack(values=TupleToTensor().l)),
)
class InterfaceTests(test_combinations.TestCase):
def testNoDependency(self):
root = tf.Module()
hasdep = tf.Module()
root.hasdep = hasdep
nodep = tf.Module()
root.nodep = data_structures.NoDependency(nodep)
self.assertLen(root._trackable_children(), 1)
self.assertIs(root._trackable_children()["hasdep"], root.hasdep)
self.assertIs(root.hasdep, hasdep)
self.assertIs(root.nodep, nodep)
class NoDependencyModel(training.Model):
@tf.__internal__.tracking.no_automatic_dependency_tracking
def __init__(self):
super().__init__()
self.a = []
self.b = tf.Module()
nodeps = NoDependencyModel()
self.assertEqual([nodeps], util.list_objects(nodeps))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testDictionariesBasic(self):
a = training.Model()
b = training.Model()
a.attribute = {"b": b}
c = training.Model()
a.attribute["c"] = []
a.attribute["c"].append(c)
a_deps = util.list_objects(a)
self.assertIn(b, a_deps)
self.assertIn(c, a_deps)
self.assertIs(b, a.attribute["b"])
self.assertEqual({"b", "c"}, a.attribute._trackable_children().keys())
self.assertEqual([b, c], a.layers)
self.assertEqual([b, c], a.attribute.layers)
self.assertEqual([c], a.attribute["c"].layers)
checkpoint = tf.train.Checkpoint(a=a)
save_path = checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
with self.cached_session():
checkpoint.restore(
save_path
).assert_consumed().initialize_or_restore()
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testNoDepList(self):
a = training.Model()
a.l1 = data_structures.NoDependency([])
a.l1.insert(1, 0)
self.assertIsInstance(a.l1, list)
checkpoint = tf.train.Checkpoint(a=a)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
a.l2 = []
a.l2.insert(1, tf.Module())
with self.assertRaisesRegex(ValueError, "A list element was replaced"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
tf.test.main()
| tf-keras/tf_keras/tests/tracking_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/tracking_test.py",
"repo_id": "tf-keras",
"token_count": 11366
} | 196 |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for audio_dataset when tfio is available."""
import os
import shutil
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import audio_dataset
@test_utils.run_v2_only
class AudioDatasetFromDirectoryWithTfioTest(test_combinations.TestCase):
def _get_audio_samples(self, count=16, different_sequence_lengths=False):
sequence_length = 30
num_channels = 1
audio_samples = []
for _ in range(count):
if different_sequence_lengths:
random_sequence_length = np.random.randint(
10, sequence_length + 1
)
audio = np.random.random((random_sequence_length, num_channels))
else:
audio = np.random.random((sequence_length, num_channels))
audio_samples.append(tf.audio.encode_wav(audio, 1000))
return audio_samples
def _prepare_directory(
self,
num_classes=2,
nested_dirs=False,
count=16,
different_sequence_lengths=False,
):
# Get a unique temp directory
temp_dir = os.path.join(
self.get_temp_dir(), str(np.random.randint(1e6))
)
os.mkdir(temp_dir)
self.addCleanup(shutil.rmtree, temp_dir)
# Generate paths to class subdirectories
paths = []
for class_index in range(num_classes):
class_directory = f"class_{class_index}"
if nested_dirs:
class_paths = [
class_directory,
os.path.join(class_directory, "subfolder_1"),
os.path.join(class_directory, "subfolder_2"),
os.path.join(
class_directory, "subfolder_1", "sub-subfolder"
),
]
else:
class_paths = [class_directory]
for path in class_paths:
os.mkdir(os.path.join(temp_dir, path))
paths += class_paths
# Save audio samples to the paths
i = 0
for audio in self._get_audio_samples(
count=count, different_sequence_lengths=different_sequence_lengths
):
path = paths[i % len(paths)]
ext = "wav"
filename = os.path.join(path, f"audio_{i}.{ext}")
with open(os.path.join(temp_dir, filename), "wb") as f:
f.write(audio.numpy())
i += 1
return temp_dir
def test_audio_dataset_from_directory_standalone_with_resampling(self):
        # Test retrieving audio samples without labels from a directory and
        # its subdirs, where we double the sampling rate.
        # Save a few extra audio files in the parent directory.
directory = self._prepare_directory(count=7, num_classes=2)
for i, audio in enumerate(self._get_audio_samples(3)):
filename = f"audio_{i}.wav"
with open(os.path.join(directory, filename), "wb") as f:
f.write(audio.numpy())
dataset = audio_dataset.audio_dataset_from_directory(
directory,
batch_size=5,
output_sequence_length=30,
labels=None,
sampling_rate=2000, # Twice the original sample rate.
)
batch = next(iter(dataset))
# We return plain audio. Expect twice as many samples now.
self.assertEqual(batch.shape, (5, 60, 1))
self.assertEqual(batch.dtype.name, "float32")
# Count samples
batch_count = 0
sample_count = 0
for batch in dataset:
batch_count += 1
sample_count += batch.shape[0]
self.assertEqual(batch_count, 2)
self.assertEqual(sample_count, 10)
if __name__ == "__main__":
try:
import tensorflow_io # noqa: F401
# Only run these tests if tensorflow_io is installed.
tf.test.main()
except ImportError:
pass
| tf-keras/tf_keras/utils/audio_dataset_with_tfio_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/audio_dataset_with_tfio_test.py",
"repo_id": "tf-keras",
"token_count": 2114
} | 197 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image_dataset."""
import os
import shutil
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import image_dataset
from tf_keras.utils import image_utils
try:
import PIL
except ImportError:
PIL = None
@test_utils.run_v2_only
class ImageDatasetFromDirectoryTest(test_combinations.TestCase):
def _get_images(self, count=16, color_mode="rgb"):
width = height = 24
imgs = []
for _ in range(count):
if color_mode == "grayscale":
img = np.random.randint(0, 256, size=(height, width, 1))
elif color_mode == "rgba":
img = np.random.randint(0, 256, size=(height, width, 4))
else:
img = np.random.randint(0, 256, size=(height, width, 3))
img = image_utils.array_to_img(img)
imgs.append(img)
return imgs
def _prepare_directory(
self,
num_classes=2,
grayscale=False,
nested_dirs=False,
color_mode="rgb",
count=16,
):
# Get a unique temp directory
temp_dir = os.path.join(
self.get_temp_dir(), str(np.random.randint(1e6))
)
os.mkdir(temp_dir)
self.addCleanup(shutil.rmtree, temp_dir)
# Generate paths to class subdirectories
paths = []
for class_index in range(num_classes):
class_directory = f"class_{class_index}"
if nested_dirs:
class_paths = [
class_directory,
os.path.join(class_directory, "subfolder_1"),
os.path.join(class_directory, "subfolder_2"),
os.path.join(
class_directory, "subfolder_1", "sub-subfolder"
),
]
else:
class_paths = [class_directory]
for path in class_paths:
os.mkdir(os.path.join(temp_dir, path))
paths += class_paths
# Save images to the paths
i = 0
for img in self._get_images(color_mode=color_mode, count=count):
path = paths[i % len(paths)]
if color_mode == "rgb":
ext = "jpg"
else:
ext = "png"
filename = os.path.join(path, f"image_{i}.{ext}")
img.save(os.path.join(temp_dir, filename))
i += 1
return temp_dir
def test_image_dataset_from_directory_standalone(self):
# Test retrieving images without labels from a directory and its
# subdirs.
if PIL is None:
return # Skip test if PIL is not available.
# Save a few extra images in the parent directory.
directory = self._prepare_directory(count=7, num_classes=2)
for i, img in enumerate(self._get_images(3)):
filename = f"image_{i}.jpg"
img.save(os.path.join(directory, filename))
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=5, image_size=(18, 18), labels=None
)
batch = next(iter(dataset))
# We return plain images
self.assertEqual(batch.shape, (5, 18, 18, 3))
self.assertEqual(batch.dtype.name, "float32")
# Count samples
batch_count = 0
sample_count = 0
for batch in dataset:
batch_count += 1
sample_count += batch.shape[0]
self.assertEqual(batch_count, 2)
self.assertEqual(sample_count, 10)
def test_image_dataset_from_directory_binary(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=2)
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode="int"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode="binary"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 1))
self.assertEqual(batch[1].dtype.name, "float32")
dataset = image_dataset.image_dataset_from_directory(
directory,
batch_size=8,
image_size=(18, 18),
label_mode="categorical",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 2))
self.assertEqual(batch[1].dtype.name, "float32")
def test_static_shape_in_graph(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=2)
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode="int"
)
test_case = self
@tf.function
def symbolic_fn(ds):
for x, _ in ds.take(1):
test_case.assertListEqual(x.shape.as_list(), [None, 18, 18, 3])
symbolic_fn(dataset)
def test_sample_count(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=4, count=15)
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode=None
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 15)
def test_image_dataset_from_directory_multiclass(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=4, count=15)
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode=None
)
batch = next(iter(dataset))
self.assertEqual(batch.shape, (8, 18, 18, 3))
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode=None
)
sample_count = 0
iterator = iter(dataset)
for batch in dataset:
sample_count += next(iterator).shape[0]
self.assertEqual(sample_count, 15)
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode="int"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = image_dataset.image_dataset_from_directory(
directory,
batch_size=8,
image_size=(18, 18),
label_mode="categorical",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 4))
self.assertEqual(batch[1].dtype.name, "float32")
def test_image_dataset_from_directory_color_modes(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=4, color_mode="rgba")
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), color_mode="rgba"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 4))
self.assertEqual(batch[0].dtype.name, "float32")
directory = self._prepare_directory(
num_classes=4, color_mode="grayscale"
)
dataset = image_dataset.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), color_mode="grayscale"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 1))
self.assertEqual(batch[0].dtype.name, "float32")
def test_image_dataset_from_directory_validation_split(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=2, count=10)
dataset = image_dataset.image_dataset_from_directory(
directory,
batch_size=10,
image_size=(18, 18),
validation_split=0.2,
subset="training",
seed=1337,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
dataset = image_dataset.image_dataset_from_directory(
directory,
batch_size=10,
image_size=(18, 18),
validation_split=0.2,
subset="validation",
seed=1337,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (2, 18, 18, 3))
train_dataset, val_dataset = image_dataset.image_dataset_from_directory(
directory,
batch_size=10,
image_size=(18, 18),
validation_split=0.2,
subset="both",
seed=1337,
)
batch = next(iter(train_dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
batch = next(iter(val_dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (2, 18, 18, 3))
def test_image_dataset_from_directory_manual_labels(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=2, count=2)
dataset = image_dataset.image_dataset_from_directory(
directory,
batch_size=8,
image_size=(18, 18),
labels=[0, 1],
shuffle=False,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertAllClose(batch[1], [0, 1])
def test_image_dataset_from_directory_follow_links(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(
num_classes=2, count=25, nested_dirs=True
)
dataset = image_dataset.image_dataset_from_directory(
directory,
batch_size=8,
image_size=(18, 18),
label_mode=None,
follow_links=True,
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 25)
def test_image_dataset_from_directory_no_images(self):
directory = self._prepare_directory(num_classes=2, count=0)
with self.assertRaisesRegex(ValueError, "No images found."):
_ = image_dataset.image_dataset_from_directory(directory)
def test_image_dataset_from_directory_crop_to_aspect_ratio(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=2, count=5)
dataset = image_dataset.image_dataset_from_directory(
directory,
batch_size=5,
image_size=(18, 18),
crop_to_aspect_ratio=True,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (5, 18, 18, 3))
def test_image_dataset_from_directory_errors(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=3, count=5)
with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
_ = image_dataset.image_dataset_from_directory(
directory, labels="other"
)
with self.assertRaisesRegex(
ValueError, "`label_mode` argument must be"
):
_ = image_dataset.image_dataset_from_directory(
directory, label_mode="other"
)
with self.assertRaisesRegex(ValueError, "`color_mode` must be one of"):
_ = image_dataset.image_dataset_from_directory(
directory, color_mode="other"
)
with self.assertRaisesRegex(
ValueError, 'only pass `class_names` if `labels="inferred"`'
):
_ = image_dataset.image_dataset_from_directory(
directory,
labels=[0, 0, 1, 1, 1],
class_names=["class_0", "class_1", "class_2"],
)
with self.assertRaisesRegex(
ValueError,
"Expected the lengths of `labels` to match the number of files",
):
_ = image_dataset.image_dataset_from_directory(
directory, labels=[0, 0, 1, 1]
)
with self.assertRaisesRegex(
ValueError, "`class_names` passed did not match"
):
_ = image_dataset.image_dataset_from_directory(
directory, class_names=["class_0", "class_2"]
)
with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
_ = image_dataset.image_dataset_from_directory(
directory, label_mode="binary"
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be between 0 and 1"
):
_ = image_dataset.image_dataset_from_directory(
directory, validation_split=2
)
with self.assertRaisesRegex(
ValueError,
'`subset` must be either "training", "validation" or "both"',
):
_ = image_dataset.image_dataset_from_directory(
directory, validation_split=0.2, subset="other"
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be set"
):
_ = image_dataset.image_dataset_from_directory(
directory, validation_split=0, subset="training"
)
with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
_ = image_dataset.image_dataset_from_directory(
directory, validation_split=0.2, subset="training"
)
def test_image_dataset_from_directory_not_batched(self):
if PIL is None:
return # Skip test if PIL is not available.
directory = self._prepare_directory(num_classes=2, count=2)
dataset = image_dataset.image_dataset_from_directory(
directory,
batch_size=None,
image_size=(18, 18),
label_mode=None,
shuffle=False,
)
sample = next(iter(dataset))
self.assertEqual(len(sample.shape), 3)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/image_dataset_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/image_dataset_test.py",
"repo_id": "tf-keras",
"token_count": 7928
} | 198 |
FROM mcr.microsoft.com/vscode/devcontainers/python:3.10
COPY setup.sh /setup.sh
| autokeras/.devcontainer/Dockerfile/0 | {
"file_path": "autokeras/.devcontainer/Dockerfile",
"repo_id": "autokeras",
"token_count": 34
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import tensorflow as tf
from autokeras.engine import adapter as adapter_module
class HeadAdapter(adapter_module.Adapter):
def __init__(self, name, **kwargs):
super().__init__(**kwargs)
self.name = name
def check(self, dataset):
supported_types = (tf.data.Dataset, np.ndarray, pd.DataFrame, pd.Series)
if not isinstance(dataset, supported_types):
raise TypeError(
f"Expect the target data of {self.name} to be tf.data.Dataset,"
f" np.ndarray, pd.DataFrame or pd.Series, "
f"but got {type(dataset)}."
)
def convert_to_dataset(self, dataset, batch_size):
if isinstance(dataset, pd.DataFrame):
dataset = dataset.values
if isinstance(dataset, pd.Series):
dataset = dataset.values
return super().convert_to_dataset(dataset, batch_size)
class ClassificationAdapter(HeadAdapter):
pass
class RegressionAdapter(HeadAdapter):
pass
class SegmentationHeadAdapter(ClassificationAdapter):
pass
| autokeras/autokeras/adapters/output_adapters.py/0 | {
"file_path": "autokeras/autokeras/adapters/output_adapters.py",
"repo_id": "autokeras",
"token_count": 622
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import tensorflow as tf
from tensorflow import nest
from tensorflow.keras import layers
from autokeras.engine import block as block_module
from autokeras.utils import layer_utils
from autokeras.utils import utils
REDUCTION_TYPE = "reduction_type"
FLATTEN = "flatten"
GLOBAL_MAX = "global_max"
GLOBAL_AVG = "global_avg"
def shape_compatible(shape1, shape2):
if len(shape1) != len(shape2):
return False
# TODO: If they can be the same after passing through any layer,
# they are compatible. e.g. (32, 32, 3), (16, 16, 2) are compatible
return shape1[:-1] == shape2[:-1]
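# For example (illustration only): shapes (32, 32, 3) and (32, 32, 16) are
# treated as compatible since they only differ in the last (channel) dimension,
# while (32, 32, 3) and (16, 16, 3) are not.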
class Merge(block_module.Block):
"""Merge block to merge multiple nodes into one.
# Arguments
merge_type: String. 'add' or 'concatenate'. If left unspecified, it will
be tuned automatically.
"""
def __init__(self, merge_type: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
self.merge_type = merge_type
def get_config(self):
config = super().get_config()
config.update({"merge_type": self.merge_type})
return config
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
if len(inputs) == 1:
return inputs
if not all(
[
shape_compatible(input_node.shape, inputs[0].shape)
for input_node in inputs
]
):
inputs = [Flatten().build(hp, input_node) for input_node in inputs]
# TODO: Even inputs have different shape[-1], they can still be Add(
# ) after another layer. Check if the inputs are all of the same
# shape
if self._inputs_same_shape(inputs):
merge_type = self.merge_type or hp.Choice(
"merge_type", ["add", "concatenate"], default="add"
)
if merge_type == "add":
return layers.Add()(inputs)
return layers.Concatenate()(inputs)
def _inputs_same_shape(self, inputs):
return all(
input_node.shape.as_list() == inputs[0].shape.as_list()
for input_node in inputs
)
class Flatten(block_module.Block):
"""Flatten the input tensor with Keras Flatten layer."""
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
if len(input_node.shape) > 2:
return layers.Flatten()(input_node)
return input_node
class Reduction(block_module.Block):
def __init__(self, reduction_type: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
self.reduction_type = reduction_type
def get_config(self):
config = super().get_config()
config.update({REDUCTION_TYPE: self.reduction_type})
return config
def global_max(self, input_node):
raise NotImplementedError
def global_avg(self, input_node):
raise NotImplementedError
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
output_node = input_node
# No need to reduce.
if len(output_node.shape) <= 2:
return output_node
if self.reduction_type is not None:
return self._build_block(hp, output_node, self.reduction_type)
reduction_type = hp.Choice(
REDUCTION_TYPE, [FLATTEN, GLOBAL_MAX, GLOBAL_AVG]
)
with hp.conditional_scope(REDUCTION_TYPE, [reduction_type]):
return self._build_block(hp, output_node, reduction_type)
def _build_block(self, hp, output_node, reduction_type):
if reduction_type == FLATTEN:
output_node = Flatten().build(hp, output_node)
elif reduction_type == GLOBAL_MAX:
output_node = self.global_max(output_node)
elif reduction_type == GLOBAL_AVG:
output_node = self.global_avg(output_node)
return output_node
class SpatialReduction(Reduction):
"""Reduce the dimension of a spatial tensor, e.g. image, to a vector.
# Arguments
reduction_type: String. 'flatten', 'global_max' or 'global_avg'.
If left unspecified, it will be tuned automatically.
"""
def __init__(self, reduction_type: Optional[str] = None, **kwargs):
super().__init__(reduction_type, **kwargs)
def global_max(self, input_node):
return layer_utils.get_global_max_pooling(input_node.shape)()(
input_node
)
def global_avg(self, input_node):
return layer_utils.get_global_average_pooling(input_node.shape)()(
input_node
)
class TemporalReduction(Reduction):
"""Reduce the dim of a temporal tensor, e.g. output of RNN, to a vector.
# Arguments
reduction_type: String. 'flatten', 'global_max' or 'global_avg'. If left
unspecified, it will be tuned automatically.
"""
def __init__(self, reduction_type: Optional[str] = None, **kwargs):
super().__init__(reduction_type, **kwargs)
def global_max(self, input_node):
return tf.math.reduce_max(input_node, axis=-2)
def global_avg(self, input_node):
return tf.math.reduce_mean(input_node, axis=-2)
| autokeras/autokeras/blocks/reduction.py/0 | {
"file_path": "autokeras/autokeras/blocks/reduction.py",
"repo_id": "autokeras",
"token_count": 2448
} | 2 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from typing import List
from typing import Optional
import tensorflow as tf
from tensorflow import keras
from tensorflow import nest
from autokeras import adapters
from autokeras import analysers
from autokeras import blocks
from autokeras import hyper_preprocessors as hpps_module
from autokeras import keras_layers
from autokeras import preprocessors
from autokeras.engine import io_hypermodel
from autokeras.engine import node as node_module
from autokeras.utils import utils
def serialize(obj):
return utils.serialize_keras_object(obj)
def deserialize(config, custom_objects=None):
return utils.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="nodes",
)
class Input(node_module.Node, io_hypermodel.IOHyperModel):
"""Input node for tensor data.
The data should be numpy.ndarray or tf.data.Dataset.
# Arguments
name: String. The name of the input node. If unspecified, it will be set
automatically with the class name.
"""
def __init__(self, name: Optional[str] = None, **kwargs):
super().__init__(name=name, **kwargs)
def build_node(self, hp):
return keras.Input(shape=self.shape, dtype=self.dtype)
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
return keras_layers.CastToFloat32()(input_node)
def get_adapter(self):
return adapters.InputAdapter()
def get_analyser(self):
return analysers.InputAnalyser()
def get_block(self):
return blocks.GeneralBlock()
def get_hyper_preprocessors(self):
return []
class ImageInput(Input):
"""Input node for image data.
The input data should be numpy.ndarray or tf.data.Dataset. The shape of the
    data should be (samples, width, height) or (samples, width,
height, channels).
# Arguments
name: String. The name of the input node. If unspecified, it will be set
automatically with the class name.
"""
def __init__(self, name: Optional[str] = None, **kwargs):
super().__init__(name=name, **kwargs)
def build(self, hp, inputs=None):
inputs = super().build(hp, inputs)
output_node = nest.flatten(inputs)[0]
if len(output_node.shape) == 3:
output_node = keras_layers.ExpandLastDim()(output_node)
return output_node
def get_adapter(self):
return adapters.ImageAdapter()
def get_analyser(self):
return analysers.ImageAnalyser()
def get_block(self):
return blocks.ImageBlock()
class TextInput(Input):
"""Input node for text data.
The input data should be numpy.ndarray or tf.data.Dataset. The data should
be one-dimensional. Each element in the data should be a string which is a
full sentence.
# Arguments
name: String. The name of the input node. If unspecified, it will be set
automatically with the class name.
"""
def __init__(self, name: Optional[str] = None, **kwargs):
super().__init__(name=name, **kwargs)
def build_node(self, hp):
return keras.Input(shape=self.shape, dtype=tf.string)
def build(self, hp, inputs=None):
output_node = nest.flatten(inputs)[0]
if len(output_node.shape) == 1:
output_node = keras_layers.ExpandLastDim()(output_node)
return output_node
def get_adapter(self):
return adapters.TextAdapter()
def get_analyser(self):
return analysers.TextAnalyser()
def get_block(self):
return blocks.TextBlock()
class StructuredDataInput(Input):
"""Input node for structured data.
The input data should be numpy.ndarray, pandas.DataFrame or
tensorflow.Dataset. The data should be two-dimensional with numerical or
categorical values.
# Arguments
column_names: A list of strings specifying the names of the columns. The
length of the list should be equal to the number of columns of the
data. Defaults to None. If None, it will be obtained from the
header of the csv file or the pandas.DataFrame.
column_types: Dict. The keys are the column names. The values should
either be 'numerical' or 'categorical', indicating the type of that
column. Defaults to None. If not None, the column_names need to be
specified. If None, it will be inferred from the data. A column will
be judged as categorical if the number of different values is less
than 5% of the number of instances.
name: String. The name of the input node. If unspecified, it will be set
automatically with the class name.
"""
def __init__(
self,
column_names: Optional[List[str]] = None,
column_types: Optional[Dict[str, str]] = None,
name: Optional[str] = None,
**kwargs
):
super().__init__(name=name, **kwargs)
self.column_names = column_names
self.column_types = column_types
def get_config(self):
config = super().get_config()
config.update(
{
"column_names": self.column_names,
"column_types": self.column_types,
}
)
return config
def get_adapter(self):
return adapters.StructuredDataAdapter()
def get_analyser(self):
return analysers.StructuredDataAnalyser(
self.column_names, self.column_types
)
def get_block(self):
return blocks.StructuredDataBlock()
def config_from_analyser(self, analyser):
super().config_from_analyser(analyser)
self.column_names = analyser.column_names
# Analyser keeps the specified ones and infer the missing ones.
self.column_types = analyser.column_types
def build(self, hp, inputs=None):
return inputs
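# A minimal usage sketch (illustration only; the column names and types below
# are made up and not part of the original code):
#     input_node = StructuredDataInput(
#         column_names=["age", "country"],
#         column_types={"age": "numerical", "country": "categorical"},
#     )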
class TimeseriesInput(StructuredDataInput):
"""Input node for timeseries data.
# Arguments
lookback: Int. The range of history steps to consider for each
prediction. For example, if lookback=n, the data in the range of [i
- n, i - 1] is used to predict the value of step i. If unspecified,
it will be tuned automatically.
column_names: A list of strings specifying the names of the columns. The
length of the list should be equal to the number of columns of the
data. Defaults to None. If None, it will be obtained from the
header of the csv file or the pandas.DataFrame.
column_types: Dict. The keys are the column names. The values should
either be 'numerical' or 'categorical', indicating the type of that
column. Defaults to None. If not None, the column_names need to be
specified. If None, it will be inferred from the data. A column
will be judged as categorical if the number of different values is
less than 5% of the number of instances.
name: String. The name of the input node. If unspecified, it will be set
automatically with the class name.
"""
def __init__(
self,
lookback: Optional[int] = None,
column_names: Optional[List[str]] = None,
column_types: Optional[Dict[str, str]] = None,
name: Optional[str] = None,
**kwargs
):
super().__init__(
column_names=column_names,
column_types=column_types,
name=name,
**kwargs
)
self.lookback = lookback
def get_config(self):
config = super().get_config()
config.update({"lookback": self.lookback})
return config
def get_adapter(self):
return adapters.TimeseriesAdapter()
def get_analyser(self):
return analysers.TimeseriesAnalyser(
column_names=self.column_names, column_types=self.column_types
)
def get_block(self):
return blocks.TimeseriesBlock()
def config_from_analyser(self, analyser):
super().config_from_analyser(analyser)
def get_hyper_preprocessors(self):
hyper_preprocessors = []
if self.column_names:
hyper_preprocessors.append(
hpps_module.DefaultHyperPreprocessor(
preprocessors.CategoricalToNumericalPreprocessor(
column_names=self.column_names,
column_types=self.column_types,
)
)
)
hyper_preprocessors.append(
hpps_module.DefaultHyperPreprocessor(
preprocessors.SlidingWindow(
lookback=self.lookback, batch_size=self.batch_size
)
)
)
return hyper_preprocessors
| autokeras/autokeras/nodes.py/0 | {
"file_path": "autokeras/autokeras/nodes.py",
"repo_id": "autokeras",
"token_count": 3771
} | 3 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import nest
from autokeras import graph
from autokeras.prototype import graph_state
from autokeras.prototype import pipeline as pipeline_module
class Graph(graph.Graph):
def __init__(self, inputs=None, outputs=None, **kwargs):
super().__init__(inputs, outputs, **kwargs)
def build(self, hp):
"""Build the HyperModel into a Keras Model."""
state = graph_state.init_state()
self.compile()
keras_nodes = {}
# keras_input_nodes = []
# Preparing the inputs of the pipeline.
# for node in self.inputs:
# node_id = self._node_to_id[node]
# input_node = node.build_node(hp)
# output_node = node.build(hp, input_node)
# keras_input_nodes.append(input_node)
# keras_nodes[node_id] = output_node
# Connecting through the blocks.
# Don't check the block type to deal with the output since the block has
# sub blocks of different types. The difference should all be handled in
# block._build_wrapper().
for block in self.blocks:
temp_inputs = [
keras_nodes[self._node_to_id[input_node]]
for input_node in block.inputs
]
outputs = block.build(hp, inputs=temp_inputs)
outputs = nest.flatten(outputs)
for output_node, real_output_node in zip(block.outputs, outputs):
keras_nodes[self._node_to_id[output_node]] = real_output_node
for output_node in self.outputs:
node = keras_nodes[self._node_to_id[output_node]]
state.register_outputs(node)
model = state.build_model()
self._compile_keras_model(hp, model)
pipeline = pipeline_module.Pipeline.from_state(graph_state.get_state())
return pipeline
| autokeras/autokeras/prototype/graph.py/0 | {
"file_path": "autokeras/autokeras/prototype/graph.py",
"repo_id": "autokeras",
"token_count": 961
} | 4 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras import layers
def get_global_average_pooling(shape):
return [
layers.GlobalAveragePooling1D,
layers.GlobalAveragePooling2D,
layers.GlobalAveragePooling3D,
][len(shape) - 3]
def get_global_max_pooling(shape):
return [
layers.GlobalMaxPool1D,
layers.GlobalMaxPool2D,
layers.GlobalMaxPool3D,
][len(shape) - 3]
def get_max_pooling(shape):
return [
layers.MaxPool1D,
layers.MaxPool2D,
layers.MaxPool3D,
][len(shape) - 3]
def get_conv(shape):
return [layers.Conv1D, layers.Conv2D, layers.Conv3D][len(shape) - 3]
def get_sep_conv(shape):
return [
layers.SeparableConv1D,
layers.SeparableConv2D,
layers.Conv3D,
][len(shape) - 3]
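# These helpers dispatch on tensor rank: `len(shape) - 3` maps a
# (batch, steps, channels) shape to the 1D variant, a (batch, height, width,
# channels) shape to the 2D variant, and so on. For example,
# get_conv((None, 32, 32, 3)) returns layers.Conv2D. Note that Keras provides
# no SeparableConv3D layer, which is why get_sep_conv falls back to a regular
# Conv3D for 5D inputs.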
| autokeras/autokeras/utils/layer_utils.py/0 | {
"file_path": "autokeras/autokeras/utils/layer_utils.py",
"repo_id": "autokeras",
"token_count": 527
} | 5 |
# Using Autokeras via Docker
This directory contains `Dockerfile` to make it easy to get up and running with
Autokeras via [Docker](http://www.docker.com/).
## Installing Docker
General installation instructions are
[on the Docker site](https://docs.docker.com/installation/), but we give some
quick links here:
* [OSX](https://docs.docker.com/installation/mac/): [docker toolbox](https://www.docker.com/toolbox)
* [ubuntu](https://docs.docker.com/installation/ubuntulinux/)
## Running the container
We are using `Makefile` to simplify docker commands within make commands.
Build the container and start a Jupyter Notebook
$ make notebook
Build the container and start an iPython shell
$ make ipython
Build the container and start a bash
$ make bash
For GPU support install NVIDIA drivers (ideally latest) and
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker). Run using
$ make notebook GPU=all # or [ipython, bash]
Mount a volume for external data sets
$ make DATA=~/mydata
Prints all make tasks
$ make help
Note: If you have problems running nvidia-docker, you may fall back to the legacy approach
below, but it is not recommended. If you find a bug in nvidia-docker, please report it there
and keep using nvidia-docker as described above.
$ export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | xargs -I{} echo '-v {}:{}')
$ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
$ docker run -it -p 8888:8888 $CUDA_SO $DEVICES gcr.io/tensorflow/tensorflow:latest-gpu
| autokeras/docker/README.md/0 | {
"file_path": "autokeras/docker/README.md",
"repo_id": "autokeras",
"token_count": 490
} | 6 |
<jupyter_start><jupyter_code>!pip install autokeras
import pandas as pd
import tensorflow as tf
import autokeras as ak<jupyter_output><empty_output><jupyter_text>To make this tutorial easy to follow, we use the UCI Airquality dataset, and try to forecast the AH value at the different timesteps. Some basic preprocessing has also been performed on the dataset as it required cleanup. A Simple Example: The first step is to prepare your data. Here we use the [UCI Airquality dataset](https://archive.ics.uci.edu/ml/datasets/Air+Quality) as an example.<jupyter_code>dataset = tf.keras.utils.get_file(
fname="AirQualityUCI.csv",
origin="https://archive.ics.uci.edu/ml/machine-learning-databases/00360/"
"AirQualityUCI.zip",
extract=True,
)
dataset = pd.read_csv(dataset, sep=";")
dataset = dataset[dataset.columns[:-2]]
dataset = dataset.dropna()
dataset = dataset.replace(",", ".", regex=True)
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]
data_x = data_train[
[
"CO(GT)",
"PT08.S1(CO)",
"NMHC(GT)",
"C6H6(GT)",
"PT08.S2(NMHC)",
"NOx(GT)",
"PT08.S3(NOx)",
"NO2(GT)",
"PT08.S4(NO2)",
"PT08.S5(O3)",
"T",
"RH",
]
].astype("float64")
data_x_val = validation_data[
[
"CO(GT)",
"PT08.S1(CO)",
"NMHC(GT)",
"C6H6(GT)",
"PT08.S2(NMHC)",
"NOx(GT)",
"PT08.S3(NOx)",
"NO2(GT)",
"PT08.S4(NO2)",
"PT08.S5(O3)",
"T",
"RH",
]
].astype("float64")
# Data with train data and the unseen data from subsequent time steps.
data_x_test = dataset[
[
"CO(GT)",
"PT08.S1(CO)",
"NMHC(GT)",
"C6H6(GT)",
"PT08.S2(NMHC)",
"NOx(GT)",
"PT08.S3(NOx)",
"NO2(GT)",
"PT08.S4(NO2)",
"PT08.S5(O3)",
"T",
"RH",
]
].astype("float64")
data_y = data_train["AH"].astype("float64")
data_y_val = validation_data["AH"].astype("float64")
print(data_x.shape) # (6549, 12)
print(data_y.shape)  # (6549,)<jupyter_output><empty_output><jupyter_text>The second step is to run the [TimeSeriesForecaster](/time_series_forecaster). As a quick demo, we set epochs to 10. You can also leave the epochs unspecified for an adaptive number of epochs.<jupyter_code>predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(
lookback=lookback,
predict_from=predict_from,
predict_until=predict_until,
max_trials=1,
objective="val_loss",
)
# Train the TimeSeriesForecaster with train data
clf.fit(
x=data_x,
y=data_y,
validation_data=(data_x_val, data_y_val),
batch_size=32,
epochs=10,
)
# Predict with the best model(includes original training data).
predictions = clf.predict(data_x_test)
print(predictions.shape)
# Evaluate the best model with testing data.
print(clf.evaluate(data_x_val, data_y_val))<jupyter_output><empty_output> | autokeras/docs/ipynb/timeseries_forecaster.ipynb/0 | {
"file_path": "autokeras/docs/ipynb/timeseries_forecaster.ipynb",
"repo_id": "autokeras",
"token_count": 1395
} | 7 |
"""shell
pip install autokeras
"""
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
import autokeras as ak
"""
## A Simple Example
The first step is to prepare your data. Here we use the [California housing
dataset](
https://scikit-learn.org/stable/datasets/real_world.html#california-housing-dataset)
as an example.
"""
house_dataset = fetch_california_housing()
df = pd.DataFrame(
np.concatenate(
(house_dataset.data, house_dataset.target.reshape(-1, 1)), axis=1
),
columns=house_dataset.feature_names + ["Price"],
)
train_size = int(df.shape[0] * 0.9)
df[:train_size].to_csv("train.csv", index=False)
df[train_size:].to_csv("eval.csv", index=False)
train_file_path = "train.csv"
test_file_path = "eval.csv"
"""
The second step is to run the
[StructuredDataRegressor](/structured_data_regressor).
As a quick demo, we set epochs to 10.
You can also leave the epochs unspecified for an adaptive number of epochs.
"""
# Initialize the structured data regressor.
reg = ak.StructuredDataRegressor(
overwrite=True, max_trials=3
) # It tries 3 different models.
# Feed the structured data regressor with training data.
reg.fit(
# The path to the train.csv file.
train_file_path,
# The name of the label column.
"Price",
epochs=10,
)
# Predict with the best model.
predicted_y = reg.predict(test_file_path)
# Evaluate the best model with testing data.
print(reg.evaluate(test_file_path, "Price"))
"""
## Data Format
The AutoKeras StructuredDataRegressor is quite flexible for the data format.
The example above shows how to use the CSV files directly. Besides CSV files,
it also supports numpy.ndarray, pandas.DataFrame or [tf.data.Dataset](
https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable). The
data should be two-dimensional with numerical or categorical values.
For the regression targets, it should be a vector of numerical values.
AutoKeras accepts numpy.ndarray, pandas.DataFrame, or pandas.Series.
The following examples show how the data can be prepared with numpy.ndarray,
pandas.DataFrame, and tensorflow.data.Dataset.
"""
# x_train as pandas.DataFrame, y_train as pandas.Series
x_train = pd.read_csv(train_file_path)
print(type(x_train)) # pandas.DataFrame
y_train = x_train.pop("Price")
print(type(y_train)) # pandas.Series
# You can also use pandas.DataFrame for y_train.
y_train = pd.DataFrame(y_train)
print(type(y_train)) # pandas.DataFrame
# You can also use numpy.ndarray for x_train and y_train.
x_train = x_train.to_numpy()
y_train = y_train.to_numpy()
print(type(x_train)) # numpy.ndarray
print(type(y_train)) # numpy.ndarray
# Preparing testing data.
x_test = pd.read_csv(test_file_path)
y_test = x_test.pop("Price")
# It tries 3 different models.
reg = ak.StructuredDataRegressor(max_trials=3, overwrite=True)
# Feed the structured data regressor with training data.
reg.fit(x_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = reg.predict(x_test)
# Evaluate the best model with testing data.
print(reg.evaluate(x_test, y_test))
"""
The following code shows how to convert numpy.ndarray to tf.data.Dataset.
"""
train_set = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_set = tf.data.Dataset.from_tensor_slices((x_test, y_test))
reg = ak.StructuredDataRegressor(max_trials=3, overwrite=True)
# Feed the tensorflow Dataset to the regressor.
reg.fit(train_set, epochs=10)
# Predict with the best model.
predicted_y = reg.predict(test_set)
# Evaluate the best model with testing data.
print(reg.evaluate(test_set))
"""
You can also specify the column names and types for the data as follows. The
`column_names` argument is optional if the training data already has column
names, e.g. a pandas.DataFrame or a CSV file. Any column whose type is not
specified will be inferred from the training data.
"""
# Initialize the structured data regressor.
reg = ak.StructuredDataRegressor(
column_names=[
"MedInc",
"HouseAge",
"AveRooms",
"AveBedrms",
"Population",
"AveOccup",
"Latitude",
"Longitude",
],
column_types={"MedInc": "numerical", "Latitude": "numerical"},
max_trials=10, # It tries 10 different models.
overwrite=True,
)
"""
## Validation Data
By default, AutoKeras uses the last 20% of the training data as validation data. As
shown in the example below, you can use `validation_split` to specify the
percentage.
"""
reg.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=10,
)
"""
You can also use your own validation set
instead of splitting it from the training data with `validation_data`.
"""
split = 500
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
reg.fit(
x_train,
y_train,
# Use your own validation set.
validation_data=(x_val, y_val),
epochs=10,
)
"""
## Customized Search Space
For advanced users, you may customize your search space by using
[AutoModel](/auto_model/#automodel-class) instead of
[StructuredDataRegressor](/structured_data_regressor). You can configure the
[StructuredDataBlock](/block/#structureddatablock-class) for some high-level
configurations, e.g., `categorical_encoding` for whether to use the
[CategoricalToNumerical](/block/#categoricaltonumerical-class). You can also
leave these arguments unspecified, in which case the different choices will be
tuned automatically. See the following example for details.
"""
input_node = ak.StructuredDataInput()
output_node = ak.StructuredDataBlock(categorical_encoding=True)(input_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=3
)
reg.fit(x_train, y_train, epochs=10)
"""
The usage of [AutoModel](/auto_model/#automodel-class) is similar to the
[functional API](https://www.tensorflow.org/guide/keras/functional) of Keras.
Basically, you are building a graph, whose edges are blocks and the nodes are
intermediate outputs of blocks. To add an edge from `input_node` to
`output_node`, use `output_node = ak.[some_block]([block_args])(input_node)`.
You can also use more fine-grained blocks to customize the search space
even further. See the following example.
"""
input_node = ak.StructuredDataInput()
output_node = ak.CategoricalToNumerical()(input_node)
output_node = ak.DenseBlock()(output_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, max_trials=3, overwrite=True
)
reg.fit(x_train, y_train, epochs=10)
"""
You can also export the best model found by AutoKeras as a Keras Model.
"""
model = reg.export_model()
model.summary()
# A numpy array with object (mixed type) dtype is not supported.
# You need to convert it to unicode or float first.
model.predict(x_train)
"""
## Reference
[StructuredDataRegressor](/structured_data_regressor),
[AutoModel](/auto_model/#automodel-class),
[StructuredDataBlock](/block/#structureddatablock-class),
[DenseBlock](/block/#denseblock-class),
[StructuredDataInput](/node/#structureddatainput-class),
[RegressionHead](/block/#regressionhead-class),
[CategoricalToNumerical](/block/#categoricaltonumerical-class).
"""
| autokeras/docs/py/structured_data_regression.py/0 | {
"file_path": "autokeras/docs/py/structured_data_regression.py",
"repo_id": "autokeras",
"token_count": 2539
} | 8 |
#
<img src="/img/row_red.svg" alt="drawing" width="400px" style="display: block; margin-left: auto; margin-right: auto"/>
##
{{autogenerated}}
| autokeras/docs/templates/index.md/0 | {
"file_path": "autokeras/docs/templates/index.md",
"repo_id": "autokeras",
"token_count": 55
} | 9 |
[tool.black]
line-length = 80
[tool.isort]
profile = "black"
known_first_party = ["autokeras", "tests"]
default_section = "THIRDPARTY"
line_length = 80
force_single_line = "True" | autokeras/pyproject.toml/0 | {
"file_path": "autokeras/pyproject.toml",
"repo_id": "autokeras",
"token_count": 68
} | 10 |
# Keras CV
| Status | Proposed |
:-------------- |:---------------------------------------------------- |
| **Author(s)** | Zhenyu Tan ([email protected]), Francois Chollet ([email protected]) |
| **Updated** | 2020-08-27 |
## Objective
This document describes the scope of the [keras-cv](https://github.com/keras-team/keras-cv) package, especially:
- What use cases `keras-cv` should cover
- Boundaries between `keras-cv` and [TensorFlow Addons](https://github.com/tensorflow/addons)
- Boundaries between `keras-cv` and [TensorFlow model garden](https://github.com/tensorflow/models)
- Boundaries between `keras-cv` and [tf.keras.applications](https://keras.io/api/applications/)
## Motivation
Computer vision (CV) is a major application area for our users.
Keras on its own provides good support for image classification tasks, in particular via `tf.keras.applications`.
However, a Keras-native modeling solution for more advanced tasks,
such as object detection, instance segmentation, etc., is still lacking.
As a result, the open-source community has rolled out many different solutions for these use cases,
made available via PyPI and GitHub. These third-party solutions are not always kept up to date, and
many still rely on the legacy multi-backend Keras. They also raise the issue of API standardization.
To fix this, we want machine learning engineers to have access to a standard Keras-native,
optimized, and well-tested set of components to build their advanced computer vision models.
This provides key user benefits:
- The package would be first-party and thus always up to date with modern best practices.
- High code quality and testing standards and strict quality control: same level of trust as core Keras
- A shared API standard across the community
- Ability for the open-source community to build more advanced solutions *on top* of this package instead of reinventing it
## Design Proposal
`keras-cv` will provide components that cover the following areas:
- Object Detection tasks.
- Instance Segmentation tasks.
- Semantic Segmentation tasks.
- Keypoint Detection tasks.
- Video Classification tasks.
- Object Tracking tasks.
Specifically, for Object Detection tasks, `keras-cv` will include most anchor-based modules:
- Common objects such as anchor generator, box matcher.
- Keras layer components such as ROI generator, NMS postprocessor.
- Keras backbone components that fills the gap from keras-applications.
- Keras losses and metrics, such as Focal loss and coco metrics.
- Data loader and preprocessing for different dataset, such as COCO.
For Semantic Segmentation tasks, `keras-cv` will include:
- Keras head components such as Atrous Spatial Pyramid Pooling (ASPP).
### Success criteria for `keras-cv`
- Cover all modeling tasks listed above
- Easy-to-use API
- Models run on CPU/GPU/TPU seamlessly
- State of the art performance
- Models can be readily deployed to production
### Boundaries between keras-cv and keras-applications
- keras-applications will be improved to include basic building blocks, such as the MobileNet bottleneck, that
expose feature maps
- keras-cv will depend on keras-applications for importing backbones.
### Boundaries between keras-cv and Tensorflow Addons
- Highly experimental modeling, layers, losses, etc., live in addons.
- Components from addons will graduate to keras-cv once they see sufficient usage
and work on CPU/GPU/TPU. The API interface will remain experimental after graduation.
### Boundaries between keras-cv and Model Garden
- End to end modeling workflow and model specific details live in Model Garden
- Model garden will re-use most of the building blocks from keras-cv and Tensorflow Addons.
- Components from Model Garden can graduate to keras-cv once they are widely accepted
and perform well on CPU/GPU/TPU. The API interface should remain stable after graduation.
## Dependencies
- Tensorflow version >= 2.4
- Tensorflow datasets
- Keras-applications
## Backwards compatibility
We propose to guarantee major release backwards compatibility.
## Maintenance & development process
The `keras-cv` codebase will be primarily maintained by the Keras team at Google,
with help and contributions from the community. The codebase will be developed
on GitHub as part of the `keras-team` organization. The same process for tracking
issues and reviewing PRs will be used as for the core Keras repository.
## Performance benchmark
We will set up Keras benchmark utilities to help users contribute to this repository.
## Detailed Design
Detailed design will be shared in a separate document (this document only focuses on scope).
## Questions and Discussion Topics
Please share any questions or suggestion.
| governance/rfcs/20200827-keras-cv-scoping-design.md/0 | {
"file_path": "governance/rfcs/20200827-keras-cv-scoping-design.md",
"repo_id": "governance",
"token_count": 1252
} | 11 |
"""MobileNet v3 models for Keras.
The following table describes the performance of MobileNets:
------------------------------------------------------------------------
MACs stands for Multiply Adds
| Classification Checkpoint| MACs(M)| Parameters(M)| Top1 Accuracy| Pixel1 CPU(ms)|
|---|---|---|---|---|
| [mobilenet_v3_large_1.0_224] | 217 | 5.4 | 75.6 | 51.2 |
| [mobilenet_v3_large_0.75_224] | 155 | 4.0 | 73.3 | 39.8 |
| [mobilenet_v3_large_minimalistic_1.0_224] | 209 | 3.9 | 72.3 | 44.1 |
| [mobilenet_v3_small_1.0_224] | 66 | 2.9 | 68.1 | 15.8 |
| [mobilenet_v3_small_0.75_224] | 44 | 2.4 | 65.4 | 12.8 |
| [mobilenet_v3_small_minimalistic_1.0_224] | 65 | 2.0 | 61.9 | 12.2 |
The weights for all 6 models are obtained and
translated from the TensorFlow checkpoints found [here]
(https://github.com/tensorflow/models/tree/master/research/
slim/nets/mobilenet/README.md).
# Reference
This file contains building code for MobileNetV3, based on
[Searching for MobileNetV3]
(https://arxiv.org/pdf/1905.02244.pdf) (ICCV 2019)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
from . import correct_pad
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import _obtain_input_shape
from .imagenet_utils import decode_predictions
backend = None
layers = None
models = None
keras_utils = None
BASE_WEIGHT_PATH = ('https://github.com/DrSlink/mobilenet_v3_keras/'
'releases/download/v1.0/')
WEIGHTS_HASHES = {
'large_224_0.75_float': (
'765b44a33ad4005b3ac83185abf1d0eb',
'c256439950195a46c97ede7c294261c6'),
'large_224_1.0_float': (
'59e551e166be033d707958cf9e29a6a7',
'12c0a8442d84beebe8552addf0dcb950'),
'large_minimalistic_224_1.0_float': (
'675e7b876c45c57e9e63e6d90a36599c',
'c1cddbcde6e26b60bdce8e6e2c7cae54'),
'small_224_0.75_float': (
'cb65d4e5be93758266aa0a7f2c6708b7',
'c944bb457ad52d1594392200b48b4ddb'),
'small_224_1.0_float': (
'8768d4c2e7dee89b9d02b2d03d65d862',
'5bec671f47565ab30e540c257bba8591'),
'small_minimalistic_224_1.0_float': (
'99cd97fb2fcdad2bf028eb838de69e37',
'1efbf7e822e03f250f45faa3c6bbe156'),
}
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
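# Note: `mode='tf'` scales input pixels from the [0, 255] range to [-1, 1],
# which is the preprocessing used for the MobileNetV3 weights in this module.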
def relu(x):
return layers.ReLU()(x)
def hard_sigmoid(x):
return layers.ReLU(6.)(x + 3.) * (1. / 6.)
def hard_swish(x):
return layers.Multiply()([layers.Activation(hard_sigmoid)(x), x])
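# In other words: hard_sigmoid(x) = relu6(x + 3) / 6 and
# hard_swish(x) = x * hard_sigmoid(x), the cheap piecewise-linear
# approximations used throughout MobileNetV3.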
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/
# slim/nets/mobilenet/mobilenet.py
def _depth(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
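# For example (illustration only): _depth(30) == 32 and _depth(24) == 24,
# while _depth(10) == 16 because rounding 10 down to 8 would lose more than
# 10%, so the result is bumped up by one divisor.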
def _se_block(inputs, filters, se_ratio, prefix):
x = layers.GlobalAveragePooling2D(name=prefix + 'squeeze_excite/AvgPool')(inputs)
if backend.image_data_format() == 'channels_first':
x = layers.Reshape((filters, 1, 1))(x)
else:
x = layers.Reshape((1, 1, filters))(x)
x = layers.Conv2D(_depth(filters * se_ratio),
kernel_size=1,
padding='same',
name=prefix + 'squeeze_excite/Conv')(x)
x = layers.ReLU(name=prefix + 'squeeze_excite/Relu')(x)
x = layers.Conv2D(filters,
kernel_size=1,
padding='same',
name=prefix + 'squeeze_excite/Conv_1')(x)
x = layers.Activation(hard_sigmoid)(x)
if backend.backend() == 'theano':
# For the Theano backend, we have to explicitly make
# the excitation weights broadcastable.
x = layers.Lambda(
lambda br: backend.pattern_broadcast(br, [True, True, True, False]),
output_shape=lambda input_shape: input_shape,
name=prefix + 'squeeze_excite/broadcast')(x)
x = layers.Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
return x
def _inverted_res_block(x, expansion, filters, kernel_size, stride,
se_ratio, activation, block_id):
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
shortcut = x
prefix = 'expanded_conv/'
infilters = backend.int_shape(x)[channel_axis]
if block_id:
# Expand
prefix = 'expanded_conv_{}/'.format(block_id)
x = layers.Conv2D(_depth(infilters * expansion),
kernel_size=1,
padding='same',
use_bias=False,
name=prefix + 'expand')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'expand/BatchNorm')(x)
x = layers.Activation(activation)(x)
if stride == 2:
x = layers.ZeroPadding2D(padding=correct_pad(backend, x, kernel_size),
name=prefix + 'depthwise/pad')(x)
x = layers.DepthwiseConv2D(kernel_size,
strides=stride,
padding='same' if stride == 1 else 'valid',
use_bias=False,
name=prefix + 'depthwise')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'depthwise/BatchNorm')(x)
x = layers.Activation(activation)(x)
if se_ratio:
x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)
x = layers.Conv2D(filters,
kernel_size=1,
padding='same',
use_bias=False,
name=prefix + 'project')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project/BatchNorm')(x)
if stride == 1 and infilters == filters:
x = layers.Add(name=prefix + 'Add')([shortcut, x])
return x
def MobileNetV3(stack_fn,
last_point_ch,
input_shape=None,
alpha=1.0,
model_type='large',
minimalistic=False,
include_top=True,
weights='imagenet',
input_tensor=None,
classes=1000,
pooling=None,
dropout_rate=0.2,
**kwargs):
"""Instantiates the MobileNetV3 architecture.
# Arguments
stack_fn: a function that returns output tensor for the
stacked residual blocks.
last_point_ch: number channels at the last layer (before top)
input_shape: optional shape tuple, to be specified if you would
like to use a model with an input img resolution that is not
(224, 224, 3).
It should have exactly 3 inputs channels (224, 224, 3).
You can also omit this option if you would like
to infer input_shape from an input_tensor.
If you choose to include both input_tensor and input_shape then
input_shape will be used if they match, if the shapes
do not match then we will throw an error.
E.g. `(160, 160, 3)` would be one valid value.
alpha: controls the width of the network. This is known as the
depth multiplier in the MobileNetV3 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
model_type: MobileNetV3 is defined as two models: large and small. These
models are targeted at high and low resource use cases respectively.
minimalistic: In addition to large and small models this module also contains
            so-called minimalistic models. These models have the same per-layer
            dimension characteristics as MobileNetV3; however, they don't utilize any
of the advanced blocks (squeeze-and-excite units, hard-swish, and 5x5
convolutions). While these models are less efficient on CPU, they are
much more performant on GPU/DSP.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
dropout_rate: fraction of the input units to drop on the last layer
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid model type, argument for `weights`,
or invalid input shape when weights='imagenet'
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
# If both input_shape and input_tensor are used, they should match
if input_shape is not None and input_tensor is not None:
try:
is_input_t_tensor = backend.is_keras_tensor(input_tensor)
except ValueError:
try:
is_input_t_tensor = backend.is_keras_tensor(
keras_utils.get_source_inputs(input_tensor))
except ValueError:
raise ValueError('input_tensor: ', input_tensor,
'is not type input_tensor')
if is_input_t_tensor:
            if backend.image_data_format() == 'channels_first':
if backend.int_shape(input_tensor)[1] != input_shape[1]:
raise ValueError('input_shape: ', input_shape,
'and input_tensor: ', input_tensor,
'do not meet the same shape requirements')
else:
if backend.int_shape(input_tensor)[2] != input_shape[1]:
raise ValueError('input_shape: ', input_shape,
'and input_tensor: ', input_tensor,
'do not meet the same shape requirements')
else:
raise ValueError('input_tensor specified: ', input_tensor,
'is not a keras tensor')
# If input_shape is None, infer shape from input_tensor
if input_shape is None and input_tensor is not None:
try:
backend.is_keras_tensor(input_tensor)
except ValueError:
raise ValueError('input_tensor: ', input_tensor,
'is type: ', type(input_tensor),
'which is not a valid type')
if backend.is_keras_tensor(input_tensor):
if backend.image_data_format() == 'channels_first':
rows = backend.int_shape(input_tensor)[2]
cols = backend.int_shape(input_tensor)[3]
input_shape = (3, cols, rows)
else:
rows = backend.int_shape(input_tensor)[1]
cols = backend.int_shape(input_tensor)[2]
input_shape = (cols, rows, 3)
    # If input_shape is None and input_tensor is None, use a standard shape
if input_shape is None and input_tensor is None:
input_shape = (None, None, 3)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if rows and cols and (rows < 32 or cols < 32):
raise ValueError('Input size must be at least 32x32; got `input_shape=' +
str(input_shape) + '`')
if weights == 'imagenet':
if minimalistic is False and alpha not in [0.75, 1.0] \
or minimalistic is True and alpha != 1.0:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of `0.75`, `1.0` for non minimalistic'
' or `1.0` for minimalistic only.')
if rows != cols or rows != 224:
warnings.warn('`input_shape` is undefined or non-square, '
'or `rows` is not 224.'
' Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
if minimalistic:
kernel = 3
activation = relu
se_ratio = None
else:
kernel = 5
activation = hard_swish
se_ratio = 0.25
x = layers.ZeroPadding2D(padding=correct_pad(backend, img_input, 3),
name='Conv_pad')(img_input)
x = layers.Conv2D(16,
kernel_size=3,
strides=(2, 2),
padding='valid',
use_bias=False,
name='Conv')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='Conv/BatchNorm')(x)
x = layers.Activation(activation)(x)
x = stack_fn(x, kernel, activation, se_ratio)
last_conv_ch = _depth(backend.int_shape(x)[channel_axis] * 6)
# if the width multiplier is greater than 1 we
# increase the number of output channels
if alpha > 1.0:
last_point_ch = _depth(last_point_ch * alpha)
x = layers.Conv2D(last_conv_ch,
kernel_size=1,
padding='same',
use_bias=False,
name='Conv_1')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='Conv_1/BatchNorm')(x)
x = layers.Activation(activation)(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
if channel_axis == 1:
x = layers.Reshape((last_conv_ch, 1, 1))(x)
else:
x = layers.Reshape((1, 1, last_conv_ch))(x)
x = layers.Conv2D(last_point_ch,
kernel_size=1,
padding='same',
name='Conv_2')(x)
x = layers.Activation(activation)(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate)(x)
x = layers.Conv2D(classes,
kernel_size=1,
padding='same',
name='Logits')(x)
x = layers.Flatten()(x)
x = layers.Softmax(name='Predictions/Softmax')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='MobilenetV3' + model_type)
# Load weights.
if weights == 'imagenet':
model_name = "{}{}_224_{}_float".format(
model_type, '_minimalistic' if minimalistic else '', str(alpha))
if include_top:
file_name = 'weights_mobilenet_v3_' + model_name + '.h5'
file_hash = WEIGHTS_HASHES[model_name][0]
else:
file_name = 'weights_mobilenet_v3_' + model_name + '_no_top.h5'
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = keras_utils.get_file(file_name,
BASE_WEIGHT_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def MobileNetV3Small(input_shape=None,
alpha=1.0,
minimalistic=False,
include_top=True,
weights='imagenet',
input_tensor=None,
classes=1000,
pooling=None,
dropout_rate=0.2,
**kwargs):
def stack_fn(x, kernel, activation, se_ratio):
def depth(d):
return _depth(d * alpha)
x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0)
x = _inverted_res_block(x, 72. / 16, depth(24), 3, 2, None, relu, 1)
x = _inverted_res_block(x, 88. / 24, depth(24), 3, 1, None, relu, 2)
x = _inverted_res_block(x, 4, depth(40), kernel, 2, se_ratio, activation, 3)
x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 4)
x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 5)
x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 6)
x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 7)
x = _inverted_res_block(x, 6, depth(96), kernel, 2, se_ratio, activation, 8)
x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 9)
x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 10)
return x
return MobileNetV3(stack_fn,
1024,
input_shape,
alpha,
'small',
minimalistic,
include_top,
weights,
input_tensor,
classes,
pooling,
dropout_rate,
**kwargs)
def MobileNetV3Large(input_shape=None,
alpha=1.0,
minimalistic=False,
include_top=True,
weights='imagenet',
input_tensor=None,
classes=1000,
pooling=None,
dropout_rate=0.2,
**kwargs):
def stack_fn(x, kernel, activation, se_ratio):
def depth(d):
return _depth(d * alpha)
x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0)
x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1)
x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2)
x = _inverted_res_block(x, 3, depth(40), kernel, 2, se_ratio, relu, 3)
x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 4)
x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 5)
x = _inverted_res_block(x, 6, depth(80), 3, 2, None, activation, 6)
x = _inverted_res_block(x, 2.5, depth(80), 3, 1, None, activation, 7)
x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 8)
x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 9)
x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 10)
x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 11)
x = _inverted_res_block(x, 6, depth(160), kernel, 2, se_ratio,
activation, 12)
x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio,
activation, 13)
x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio,
activation, 14)
return x
return MobileNetV3(stack_fn,
1280,
input_shape,
alpha,
'large',
minimalistic,
include_top,
weights,
input_tensor,
classes,
pooling,
dropout_rate,
**kwargs)
setattr(MobileNetV3Small, '__doc__', MobileNetV3.__doc__)
setattr(MobileNetV3Large, '__doc__', MobileNetV3.__doc__)
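# A minimal usage sketch (illustration only; it assumes this module is wired to
# a Keras installation, e.g. through `keras.applications`, so that the
# backend/layers/models/utils submodules are available, and that `images` is a
# float array of shape (n, 224, 224, 3)):
#     model = MobileNetV3Large(weights='imagenet')
#     preds = model.predict(preprocess_input(images))
#     print(decode_predictions(preds, top=3))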
| keras-applications/keras_applications/mobilenet_v3.py/0 | {
"file_path": "keras-applications/keras_applications/mobilenet_v3.py",
"repo_id": "keras-applications",
"token_count": 11835
} | 12 |
MIT License
Copyright (c) 2017 Fariz Rahman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| keras-contrib/LICENSE/0 | {
"file_path": "keras-contrib/LICENSE",
"repo_id": "keras-contrib",
"token_count": 274
} | 13 |
{% extends "base.html" %}
{% block content %}
<h1 id="search">Search Results</h1>
<form id="content_search" action="search.html">
<span role="status" aria-live="polite" class="ui-helper-hidden-accessible"></span>
<input name="q" id="mkdocs-search-query" type="text" class="search_input search-query ui-autocomplete-input" placeholder="Search the Docs" autocomplete="off" autofocus title="Type search term here">
</form>
<div id="mkdocs-search-results" class="search-results">
Searching...
</div>
{% endblock %}
| keras-contrib/contrib_docs/theme/search.html/0 | {
"file_path": "keras-contrib/contrib_docs/theme/search.html",
"repo_id": "keras-contrib",
"token_count": 188
} | 14 |
from __future__ import absolute_import
from .squash import squash
| keras-contrib/keras_contrib/activations/__init__.py/0 | {
"file_path": "keras-contrib/keras_contrib/activations/__init__.py",
"repo_id": "keras-contrib",
"token_count": 17
} | 15 |
from keras.callbacks import TensorBoard
import numpy as np
import os
class TensorBoardGrouped(TensorBoard):
"""TensorBoard basic visualizations.
[TensorBoard](https://www.tensorflow.org/guide/summaries_and_tensorboard)
is a visualization tool provided with TensorFlow.
This callback is a subclass of `keras.callbacks.TensorBoard`.
The only difference is that the training and validation logs are
grouped and written to the same plot.
It's a drop-in replacement for the keras callback.
The arguments are the same.
"""
def __init__(self, log_dir='./logs', *args, **kwargs):
self.base_log_dir = log_dir
self.train_log_dir = os.path.join(log_dir, 'train')
self.val_log_dir = os.path.join(log_dir, 'val')
super(TensorBoardGrouped, self).__init__(self.train_log_dir,
*args,
**kwargs)
def set_model(self, model):
super(TensorBoardGrouped, self).set_model(model)
import tensorflow as tf
self.val_writer = tf.summary.FileWriter(self.val_log_dir)
def _write_logs(self, logs, index):
import tensorflow as tf
for name, value in logs.items():
if name in ['batch', 'size']:
continue
if name.startswith('val_'):
writer = self.val_writer
name = name[4:] # remove val_
else:
writer = self.writer
summary = tf.Summary()
summary_value = summary.value.add()
if isinstance(value, np.ndarray):
summary_value.simple_value = value.item()
else:
summary_value.simple_value = value
summary_value.tag = name
writer.add_summary(summary, index)
self.writer.flush()
self.val_writer.flush()
def on_train_end(self, _):
self.writer.close()
        self.val_writer.close()
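# A minimal usage sketch (illustration only; `model`, `x`, and `y` are assumed
# to already exist):
#     callback = TensorBoardGrouped(log_dir='./logs')
#     model.fit(x, y, validation_split=0.2, callbacks=[callback])
# The training and validation curves then share the same TensorBoard plots.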
| keras-contrib/keras_contrib/callbacks/tensorboard.py/0 | {
"file_path": "keras-contrib/keras_contrib/callbacks/tensorboard.py",
"repo_id": "keras-contrib",
"token_count": 930
} | 16 |
from keras import backend as K
from keras.optimizers import Optimizer
class LARS(Optimizer):
"""Layer-wise Adaptive Rate Scaling for large batch training.
Introduced by "Large Batch Training of Convolutional Networks" by Y. You,
I. Gitman, and B. Ginsburg. (https://arxiv.org/abs/1708.03888)
Implements the LARS learning rate scheme presented in the paper above. This
optimizer is useful when scaling the batch size to up to 32K without
significant performance degradation. It is recommended to use the optimizer
in conjunction with:
- Gradual learning rate warm-up
- Linear learning rate scaling
- Poly rule learning rate decay
Note, LARS scaling is currently only enabled for dense tensors.
Args:
lr: A `Tensor` or floating point value. The base learning rate.
momentum: A floating point value. Momentum hyperparameter.
weight_decay: A floating point value. Weight decay hyperparameter.
        eeta: LARS coefficient as used in the paper. Defaults to the LARS
coefficient from the paper. (eeta / weight_decay) determines the
highest scaling factor in LARS.
epsilon: Optional epsilon parameter to be set in models that have very
small gradients. Default set to 0.0.
        nesterov: When set to `True`, Nesterov momentum will be enabled.
"""
def __init__(self,
lr,
momentum=0.9,
weight_decay=0.0001,
eeta=0.001,
epsilon=0.0,
nesterov=False,
**kwargs):
        if momentum < 0.0:
            raise ValueError("momentum should be non-negative: %s" % momentum)
        if weight_decay < 0.0:
            raise ValueError("weight_decay should be non-negative: %s"
                             % weight_decay)
super(LARS, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.momentum = K.variable(momentum, name='momentum')
self.weight_decay = K.variable(weight_decay, name='weight_decay')
self.eeta = K.variable(eeta, name='eeta')
self.epsilon = epsilon
self.nesterov = nesterov
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
weights = self.get_weights()
self.updates = [K.update_add(self.iterations, 1)]
scaled_lr = self.lr
w_norm = K.sqrt(K.sum([K.sum(K.square(weight))
for weight in weights]))
g_norm = K.sqrt(K.sum([K.sum(K.square(grad))
for grad in grads]))
scaled_lr = K.switch(K.greater(w_norm * g_norm, K.zeros([1])),
K.expand_dims((self.eeta * w_norm /
(g_norm + self.weight_decay * w_norm +
self.epsilon)) * self.lr),
K.ones([1]) * self.lr)
if K.backend() == 'theano':
            scaled_lr = scaled_lr[0] # otherwise Theano raises a broadcasting error
# momentum
moments = [K.zeros(K.int_shape(param), dtype=K.dtype(param))
for param in params]
self.weights = [self.iterations] + moments
for param, grad, moment in zip(params, grads, moments):
v0 = (moment * self.momentum)
v1 = scaled_lr * grad # velocity
veloc = v0 - v1
self.updates.append(K.update(moment, veloc))
if self.nesterov:
new_param = param + (veloc * self.momentum) - v1
else:
new_param = param + veloc
# Apply constraints.
if getattr(param, 'constraint', None) is not None:
new_param = param.constraint(new_param)
self.updates.append(K.update(param, new_param))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'momentum': float(K.get_value(self.momentum)),
'weight_decay': float(K.get_value(self.weight_decay)),
'epsilon': self.epsilon,
'eeta': float(K.get_value(self.eeta)),
'nesterov': self.nesterov}
base_config = super(LARS, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
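# Usage sketch: LARS follows the standard Keras optimizer interface, so it can
# be passed directly to `model.compile`. The learning rate below is an
# arbitrary illustrative value; as noted in the docstring, LARS is typically
# combined with learning-rate warm-up and a polynomial decay schedule.
#
#   from keras_contrib.optimizers import LARS
#
#   model.compile(optimizer=LARS(lr=0.1, momentum=0.9, weight_decay=1e-4),
#                 loss='categorical_crossentropy',
#                 metrics=['accuracy'])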
| keras-contrib/keras_contrib/optimizers/lars.py/0 | {
"file_path": "keras-contrib/keras_contrib/optimizers/lars.py",
"repo_id": "keras-contrib",
"token_count": 2155
} | 17 |
from setuptools import setup
from setuptools import find_packages
import os
if os.environ.get('USE_TF_KERAS', None) == '1':
name = 'tf_keras_contrib'
install_requires = []
else:
name = 'keras_contrib'
install_requires = ['keras']
setup(name=name,
version='2.0.8',
description='Keras Deep Learning for Python, Community Contributions',
author='Fariz Rahman',
author_email='[email protected]',
url='https://github.com/farizrahman4u/keras-contrib',
license='MIT',
install_requires=install_requires,
extras_require={
'h5py': ['h5py'],
'visualize': ['pydot>=1.2.0'],
'tests': ['pytest',
'pytest-pep8',
'pytest-xdist',
'pytest-cov'],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=find_packages())
| keras-contrib/setup.py/0 | {
"file_path": "keras-contrib/setup.py",
"repo_id": "keras-contrib",
"token_count": 627
} | 18 |
import numpy as np
import pytest
from keras import backend as K
from keras_contrib import backend as KC
from keras_contrib.layers import SubPixelUpscaling
from keras_contrib.utils.test_utils import layer_test
# TensorFlow does not support full convolution.
if K.backend() == 'theano':
_convolution_border_modes = ['valid', 'same']
data_format = 'channels_first'
else:
_convolution_border_modes = ['valid', 'same']
data_format = 'channels_last'
@pytest.mark.parametrize('scale_factor', [2, 3, 4])
def test_sub_pixel_upscaling(scale_factor):
num_samples = 2
num_row = 16
num_col = 16
input_dtype = K.floatx()
nb_channels = 4 * (scale_factor ** 2)
input_data = np.random.random((num_samples, nb_channels, num_row, num_col))
input_data = input_data.astype(input_dtype)
if K.image_data_format() == 'channels_last':
input_data = input_data.transpose((0, 2, 3, 1))
input_tensor = K.variable(input_data)
expected_output = K.eval(KC.depth_to_space(input_tensor,
scale=scale_factor))
layer_test(SubPixelUpscaling,
kwargs={'scale_factor': scale_factor},
input_data=input_data,
expected_output=expected_output,
expected_output_dtype=K.floatx())
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/layers/convolutional/test_subpixelupscaling.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/layers/convolutional/test_subpixelupscaling.py",
"repo_id": "keras-contrib",
"token_count": 593
} | 19 |
"""Benchmark rnn layers.
To run benchmarks, see the following command for an example; change the
flags to your custom values:
```
python3 -m benchmarks.layer_benchmark.rnn_benchmark \
--benchmark_name=benchmark_lstm \
--num_samples=2048 \
--batch_size=256 \
--jit_compile=True
```
"""
import tensorflow as tf
from absl import app
from absl import flags
import keras_core
from benchmarks.layer_benchmark.base_benchmark import LayerBenchmark
FLAGS = flags.FLAGS
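# The `benchmark_name`, `num_samples`, `batch_size` and `jit_compile` flags
# consumed in `main()` are assumed to be defined elsewhere in the benchmark
# package (e.g. alongside `LayerBenchmark`). If they were not, a minimal absl
# sketch of the definitions would look like:
#
#   flags.DEFINE_string("benchmark_name", None, "Name of the benchmark to run.")
#   flags.DEFINE_integer("num_samples", 1000, "Number of input samples.")
#   flags.DEFINE_integer("batch_size", 20, "Batch size of the input data.")
#   flags.DEFINE_bool("jit_compile", True, "Whether to jit-compile the run.")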
def benchmark_conv_lstm1d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "ConvLSTM1D"
init_args = {
"filters": 16,
"kernel_size": 2,
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[32, 256, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_conv_lstm2d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "ConvLSTM2D"
init_args = {
"filters": 16,
"kernel_size": 2,
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[32, 32, 32, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_conv_lstm3d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "ConvLSTM3D"
init_args = {
"filters": 8,
"kernel_size": 2,
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[8, 16, 16, 16, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_gru(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "GRU"
init_args = {
"units": 32,
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_lstm(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "LSTM"
init_args = {
"units": 32,
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_simple_rnn(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "SimpleRNN"
init_args = {
"units": 32,
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_bidirectional(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Bidirectional"
init_args = {}
keras_core_layer = keras_core.layers.Bidirectional(
keras_core.layers.LSTM(32)
)
tf_keras_layer = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32))
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256],
jit_compile=jit_compile,
keras_core_layer=keras_core_layer,
tf_keras_layer=tf_keras_layer,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_time_distributed(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "TimeDistributed"
init_args = {}
keras_core_layer = keras_core.layers.TimeDistributed(
keras_core.layers.Conv2D(16, (3, 3))
)
tf_keras_layer = tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(16, (3, 3))
)
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[10, 32, 32, 3],
jit_compile=jit_compile,
keras_core_layer=keras_core_layer,
tf_keras_layer=tf_keras_layer,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
BENCHMARK_NAMES = {
"benchmark_conv_lstm1d": benchmark_conv_lstm1d,
"benchmark_conv_lstm2d": benchmark_conv_lstm2d,
"benchmark_conv_lstm3d": benchmark_conv_lstm3d,
"benchmark_gru": benchmark_gru,
"benchmark_lstm": benchmark_lstm,
"benchmark_simple_rnn": benchmark_simple_rnn,
"benchmark_bidirectional": benchmark_bidirectional,
"benchmark_time_distributed": benchmark_time_distributed,
}
def main(_):
benchmark_name = FLAGS.benchmark_name
num_samples = FLAGS.num_samples
batch_size = FLAGS.batch_size
jit_compile = FLAGS.jit_compile
if benchmark_name is None:
for name, benchmark_fn in BENCHMARK_NAMES.items():
benchmark_fn(num_samples, batch_size, jit_compile)
return
if benchmark_name not in BENCHMARK_NAMES:
raise ValueError(
f"Invalid benchmark name: {benchmark_name}, `benchmark_name` must "
f"be one of {BENCHMARK_NAMES.keys()}"
)
benchmark_fn = BENCHMARK_NAMES[benchmark_name]
benchmark_fn(num_samples, batch_size, jit_compile)
if __name__ == "__main__":
app.run(main)
| keras-core/benchmarks/layer_benchmark/rnn_benchmark.py/0 | {
"file_path": "keras-core/benchmarks/layer_benchmark/rnn_benchmark.py",
"repo_id": "keras-core",
"token_count": 3042
} | 20 |
import numpy as np
from keras import Model
from keras import layers
from keras import losses
from keras import metrics
from keras import optimizers
import keras
keras.config.disable_traceback_filtering()
inputs = layers.Input((100,))
x = layers.Dense(512, activation="relu")(inputs)
residual = x
x = layers.Dense(512, activation="relu")(x)
x = layers.Dense(512, activation="relu")(x)
x += residual
x = layers.Dense(512, activation="relu")(x)
residual = x
x = layers.Dense(512, activation="relu")(x)
x = layers.Dense(512, activation="relu")(x)
x += residual
residual = x
x = layers.Dense(512, activation="relu")(x)
x = layers.Dense(512, activation="relu")(x)
x += residual
outputs = layers.Dense(16)(x)
model = Model(inputs, outputs)
model.summary()
x = np.random.random((50000, 100))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 5
model.compile(
optimizer=optimizers.Adam(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[
metrics.CategoricalAccuracy(name="acc"),
metrics.MeanSquaredError(name="mse"),
],
)
print("\nTrain model")
history = model.fit(
x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2
)
print("\nHistory:")
print(history.history)
print("\nEvaluate model")
scores = model.evaluate(x, y, return_dict=True)
print(scores)
print("\nRun inference")
pred = model.predict(x)
print(f"Inferred output shape {pred.shape}")
| keras-core/examples/demo_functional.py/0 | {
"file_path": "keras-core/examples/demo_functional.py",
"repo_id": "keras-core",
"token_count": 545
} | 21 |
"""
Title: Structured data learning with TabTransformer
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2022/01/18
Last modified: 2022/01/18
Description: Using contextual embeddings for structured data classification.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates how to do structured data classification using
[TabTransformer](https://arxiv.org/abs/2012.06678), a deep tabular data modeling
architecture for supervised and semi-supervised learning.
The TabTransformer is built upon self-attention based Transformers.
The Transformer layers transform the embeddings of categorical features
into robust contextual embeddings to achieve higher predictive accuracy.
## Setup
"""
import keras_core as keras
from keras_core import layers
from keras_core import ops
import math
import numpy as np
import pandas as pd
from tensorflow import data as tf_data
import matplotlib.pyplot as plt
from functools import partial
"""
## Prepare the data
This example uses the
[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/census+income)
provided by the
[UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).
The task is binary classification
to predict whether a person is likely to be making over USD 50,000 a year.
The dataset includes 48,842 instances with 14 input features: 5 numerical features and 9 categorical features.
First, let's load the dataset from the UCI Machine Learning Repository into a Pandas
DataFrame:
"""
CSV_HEADER = [
"age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"gender",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"income_bracket",
]
train_data_url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
)
train_data = pd.read_csv(train_data_url, header=None, names=CSV_HEADER)
test_data_url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
)
test_data = pd.read_csv(test_data_url, header=None, names=CSV_HEADER)
print(f"Train dataset shape: {train_data.shape}")
print(f"Test dataset shape: {test_data.shape}")
"""
Remove the first record (because it is not a valid data example) and a trailing 'dot' in the class labels.
"""
test_data = test_data[1:]
test_data.income_bracket = test_data.income_bracket.apply(
lambda value: value.replace(".", "")
)
"""
Now we store the training and test data in separate CSV files.
"""
train_data_file = "train_data.csv"
test_data_file = "test_data.csv"
train_data.to_csv(train_data_file, index=False, header=False)
test_data.to_csv(test_data_file, index=False, header=False)
"""
## Define dataset metadata
Here, we define the metadata of the dataset that will be useful for reading and parsing
the data into input features, and encoding the input features with respect to their types.
"""
# A list of the numerical feature names.
NUMERIC_FEATURE_NAMES = [
"age",
"education_num",
"capital_gain",
"capital_loss",
"hours_per_week",
]
# A dictionary of the categorical features and their vocabulary.
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
"workclass": sorted(list(train_data["workclass"].unique())),
"education": sorted(list(train_data["education"].unique())),
"marital_status": sorted(list(train_data["marital_status"].unique())),
"occupation": sorted(list(train_data["occupation"].unique())),
"relationship": sorted(list(train_data["relationship"].unique())),
"race": sorted(list(train_data["race"].unique())),
"gender": sorted(list(train_data["gender"].unique())),
"native_country": sorted(list(train_data["native_country"].unique())),
}
# Name of the column to be used as instances weight.
WEIGHT_COLUMN_NAME = "fnlwgt"
# A list of the categorical feature names.
CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys())
# A list of all the input features.
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES
# A list of column default values for each feature.
COLUMN_DEFAULTS = [
[0.0] if feature_name in NUMERIC_FEATURE_NAMES + [WEIGHT_COLUMN_NAME] else ["NA"]
for feature_name in CSV_HEADER
]
# The name of the target feature.
TARGET_FEATURE_NAME = "income_bracket"
# A list of the labels of the target features.
TARGET_LABELS = [" <=50K", " >50K"]
"""
## Configure the hyperparameters
The hyperparameters include the model architecture and training configurations.
"""
LEARNING_RATE = 0.001
WEIGHT_DECAY = 0.0001
DROPOUT_RATE = 0.2
BATCH_SIZE = 265
NUM_EPOCHS = 15
NUM_TRANSFORMER_BLOCKS = 3 # Number of transformer blocks.
NUM_HEADS = 4 # Number of attention heads.
EMBEDDING_DIMS = 16 # Embedding dimensions of the categorical features.
MLP_HIDDEN_UNITS_FACTORS = [
2,
1,
] # MLP hidden layer units, as factors of the number of inputs.
NUM_MLP_BLOCKS = 2 # Number of MLP blocks in the baseline model.
"""
## Implement data reading pipeline
We define an input function that reads and parses the file, then converts features
and labels into a [`tf.data.Dataset`](https://www.tensorflow.org/guide/datasets)
for training or evaluation.
"""
target_label_lookup = layers.StringLookup(
vocabulary=TARGET_LABELS, mask_token=None, num_oov_indices=0
)
def prepare_example(features, target):
target_index = target_label_lookup(target)
weights = features.pop(WEIGHT_COLUMN_NAME)
return features, target_index, weights
lookup_dict = {}
for feature_name in CATEGORICAL_FEATURE_NAMES:
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
    # Create a lookup to convert string values to integer indices.
# Since we are not using a mask token, nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
lookup = layers.StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
lookup_dict[feature_name] = lookup
def encode_categorical(batch_x, batch_y, weights):
for feature_name in CATEGORICAL_FEATURE_NAMES:
batch_x[feature_name] = lookup_dict[feature_name](batch_x[feature_name])
return batch_x, batch_y, weights
def get_dataset_from_csv(csv_file_path, batch_size=128, shuffle=False):
dataset = (
        tf_data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=CSV_HEADER,
column_defaults=COLUMN_DEFAULTS,
label_name=TARGET_FEATURE_NAME,
num_epochs=1,
header=False,
na_value="?",
shuffle=shuffle,
)
        .map(prepare_example, num_parallel_calls=tf_data.AUTOTUNE, deterministic=False)
.map(encode_categorical)
)
return dataset.cache()
"""
## Implement a training and evaluation procedure
"""
def run_experiment(
model,
train_data_file,
test_data_file,
num_epochs,
learning_rate,
weight_decay,
batch_size,
):
optimizer = keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
)
model.compile(
optimizer=optimizer,
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy(name="accuracy")],
)
train_dataset = get_dataset_from_csv(train_data_file, batch_size, shuffle=True)
validation_dataset = get_dataset_from_csv(test_data_file, batch_size)
print("Start training the model...")
history = model.fit(
train_dataset, epochs=num_epochs, validation_data=validation_dataset
)
print("Model training finished")
_, accuracy = model.evaluate(validation_dataset, verbose=0)
print(f"Validation accuracy: {round(accuracy * 100, 2)}%")
return history
"""
## Create model inputs
Now, define the inputs for the models as a dictionary, where the key is the feature name,
and the value is a `keras.layers.Input` tensor with the corresponding feature shape
and data type.
"""
def create_model_inputs():
inputs = {}
for feature_name in FEATURE_NAMES:
if feature_name in NUMERIC_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="float32"
)
else:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="float32"
)
return inputs
"""
## Encode features
The `encode_inputs` method returns `encoded_categorical_feature_list` and `numerical_feature_list`.
We encode the categorical features as embeddings, using a fixed `embedding_dims` for all the features,
regardless of their vocabulary sizes. This is required for the Transformer model.
"""
def encode_inputs(inputs, embedding_dims):
encoded_categorical_feature_list = []
numerical_feature_list = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURE_NAMES:
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
            # The string values were already converted to integer indices in
            # the input pipeline (see `encode_categorical` above), so here we
            # only need to embed those indices.
            # Create an embedding layer with the specified dimensions.
embedding = layers.Embedding(
input_dim=len(vocabulary), output_dim=embedding_dims
)
# Convert the index values to embedding representations.
encoded_categorical_feature = embedding(inputs[feature_name])
encoded_categorical_feature_list.append(encoded_categorical_feature)
else:
# Use the numerical features as-is.
numerical_feature = ops.expand_dims(inputs[feature_name], -1)
numerical_feature_list.append(numerical_feature)
return encoded_categorical_feature_list, numerical_feature_list
"""
## Implement an MLP block
"""
def create_mlp(hidden_units, dropout_rate, activation, normalization_layer, name=None):
mlp_layers = []
for units in hidden_units:
        mlp_layers.append(normalization_layer())
mlp_layers.append(layers.Dense(units, activation=activation))
mlp_layers.append(layers.Dropout(dropout_rate))
return keras.Sequential(mlp_layers, name=name)
"""
## Experiment 1: a baseline model
In the first experiment, we create a simple multi-layer feed-forward network.
"""
def create_baseline_model(
embedding_dims, num_mlp_blocks, mlp_hidden_units_factors, dropout_rate
):
# Create model inputs.
inputs = create_model_inputs()
# encode features.
encoded_categorical_feature_list, numerical_feature_list = encode_inputs(
inputs, embedding_dims
)
# Concatenate all features.
features = layers.concatenate(
encoded_categorical_feature_list + numerical_feature_list
)
# Compute Feedforward layer units.
feedforward_units = [features.shape[-1]]
    # Create several feedforward layers with skip connections.
for layer_idx in range(num_mlp_blocks):
features = create_mlp(
hidden_units=feedforward_units,
dropout_rate=dropout_rate,
activation=keras.activations.gelu,
normalization_layer=layers.LayerNormalization,
name=f"feedforward_{layer_idx}",
)(features)
# Compute MLP hidden_units.
mlp_hidden_units = [
factor * features.shape[-1] for factor in mlp_hidden_units_factors
]
# Create final MLP.
features = create_mlp(
hidden_units=mlp_hidden_units,
dropout_rate=dropout_rate,
activation=keras.activations.selu,
normalization_layer=layers.BatchNormalization,
name="MLP",
)(features)
    # Add a sigmoid as a binary classifier.
outputs = layers.Dense(units=1, activation="sigmoid", name="sigmoid")(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
baseline_model = create_baseline_model(
embedding_dims=EMBEDDING_DIMS,
num_mlp_blocks=NUM_MLP_BLOCKS,
mlp_hidden_units_factors=MLP_HIDDEN_UNITS_FACTORS,
dropout_rate=DROPOUT_RATE,
)
print("Total model weights:", baseline_model.count_params())
keras.utils.plot_model(baseline_model, show_shapes=True, rankdir="LR")
"""
Let's train and evaluate the baseline model:
"""
history = run_experiment(
model=baseline_model,
train_data_file=train_data_file,
test_data_file=test_data_file,
num_epochs=NUM_EPOCHS,
learning_rate=LEARNING_RATE,
weight_decay=WEIGHT_DECAY,
batch_size=BATCH_SIZE,
)
"""
The baseline feed-forward model achieves ~81% validation accuracy.
"""
"""
## Experiment 2: TabTransformer
The TabTransformer architecture works as follows:
1. All the categorical features are encoded as embeddings, using the same `embedding_dims`.
This means that each value in each categorical feature will have its own embedding vector.
2. A column embedding, one embedding vector for each categorical feature, is added (point-wise) to the categorical feature embedding.
3. The embedded categorical features are fed into a stack of Transformer blocks.
Each Transformer block consists of a multi-head self-attention layer followed by a feed-forward layer.
4. The outputs of the final Transformer layer, which are the *contextual embeddings* of the categorical features,
are concatenated with the input numerical features, and fed into a final MLP block.
5. A `softmax` classifier is applied at the end of the model (in this binary-classification example, a `sigmoid` output is used instead).
The [paper](https://arxiv.org/abs/2012.06678) discusses both addition and concatenation of the column embedding in the
*Appendix: Experiment and Model Details* section.
The architecture of TabTransformer is shown below, as presented in the paper.
<img src="https://raw.githubusercontent.com/keras-team/keras-io/master/examples/structured_data/img/tabtransformer/tabtransformer.png" width="500"/>
"""
def create_tabtransformer_classifier(
num_transformer_blocks,
num_heads,
embedding_dims,
mlp_hidden_units_factors,
dropout_rate,
use_column_embedding=False,
):
# Create model inputs.
inputs = create_model_inputs()
# encode features.
encoded_categorical_feature_list, numerical_feature_list = encode_inputs(
inputs, embedding_dims
)
    # Stack categorical feature embeddings for the Transformer.
encoded_categorical_features = ops.stack(encoded_categorical_feature_list, axis=1)
# Concatenate numerical features.
numerical_features = layers.concatenate(numerical_feature_list)
# Add column embedding to categorical feature embeddings.
if use_column_embedding:
num_columns = encoded_categorical_features.shape[1]
column_embedding = layers.Embedding(
input_dim=num_columns, output_dim=embedding_dims
)
column_indices = ops.arange(start=0, stop=num_columns, step=1)
encoded_categorical_features = encoded_categorical_features + column_embedding(
column_indices
)
# Create multiple layers of the Transformer block.
for block_idx in range(num_transformer_blocks):
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads,
key_dim=embedding_dims,
dropout=dropout_rate,
name=f"multihead_attention_{block_idx}",
)(encoded_categorical_features, encoded_categorical_features)
# Skip connection 1.
x = layers.Add(name=f"skip_connection1_{block_idx}")(
[attention_output, encoded_categorical_features]
)
# Layer normalization 1.
x = layers.LayerNormalization(name=f"layer_norm1_{block_idx}", epsilon=1e-6)(x)
# Feedforward.
feedforward_output = create_mlp(
hidden_units=[embedding_dims],
dropout_rate=dropout_rate,
activation=keras.activations.gelu,
normalization_layer=partial(
layers.LayerNormalization, epsilon=1e-6
), # using partial to provide keyword arguments before initialization
name=f"feedforward_{block_idx}",
)(x)
# Skip connection 2.
x = layers.Add(name=f"skip_connection2_{block_idx}")([feedforward_output, x])
# Layer normalization 2.
encoded_categorical_features = layers.LayerNormalization(
name=f"layer_norm2_{block_idx}", epsilon=1e-6
)(x)
# Flatten the "contextualized" embeddings of the categorical features.
categorical_features = layers.Flatten()(encoded_categorical_features)
# Apply layer normalization to the numerical features.
numerical_features = layers.LayerNormalization(epsilon=1e-6)(numerical_features)
# Prepare the input for the final MLP block.
features = layers.concatenate([categorical_features, numerical_features])
# Compute MLP hidden_units.
mlp_hidden_units = [
factor * features.shape[-1] for factor in mlp_hidden_units_factors
]
# Create final MLP.
features = create_mlp(
hidden_units=mlp_hidden_units,
dropout_rate=dropout_rate,
activation=keras.activations.selu,
normalization_layer=layers.BatchNormalization,
name="MLP",
)(features)
    # Add a sigmoid as a binary classifier.
outputs = layers.Dense(units=1, activation="sigmoid", name="sigmoid")(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
tabtransformer_model = create_tabtransformer_classifier(
num_transformer_blocks=NUM_TRANSFORMER_BLOCKS,
num_heads=NUM_HEADS,
embedding_dims=EMBEDDING_DIMS,
mlp_hidden_units_factors=MLP_HIDDEN_UNITS_FACTORS,
dropout_rate=DROPOUT_RATE,
)
print("Total model weights:", tabtransformer_model.count_params())
keras.utils.plot_model(tabtransformer_model, show_shapes=True, rankdir="LR")
"""
Let's train and evaluate the TabTransformer model:
"""
history = run_experiment(
model=tabtransformer_model,
train_data_file=train_data_file,
test_data_file=test_data_file,
num_epochs=NUM_EPOCHS,
learning_rate=LEARNING_RATE,
weight_decay=WEIGHT_DECAY,
batch_size=BATCH_SIZE,
)
"""
The TabTransformer model achieves ~85% validation accuracy.
Note that, with the default parameter configurations, both the baseline and the TabTransformer
have a similar number of trainable weights: 109,629 and 92,151, respectively, and both use the same training hyperparameters.
"""
"""
## Conclusion
TabTransformer significantly outperforms MLP and recent
deep networks for tabular data while matching the performance of tree-based ensemble models.
TabTransformer can be learned in end-to-end supervised training using labeled examples.
For a scenario where there are a few labeled examples and a large number of unlabeled
examples, a pre-training procedure can be employed to train the Transformer layers using unlabeled data.
This is followed by fine-tuning of the pre-trained Transformer layers along with
the top MLP layer using the labeled data.
Example available on HuggingFace.
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/tab_transformer) | [](https://huggingface.co/spaces/keras-io/TabTransformer_Classification) |
"""
| keras-core/examples/keras_io/structured_data/tabtransformer.py/0 | {
"file_path": "keras-core/examples/keras_io/structured_data/tabtransformer.py",
"repo_id": "keras-core",
"token_count": 7250
} | 22 |
"""
Title: Serving TensorFlow models with TFServing
Author: [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)
Date created: 2023/01/02
Last modified: 2023/01/02
Description: How to serve TensorFlow models with TensorFlow Serving.
Accelerator: NONE
"""
"""
## Introduction
Once you build a machine learning model, the next step is to serve it.
You may want to do that by exposing your model as an endpoint service.
There are many frameworks that you can use to do that, but the TensorFlow
ecosystem has its own solution called
[TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving).
From the TensorFlow Serving
[GitHub page](https://github.com/tensorflow/serving):
> TensorFlow Serving is a flexible, high-performance serving system for machine
learning models, designed for production environments. It deals with the
inference aspect of machine learning, taking models after training and
managing their lifetimes, providing clients with versioned access via a
high-performance, reference-counted lookup table. TensorFlow Serving provides
out-of-the-box integration with TensorFlow models, but can be easily extended
to serve other types of models and data.
To note a few features:
- It can serve multiple models, or multiple versions of the same model
simultaneously
- It exposes both gRPC as well as HTTP inference endpoints
- It allows deployment of new model versions without changing any client code
- It supports canarying new versions and A/B testing experimental models
- It adds minimal latency to inference time due to efficient, low-overhead
implementation
- It features a scheduler that groups individual inference requests into batches
for joint execution on GPU, with configurable latency controls
- It supports many servables: Tensorflow models, embeddings, vocabularies,
feature transformations and even non-Tensorflow-based machine learning models
This guide creates a simple [MobileNet](https://arxiv.org/abs/1704.04861)
model using the [Keras applications API](https://keras.io/api/applications/),
and then serves it with [TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving).
The focus is on TensorFlow Serving, rather than the modeling and training in
TensorFlow.
> Note: you can find a Colab notebook with the full working code at
[this link](https://colab.research.google.com/drive/1nwuIJa4so1XzYU0ngq8tX_-SGTO295Mu?usp=sharing).
"""
"""
## Dependencies
"""
import os
import json
import shutil
import requests
import numpy as np
import tensorflow as tf
import keras_core as keras
import matplotlib.pyplot as plt
"""
## Model
Here we load a pre-trained [MobileNet](https://arxiv.org/abs/1704.04861)
from the [Keras applications](https://keras.io/api/applications/), this is the
model that we are going to serve.
"""
model = keras.applications.MobileNet()
"""
## Preprocessing
Most models don't work out of the box on raw data; they usually require some
kind of preprocessing step to adjust the data to the model requirements.
In the case of this MobileNet, we can see from its
[API page](https://keras.io/api/applications/mobilenet/) that it requires
three basic steps for its input images:
- Pixel values normalized to the `[0, 1]` range
- Pixel values scaled to the `[-1, 1]` range
- Images with the shape of `(224, 224, 3)` meaning `(height, width, channels)`
We can do all of that with the following function:
"""
def preprocess(image, mean=0.5, std=0.5, shape=(224, 224)):
"""Scale, normalize and resizes images."""
image = image / 255.0 # Scale
image = (image - mean) / std # Normalize
image = tf.image.resize(image, shape) # Resize
return image
"""
**A note regarding preprocessing and postprocessing using the "keras.applications" API**
All models that are available at the [Keras applications](https://keras.io/api/applications/)
API also provide `preprocess_input` and `decode_predictions` functions; those
functions are respectively responsible for the preprocessing and postprocessing
of each model, and already contain all the logic necessary for those steps.
That is the recommended way to process inputs and outputs when using Keras
applications models.
For this guide, we are not using them to present the advantages of custom
signatures in a clearer way.
"""
"""
## Postprocessing
In the same way, most models output values that need extra processing to
meet the user's requirements. For instance, the user does not want to know the
logit values for each class given an image; what the user wants to know is
which class the image belongs to. For our model, this translates to the following
transformations on top of the model outputs:
- Get the index of the class with the highest prediction
- Get the name of the class from that index
"""
# Download human-readable labels for ImageNet.
imagenet_labels_url = "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt"
response = requests.get(imagenet_labels_url)
# Skip the background class
labels = [x for x in response.text.split("\n") if x != ""][1:]
# Convert the labels to the TensorFlow data format
tf_labels = tf.constant(labels, dtype=tf.string)
def postprocess(prediction, labels=tf_labels):
"""Convert from probs to labels."""
indices = tf.argmax(prediction, axis=-1) # Index with highest prediction
label = tf.gather(params=labels, indices=indices) # Class name
return label
"""
Now let's download a banana picture and see how everything comes together.
"""
response = requests.get("https://i.imgur.com/j9xCCzn.jpeg", stream=True)
with open("banana.jpeg", "wb") as f:
shutil.copyfileobj(response.raw, f)
sample_img = plt.imread("./banana.jpeg")
print(f"Original image shape: {sample_img.shape}")
print(f"Original image pixel range: ({sample_img.min()}, {sample_img.max()})")
plt.imshow(sample_img)
plt.show()
preprocess_img = preprocess(sample_img)
print(f"Preprocessed image shape: {preprocess_img.shape}")
print(
f"Preprocessed image pixel range: ({preprocess_img.numpy().min()},",
f"{preprocess_img.numpy().max()})",
)
batched_img = tf.expand_dims(preprocess_img, axis=0)
batched_img = tf.cast(batched_img, tf.float32)
print(f"Batched image shape: {batched_img.shape}")
model_outputs = model(batched_img)
print(f"Model output shape: {model_outputs.shape}")
print(f"Predicted class: {postprocess(model_outputs)}")
"""
## Save the model
To load our trained model into TensorFlow Serving, we first need to save it in
[SavedModel](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/saved_model)
format. This will create a protobuf file in a well-defined directory hierarchy,
and will include a version number.
[TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving) allows us
to select which version of a model, or "servable" we want to use when we make
inference requests. Each version will be exported to a different sub-directory
under the given path.
"""
model_dir = "./model"
model_version = 1
model_export_path = f"{model_dir}/{model_version}"
tf.saved_model.save(
model,
export_dir=model_export_path,
)
print(f"SavedModel files: {os.listdir(model_export_path)}")
"""
## Examine your saved model
We'll use the command line utility `saved_model_cli` to look at the
[MetaGraphDefs](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/MetaGraphDef)
(the models) and [SignatureDefs](https://www.tensorflow.org/tfx/serving/signature_defs)
(the methods you can call) in our SavedModel. See
[this discussion of the SavedModel CLI](https://github.com/tensorflow/docs/blob/master/site/en/r1/guide/saved_model.md#cli-to-inspect-and-execute-savedmodel)
in the TensorFlow Guide.
"""
"""shell
saved_model_cli show --dir {model_export_path} --tag_set serve --signature_def serving_default
"""
"""
That tells us a lot about our model! For instance, we can see that its inputs
have a 4D shape `(-1, 224, 224, 3)` which means
`(batch_size, height, width, channels)`, also note that this model requires a
specific image shape `(224, 224, 3)` this means that we may need to reshape
our images before sending them to the model. We can also see that the model's
outputs have a `(-1, 1000)` shape which are the logits for the 1000 classes of
the [ImageNet](https://www.image-net.org) dataset.
This information doesn't tell us everything, like the fact that the pixel
values need to be in the `[-1, 1]` range, but it's a great start.
## Serve your model with TensorFlow Serving
### Install TFServing
We're preparing to install TensorFlow Serving using
[Aptitude](https://wiki.debian.org/Aptitude) since this Colab runs in a Debian
environment. We'll add the `tensorflow-model-server` package to the list of
packages that Aptitude knows about. Note that we're running as root.
> Note: This example is running TensorFlow Serving natively, but [you can also
run it in a Docker container](https://www.tensorflow.org/tfx/serving/docker),
which is one of the easiest ways to get started using TensorFlow Serving.
```shell
wget 'http://storage.googleapis.com/tensorflow-serving-apt/pool/tensorflow-model-server-universal-2.8.0/t/tensorflow-model-server-universal/tensorflow-model-server-universal_2.8.0_all.deb'
dpkg -i tensorflow-model-server-universal_2.8.0_all.deb
```
"""
"""
### Start running TensorFlow Serving
This is where we start running TensorFlow Serving and load our model. After it
loads, we can start making inference requests using REST. There are some
important parameters:
- `port`: The port that you'll use for gRPC requests.
- `rest_api_port`: The port that you'll use for REST requests.
- `model_name`: You'll use this in the URL of REST requests. It can be
anything.
- `model_base_path`: This is the path to the directory where you've saved your
model.
Check the [TFServing API reference](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/model_servers/main.cc)
to get all the parameters available.
"""
# Environment variable with the path to the model
os.environ["MODEL_DIR"] = f"{model_dir}"
"""
```shell
%%bash --bg
nohup tensorflow_model_server \
--port=8500 \
--rest_api_port=8501 \
--model_name=model \
--model_base_path=$MODEL_DIR >server.log 2>&1
```
```shell
# We can check the logs to the server to help troubleshooting
!cat server.log
```
outputs:
```
[warn] getaddrinfo: address family for nodename not supported
[evhttp_server.cc : 245] NET_LOG: Entering the event loop ...
```
```shell
# Now we can check if tensorflow is in the active services
!sudo lsof -i -P -n | grep LISTEN
```
outputs:
```
node 7 root 21u IPv6 19100 0t0 TCP *:8080 (LISTEN)
kernel_ma 34 root 7u IPv4 18874 0t0 TCP 172.28.0.12:6000 (LISTEN)
colab-fil 63 root 5u IPv4 17975 0t0 TCP *:3453 (LISTEN)
colab-fil 63 root 6u IPv6 17976 0t0 TCP *:3453 (LISTEN)
jupyter-n 81 root 6u IPv4 18092 0t0 TCP 172.28.0.12:9000 (LISTEN)
python3 101 root 23u IPv4 18252 0t0 TCP 127.0.0.1:44915 (LISTEN)
python3 132 root 3u IPv4 20548 0t0 TCP 127.0.0.1:15264 (LISTEN)
python3 132 root 4u IPv4 20549 0t0 TCP 127.0.0.1:37977 (LISTEN)
python3 132 root 9u IPv4 20662 0t0 TCP 127.0.0.1:40689 (LISTEN)
tensorflo 1101 root 5u IPv4 35543 0t0 TCP *:8500 (LISTEN)
tensorflo 1101 root 12u IPv4 35548 0t0 TCP *:8501 (LISTEN)
```
## Make a request to your model in TensorFlow Serving
Now let's create the JSON object for an inference request, and see how well
our model classifies it:
### REST API
#### Newest version of the servable
We'll send a predict request as a POST to our server's REST endpoint, and pass
it as an example. We'll ask our server to give us the latest version of our
servable by not specifying a particular version.
"""
data = json.dumps(
{
"signature_name": "serving_default",
"instances": batched_img.numpy().tolist(),
}
)
url = "http://localhost:8501/v1/models/model:predict"
def predict_rest(json_data, url):
json_response = requests.post(url, data=json_data)
response = json.loads(json_response.text)
rest_outputs = np.array(response["predictions"])
return rest_outputs
"""
```python
rest_outputs = predict_rest(data, url)
print(f"REST output shape: {rest_outputs.shape}")
print(f"Predicted class: {postprocess(rest_outputs)}")
```
outputs:
```
REST output shape: (1, 1000)
Predicted class: [b'banana']
```
### gRPC API
[gRPC](https://grpc.io/) is based on the Remote Procedure Call (RPC) model and
is a technology for implementing RPC APIs that uses HTTP 2.0 as its underlying
transport protocol. gRPC is usually preferred for low-latency, highly scalable,
and distributed systems. If you want to know more about the REST vs gRPC
tradeoffs, check out
[this article](https://cloud.google.com/blog/products/api-management/understanding-grpc-openapi-and-rest-and-when-to-use-them).
"""
import grpc
# Create a channel that will be connected to the gRPC port of the container
channel = grpc.insecure_channel("localhost:8500")
"""
```shell
pip install -q tensorflow_serving_api
```
```python
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc
# Create a stub made for prediction
# This stub will be used to send the gRPC request to the TF Server
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
```
"""
# Get the serving_input key
loaded_model = tf.saved_model.load(model_export_path)
input_name = list(
loaded_model.signatures["serving_default"]
.structured_input_signature[1]
.keys()
)[0]
"""
```python
def predict_grpc(data, input_name, stub):
# Create a gRPC request made for prediction
request = predict_pb2.PredictRequest()
# Set the name of the model, for this use case it is "model"
request.model_spec.name = "model"
# Set which signature is used to format the gRPC query
# here the default one "serving_default"
request.model_spec.signature_name = "serving_default"
# Set the input as the data
# tf.make_tensor_proto turns a TensorFlow tensor into a Protobuf tensor
request.inputs[input_name].CopyFrom(tf.make_tensor_proto(data.numpy().tolist()))
# Send the gRPC request to the TF Server
result = stub.Predict(request)
return result
grpc_outputs = predict_grpc(batched_img, input_name, stub)
grpc_outputs = np.array([grpc_outputs.outputs['predictions'].float_val])
print(f"gRPC output shape: {grpc_outputs.shape}")
print(f"Predicted class: {postprocess(grpc_outputs)}")
```
outputs:
```
gRPC output shape: (1, 1000)
Predicted class: [b'banana']
```
"""
"""
## Custom signature
Note that for this model we always need to preprocess and postprocess all
samples to get the desired output. This can get quite tricky if you are
maintaining and serving several models developed by a large team, and each one
of them might require different processing logic.
TensorFlow allows us to customize the model graph to embed all of that
processing logic, which makes model serving much easier. There are different
ways to achieve this, but since we are going to serve the models using
TFServing, we can customize the model graph straight into the serving signature.
We can just use the following code to export the same model that already
contains the preprocessing and postprocessing logic as the default signature,
this allows this model to make predictions on raw data.
"""
def export_model(model, labels):
@tf.function(
input_signature=[tf.TensorSpec([None, None, None, 3], tf.float32)]
)
def serving_fn(image):
processed_img = preprocess(image)
probs = model(processed_img)
label = postprocess(probs)
return {"label": label}
return serving_fn
model_sig_version = 2
model_sig_export_path = f"{model_dir}/{model_sig_version}"
tf.saved_model.save(
model,
export_dir=model_sig_export_path,
signatures={"serving_default": export_model(model, labels)},
)
"""shell
saved_model_cli show --dir {model_sig_export_path} --tag_set serve --signature_def serving_default
"""
"""
Note that this model has a different signature, its input is still 4D but now
with a `(-1, -1, -1, 3)` shape, which means that it supports images with any
height and width size. Its output also has a different shape, it no longer
outputs the 1000-long logits.
We can test the model's prediction using a specific signature using this API
below:
"""
batched_raw_img = tf.expand_dims(sample_img, axis=0)
batched_raw_img = tf.cast(batched_raw_img, tf.float32)
loaded_model = tf.saved_model.load(model_sig_export_path)
loaded_model.signatures["serving_default"](**{"image": batched_raw_img})
"""
## Prediction using a particular version of the servable
Now let's specify a particular version of our servable. Note that when we
saved the model with a custom signature we used a different folder, the first
model was saved in folder `/1` (version 1), and the one with a custom
signature in folder `/2` (version 2). By default, TFServing will serve all
models that share the same base parent folder.
### REST API
"""
data = json.dumps(
{
"signature_name": "serving_default",
"instances": batched_raw_img.numpy().tolist(),
}
)
url_sig = "http://localhost:8501/v1/models/model/versions/2:predict"
"""
```python
print(f"REST output shape: {rest_outputs.shape}")
print(f"Predicted class: {rest_outputs}")
```
outputs:
```
REST output shape: (1,)
Predicted class: ['banana']
```
### gRPC API
"""
"""
```python
channel = grpc.insecure_channel("localhost:8500")
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
```
"""
input_name = list(
loaded_model.signatures["serving_default"]
.structured_input_signature[1]
.keys()
)[0]
"""
```python
grpc_outputs = predict_grpc(batched_raw_img, input_name, stub)
grpc_outputs = np.array([grpc_outputs.outputs['label'].string_val])
print(f"gRPC output shape: {grpc_outputs.shape}")
print(f"Predicted class: {grpc_outputs}")
```
outputs:
```
gRPC output shape: (1, 1)
Predicted class: [[b'banana']]
```
## Additional resources
- [Colab notebook with the full working code](https://colab.research.google.com/drive/1nwuIJa4so1XzYU0ngq8tX_-SGTO295Mu?usp=sharing)
- [Train and serve a TensorFlow model with TensorFlow Serving - TensorFlow blog](https://www.tensorflow.org/tfx/tutorials/serving/rest_simple#make_a_request_to_your_model_in_tensorflow_serving)
- [TensorFlow Serving playlist - TensorFlow YouTube channel](https://www.youtube.com/playlist?list=PLQY2H8rRoyvwHdpVQVohY7-qcYf2s1UYK)
"""
| keras-core/examples/keras_io/tensorflow/keras_recipes/tf_serving.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/keras_recipes/tf_serving.py",
"repo_id": "keras-core",
"token_count": 6124
} | 23 |
"""
Title: Image classification with Swin Transformers
Author: [Rishit Dagli](https://twitter.com/rishit_dagli)
Date created: 2021/09/08
Last modified: 2021/09/08
Description: Image classification using Swin Transformers, a general-purpose backbone for computer vision.
Accelerator: GPU
"""
"""
This example implements [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)
by Liu et al. for image classification, and demonstrates it on the
[CIFAR-100 dataset](https://www.cs.toronto.edu/~kriz/cifar.html).
Swin Transformer (**S**hifted **Win**dow Transformer) can serve as a general-purpose backbone
for computer vision. Swin Transformer is a hierarchical Transformer whose
representations are computed with _shifted windows_. The shifted window scheme
brings greater efficiency by limiting self-attention computation to
non-overlapping local windows while also allowing for cross-window connections.
This architecture has the flexibility to model information at various scales and has
a linear computational complexity with respect to image size.
This example requires TensorFlow 2.5 or higher.
"""
"""
## Setup
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import keras_core as keras
from keras_core import layers
"""
## Prepare the data
We load the CIFAR-100 dataset through `keras.datasets`,
normalize the images, and convert the integer labels to one-hot encoded vectors.
"""
num_classes = 100
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = keras.utils.numerical_utils.to_categorical(y_train, num_classes)
y_test = keras.utils.numerical_utils.to_categorical(y_test, num_classes)
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
plt.figure(figsize=(10, 10))
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(x_train[i])
plt.show()
"""
## Configure the hyperparameters
A key parameter to pick is the `patch_size`, the size of the input patches.
In order to use each pixel as an individual input, you can set `patch_size` to `(1, 1)`.
Below, we take inspiration from the original paper settings
for training on ImageNet-1K, keeping most of the original settings for this example.
"""
patch_size = (2, 2) # 2-by-2 sized patches
dropout_rate = 0.03 # Dropout rate
num_heads = 8 # Attention heads
embed_dim = 64 # Embedding dimension
num_mlp = 256 # MLP layer size
qkv_bias = True # Convert embedded patches to query, key, and values with a learnable additive value
window_size = 2 # Size of attention window
shift_size = 1 # Size of shifting window
image_dimension = 32 # Initial image size
num_patch_x = input_shape[0] // patch_size[0]
num_patch_y = input_shape[1] // patch_size[1]
learning_rate = 1e-3
batch_size = 128
num_epochs = 1
validation_split = 0.1
weight_decay = 0.0001
label_smoothing = 0.1
"""
## Helper functions
We create two helper functions to help us get a sequence of
patches from the image, merge patches, and apply dropout.
"""
def window_partition(x, window_size):
_, height, width, channels = x.shape
patch_num_y = height // window_size
patch_num_x = width // window_size
x = tf.reshape(
x,
shape=(
-1,
patch_num_y,
window_size,
patch_num_x,
window_size,
channels,
),
)
x = tf.transpose(x, (0, 1, 3, 2, 4, 5))
windows = tf.reshape(x, shape=(-1, window_size, window_size, channels))
return windows
def window_reverse(windows, window_size, height, width, channels):
patch_num_y = height // window_size
patch_num_x = width // window_size
x = tf.reshape(
windows,
shape=(
-1,
patch_num_y,
patch_num_x,
window_size,
window_size,
channels,
),
)
x = tf.transpose(x, perm=(0, 1, 3, 2, 4, 5))
x = tf.reshape(x, shape=(-1, height, width, channels))
return x
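# Illustrative shape sketch (assuming window_size=2): `window_partition` turns
# a tensor of shape (batch, 16, 16, C) into (batch * 8 * 8, 2, 2, C)
# non-overlapping windows, and `window_reverse` maps those windows back to
# (batch, 16, 16, C).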
class DropPath(layers.Layer):
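    """Stochastic depth: randomly drops the whole residual branch per sample.

    Each sample in the batch has its branch zeroed out with probability
    `drop_prob`; surviving samples are rescaled by 1 / (1 - drop_prob) so the
    expected value of the output is preserved.
    """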
def __init__(self, drop_prob=None, **kwargs):
super().__init__(**kwargs)
self.drop_prob = drop_prob
def call(self, x):
input_shape = tf.shape(x)
batch_size = input_shape[0]
rank = x.shape.rank
shape = (batch_size,) + (1,) * (rank - 1)
random_tensor = (1 - self.drop_prob) + tf.random.uniform(
shape, dtype=x.dtype
)
path_mask = tf.floor(random_tensor)
output = tf.math.divide(x, 1 - self.drop_prob) * path_mask
return output
"""
## Window based multi-head self-attention
Usually Transformers perform global self-attention, where the relationships between
a token and all other tokens are computed. The global computation leads to quadratic
complexity with respect to the number of tokens. Here, as the [original paper](https://arxiv.org/abs/2103.14030)
suggests, we compute self-attention within local windows, in a non-overlapping manner.
Global self-attention leads to quadratic computational complexity in the number of patches,
whereas window-based self-attention leads to linear complexity and is easily scalable.
"""
class WindowAttention(layers.Layer):
def __init__(
self,
dim,
window_size,
num_heads,
qkv_bias=True,
dropout_rate=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.qkv = layers.Dense(dim * 3, use_bias=qkv_bias)
self.dropout = layers.Dropout(dropout_rate)
self.proj = layers.Dense(dim)
def build(self, input_shape):
num_window_elements = (2 * self.window_size[0] - 1) * (
2 * self.window_size[1] - 1
)
self.relative_position_bias_table = self.add_weight(
shape=(num_window_elements, self.num_heads),
initializer=tf.initializers.Zeros(),
trainable=True,
)
coords_h = np.arange(self.window_size[0])
coords_w = np.arange(self.window_size[1])
coords_matrix = np.meshgrid(coords_h, coords_w, indexing="ij")
coords = np.stack(coords_matrix)
coords_flatten = coords.reshape(2, -1)
relative_coords = (
coords_flatten[:, :, None] - coords_flatten[:, None, :]
)
relative_coords = relative_coords.transpose([1, 2, 0])
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.relative_position_index = tf.Variable(
initial_value=lambda: tf.convert_to_tensor(relative_position_index),
trainable=False,
)
def call(self, x, mask=None):
_, size, channels = x.shape
head_dim = channels // self.num_heads
x_qkv = self.qkv(x)
x_qkv = tf.reshape(x_qkv, shape=(-1, size, 3, self.num_heads, head_dim))
x_qkv = tf.transpose(x_qkv, perm=(2, 0, 3, 1, 4))
q, k, v = x_qkv[0], x_qkv[1], x_qkv[2]
q = q * self.scale
k = tf.transpose(k, perm=(0, 1, 3, 2))
attn = q @ k
num_window_elements = self.window_size[0] * self.window_size[1]
relative_position_index_flat = tf.reshape(
self.relative_position_index, shape=(-1,)
)
relative_position_bias = tf.gather(
self.relative_position_bias_table, relative_position_index_flat
)
relative_position_bias = tf.reshape(
relative_position_bias,
shape=(num_window_elements, num_window_elements, -1),
)
relative_position_bias = tf.transpose(
relative_position_bias, perm=(2, 0, 1)
)
attn = attn + tf.expand_dims(relative_position_bias, axis=0)
if mask is not None:
nW = mask.shape[0]
mask_float = tf.cast(
tf.expand_dims(tf.expand_dims(mask, axis=1), axis=0), tf.float32
)
attn = (
tf.reshape(attn, shape=(-1, nW, self.num_heads, size, size))
+ mask_float
)
attn = tf.reshape(attn, shape=(-1, self.num_heads, size, size))
attn = keras.activations.softmax(attn, axis=-1)
else:
attn = keras.activations.softmax(attn, axis=-1)
attn = self.dropout(attn)
x_qkv = attn @ v
x_qkv = tf.transpose(x_qkv, perm=(0, 2, 1, 3))
x_qkv = tf.reshape(x_qkv, shape=(-1, size, channels))
x_qkv = self.proj(x_qkv)
x_qkv = self.dropout(x_qkv)
return x_qkv
"""
## The complete Swin Transformer model
Finally, we put together the complete Swin Transformer by replacing the standard multi-head
attention (MHA) with shifted windows attention. As suggested in the
original paper, we create a model comprising of a shifted window-based MHA
layer, followed by a 2-layer MLP with GELU nonlinearity in between, applying
`LayerNormalization` before each MSA layer and each MLP, and a residual
connection after each of these layers.
Notice that we only create a simple MLP with 2 Dense and
2 Dropout layers. Often you will see models using ResNet-50 as the MLP which is
quite standard in the literature. However in this paper the authors use a
2-layer MLP with GELU nonlinearity in between.
"""
class SwinTransformer(layers.Layer):
def __init__(
self,
dim,
num_patch,
num_heads,
window_size=7,
shift_size=0,
num_mlp=1024,
qkv_bias=True,
dropout_rate=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim # number of input dimensions
self.num_patch = num_patch # number of embedded patches
self.num_heads = num_heads # number of attention heads
self.window_size = window_size # size of window
self.shift_size = shift_size # size of window shift
self.num_mlp = num_mlp # number of MLP nodes
self.norm1 = layers.LayerNormalization(epsilon=1e-5)
self.attn = WindowAttention(
dim,
window_size=(self.window_size, self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)
self.drop_path = DropPath(dropout_rate)
self.norm2 = layers.LayerNormalization(epsilon=1e-5)
self.mlp = keras.Sequential(
[
layers.Dense(num_mlp),
layers.Activation(keras.activations.gelu),
layers.Dropout(dropout_rate),
layers.Dense(dim),
layers.Dropout(dropout_rate),
]
)
if min(self.num_patch) < self.window_size:
self.shift_size = 0
self.window_size = min(self.num_patch)
def build(self, input_shape):
if self.shift_size == 0:
self.attn_mask = None
else:
height, width = self.num_patch
h_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
w_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
mask_array = np.zeros((1, height, width, 1))
count = 0
for h in h_slices:
for w in w_slices:
mask_array[:, h, w, :] = count
count += 1
mask_array = tf.convert_to_tensor(mask_array)
# mask array to windows
mask_windows = window_partition(mask_array, self.window_size)
mask_windows = tf.reshape(
mask_windows, shape=[-1, self.window_size * self.window_size]
)
attn_mask = tf.expand_dims(mask_windows, axis=1) - tf.expand_dims(
mask_windows, axis=2
)
attn_mask = tf.where(attn_mask != 0, -100.0, attn_mask)
attn_mask = tf.where(attn_mask == 0, 0.0, attn_mask)
self.attn_mask = tf.Variable(
initial_value=attn_mask, trainable=False
)
def call(self, x):
height, width = self.num_patch
_, num_patches_before, channels = x.shape
x_skip = x
x = self.norm1(x)
x = tf.reshape(x, shape=(-1, height, width, channels))
if self.shift_size > 0:
shifted_x = tf.roll(
x, shift=[-self.shift_size, -self.shift_size], axis=[1, 2]
)
else:
shifted_x = x
x_windows = window_partition(shifted_x, self.window_size)
x_windows = tf.reshape(
x_windows, shape=(-1, self.window_size * self.window_size, channels)
)
attn_windows = self.attn(x_windows, mask=self.attn_mask)
attn_windows = tf.reshape(
attn_windows,
shape=(-1, self.window_size, self.window_size, channels),
)
shifted_x = window_reverse(
attn_windows, self.window_size, height, width, channels
)
if self.shift_size > 0:
x = tf.roll(
shifted_x, shift=[self.shift_size, self.shift_size], axis=[1, 2]
)
else:
x = shifted_x
x = tf.reshape(x, shape=(-1, height * width, channels))
x = self.drop_path(x)
x = x_skip + x
x_skip = x
x = self.norm2(x)
x = self.mlp(x)
x = self.drop_path(x)
x = x_skip + x
return x
"""
## Model training and evaluation
### Extract and embed patches
We first create 3 layers to help us extract, embed and merge patches from the
images on top of which we will later use the Swin Transformer class we built.
"""
class PatchExtract(layers.Layer):
def __init__(self, patch_size, **kwargs):
super().__init__(**kwargs)
self.patch_size_x = patch_size[0]
self.patch_size_y = patch_size[0]
def call(self, images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(
images=images,
sizes=(1, self.patch_size_x, self.patch_size_y, 1),
strides=(1, self.patch_size_x, self.patch_size_y, 1),
rates=(1, 1, 1, 1),
padding="VALID",
)
patch_dim = patches.shape[-1]
patch_num = patches.shape[1]
return tf.reshape(
patches, (batch_size, patch_num * patch_num, patch_dim)
)
class PatchEmbedding(layers.Layer):
def __init__(self, num_patch, embed_dim, **kwargs):
super().__init__(**kwargs)
self.num_patch = num_patch
self.proj = layers.Dense(embed_dim)
self.pos_embed = layers.Embedding(
input_dim=num_patch, output_dim=embed_dim
)
def call(self, patch):
pos = tf.range(start=0, limit=self.num_patch, delta=1)
return self.proj(patch) + self.pos_embed(pos)
class PatchMerging(keras.layers.Layer):
def __init__(self, num_patch, embed_dim):
super().__init__()
self.num_patch = num_patch
self.embed_dim = embed_dim
self.linear_trans = layers.Dense(2 * embed_dim, use_bias=False)
def call(self, x):
height, width = self.num_patch
_, _, C = x.shape
x = tf.reshape(x, shape=(-1, height, width, C))
x0 = x[:, 0::2, 0::2, :]
x1 = x[:, 1::2, 0::2, :]
x2 = x[:, 0::2, 1::2, :]
x3 = x[:, 1::2, 1::2, :]
x = tf.concat((x0, x1, x2, x3), axis=-1)
x = tf.reshape(x, shape=(-1, (height // 2) * (width // 2), 4 * C))
return self.linear_trans(x)
"""
### Build the model
We put together the Swin Transformer model.
"""
input = layers.Input(input_shape)
x = layers.RandomCrop(image_dimension, image_dimension)(input)
x = layers.RandomFlip("horizontal")(x)
x = PatchExtract(patch_size)(x)
x = PatchEmbedding(num_patch_x * num_patch_y, embed_dim)(x)
x = SwinTransformer(
dim=embed_dim,
num_patch=(num_patch_x, num_patch_y),
num_heads=num_heads,
window_size=window_size,
shift_size=0,
num_mlp=num_mlp,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)(x)
x = SwinTransformer(
dim=embed_dim,
num_patch=(num_patch_x, num_patch_y),
num_heads=num_heads,
window_size=window_size,
shift_size=shift_size,
num_mlp=num_mlp,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)(x)
x = PatchMerging((num_patch_x, num_patch_y), embed_dim=embed_dim)(x)
x = layers.GlobalAveragePooling1D()(x)
output = layers.Dense(num_classes, activation="softmax")(x)
"""
### Train on CIFAR-100
We train the model on CIFAR-100. Here, we only train the model
for 40 epochs to keep the training time short in this example.
In practice, you should train for 150 epochs to reach convergence.
"""
model = keras.Model(input, output)
model.compile(
loss=keras.losses.CategoricalCrossentropy(label_smoothing=label_smoothing),
optimizer=keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
history = model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=validation_split,
)
"""
Let's visualize the training progress of the model.
"""
plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
"""
Let's display the final results of the training on CIFAR-100.
"""
loss, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test loss: {round(loss, 2)}")
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
"""
The Swin Transformer model we just trained has just 152K parameters, and it gets
us to ~75% test top-5 accuracy within just 40 epochs without any signs of overfitting,
as seen in the graph above. This means we can train this network for longer
(perhaps with a bit more regularization) and obtain even better performance.
The performance can be improved further with additional techniques such as a cosine
decay learning rate schedule (see the sketch below) or other data augmentation
techniques. While experimenting, I tried training the model for 150 epochs with a
slightly higher dropout rate and a larger embedding dimension, which pushes the
performance to ~72% test accuracy on CIFAR-100, as you can see in the screenshot.

The authors report a top-1 accuracy of 87.3% on ImageNet. They also present
a number of experiments to study how input size, optimizers, etc. affect the final
performance of this model, and further use the model for object detection,
semantic segmentation and instance segmentation, reporting competitive results.
You are strongly advised to also check out the
[original paper](https://arxiv.org/abs/2103.14030).
This example takes inspiration from the official
[PyTorch](https://github.com/microsoft/Swin-Transformer) and
[TensorFlow](https://github.com/VcampSoldiers/Swin-Transformer-Tensorflow) implementations.
"""
| keras-core/examples/keras_io/tensorflow/vision/swim_transformers.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/vision/swim_transformers.py",
"repo_id": "keras-core",
"token_count": 8722
} | 24 |
"""
Title: Keypoint Detection with Transfer Learning
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Converted to Keras Core by: [Muhammad Anas Raza](https://anasrz.com)
Date created: 2021/05/02
Last modified: 2023/07/19
Description: Training a keypoint detector with data augmentation and transfer learning.
Accelerator: GPU
"""
"""
Keypoint detection consists of locating key object parts. For example, the key parts
of our faces include nose tips, eyebrows, eye corners, and so on. These parts help to
represent the underlying object in a feature-rich manner. Keypoint detection has
applications that include pose estimation, face detection, etc.
In this example, we will build a keypoint detector using transfer learning on the
[StanfordExtra dataset](https://github.com/benjiebob/StanfordExtra).
This example requires TensorFlow 2.4 or higher,
as well as the [`imgaug`](https://imgaug.readthedocs.io/) library,
which can be installed using the following command:
"""
"""shell
pip install -q -U imgaug
"""
"""
## Data collection
"""
"""
The StanfordExtra dataset contains 12,000 images of dogs together with keypoints and
segmentation maps. It is developed from the [Stanford dogs dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/).
It can be downloaded with the command below:
"""
"""shell
wget -q http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar
"""
"""
Annotations are provided as a single JSON file in the StanfordExtra dataset and one needs
to fill [this form](https://forms.gle/sRtbicgxsWvRtRmUA) to get access to it. The
authors explicitly instruct users not to share the JSON file, and this example respects this wish:
you should obtain the JSON file yourself.
The annotations archive (containing the JSON file) is expected to be locally available as `stanfordextra_v12.zip`.
After the files are downloaded, we can extract the archives.
"""
"""shell
tar xf images.tar
unzip -qq ~/stanfordextra_v12.zip
"""
"""
## Imports
"""
from keras_core import layers
import keras_core as keras
from imgaug.augmentables.kps import KeypointsOnImage
from imgaug.augmentables.kps import Keypoint
import imgaug.augmenters as iaa
from PIL import Image
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import json
import os
"""
## Define hyperparameters
"""
IMG_SIZE = 224
BATCH_SIZE = 64
EPOCHS = 5
NUM_KEYPOINTS = 24 * 2 # 24 pairs each having x and y coordinates
"""
## Load data
The authors also provide a metadata file that specifies additional information about the
keypoints, like color information, animal pose name, etc. We will load this file in a `pandas`
dataframe to extract information for visualization purposes.
"""
IMG_DIR = "Images"
JSON = "StanfordExtra_V12/StanfordExtra_v12.json"
KEYPOINT_DEF = "https://github.com/benjiebob/StanfordExtra/raw/master/keypoint_definitions.csv"
# Load the ground-truth annotations.
with open(JSON) as infile:
json_data = json.load(infile)
# Set up a dictionary, mapping all the ground-truth information
# with respect to the path of the image.
json_dict = {i["img_path"]: i for i in json_data}
"""
A single entry of `json_dict` looks like the following:
```
'n02085782-Japanese_spaniel/n02085782_2886.jpg':
{'img_bbox': [205, 20, 116, 201],
'img_height': 272,
'img_path': 'n02085782-Japanese_spaniel/n02085782_2886.jpg',
'img_width': 350,
'is_multiple_dogs': False,
'joints': [[108.66666666666667, 252.0, 1],
[147.66666666666666, 229.0, 1],
[163.5, 208.5, 1],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[54.0, 244.0, 1],
[77.33333333333333, 225.33333333333334, 1],
[79.0, 196.5, 1],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[150.66666666666666, 86.66666666666667, 1],
[88.66666666666667, 73.0, 1],
[116.0, 106.33333333333333, 1],
[109.0, 123.33333333333333, 1],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
'seg': ...}
```
"""
"""
In this example, the keys we are interested in are:
* `img_path`
* `joints`
There are a total of 24 entries present inside `joints`. Each entry has 3 values:
* x-coordinate
* y-coordinate
* visibility flag of the keypoints (1 indicates visibility and 0 indicates non-visibility)
As we can see, `joints` contains multiple `[0, 0, 0]` entries, which denote that those
keypoints were not labeled. In this example, we consider both non-visible and
unlabeled keypoints in order to allow mini-batch learning.
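For instance, the coordinates and visibility flags of a single entry can be
separated with plain `numpy` indexing (a small sketch reusing `json_dict` and the
`numpy` import from this example):
```python
sample = json_dict["n02085782-Japanese_spaniel/n02085782_2886.jpg"]
joints = np.array(sample["joints"])  # shape: (24, 3)
coords = joints[:, :2]               # (x, y) coordinate per keypoint
visible = joints[:, 2] == 1          # boolean mask: keypoint labeled and visible
```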
"""
# Load the metadata definition file and preview it.
keypoint_def = pd.read_csv(KEYPOINT_DEF)
keypoint_def.head()
# Extract the colours and labels.
colours = keypoint_def["Hex colour"].values.tolist()
colours = ["#" + colour for colour in colours]
labels = keypoint_def["Name"].values.tolist()
# Utility for reading an image and for getting its annotations.
def get_dog(name):
data = json_dict[name]
img_data = plt.imread(os.path.join(IMG_DIR, data["img_path"]))
# If the image is RGBA convert it to RGB.
if img_data.shape[-1] == 4:
img_data = img_data.astype(np.uint8)
img_data = Image.fromarray(img_data)
img_data = np.array(img_data.convert("RGB"))
data["img_data"] = img_data
return data
"""
## Visualize data
Now, we write a utility function to visualize the images and their keypoints.
"""
# Parts of this code come from here:
# https://github.com/benjiebob/StanfordExtra/blob/master/demo.ipynb
def visualize_keypoints(images, keypoints):
fig, axes = plt.subplots(nrows=len(images), ncols=2, figsize=(16, 12))
[ax.axis("off") for ax in np.ravel(axes)]
for (ax_orig, ax_all), image, current_keypoint in zip(
axes, images, keypoints
):
ax_orig.imshow(image)
ax_all.imshow(image)
# If the keypoints were formed by `imgaug` then the coordinates need
# to be iterated differently.
if isinstance(current_keypoint, KeypointsOnImage):
for idx, kp in enumerate(current_keypoint.keypoints):
ax_all.scatter(
[kp.x],
[kp.y],
c=colours[idx],
marker="x",
s=50,
linewidths=5,
)
else:
current_keypoint = np.array(current_keypoint)
# Since the last entry is the visibility flag, we discard it.
current_keypoint = current_keypoint[:, :2]
for idx, (x, y) in enumerate(current_keypoint):
ax_all.scatter(
[x], [y], c=colours[idx], marker="x", s=50, linewidths=5
)
plt.tight_layout(pad=2.0)
plt.show()
# Select four samples randomly for visualization.
samples = list(json_dict.keys())
num_samples = 4
selected_samples = np.random.choice(samples, num_samples, replace=False)
images, keypoints = [], []
for sample in selected_samples:
data = get_dog(sample)
image = data["img_data"]
keypoint = data["joints"]
images.append(image)
keypoints.append(keypoint)
visualize_keypoints(images, keypoints)
"""
The plots show that we have images of non-uniform sizes, which is expected in most
real-world scenarios. However, if we resize these images to have a uniform shape (for
instance (224 x 224)), their ground-truth annotations will also be affected. The same
applies if we apply any geometric transformation (e.g. a horizontal flip) to an image.
Fortunately, `imgaug` provides utilities that can handle this issue.
In the next section, we will write a data generator inheriting from
`keras.utils.PyDataset` (the Keras Core counterpart of the
[`keras.utils.Sequence`](https://keras.io/api/utils/python_utils/#sequence-class) class)
that applies data augmentation on batches of data using `imgaug`.
"""
"""
## Prepare data generator
"""
class KeyPointsDataset(keras.utils.PyDataset):
def __init__(
self, image_keys, aug, batch_size=BATCH_SIZE, train=True, **kwargs
):
super().__init__(**kwargs)
self.image_keys = image_keys
self.aug = aug
self.batch_size = batch_size
self.train = train
self.on_epoch_end()
def __len__(self):
return len(self.image_keys) // self.batch_size
def on_epoch_end(self):
self.indexes = np.arange(len(self.image_keys))
if self.train:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
indexes = self.indexes[
index * self.batch_size : (index + 1) * self.batch_size
]
image_keys_temp = [self.image_keys[k] for k in indexes]
(images, keypoints) = self.__data_generation(image_keys_temp)
return (images, keypoints)
def __data_generation(self, image_keys_temp):
batch_images = np.empty(
(self.batch_size, IMG_SIZE, IMG_SIZE, 3), dtype="int"
)
batch_keypoints = np.empty(
(self.batch_size, 1, 1, NUM_KEYPOINTS), dtype="float32"
)
for i, key in enumerate(image_keys_temp):
data = get_dog(key)
current_keypoint = np.array(data["joints"])[:, :2]
kps = []
# To apply our data augmentation pipeline, we first need to
# form Keypoint objects with the original coordinates.
for j in range(0, len(current_keypoint)):
kps.append(
Keypoint(x=current_keypoint[j][0], y=current_keypoint[j][1])
)
# We then project the original image and its keypoint coordinates.
current_image = data["img_data"]
kps_obj = KeypointsOnImage(kps, shape=current_image.shape)
# Apply the augmentation pipeline.
(new_image, new_kps_obj) = self.aug(
image=current_image, keypoints=kps_obj
)
batch_images[i,] = new_image
# Parse the coordinates from the new keypoint object.
kp_temp = []
for keypoint in new_kps_obj:
kp_temp.append(np.nan_to_num(keypoint.x))
kp_temp.append(np.nan_to_num(keypoint.y))
# More on why this reshaping later.
batch_keypoints[i,] = np.array(kp_temp).reshape(1, 1, 24 * 2)
# Scale the coordinates to [0, 1] range.
batch_keypoints = batch_keypoints / IMG_SIZE
return (batch_images, batch_keypoints)
"""
To learn more about how to work with keypoints in `imgaug`, check out
[this document](https://imgaug.readthedocs.io/en/latest/source/examples_keypoints.html).
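As a quick, self-contained illustration (using a dummy black image purely for the
sake of the example), an augmenter transforms an image and its keypoints together:
```python
dummy_image = np.zeros((IMG_SIZE, IMG_SIZE, 3), dtype=np.uint8)
kps = KeypointsOnImage([Keypoint(x=100, y=120)], shape=dummy_image.shape)
rotate = iaa.Affine(rotate=10)
image_aug, kps_aug = rotate(image=dummy_image, keypoints=kps)
print(kps_aug.keypoints[0].x, kps_aug.keypoints[0].y)  # rotated coordinates
```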
"""
"""
## Define augmentation transforms
"""
train_aug = iaa.Sequential(
[
iaa.Resize(IMG_SIZE, interpolation="linear"),
iaa.Fliplr(0.3),
# `Sometimes()` applies a function randomly to the inputs with
# a given probability (0.3, in this case).
iaa.Sometimes(0.3, iaa.Affine(rotate=10, scale=(0.5, 0.7))),
]
)
test_aug = iaa.Sequential([iaa.Resize(IMG_SIZE, interpolation="linear")])
"""
## Create training and validation splits
"""
np.random.shuffle(samples)
train_keys, validation_keys = (
samples[int(len(samples) * 0.15) :],
samples[: int(len(samples) * 0.15)],
)
"""
## Data generator investigation
"""
train_dataset = KeyPointsDataset(
train_keys, train_aug, workers=2, use_multiprocessing=True
)
validation_dataset = KeyPointsDataset(
validation_keys, test_aug, train=False, workers=2, use_multiprocessing=True
)
print(f"Total batches in training set: {len(train_dataset)}")
print(f"Total batches in validation set: {len(validation_dataset)}")
sample_images, sample_keypoints = next(iter(train_dataset))
assert sample_keypoints.max() == 1.0
assert sample_keypoints.min() == 0.0
sample_keypoints = sample_keypoints[:4].reshape(-1, 24, 2) * IMG_SIZE
visualize_keypoints(sample_images[:4], sample_keypoints)
"""
## Model building
The [Stanford dogs dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/) (on which
the StanfordExtra dataset is based) was built using the [ImageNet-1k dataset](http://image-net.org/).
So, it is likely that the models pretrained on the ImageNet-1k dataset would be useful
for this task. We will use a MobileNetV2 pre-trained on this dataset as a backbone to
extract meaningful features from the images and then pass those to a custom regression
head for predicting coordinates.
"""
def get_model():
# Load the pre-trained weights of MobileNetV2 and freeze the weights
backbone = keras.applications.MobileNetV2(
weights="imagenet",
include_top=False,
input_shape=(IMG_SIZE, IMG_SIZE, 3),
)
backbone.trainable = False
inputs = layers.Input((IMG_SIZE, IMG_SIZE, 3))
x = keras.applications.mobilenet_v2.preprocess_input(inputs)
x = backbone(x)
x = layers.Dropout(0.3)(x)
x = layers.SeparableConv2D(
NUM_KEYPOINTS, kernel_size=5, strides=1, activation="relu"
)(x)
outputs = layers.SeparableConv2D(
NUM_KEYPOINTS, kernel_size=3, strides=1, activation="sigmoid"
)(x)
return keras.Model(inputs, outputs, name="keypoint_detector")
"""
Our custom network is fully convolutional, which makes it more parameter-efficient than the
same version of the network with fully-connected dense layers.
"""
get_model().summary()
"""
Notice the output shape of the network: `(None, 1, 1, 48)`. This is why we have reshaped
the coordinates as: `batch_keypoints[i, :] = np.array(kp_temp).reshape(1, 1, 24 * 2)`.
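In other words, a batch of predictions can be mapped back to per-keypoint pixel
coordinates with a single reshape (a small sketch, assuming a trained `model` and
a batch of input `images`):
```python
preds = model.predict(images)                # shape: (batch, 1, 1, 48)
preds = preds.reshape(-1, 24, 2) * IMG_SIZE  # 24 (x, y) pairs in pixel coordinates
```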
"""
"""
## Model compilation and training
For this example, we will train the network only for five epochs.
"""
model = get_model()
model.compile(loss="mse", optimizer=keras.optimizers.Adam(1e-4))
model.fit(train_dataset, validation_data=validation_dataset, epochs=EPOCHS)
"""
## Make predictions and visualize them
"""
sample_val_images, sample_val_keypoints = next(iter(validation_dataset))
sample_val_images = sample_val_images[:4]
sample_val_keypoints = sample_val_keypoints[:4].reshape(-1, 24, 2) * IMG_SIZE
predictions = model.predict(sample_val_images).reshape(-1, 24, 2) * IMG_SIZE
# Ground-truth
visualize_keypoints(sample_val_images, sample_val_keypoints)
# Predictions
visualize_keypoints(sample_val_images, predictions)
"""
Predictions will likely improve with more training.
"""
"""
## Going further
* Try using other augmentation transforms from `imgaug` to investigate how that changes
the results.
* Here, we used the features from the pre-trained network as-is, that is, we did
not [fine-tune](https://keras.io/guides/transfer_learning/) it. You are encouraged to fine-tune it on this task and see if that
improves the performance; a minimal sketch is shown below. You can also try different architectures and see how they
affect the final performance.
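A minimal fine-tuning sketch (reusing the `model`, `train_dataset` and
`validation_dataset` defined above) could look like this:
```python
model.trainable = True  # also unfreezes the nested MobileNetV2 backbone
model.compile(loss="mse", optimizer=keras.optimizers.Adam(1e-5))  # lower LR
model.fit(train_dataset, validation_data=validation_dataset, epochs=EPOCHS)
```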
"""
| keras-core/examples/keras_io/vision/keypoint_detection.py/0 | {
"file_path": "keras-core/examples/keras_io/vision/keypoint_detection.py",
"repo_id": "keras-core",
"token_count": 5823
} | 25 |
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
@keras_core_export("keras_core.activations.relu")
def relu(x, negative_slope=0.0, max_value=None, threshold=0.0):
"""Applies the rectified linear unit activation function.
With default values, this returns the standard ReLU activation:
`max(x, 0)`, the element-wise maximum of 0 and the input tensor.
Modifying default parameters allows you to use non-zero thresholds,
change the max value of the activation,
and to use a non-zero multiple of the input for values below the threshold.
Examples:
>>> x = [-10, -5, 0.0, 5, 10]
>>> keras_core.activations.relu(x)
[ 0., 0., 0., 5., 10.]
>>> keras_core.activations.relu(x, negative_slope=0.5)
[-5. , -2.5, 0. , 5. , 10. ]
>>> keras_core.activations.relu(x, max_value=5.)
[0., 0., 0., 5., 5.]
>>> keras_core.activations.relu(x, threshold=5.)
[-0., -0., 0., 0., 10.]
Args:
x: Input tensor.
negative_slope: A `float` that controls the slope
for values lower than the threshold.
max_value: A `float` that sets the saturation threshold (the largest
value the function will return).
threshold: A `float` giving the threshold value of the activation
function below which values will be damped or set to zero.
Returns:
A tensor with the same shape and dtype as input `x`.
"""
if backend.any_symbolic_tensors((x,)):
return ReLU(
negative_slope=negative_slope,
max_value=max_value,
threshold=threshold,
)(x)
return ReLU.static_call(
x,
negative_slope=negative_slope,
max_value=max_value,
threshold=threshold,
)
class ReLU(ops.Operation):
def __init__(
self, negative_slope=0.0, max_value=None, threshold=0.0, name=None
):
super().__init__(name=name)
self.negative_slope = negative_slope
self.max_value = max_value
self.threshold = threshold
def call(self, x):
return self.static_call(
x,
negative_slope=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold,
)
def compute_output_spec(self, x):
return backend.KerasTensor(x.shape, x.dtype)
@staticmethod
def static_call(x, negative_slope=0.0, max_value=None, threshold=0.0):
x = backend.convert_to_tensor(x)
if negative_slope != 0.0:
if max_value is None and threshold == 0:
return backend.nn.leaky_relu(x, negative_slope=negative_slope)
if threshold != 0:
negative_part = backend.nn.relu(-x + threshold)
else:
negative_part = backend.nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
threshold = ops.cast(threshold, dtype=x.dtype)
x = x * backend.cast(
backend.numpy.greater(x, threshold), dtype=x.dtype
)
elif max_value == 6:
# if no threshold, then can use nn.relu6 native op for performance
x = backend.nn.relu6(x)
clip_max = False
else:
x = backend.nn.relu(x)
if clip_max:
min_value = ops.cast(0.0, dtype=x.dtype)
max_value = ops.cast(max_value, dtype=x.dtype)
x = backend.numpy.clip(x, min_value, max_value)
if negative_slope != 0.0:
x -= negative_slope * negative_part
return x
@keras_core_export("keras_core.activations.leaky_relu")
def leaky_relu(x, negative_slope=0.2):
"""Leaky relu activation function.
Args:
x: Input tensor.
negative_slope: A `float` that controls the slope
for values lower than the threshold.
"""
return ops.leaky_relu(x, negative_slope=negative_slope)
@keras_core_export("keras_core.activations.relu6")
def relu6(x):
"""Relu6 activation function.
It's the ReLU function, but truncated to a maximum value of 6.
Args:
x: Input tensor.
"""
return ops.relu6(x)
@keras_core_export("keras_core.activations.softmax")
def softmax(x, axis=-1):
"""Softmax converts a vector of values to a probability distribution.
The elements of the output vector are in range `[0, 1]` and sum to 1.
Each input vector is handled independently.
The `axis` argument sets which axis of the input the function
is applied along.
Softmax is often used as the activation for the last
layer of a classification network because the result could be interpreted as
a probability distribution.
The softmax of each vector x is computed as
`exp(x) / sum(exp(x))`.
The input values are the log-odds of the resulting probability.
Args:
x : Input tensor.
axis: Integer, axis along which the softmax is applied.
"""
output = ops.softmax(x, axis=axis)
# Cache the logits to use for crossentropy loss.
try:
output._keras_logits = x
except AttributeError:
# We're dealing with a C-type.
pass
return output
@keras_core_export("keras_core.activations.elu")
def elu(x, alpha=1.0):
"""Exponential Linear Unit.
The exponential linear unit (ELU) with `alpha > 0` is defined as:
- `x` if `x > 0`
- `alpha * (exp(x) - 1)` if `x < 0`
ELUs have negative values which pushes the mean of the activations
closer to zero.
Mean activations that are closer to zero enable faster learning as they
bring the gradient closer to the natural gradient.
ELUs saturate to a negative value when the argument gets smaller.
Saturation means a small derivative which decreases the variation
and the information that is propagated to the next layer.
Args:
x: Input tensor.
Reference:
- [Clevert et al., 2016](https://arxiv.org/abs/1511.07289)
"""
return ops.elu(x, alpha=alpha)
@keras_core_export("keras_core.activations.selu")
def selu(x):
"""Scaled Exponential Linear Unit (SELU).
The Scaled Exponential Linear Unit (SELU) activation function is defined as:
- `scale * x` if `x > 0`
- `scale * alpha * (exp(x) - 1)` if `x < 0`
where `alpha` and `scale` are pre-defined constants
(`alpha=1.67326324` and `scale=1.05070098`).
Basically, the SELU activation function multiplies `scale` (> 1) with the
output of the `keras_core.activations.elu` function to ensure a slope larger
than one for positive inputs.
The values of `alpha` and `scale` are
chosen so that the mean and variance of the inputs are preserved
between two consecutive layers as long as the weights are initialized
correctly (see `keras_core.initializers.LecunNormal` initializer)
and the number of input units is "large enough"
(see reference paper for more information).
Args:
x: Input tensor.
Notes:
- To be used together with the
`keras_core.initializers.LecunNormal` initializer.
- To be used together with the dropout variant
`keras_core.layers.AlphaDropout` (rather than regular dropout).
Reference:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
return ops.selu(x)
@keras_core_export("keras_core.activations.softplus")
def softplus(x):
"""Softplus activation function.
It is defined as: `softplus(x) = log(exp(x) + 1)`.
Args:
x: Input tensor.
"""
return ops.softplus(x)
@keras_core_export("keras_core.activations.softsign")
def softsign(x):
"""Softsign activation function.
Softsign is defined as: `softsign(x) = x / (abs(x) + 1)`.
Args:
x: Input tensor.
"""
return ops.softsign(x)
@keras_core_export(
["keras_core.activations.silu", "keras_core.activations.swish"]
)
def silu(x):
"""Swish (or Silu) activation function.
It is defined as: `swish(x) = x * sigmoid(x)`.
The Swish (or Silu) activation function is a smooth,
non-monotonic function that is unbounded above and
bounded below.
Args:
x: Input tensor.
Reference:
- [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
"""
return ops.silu(x)
@keras_core_export("keras_core.activations.gelu")
def gelu(x, approximate=False):
"""Gaussian error linear unit (GELU) activation function.
The Gaussian error linear unit (GELU) is defined as:
`gelu(x) = x * P(X <= x)` where `X ~ N(0, 1)`,
i.e. `gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))`.
GELU weights inputs by their value, rather than gating
inputs by their sign as in ReLU.
Args:
x: Input tensor.
approximate: A `bool`, whether to enable approximation.
Reference:
- [Hendrycks et al., 2016](https://arxiv.org/abs/1606.08415)
"""
return ops.gelu(x, approximate=approximate)
@keras_core_export("keras_core.activations.tanh")
def tanh(x):
"""Hyperbolic tangent activation function.
It is defined as:
`tanh(x) = sinh(x) / cosh(x)`, i.e.
`tanh(x) = ((exp(x) - exp(-x)) / (exp(x) + exp(-x)))`.
Args:
x: Input tensor.
"""
return ops.tanh(x)
@keras_core_export("keras_core.activations.sigmoid")
def sigmoid(x):
"""Sigmoid activation function.
It is defined as: `sigmoid(x) = 1 / (1 + exp(-x))`.
For small values (<-5),
`sigmoid` returns a value close to zero, and for large values (>5)
the result of the function gets close to 1.
Sigmoid is equivalent to a 2-element softmax, where the second element is
assumed to be zero. The sigmoid function always returns a value between
0 and 1.
Args:
x: Input tensor.
"""
output = ops.sigmoid(x)
# Cache the logits to use for crossentropy loss.
try:
output._keras_logits = x
except AttributeError:
# We're dealing with a C-type.
pass
return output
@keras_core_export("keras_core.activations.exponential")
def exponential(x):
"""Exponential activation function.
Args:
x: Input tensor.
"""
return ops.exp(x)
@keras_core_export("keras_core.activations.hard_sigmoid")
def hard_sigmoid(x):
"""Hard sigmoid activation function.
The hard sigmoid activation is defined as:
- `0` if `x < -2.5`
- `1` if `x > 2.5`
- `0.2 * x + 0.5` if `-2.5 <= x <= 2.5`
It's a faster, piecewise linear approximation
of the sigmoid activation.
Args:
x: Input tensor.
Reference:
- [Wikipedia "Hard sigmoid"](https://en.wikipedia.org/wiki/Hard_sigmoid)
"""
return ops.hard_sigmoid(x)
@keras_core_export("keras_core.activations.linear")
def linear(x):
"""Linear activation function (pass-through).
A "linear" activation is an identity function:
it returns the input, unmodified.
Args:
x: Input tensor.
"""
return x
class Mish(ops.Operation):
def call(self, x):
return self.static_call(x)
def compute_output_spec(self, x):
return backend.KerasTensor(x.shape, x.dtype)
@staticmethod
def static_call(x):
return x * backend.nn.tanh(backend.nn.softplus(x))
@keras_core_export("keras_core.activations.mish")
def mish(x):
"""Mish activation function.
It is defined as:
`mish(x) = x * tanh(softplus(x))`
where `softplus` is defined as:
`softplus(x) = log(exp(x) + 1)`
Args:
x: Input tensor.
Reference:
- [Misra, 2019](https://arxiv.org/abs/1908.08681)
"""
x = backend.convert_to_tensor(x)
return Mish.static_call(x)
@keras_core_export("keras_core.activations.log_softmax")
def log_softmax(x, axis=-1):
"""Log-Softmax activation function.
Each input vector is handled independently.
The `axis` argument sets which axis of the input the function
is applied along.
Args:
x: Input tensor.
axis: Integer, axis along which the softmax is applied.
"""
return ops.log_softmax(x, axis=axis)
| keras-core/keras_core/activations/activations.py/0 | {
"file_path": "keras-core/keras_core/activations/activations.py",
"repo_id": "keras-core",
"token_count": 4990
} | 26 |
import warnings
from keras_core import backend
from keras_core import layers
from keras_core.api_export import keras_core_export
from keras_core.applications import imagenet_utils
from keras_core.models import Functional
from keras_core.ops import operation_utils
from keras_core.utils import file_utils
BASE_WEIGHTS_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/nasnet/"
)
NASNET_MOBILE_WEIGHT_PATH = BASE_WEIGHTS_PATH + "NASNet-mobile.h5"
NASNET_MOBILE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + "NASNet-mobile-no-top.h5"
NASNET_LARGE_WEIGHT_PATH = BASE_WEIGHTS_PATH + "NASNet-large.h5"
NASNET_LARGE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + "NASNet-large-no-top.h5"
def NASNet(
input_shape=None,
penultimate_filters=4032,
num_blocks=6,
stem_block_filters=96,
skip_reduction=True,
filter_multiplier=2,
include_top=True,
weights="imagenet",
input_tensor=None,
pooling=None,
classes=1000,
default_size=None,
classifier_activation="softmax",
):
"""Instantiates a NASNet model.
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For NasNet, call `keras_core.applications.nasnet.preprocess_input`
on your inputs before passing them to the model.
`nasnet.preprocess_input` will scale input pixels between -1 and 1.
Args:
input_shape: Optional shape tuple, the input shape
is by default `(331, 331, 3)` for NASNetLarge and
`(224, 224, 3)` for NASNetMobile.
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
penultimate_filters: Number of filters in the penultimate layer.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
num_blocks: Number of repeated blocks of the NASNet model.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
stem_block_filters: Number of filters in the initial stem block
skip_reduction: Whether to skip the reduction step at the tail
end of the network.
filter_multiplier: Controls the width of the network.
- If `filter_multiplier` < 1.0, proportionally decreases the number
of filters in each layer.
- If `filter_multiplier` > 1.0, proportionally increases the number
of filters in each layer.
- If `filter_multiplier` = 1, default number of filters from the
paper are used at each layer.
include_top: Whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
default_size: Specifies the default image size of the model
classifier_activation: A `str` or callable.
The activation function to use on the "top" layer.
Ignored unless `include_top=True`.
Set `classifier_activation=None` to return the logits
of the "top" layer. When loading pretrained weights,
`classifier_activation` can only be `None` or `"softmax"`.
Returns:
A model instance.
"""
if not (weights in {"imagenet", None} or file_utils.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded."
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
'If using `weights` as `"imagenet"` with `include_top` '
"as true, `classes` should be 1000"
)
if (
isinstance(input_shape, tuple)
and None in input_shape
and weights == "imagenet"
):
raise ValueError(
"When specifying the input shape of a NASNet"
" and loading `ImageNet` weights, "
"the input_shape argument must be static "
"(no None entries). Got: `input_shape=" + str(input_shape) + "`."
)
if default_size is None:
default_size = 331
# Determine proper input shape and default size.
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if backend.image_data_format() != "channels_last":
warnings.warn(
"The NASNet family of models is only available "
'for the input data format "channels_last" '
"(width, height, channels). "
"However your settings specify the default "
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
"in your Keras config located at ~/.keras/keras.json. "
"The model being returned right now will expect inputs "
'to follow the "channels_last" data format.',
stacklevel=2,
)
backend.set_image_data_format("channels_last")
old_data_format = "channels_first"
else:
old_data_format = None
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if penultimate_filters % (24 * (filter_multiplier**2)) != 0:
raise ValueError(
"For NASNet-A models, the `penultimate_filters` must be a multiple "
"of 24 * (`filter_multiplier` ** 2). Current value: %d"
% penultimate_filters
)
channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
filters = penultimate_filters // 24
x = layers.Conv2D(
stem_block_filters,
(3, 3),
strides=(2, 2),
padding="valid",
use_bias=False,
name="stem_conv1",
kernel_initializer="he_normal",
)(img_input)
x = layers.BatchNormalization(
axis=channel_dim, momentum=0.9997, epsilon=1e-3, name="stem_bn1"
)(x)
p = None
x, p = _reduction_a_cell(
x, p, filters // (filter_multiplier**2), block_id="stem_1"
)
x, p = _reduction_a_cell(
x, p, filters // filter_multiplier, block_id="stem_2"
)
for i in range(num_blocks):
x, p = _normal_a_cell(x, p, filters, block_id="%d" % (i))
x, p0 = _reduction_a_cell(
x, p, filters * filter_multiplier, block_id="reduce_%d" % (num_blocks)
)
p = p0 if not skip_reduction else p
for i in range(num_blocks):
x, p = _normal_a_cell(
x,
p,
filters * filter_multiplier,
block_id="%d" % (num_blocks + i + 1),
)
x, p0 = _reduction_a_cell(
x,
p,
filters * filter_multiplier**2,
block_id="reduce_%d" % (2 * num_blocks),
)
p = p0 if not skip_reduction else p
for i in range(num_blocks):
x, p = _normal_a_cell(
x,
p,
filters * filter_multiplier**2,
block_id="%d" % (2 * num_blocks + i + 1),
)
x = layers.Activation("relu")(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes, activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = operation_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
model = Functional(inputs, x, name="NASNet")
# Load weights.
if weights == "imagenet":
if default_size == 224: # mobile version
if include_top:
weights_path = file_utils.get_file(
"nasnet_mobile.h5",
NASNET_MOBILE_WEIGHT_PATH,
cache_subdir="models",
file_hash="020fb642bf7360b370c678b08e0adf61",
)
else:
weights_path = file_utils.get_file(
"nasnet_mobile_no_top.h5",
NASNET_MOBILE_WEIGHT_PATH_NO_TOP,
cache_subdir="models",
file_hash="1ed92395b5b598bdda52abe5c0dbfd63",
)
model.load_weights(weights_path)
elif default_size == 331: # large version
if include_top:
weights_path = file_utils.get_file(
"nasnet_large.h5",
NASNET_LARGE_WEIGHT_PATH,
cache_subdir="models",
file_hash="11577c9a518f0070763c2b964a382f17",
)
else:
weights_path = file_utils.get_file(
"nasnet_large_no_top.h5",
NASNET_LARGE_WEIGHT_PATH_NO_TOP,
cache_subdir="models",
file_hash="d81d89dc07e6e56530c4e77faddd61b5",
)
model.load_weights(weights_path)
else:
raise ValueError(
"ImageNet weights can only be loaded with NASNetLarge"
" or NASNetMobile"
)
elif weights is not None:
model.load_weights(weights)
if old_data_format:
backend.set_image_data_format(old_data_format)
return model
@keras_core_export(
[
"keras_core.applications.nasnet.NASNetMobile",
"keras_core.applications.NASNetMobile",
]
)
def NASNetMobile(
input_shape=None,
include_top=True,
weights="imagenet",
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates a Mobile NASNet model in ImageNet mode.
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note: each Keras Application expects a specific kind of input preprocessing.
For NASNet, call `keras_core.applications.nasnet.preprocess_input` on your
inputs before passing them to the model.
Args:
input_shape: Optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` for NASNetMobile).
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
include_top: Whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights). For loading `imagenet` weights,
`input_shape` should be (224, 224, 3)
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer. When loading pretrained weights, `classifier_activation` can
only be `None` or `"softmax"`.
Returns:
A Keras model instance.
"""
if backend.backend() == "torch":
raise ValueError(
"NASNetMobile is not available with the torch backend "
"at this time due to an outstanding bug. "
"If interested, please open a PR."
)
return NASNet(
input_shape,
penultimate_filters=1056,
num_blocks=4,
stem_block_filters=32,
skip_reduction=False,
filter_multiplier=2,
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
pooling=pooling,
classes=classes,
default_size=224,
classifier_activation=classifier_activation,
)
@keras_core_export(
[
"keras_core.applications.nasnet.NASNetLarge",
"keras_core.applications.NASNetLarge",
]
)
def NASNetLarge(
input_shape=None,
include_top=True,
weights="imagenet",
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates a NASNet model in ImageNet mode.
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note: each Keras Application expects a specific kind of input preprocessing.
For NASNet, call `keras_core.applications.nasnet.preprocess_input` on your
inputs before passing them to the model.
Args:
input_shape: Optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(331, 331, 3)` for NASNetLarge).
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
include_top: Whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights). For loading `imagenet` weights,
`input_shape` should be (331, 331, 3)
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer. When loading pretrained weights, `classifier_activation`
can only be `None` or `"softmax"`.
Returns:
A Keras model instance.
"""
return NASNet(
input_shape,
penultimate_filters=4032,
num_blocks=6,
stem_block_filters=96,
skip_reduction=True,
filter_multiplier=2,
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
pooling=pooling,
classes=classes,
default_size=331,
classifier_activation=classifier_activation,
)
def _separable_conv_block(
ip, filters, kernel_size=(3, 3), strides=(1, 1), block_id=None
):
"""Adds 2 blocks of [relu-separable conv-batchnorm].
Args:
ip: Input tensor
filters: Number of output filters per layer
kernel_size: Kernel size of separable convolutions
strides: Strided convolution for downsampling
block_id: String block_id
Returns:
A Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
with backend.name_scope(f"separable_conv_block_{block_id}"):
x = layers.Activation("relu")(ip)
if strides == (2, 2):
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name=f"separable_conv_1_pad_{block_id}",
)(x)
conv_pad = "valid"
else:
conv_pad = "same"
x = layers.SeparableConv2D(
filters,
kernel_size,
strides=strides,
name=f"separable_conv_1_{block_id}",
padding=conv_pad,
use_bias=False,
)(x)
x = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name=f"separable_conv_1_bn_{block_id}",
)(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(
filters,
kernel_size,
name=f"separable_conv_2_{block_id}",
padding="same",
use_bias=False,
)(x)
x = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name=f"separable_conv_2_bn_{block_id}",
)(x)
return x
def _adjust_block(p, ip, filters, block_id=None):
"""Adjusts the input `previous path` to match the shape of the `input`.
Used in situations where the output number of filters needs to be changed.
Args:
p: Input tensor which needs to be modified
ip: Input tensor whose shape needs to be matched
filters: Number of output filters to be matched
block_id: String block_id
Returns:
Adjusted Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
img_dim = 2 if backend.image_data_format() == "channels_first" else -2
with backend.name_scope("adjust_block"):
if p is None:
p = ip
elif p.shape[img_dim] != ip.shape[img_dim]:
with backend.name_scope(f"adjust_reduction_block_{block_id}"):
p = layers.Activation("relu", name=f"adjust_relu_1_{block_id}")(
p
)
p1 = layers.AveragePooling2D(
(1, 1),
strides=(2, 2),
padding="valid",
name=f"adjust_avg_pool_1_{block_id}",
)(p)
p1 = layers.Conv2D(
filters // 2,
(1, 1),
padding="same",
use_bias=False,
name=f"adjust_conv_1_{block_id}",
kernel_initializer="he_normal",
)(p1)
p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
p2 = layers.AveragePooling2D(
(1, 1),
strides=(2, 2),
padding="valid",
name=f"adjust_avg_pool_2_{block_id}",
)(p2)
p2 = layers.Conv2D(
filters // 2,
(1, 1),
padding="same",
use_bias=False,
name=f"adjust_conv_2_{block_id}",
kernel_initializer="he_normal",
)(p2)
p = layers.concatenate([p1, p2], axis=channel_dim)
p = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name=f"adjust_bn_{block_id}",
)(p)
elif p.shape[channel_dim] != filters:
with backend.name_scope(f"adjust_projection_block_{block_id}"):
p = layers.Activation("relu")(p)
p = layers.Conv2D(
filters,
(1, 1),
strides=(1, 1),
padding="same",
name=f"adjust_conv_projection_{block_id}",
use_bias=False,
kernel_initializer="he_normal",
)(p)
p = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name=f"adjust_bn_{block_id}",
)(p)
return p
def _normal_a_cell(ip, p, filters, block_id=None):
"""Adds a Normal cell for NASNet-A (Fig. 4 in the paper).
Args:
ip: Input tensor `x`
p: Input tensor `p`
filters: Number of output filters
block_id: String block_id
Returns:
A Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
with backend.name_scope(f"normal_A_block_{block_id}"):
p = _adjust_block(p, ip, filters, block_id)
h = layers.Activation("relu")(ip)
h = layers.Conv2D(
filters,
(1, 1),
strides=(1, 1),
padding="same",
name=f"normal_conv_1_{block_id}",
use_bias=False,
kernel_initializer="he_normal",
)(h)
h = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name=f"normal_bn_1_{block_id}",
)(h)
with backend.name_scope("block_1"):
x1_1 = _separable_conv_block(
h,
filters,
kernel_size=(5, 5),
block_id=f"normal_left1_{block_id}",
)
x1_2 = _separable_conv_block(
p, filters, block_id=f"normal_right1_{block_id}"
)
x1 = layers.add([x1_1, x1_2], name=f"normal_add_1_{block_id}")
with backend.name_scope("block_2"):
x2_1 = _separable_conv_block(
p, filters, (5, 5), block_id=f"normal_left2_{block_id}"
)
x2_2 = _separable_conv_block(
p, filters, (3, 3), block_id=f"normal_right2_{block_id}"
)
x2 = layers.add([x2_1, x2_2], name=f"normal_add_2_{block_id}")
with backend.name_scope("block_3"):
x3 = layers.AveragePooling2D(
(3, 3),
strides=(1, 1),
padding="same",
name=f"normal_left3_{block_id}",
)(h)
x3 = layers.add([x3, p], name=f"normal_add_3_{block_id}")
with backend.name_scope("block_4"):
x4_1 = layers.AveragePooling2D(
(3, 3),
strides=(1, 1),
padding="same",
name=f"normal_left4_{block_id}",
)(p)
x4_2 = layers.AveragePooling2D(
(3, 3),
strides=(1, 1),
padding="same",
name=f"normal_right4_{block_id}",
)(p)
x4 = layers.add([x4_1, x4_2], name=f"normal_add_4_{block_id}")
with backend.name_scope("block_5"):
x5 = _separable_conv_block(
h, filters, block_id=f"normal_left5_{block_id}"
)
x5 = layers.add([x5, h], name=f"normal_add_5_{block_id}")
x = layers.concatenate(
[p, x1, x2, x3, x4, x5],
axis=channel_dim,
name=f"normal_concat_{block_id}",
)
return x, ip
def _reduction_a_cell(ip, p, filters, block_id=None):
"""Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).
Args:
ip: Input tensor `x`
p: Input tensor `p`
filters: Number of output filters
block_id: String block_id
Returns:
A Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == "channels_first" else -1
with backend.name_scope(f"reduction_A_block_{block_id}"):
p = _adjust_block(p, ip, filters, block_id)
h = layers.Activation("relu")(ip)
h = layers.Conv2D(
filters,
(1, 1),
strides=(1, 1),
padding="same",
name=f"reduction_conv_1_{block_id}",
use_bias=False,
kernel_initializer="he_normal",
)(h)
h = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name=f"reduction_bn_1_{block_id}",
)(h)
h3 = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(h, 3),
name=f"reduction_pad_1_{block_id}",
)(h)
with backend.name_scope("block_1"):
x1_1 = _separable_conv_block(
h,
filters,
(5, 5),
strides=(2, 2),
block_id=f"reduction_left1_{block_id}",
)
x1_2 = _separable_conv_block(
p,
filters,
(7, 7),
strides=(2, 2),
block_id=f"reduction_right1_{block_id}",
)
x1 = layers.add([x1_1, x1_2], name=f"reduction_add_1_{block_id}")
with backend.name_scope("block_2"):
x2_1 = layers.MaxPooling2D(
(3, 3),
strides=(2, 2),
padding="valid",
name=f"reduction_left2_{block_id}",
)(h3)
x2_2 = _separable_conv_block(
p,
filters,
(7, 7),
strides=(2, 2),
block_id=f"reduction_right2_{block_id}",
)
x2 = layers.add([x2_1, x2_2], name=f"reduction_add_2_{block_id}")
with backend.name_scope("block_3"):
x3_1 = layers.AveragePooling2D(
(3, 3),
strides=(2, 2),
padding="valid",
name=f"reduction_left3_{block_id}",
)(h3)
x3_2 = _separable_conv_block(
p,
filters,
(5, 5),
strides=(2, 2),
block_id=f"reduction_right3_{block_id}",
)
x3 = layers.add([x3_1, x3_2], name=f"reduction_add3_{block_id}")
with backend.name_scope("block_4"):
x4 = layers.AveragePooling2D(
(3, 3),
strides=(1, 1),
padding="same",
name=f"reduction_left4_{block_id}",
)(x1)
x4 = layers.add([x2, x4])
with backend.name_scope("block_5"):
x5_1 = _separable_conv_block(
x1, filters, (3, 3), block_id=f"reduction_left4_{block_id}"
)
x5_2 = layers.MaxPooling2D(
(3, 3),
strides=(2, 2),
padding="valid",
name=f"reduction_right5_{block_id}",
)(h3)
x5 = layers.add([x5_1, x5_2], name=f"reduction_add4_{block_id}")
x = layers.concatenate(
[x2, x3, x4, x5],
axis=channel_dim,
name=f"reduction_concat_{block_id}",
)
return x, ip
@keras_core_export("keras_core.applications.nasnet.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="tf"
)
@keras_core_export("keras_core.applications.nasnet.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| keras-core/keras_core/applications/nasnet.py/0 | {
"file_path": "keras-core/keras_core/applications/nasnet.py",
"repo_id": "keras-core",
"token_count": 15233
} | 27 |
from keras_core import testing
from keras_core.backend.common.name_scope import current_path
from keras_core.backend.common.name_scope import name_scope
class NameScopeTest(testing.TestCase):
def test_stacking(self):
self.assertEqual(current_path(), "")
with name_scope("outer") as outer:
self.assertEqual(outer.name, "outer")
self.assertEqual(current_path(), "outer")
with name_scope("middle") as middle:
self.assertEqual(middle.name, "middle")
self.assertEqual(current_path(), "outer/middle")
with name_scope("inner") as inner:
self.assertEqual(inner.name, "inner")
self.assertEqual(current_path(), "outer/middle/inner")
self.assertEqual(current_path(), "outer/middle")
self.assertEqual(current_path(), "outer")
self.assertEqual(current_path(), "")
def test_deduplication(self):
self.assertEqual(current_path(), "")
with name_scope("name", caller=1):
with name_scope("name", caller=1):
self.assertEqual(current_path(), "name")
self.assertEqual(current_path(), "")
with name_scope("name"):
with name_scope("name"):
self.assertEqual(current_path(), "name/name")
def test_errors(self):
with self.assertRaisesRegex(ValueError, "must be a string"):
name_scope("foo/bar")
with self.assertRaisesRegex(ValueError, "must be a string"):
name_scope(4)
| keras-core/keras_core/backend/common/name_scope_test.py/0 | {
"file_path": "keras-core/keras_core/backend/common/name_scope_test.py",
"repo_id": "keras-core",
"token_count": 707
} | 28 |
import contextlib
import tree
from jax import lax
from jax import numpy as jnp
from keras_core.backend.common import stateless_scope
from keras_core.utils.nest import pack_sequence_as
def rnn(
step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False,
return_all_outputs=True,
):
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return jnp.transpose(input_t, axes)
if not time_major:
inputs = tree.map_structure(swap_batch_timestep, inputs)
flattened_inputs = tree.flatten(inputs)
time_steps = flattened_inputs[0].shape[0]
if mask is not None:
if mask.dtype != "bool":
mask = mask.astype("bool")
if len(mask.shape) == 2:
mask = jnp.expand_dims(mask, axis=-1)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
def _expand_mask(mask_t, input_t, fixed_dim=1):
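        # Broadcast the boolean mask up to the rank of the data tensor, e.g. a
        # mask_t of shape (batch, 1) and an input_t of shape (batch, features)
        # yield a tiled mask of shape (batch, features).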
if tree.is_nested(mask_t):
raise ValueError(
f"mask_t is expected to be tensor, but got {mask_t}"
)
if tree.is_nested(input_t):
raise ValueError(
f"input_t is expected to be tensor, but got {input_t}"
)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = jnp.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + list(input_t.shape[fixed_dim:])
return jnp.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError("Unrolling requires a fixed number of timesteps.")
states = tuple(initial_states)
successive_states = []
successive_outputs = []
        # Process the input tensors. The input tensor needs to be split on the
        # time_step dim, and reversed if go_backwards is True. In the case of
        # nested input, the input is flattened and each tensor is transformed
        # individually. The result is a tuple of lists, where each item in the
        # tuple is a list of tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if tree.is_nested(inputs):
processed_input = tree.map_structure(
_process_single_input_t, inputs
)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(
inp, tuple(states) + tuple(constants)
)
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = jnp.zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = jnp.where(tiled_mask_t, output, prev_output)
flat_states = tree.flatten(states)
flat_new_states = tree.flatten(new_states)
tiled_mask_t = tuple(
_expand_mask(mask_t, s) for s in flat_states
)
flat_final_states = tuple(
jnp.where(m, s, ps)
for m, s, ps in zip(
tiled_mask_t, flat_new_states, flat_states
)
)
states = pack_sequence_as(states, flat_final_states)
if return_all_outputs:
successive_outputs.append(output)
successive_states.append(states)
else:
successive_outputs = [output]
successive_states = [states]
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = jnp.stack(successive_outputs)
else: # mask is None
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(
inp, tuple(states) + tuple(constants)
)
if return_all_outputs:
successive_outputs.append(output)
successive_states.append(states)
else:
successive_outputs = [output]
successive_states = [states]
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = jnp.stack(successive_outputs)
else: # Unroll == False
if mask is not None:
def _step(states, current_input):
current_input, current_mask = current_input
is_masked = jnp.all(
jnp.logical_not(current_mask), axis=-1, keepdims=True
)
output_t, new_states = step_function(current_input, states)
if zero_output_for_mask:
masked_outs = jnp.where(
is_masked, jnp.zeros_like(output_t), output_t
)
else:
# Assume the first state is the previous output.
output_tm1 = states[0]
masked_outs = jnp.where(is_masked, output_tm1, output_t)
new_states = [
jnp.where(is_masked, s, ns)
for s, ns in zip(states, new_states)
]
return (new_states, masked_outs)
scan_xs = (inputs, mask)
else:
def _step(states, current_input):
output_t, new_states = step_function(current_input, states)
return new_states, output_t
scan_xs = inputs
if stateless_scope.in_stateless_scope():
# Reuse the existing parent stateless scope.
scope = contextlib.nullcontext()
else:
scope = stateless_scope.StatelessScope()
with scope:
# We must use a stateless scope because `scan` will involve
# JAX tracing -- any variable update at this stage would
# be a leak.
new_states, outputs = lax.scan(
f=_step,
init=initial_states,
xs=scan_xs,
reverse=go_backwards,
)
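        # `lax.scan` threads `states` through the timesteps and stacks the
        # per-step outputs along the leading (time) axis.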
if go_backwards:
outputs = jnp.flip(outputs, axis=0)
last_output = outputs[-1]
if not time_major:
outputs = tree.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
def cudnn_ok(*args, **kwargs):
return False
def lstm(*args, **kwargs):
raise NotImplementedError
def gru(*args, **kwargs):
raise NotImplementedError
def unstack(x, axis=0):
return [
lax.index_in_dim(x, i, axis, keepdims=False)
for i in range(x.shape[axis])
]
| keras-core/keras_core/backend/jax/rnn.py/0 | {
"file_path": "keras-core/keras_core/backend/jax/rnn.py",
"repo_id": "keras-core",
"token_count": 3907
} | 29 |
import tensorflow as tf
from keras_core.utils import tf_utils
class TFLayer(tf.__internal__.tracking.AutoTrackable):
def __init__(self, *args, **kwargs):
# Export-related attributes
self._saved_model_inputs_spec = None
self._saved_model_arg_spec = None
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _set_save_spec(self, inputs, args=None, kwargs=None):
"""Defines the save spec so that serialization can trace layer calls.
The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are
saved into a tuple of `([inputs] + args, kwargs)`.
Args:
inputs: possibly nested inputs passed into the call function.
args: a list of positional arguments passed into call.
kwargs: a dictionary of keyword arguments passed into call.
"""
if self._saved_model_inputs_spec is not None:
return # Already set.
inputs_spec = tf.nest.map_structure(tf_utils.get_tensor_spec, inputs)
args_spec = tf.nest.map_structure(tf_utils.get_tensor_spec, args or [])
kwargs_spec = {}
# Filter out non-tensor arguments from kwargs.
for key, kwarg in kwargs.items():
flat_kwarg = tf.nest.flatten(kwarg)
flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg]
if any(s is None for s in flat_specs):
continue
kwargs_spec[key] = tf.nest.pack_sequence_as(kwarg, flat_specs)
self._saved_model_inputs_spec = inputs_spec
self._saved_model_arg_spec = (
[inputs_spec] + list(args_spec),
kwargs_spec,
)
def _trackable_children(self, save_type="checkpoint", **kwargs):
if save_type == "savedmodel":
# SavedModel needs to ignore the execution functions.
train_function = getattr(self, "train_function", None)
test_function = getattr(self, "test_function", None)
predict_function = getattr(self, "predict_function", None)
self.train_function = None
self.test_function = None
self.predict_function = None
children = super()._trackable_children(save_type, **kwargs)
if save_type == "savedmodel":
self.train_function = train_function
self.test_function = test_function
self.predict_function = predict_function
return children
@property
def _default_save_signature(self):
"""For SavedModel support: returns the default serving signature."""
from keras_core.models.functional import Functional
from keras_core.models.model import Model
from keras_core.models.sequential import Sequential
if not isinstance(self, Model):
return None
inputs = None
if (
isinstance(self, Sequential)
and getattr(self, "_functional", None) is not None
):
inputs = self._functional.input
elif isinstance(self, Functional):
inputs = self.input
if inputs is not None:
input_signature = [
tf.nest.map_structure(
lambda x: tf.TensorSpec(x.shape, self.compute_dtype),
inputs,
)
]
else:
shapes_dict = self._build_shapes_dict
if len(shapes_dict) == 1:
input_shape = tuple(shapes_dict.values())[0]
input_signature = [
tf.TensorSpec(input_shape, self.compute_dtype)
]
else:
input_signature = [
tf.nest.map_structure(
lambda x: tf.TensorSpec(x.shape, self.compute_dtype),
shapes_dict,
)
]
@tf.function(input_signature=input_signature)
def serving_default(inputs):
return self(inputs)
return serving_default
| keras-core/keras_core/backend/tensorflow/layer.py/0 | {
"file_path": "keras-core/keras_core/backend/tensorflow/layer.py",
"repo_id": "keras-core",
"token_count": 1889
} | 30 |
import torch
from keras_core.backend.common.stateless_scope import in_stateless_scope
from keras_core.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
self.torch_params = torch.nn.ParameterList(
[variable.value for variable in self.variables]
)
def parameters(self, recurse=True):
if not hasattr(self, "torch_params"):
self._track_variables()
return torch.nn.Module.parameters(self, recurse=recurse)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras_core.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "torch_params"
):
from keras_core.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
| keras-core/keras_core/backend/torch/layer.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/layer.py",
"repo_id": "keras-core",
"token_count": 542
} | 31 |
import torch
import torch.nn.functional as tnn
from keras_core.backend.config import floatx
from keras_core.backend.torch.core import convert_to_tensor
from keras_core.backend.torch.core import get_device
from keras_core.backend.torch.core import to_torch_dtype
from keras_core.random.seed_generator import SeedGenerator
from keras_core.random.seed_generator import draw_seed
from keras_core.random.seed_generator import make_default_seed
def torch_seed_generator(seed):
first_seed, second_seed = draw_seed(seed)
device = get_device()
if device == "meta":
# Generator is not supported by the meta device.
return None
generator = torch.Generator(device=get_device())
generator.manual_seed(int(first_seed + second_seed))
return generator
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
dtype = to_torch_dtype(dtype)
# Do not use generator during symbolic execution.
if get_device() == "meta":
return torch.normal(
mean, stddev, size=shape, dtype=dtype, device=get_device()
)
generator = torch_seed_generator(seed)
return torch.normal(
mean,
stddev,
size=shape,
generator=generator,
dtype=dtype,
device=get_device(),
)
def categorical(logits, num_samples, dtype="int32", seed=None):
logits = convert_to_tensor(logits)
dtype = to_torch_dtype(dtype)
probs = torch.softmax(logits, dim=-1)
# Do not use generator during symbolic execution.
if get_device() == "meta":
return torch.multinomial(
probs,
num_samples,
replacement=True,
).type(dtype)
generator = torch_seed_generator(seed)
return torch.multinomial(
probs,
num_samples,
replacement=True,
generator=generator,
).type(dtype)
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
dtype = to_torch_dtype(dtype)
requested_shape = shape
if len(requested_shape) == 0:
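        # torch.rand requires a non-empty size, so scalars are sampled as a
        # single-element tensor and unwrapped back to a scalar below.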
shape = (1,)
# Do not use generator during symbolic execution.
if get_device() == "meta":
rand_tensor = torch.rand(size=shape, dtype=dtype, device=get_device())
else:
generator = torch_seed_generator(seed)
rand_tensor = torch.rand(
size=shape, generator=generator, dtype=dtype, device=get_device()
)
output = (maxval - minval) * rand_tensor + minval
if len(requested_shape) == 0:
return output[0]
return output
def randint(shape, minval, maxval, dtype="int32", seed=None):
dtype = to_torch_dtype(dtype)
# Do not use generator during symbolic execution.
if get_device() == "meta":
return torch.randint(
low=minval,
high=maxval,
size=shape,
dtype=dtype,
device=get_device(),
)
generator = torch_seed_generator(seed)
return torch.randint(
low=minval,
high=maxval,
size=shape,
generator=generator,
dtype=dtype,
device=get_device(),
)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
# Take a larger standard normal dist, discard values outside 2 * stddev
# Offset by mean and stddev
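    # Concretely: draw 4 standard-normal candidates per output element, flag
    # the ones that fall inside (-2, 2), and pick one in-range candidate per
    # element via the max-index over the boolean flags (True sorts above
    # False). The gathered values are then scaled by `stddev` and shifted by
    # `mean`.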
x = normal(tuple(shape) + (4,), mean=0, stddev=1, dtype=dtype, seed=seed)
valid = (x > -2) & (x < 2)
indexes = valid.max(-1, keepdim=True)[1]
trunc_x = torch.empty(shape, device=get_device())
trunc_x.data.copy_(x.gather(-1, indexes).squeeze(-1))
trunc_x.data.mul_(stddev).add_(mean)
return trunc_x
def _get_concrete_noise_shape(inputs, noise_shape):
if noise_shape is None:
return inputs.shape
concrete_inputs_shape = inputs.shape
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def dropout(inputs, rate, noise_shape=None, seed=None):
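    # Slow path: a seed that must be honored (anything other than a
    # `SeedGenerator` without a fixed initial seed) or a custom `noise_shape`
    # requires building the Bernoulli keep-mask by hand; otherwise fall
    # through to the much faster (but unseedable) native torch dropout below.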
if (
seed is not None
and not (isinstance(seed, SeedGenerator) and seed._initial_seed is None)
or noise_shape is not None
):
keep_prob = 1.0 - rate
noise_shape = _get_concrete_noise_shape(inputs, noise_shape)
keep_prob_matrix = torch.full(
noise_shape, keep_prob, device=get_device()
)
generator = torch_seed_generator(seed)
# Do not use generator during symbolic execution.
if get_device() == "meta":
mask = torch.bernoulli(keep_prob_matrix)
else:
mask = torch.bernoulli(keep_prob_matrix, generator=generator)
mask = mask.bool()
mask = torch.broadcast_to(mask, inputs.shape)
return torch.where(
mask,
inputs / keep_prob,
torch.zeros_like(inputs, dtype=inputs.dtype),
)
    # Fast path, unseeded (torch's native dropout op does not support seeding).
# Using the above implementation is possible, but much slower.
return torch.nn.functional.dropout(
inputs, p=rate, training=True, inplace=False
)
def shuffle(x, axis=0, seed=None):
# Ref: https://github.com/pytorch/pytorch/issues/71409
x = convert_to_tensor(x)
# Get permutation indices
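    # Trick: draw uniform noise over the leading `axis + 1` dims and argsort
    # it along `axis` to obtain a permutation of that axis (independently for
    # each index of the preceding dims); the permutation is then broadcast
    # over the trailing dims and applied with `gather`.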
# Do not use generator during symbolic execution.
if get_device() == "meta":
row_perm = torch.rand(x.shape[: axis + 1], device=get_device()).argsort(
axis
)
else:
generator = torch_seed_generator(seed)
row_perm = torch.rand(
x.shape[: axis + 1], generator=generator, device=get_device()
).argsort(axis)
for _ in range(x.ndim - axis - 1):
row_perm.unsqueeze_(-1)
# Reformat this for the gather operation
row_perm = row_perm.repeat(
*[1 for _ in range(axis + 1)], *(x.shape[axis + 1 :])
)
return x.gather(axis, row_perm)
| keras-core/keras_core/backend/torch/random.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/random.py",
"repo_id": "keras-core",
"token_count": 2596
} | 32 |
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.callbacks.callback import Callback
from keras_core.utils import io_utils
@keras_core_export("keras_core.callbacks.LearningRateScheduler")
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
    At the beginning of every epoch, this callback gets the updated learning
    rate value from the `schedule` function provided at `__init__`, using the
    current epoch and current learning rate as inputs, and applies the updated
    learning rate to the optimizer.
Args:
schedule: A function that takes an epoch index (integer, indexed from 0)
and current learning rate (float) as inputs and returns a new
learning rate as output (float).
verbose: Integer. 0: quiet, 1: log update messages.
Example:
>>> # This function keeps the initial learning rate for the first ten epochs
>>> # and decreases it exponentially after that.
>>> def scheduler(epoch, lr):
... if epoch < 10:
... return lr
... else:
... return lr * ops.exp(-0.1)
>>>
>>> model = keras_core.models.Sequential([keras_core.layers.Dense(10)])
>>> model.compile(keras_core.optimizers.SGD(), loss='mse')
>>> round(model.optimizer.learning_rate, 5)
0.01
>>> callback = keras_core.callbacks.LearningRateScheduler(scheduler)
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
... epochs=15, callbacks=[callback], verbose=0)
>>> round(model.optimizer.learning_rate, 5)
0.00607
"""
def __init__(self, schedule, verbose=0):
super().__init__()
self.schedule = schedule
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, "learning_rate"):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
try: # new API
learning_rate = float(
backend.convert_to_numpy(self.model.optimizer.learning_rate)
)
learning_rate = self.schedule(epoch, learning_rate)
except TypeError: # Support for old API for backward compatibility
learning_rate = self.schedule(epoch)
if not isinstance(learning_rate, (float, np.float32, np.float64)):
raise ValueError(
"The output of the `schedule` function should be a float. "
f"Got: {learning_rate}"
)
self.model.optimizer.learning_rate = learning_rate
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch + 1}: LearningRateScheduler setting learning "
f"rate to {learning_rate}."
)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs["learning_rate"] = float(
backend.convert_to_numpy(self.model.optimizer.learning_rate)
)
| keras-core/keras_core/callbacks/learning_rate_scheduler.py/0 | {
"file_path": "keras-core/keras_core/callbacks/learning_rate_scheduler.py",
"repo_id": "keras-core",
"token_count": 1232
} | 33 |
"""Small NumPy datasets for debugging/testing."""
from keras_core.datasets import boston_housing
from keras_core.datasets import california_housing
from keras_core.datasets import cifar10
from keras_core.datasets import cifar100
from keras_core.datasets import fashion_mnist
from keras_core.datasets import imdb
from keras_core.datasets import mnist
from keras_core.datasets import reuters
| keras-core/keras_core/datasets/__init__.py/0 | {
"file_path": "keras-core/keras_core/datasets/__init__.py",
"repo_id": "keras-core",
"token_count": 129
} | 34 |
import inspect
from keras_core.api_export import keras_core_export
from keras_core.initializers.constant_initializers import Constant
from keras_core.initializers.constant_initializers import Identity
from keras_core.initializers.constant_initializers import Ones
from keras_core.initializers.constant_initializers import Zeros
from keras_core.initializers.initializer import Initializer
from keras_core.initializers.random_initializers import GlorotNormal
from keras_core.initializers.random_initializers import GlorotUniform
from keras_core.initializers.random_initializers import HeNormal
from keras_core.initializers.random_initializers import HeUniform
from keras_core.initializers.random_initializers import LecunNormal
from keras_core.initializers.random_initializers import LecunUniform
from keras_core.initializers.random_initializers import OrthogonalInitializer
from keras_core.initializers.random_initializers import RandomNormal
from keras_core.initializers.random_initializers import RandomUniform
from keras_core.initializers.random_initializers import TruncatedNormal
from keras_core.initializers.random_initializers import VarianceScaling
from keras_core.saving import serialization_lib
from keras_core.utils.naming import to_snake_case
ALL_OBJECTS = {
Initializer,
Constant,
Ones,
Zeros,
GlorotNormal,
GlorotUniform,
HeNormal,
HeUniform,
LecunNormal,
LecunUniform,
RandomNormal,
TruncatedNormal,
RandomUniform,
VarianceScaling,
OrthogonalInitializer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
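# Both the CamelCase class names and their snake_case forms resolve to the
# same classes, e.g. "GlorotUniform" and "glorot_uniform" both map to
# `GlorotUniform`.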
# Aliases
ALL_OBJECTS_DICT.update(
{
"uniform": RandomUniform,
"normal": RandomNormal,
"orthogonal": OrthogonalInitializer,
"one": Ones,
"zero": Zeros,
}
)
@keras_core_export("keras_core.initializers.serialize")
def serialize(initializer):
"""Returns the initializer configuration as a Python dict."""
return serialization_lib.serialize_keras_object(initializer)
@keras_core_export("keras_core.initializers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras initializer object via its configuration."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_core_export("keras_core.initializers.get")
def get(identifier):
"""Retrieves a Keras initializer object via an identifier.
    The `identifier` may be the string name of an initializer function or class
    (case-sensitive).
>>> identifier = 'Ones'
>>> keras_core.initializers.deserialize(identifier)
<...keras_core.initializers.initializers.Ones...>
    You can also specify the `config` of the initializer to this function by
    passing a dict containing `class_name` and `config` as an identifier. Also
    note that the `class_name` must map to an `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> keras_core.initializers.deserialize(cfg)
<...keras_core.initializers.initializers.Ones...>
    If the `identifier` is a class, this method returns a new instance of that
    class created via its default constructor.
Args:
identifier: String or dict that contains the initializer name or
configurations.
Returns:
        Initializer instance based on the input identifier.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret initializer identifier: {identifier}"
)
| keras-core/keras_core/initializers/__init__.py/0 | {
"file_path": "keras-core/keras_core/initializers/__init__.py",
"repo_id": "keras-core",
"token_count": 1434
} | 35 |
from keras_core import activations
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.ReLU")
class ReLU(Layer):
"""Rectified Linear Unit activation function layer.
Formula:
``` python
    f(x) = max(x, 0)  # standard ReLU (i.e. with default arguments)
    # with `max_value`, `negative_slope` and `threshold` set:
    f(x) = max_value if x >= max_value
    f(x) = x if threshold <= x < max_value
    f(x) = negative_slope * (x - threshold) otherwise
```
Example:
``` python
relu_layer = keras_core.layers.activations.ReLU(
max_value=10,
negative_slope=0.5,
threshold=0,
)
input = np.array([-10, -5, 0.0, 5, 10])
result = relu_layer(input)
# result = [-5. , -2.5, 0. , 5. , 10.]
```
Args:
max_value: Float >= 0. Maximum activation value. None means unlimited.
Defaults to `None`.
negative_slope: Float >= 0. Negative slope coefficient.
Defaults to `0.0`.
threshold: Float >= 0. Threshold value for thresholded activation.
Defaults to `0.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(
self, max_value=None, negative_slope=0.0, threshold=0.0, **kwargs
):
super().__init__(**kwargs)
if max_value is not None and max_value < 0.0:
raise ValueError(
"max_value of a ReLU layer cannot be a negative "
f"value. Received: max_value={max_value}"
)
if negative_slope is None or negative_slope < 0.0:
raise ValueError(
"negative_slope of a ReLU layer cannot be a negative "
f"value. Received: negative_slope={negative_slope}"
)
if threshold is None or threshold < 0.0:
raise ValueError(
"threshold of a ReLU layer cannot be a negative "
f"value. Received: threshold={threshold}"
)
self.supports_masking = True
self.max_value = max_value
self.negative_slope = negative_slope
self.threshold = threshold
def call(self, inputs):
return activations.relu(
inputs,
negative_slope=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold,
)
def get_config(self):
config = super().get_config()
config.update(
{
"max_value": self.max_value,
"negative_slope": self.negative_slope,
"threshold": self.threshold,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
| keras-core/keras_core/layers/activations/relu.py/0 | {
"file_path": "keras-core/keras_core/layers/activations/relu.py",
"repo_id": "keras-core",
"token_count": 1262
} | 36 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.convolutional.base_conv import BaseConv
@keras_core_export(
["keras_core.layers.Conv1D", "keras_core.layers.Convolution1D"]
)
class Conv1D(BaseConv):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved with the layer
input over a single spatial (or temporal) dimension to produce a tensor of
outputs. If `use_bias` is True, a bias vector is created and added to the
outputs. Finally, if `activation` is not `None`, it is applied to the
outputs as well.
Args:
filters: int, the dimension of the output space (the number of filters
in the convolution).
kernel_size: int or tuple/list of 1 integer, specifying the size of the
convolution window.
strides: int or tuple/list of 1 integer, specifying the stride length
of the convolution. `strides > 1` is incompatible with
`dilation_rate > 1`.
padding: string, `"valid"`, `"same"` or `"causal"`(case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input. `"causal"` results in causal
(dilated) convolutions, e.g. `output[t]` does not depend on
`input[t+1:]`. Useful when modeling temporal data where the model
should not violate the temporal order.
See [WaveNet: A Generative Model for Raw Audio, section2.1](
https://arxiv.org/abs/1609.03499).
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
dilation_rate: int or tuple/list of 1 integers, specifying the dilation
rate to use for dilated convolution.
groups: A positive int specifying the number of groups in which the
input is split along the channel axis. Each group is convolved
separately with `filters // groups` filters. The output is the
concatenation of all the `groups` results along the channel axis.
Input channels and `filters` must both be divisible by `groups`.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
kernel_initializer: Initializer for the convolution kernel. If `None`,
the default initializer (`"glorot_uniform"`) will be used.
bias_initializer: Initializer for the bias vector. If `None`, the
default initializer (`"zeros"`) will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape). Constraints
are not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
Input shape:
- If `data_format="channels_last"`:
A 3D tensor with shape: `(batch_shape, steps, channels)`
- If `data_format="channels_first"`:
A 3D tensor with shape: `(batch_shape, channels, steps)`
Output shape:
- If `data_format="channels_last"`:
A 3D tensor with shape: `(batch_shape, new_steps, filters)`
- If `data_format="channels_first"`:
A 3D tensor with shape: `(batch_shape, filters, new_steps)`
Returns:
A 3D tensor representing `activation(conv1d(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides > 1` and `dilation_rate > 1`.
Examples:
>>> # The inputs are 128-length vectors with 10 timesteps, and the
>>> # batch size is 4.
>>> x = np.random.rand(4, 10, 128)
>>> y = keras_core.layers.Conv1D(32, 3, activation='relu')(x)
>>> print(y.shape)
(4, 8, 32)
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
groups=1,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs
):
super().__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs
)
def _compute_causal_padding(self):
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
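        # E.g. kernel_size=3 and dilation_rate=2 give left_pad=4: four zero
        # steps are prepended along time so that output[t] never depends on
        # input[t + 1:].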
if self.data_format == "channels_last":
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
def call(self, inputs):
padding = self.padding
if self.padding == "causal":
# Apply causal padding to inputs.
inputs = ops.pad(inputs, self._compute_causal_padding())
padding = "valid"
outputs = ops.conv(
inputs,
self.kernel,
strides=list(self.strides),
padding=padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format,
)
if self.use_bias:
if self.data_format == "channels_last":
bias_shape = (1,) * (self.rank + 1) + (self.filters,)
else:
bias_shape = (1, self.filters) + (1,) * self.rank
bias = ops.reshape(self.bias, bias_shape)
outputs += bias
if self.activation is not None:
return self.activation(outputs)
return outputs
| keras-core/keras_core/layers/convolutional/conv1d.py/0 | {
"file_path": "keras-core/keras_core/layers/convolutional/conv1d.py",
"repo_id": "keras-core",
"token_count": 3080
} | 37 |
import numpy as np
import pytest
from keras_core import backend
from keras_core import layers
from keras_core import testing
from keras_core.backend.common import keras_tensor
class DenseTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_dense_basics(self):
# 2D case, no bias.
self.run_layer_test(
layers.Dense,
init_kwargs={
"units": 4,
"activation": "relu",
"kernel_initializer": "random_uniform",
"bias_initializer": "ones",
"use_bias": False,
},
input_shape=(2, 3),
expected_output_shape=(2, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# 3D case, some regularizers.
self.run_layer_test(
layers.Dense,
init_kwargs={
"units": 5,
"activation": "sigmoid",
"kernel_regularizer": "l2",
"bias_regularizer": "l2",
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=2, # we have 2 regularizers.
supports_masking=True,
)
def test_dense_correctness(self):
layer = layers.Dense(units=2, activation="relu")
layer.build((1, 2))
layer.set_weights(
[
np.array([[1.0, -2.0], [3.0, -4.0]]),
np.array([5.0, -6.0]),
]
)
inputs = np.array(
[[-1.0, 2.0]],
)
self.assertAllClose(layer(inputs), [[10.0, 0.0]])
def test_dense_errors(self):
with self.assertRaisesRegex(ValueError, "incompatible with the layer"):
layer = layers.Dense(units=2, activation="relu")
layer(keras_tensor.KerasTensor((1, 2)))
layer(keras_tensor.KerasTensor((1, 3)))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_dense_sparse(self):
import tensorflow as tf
self.run_layer_test(
layers.Dense,
init_kwargs={
"units": 4,
},
input_shape=(2, 3),
input_sparse=True,
expected_output_shape=(2, 4),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
)
inputs = 4 * backend.random.uniform((10, 10))
inputs = tf.sparse.from_dense(tf.nn.dropout(inputs, 0.8))
layer = layers.Dense(units=5)
outputs = layer(inputs)
# Verify the computation is the same as if it had been a dense tensor
expected_outputs = tf.add(
tf.matmul(tf.sparse.to_dense(inputs), layer.kernel),
layer.bias,
)
self.assertAllClose(outputs, expected_outputs)
# Verify the gradient is sparse
with tf.GradientTape() as g:
outputs = layer(inputs)
self.assertIsInstance(
g.gradient(outputs, layer.kernel), tf.IndexedSlices
)
| keras-core/keras_core/layers/core/dense_test.py/0 | {
"file_path": "keras-core/keras_core/layers/core/dense_test.py",
"repo_id": "keras-core",
"token_count": 1845
} | 38 |
"""Layer is an Operation with state.
Takes care of:
- Weights / variables (and tracking thereof)
- deferred build
- trainable argument value inference
- masking
- autocasting
And some more magic:
- add_loss
- metric tracking
- RNG seed tracking
- activity regularization
"""
import collections
import inspect
import warnings
from functools import wraps
import tree
from keras_core import backend
from keras_core import initializers
from keras_core import mixed_precision
from keras_core import regularizers
from keras_core import utils
from keras_core.api_export import keras_core_export
from keras_core.backend import KerasTensor
from keras_core.backend.common import global_state
from keras_core.layers import input_spec
from keras_core.metrics.metric import Metric
from keras_core.ops.operation import Operation
from keras_core.utils import python_utils
from keras_core.utils import summary_utils
from keras_core.utils import traceback_utils
from keras_core.utils import tracking
from keras_core.utils.shape_utils import map_shape_structure
if backend.backend() == "tensorflow":
from keras_core.backend.tensorflow.layer import TFLayer as BackendLayer
elif backend.backend() == "jax":
from keras_core.backend.jax.layer import JaxLayer as BackendLayer
elif backend.backend() == "torch":
from keras_core.backend.torch.layer import TorchLayer as BackendLayer
elif backend.backend() == "numpy":
from keras_core.backend.numpy.layer import NumpyLayer as BackendLayer
else:
raise RuntimeError(
f"Backend '{backend.backend()}' must implement a layer mixin class."
)
@keras_core_export(["keras_core.Layer", "keras_core.layers.Layer"])
class Layer(BackendLayer, Operation):
"""This is the class from which all layers inherit.
A layer is a callable object that takes as input one or more tensors and
that outputs one or more tensors. It involves *computation*, defined
in the `call()` method, and a *state* (weight variables). State can be
created:
* in `__init__()`, for instance via `self.add_weight()`;
* in the optional `build()` method, which is invoked by the first
`__call__()` to the layer, and supplies the shape(s) of the input(s),
which may not have been known at initialization time.
Layers are recursively composable: If you assign a Layer instance as an
attribute of another Layer, the outer layer will start tracking the weights
created by the inner layer. Nested layers should be instantiated in the
`__init__()` method or `build()` method.
Users will just instantiate a layer and then treat it as a callable.
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights. Can also be a
`keras_core.mixed_precision.DTypePolicy`,
which allows the computation and
weight dtype to differ. Defaults to `None`. `None` means to use
`keras_core.mixed_precision.dtype_policy()`,
which is a `float32` policy unless set to different value
(via `keras_core.mixed_precision.set_dtype_policy()`).
Attributes:
name: The name of the layer (string).
dtype: Dtype of the layer's weights. Alias of `layer.variable_dtype`.
variable_dtype: Dtype of the layer's weights.
compute_dtype: The dtype of the layer's computations.
Layers automatically cast inputs to this dtype, which causes
the computations and output to also be in this dtype.
When mixed precision is used with a
`keras_core.mixed_precision.DTypePolicy`, this will be different
than `variable_dtype`.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
trainable: Whether the layer should be trained (boolean), i.e.
whether its potentially-trainable weights should be returned
as part of `layer.trainable_weights`.
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Defines custom layer attributes, and creates layer weights
that do not depend on input shapes, using `add_weight()`,
or other state.
* `build(self, input_shape)`: This method can be used to create weights that
depend on the shape(s) of the input(s), using `add_weight()`, or other
state. `__call__()` will automatically build the layer
(if it has not been built yet) by calling `build()`.
* `call(self, *args, **kwargs)`: Called in `__call__` after making
sure `build()` has been called. `call()` performs the logic of applying
the layer to the input arguments.
Two reserved keyword arguments you can optionally use in `call()` are:
1. `training` (boolean, whether the call is in inference mode or
training mode).
2. `mask` (boolean tensor encoding masked timesteps in the input,
used e.g. in RNN layers).
      A typical signature for this method is `call(self, inputs)`, and users
      can optionally add `training` and `mask` if the layer needs them.
* `get_config(self)`: Returns a dictionary containing the configuration
used to initialize this layer. If the keys differ from the arguments
in `__init__()`, then override `from_config(self)` as well.
This method is used when saving
the layer or a model that contains this layer.
Examples:
Here's a basic example: a layer with two variables, `w` and `b`,
that returns `y = w . x + b`.
It shows how to implement `build()` and `call()`.
Variables set as attributes of a layer are tracked as weights
of the layers (in `layer.weights`).
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super().__init__()
self.units = units
# Create the state of the layer (weights)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="glorot_uniform",
trainable=True,
name="kernel",
)
self.bias = self.add_weight(
shape=(self.units,),
initializer="zeros",
trainable=True,
name="bias",
)
# Defines the computation
def call(self, inputs):
return ops.matmul(inputs, self.kernel) + self.bias
# Instantiates the layer.
linear_layer = SimpleDense(4)
# This will also call `build(input_shape)` and create the weights.
y = linear_layer(ops.ones((2, 2)))
assert len(linear_layer.weights) == 2
# These weights are trainable, so they're listed in `trainable_weights`:
assert len(linear_layer.trainable_weights) == 2
```
Besides trainable weights, updated via backpropagation during training,
layers can also have non-trainable weights. These weights are meant to
    be updated manually during `call()`. Here's an example layer that computes
the running sum of its inputs:
```python
class ComputeSum(Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
# Create a non-trainable weight.
self.total = self.add_weight(
shape=(),
initializer="zeros",
trainable=False,
name="total",
)
def call(self, inputs):
self.total.assign(self.total + ops.sum(inputs))
return self.total
my_sum = ComputeSum(2)
x = ops.ones((2, 2))
y = my_sum(x)
assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []
```
"""
def __new__(cls, *args, **kwargs):
        # Wrap the user-provided build method in `build_wrapper`
        # to add name scope support and serialization support.
obj = super().__new__(cls, *args, **kwargs)
original_build_method = obj.build
@wraps(original_build_method)
def build_wrapper(*args, **kwargs):
with backend.name_scope(obj.name, caller=obj):
original_build_method(*args, **kwargs)
# Record build config.
signature = inspect.signature(original_build_method)
obj._build_shapes_dict = signature.bind(*args, **kwargs).arguments
# Set built, post build actions, and lock state.
obj.built = True
obj._post_build()
obj._lock_state()
obj.build = build_wrapper
return obj
def __init__(
self,
*,
activity_regularizer=None,
trainable=True,
dtype=None,
autocast=True,
name=None,
**kwargs,
):
BackendLayer.__init__(self)
self._lock = False
Operation.__init__(self, name=name)
self.activity_regularizer = regularizers.get(activity_regularizer)
input_dim_arg = kwargs.pop("input_dim", None)
if input_dim_arg is not None:
input_shape_arg = (input_dim_arg,)
else:
input_shape_arg = kwargs.pop("input_shape", None)
if input_shape_arg is not None:
warnings.warn(
"Do not pass an `input_shape`/`input_dim` argument to "
"a layer. When using Sequential models, "
"prefer using an `Input(shape)` object as the "
"first layer in the model instead.",
stacklevel=2,
)
self._input_shape_arg = input_shape_arg
if kwargs:
raise ValueError(
"Unrecognized keyword arguments "
f"passed to {self.__class__.__name__}: {kwargs}"
)
self.built = False
self.dtype_policy = mixed_precision.resolve_policy(dtype)
self.autocast = autocast
self._input_spec = None
self._called = False
self.supports_jit = True
self._trainable = trainable
self._losses = []
self._loss_ids = set()
self._call_signature = inspect.signature(self.call)
call_signature_parameters = [
p.name for p in self._call_signature.parameters.values()
]
self._call_has_training_arg = "training" in call_signature_parameters
self._call_has_mask_arg = "mask" in call_signature_parameters
self._supports_masking = not utils.is_default(self.compute_mask)
# Whether to automatically convert (+ auto-cast) inputs to `call()`.
self._convert_input_args = True
# Whether to allow non-tensors as positional arguments in `call()`.
self._allow_non_tensor_positional_args = False
# Dict of shapes that were used to call `build()`.
self._build_shapes_dict = None
self._initializer_tracker()
@tracking.no_automatic_dependency_tracking
def _initializer_tracker(self):
if hasattr(self, "_tracker"):
return
trainable_variables = []
non_trainable_variables = []
layers = []
metrics = []
seed_generators = []
self._tracker = tracking.Tracker(
{
"trainable_variables": (
lambda x: isinstance(x, backend.Variable) and x.trainable,
trainable_variables,
),
"non_trainable_variables": (
lambda x: isinstance(x, backend.Variable)
and not x.trainable,
non_trainable_variables,
),
"metrics": (lambda x: isinstance(x, Metric), metrics),
"layers": (
lambda x: isinstance(x, Layer)
and not isinstance(x, Metric),
layers,
),
"seed_generators": (
lambda x: isinstance(x, backend.random.SeedGenerator),
seed_generators,
),
}
)
if backend.backend() == "tensorflow":
# Remove attribute tracking for lists (TF-specific attribute)
_self_setattr_tracking = getattr(
self, "_self_setattr_tracking", True
)
self._self_setattr_tracking = False
self._trainable_variables = trainable_variables
self._non_trainable_variables = non_trainable_variables
self._layers = layers
self._metrics = metrics
self._seed_generators = seed_generators
if backend.backend() == "tensorflow":
# Reset attribute tracking (TF-specific)
self._self_setattr_tracking = _self_setattr_tracking
@property
def input_spec(self):
return self._input_spec
@input_spec.setter
def input_spec(self, value):
self._input_spec = value
@utils.default
def build(self, input_shape):
self._check_super_called()
if utils.is_default(self.build) and might_have_unbuilt_state(self):
warnings.warn(
f"`build()` was called on layer '{self.name}', however "
"the layer does not have a `build()` method implemented "
"and it looks like it has unbuilt state. This will cause "
"the layer to be marked as built, despite not being "
"actually built, which may cause failures down the line. "
"Make sure to implement a proper `build()` method."
)
self.built = True
def _lock_state(self):
"""Prevent further state updates, called automatically in `build()`."""
if not self._tracker.locked:
self._tracker.lock(
msg=(
"You cannot add new elements of state "
"(variables or sub-layers) "
"to a layer that is already built. All state "
"must be created in the `__init__()` method or "
"in the `build()` method."
)
)
def get_build_config(self):
"""Returns a dictionary with the layer's input shape.
This method returns a config dict that can be used by
`build_from_config(config)` to create all states (e.g. Variables and
Lookup tables) needed by the layer.
By default, the config only contains the input shape that the layer
was built with. If you're writing a custom layer that creates state in
an unusual way, you should override this method to make sure this state
is already created when Keras attempts to load its value upon model
loading.
Returns:
A dict containing the input shape associated with the layer.
"""
if self._build_shapes_dict is not None:
if len(self._build_shapes_dict) == 1:
return {
"input_shape": tuple(self._build_shapes_dict.values())[0],
}
else:
return {"shapes_dict": self._build_shapes_dict}
def build_from_config(self, config):
"""Builds the layer's states with the supplied config dict.
By default, this method calls the `build(config["input_shape"])` method,
which creates weights based on the layer's input shape in the supplied
config. If your config contains other information needed to load the
layer's state, you should override this method.
Args:
config: Dict containing the input shape associated with this layer.
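        Example (illustrative sketch; `MyLayer` stands in for any layer
        subclass that implements `build()`):
        ```python
        layer = MyLayer()
        layer.build((None, 4))
        config = layer.get_build_config()  # e.g. {"input_shape": (None, 4)}
        restored = MyLayer()
        restored.build_from_config(config)  # re-creates the layer's weights
        ```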
"""
if config:
if "input_shape" in config:
self.build(config["input_shape"])
elif "shapes_dict" in config:
self.build(**config["shapes_dict"])
self.built = True
def add_variable(
self,
shape,
initializer,
dtype=None,
trainable=True,
regularizer=None,
constraint=None,
name=None,
):
"""Add a weight variable to the layer.
Alias of `add_weight()`.
"""
return self.add_weight(
shape=shape,
initializer=initializer,
dtype=dtype,
trainable=trainable,
regularizer=regularizer,
constraint=constraint,
name=name,
)
def add_weight(
self,
shape,
initializer,
dtype=None,
trainable=True,
regularizer=None,
constraint=None,
name=None,
):
"""Add a weight variable to the layer.
Args:
shape: Shape tuple for the variable.
Must be fully-defined (no `None` entries).
initializer: Initializer object to use to
populate the initial variable value,
or string name of a built-in initializer
(e.g. `"random_normal"`).
dtype: Dtype of the variable to create,
e.g. `"float32"`.
trainable: Boolean, whether the variable should
be trainable via backprop or whether its
updates are managed manually.
            regularizer: Regularizer object to attach to the variable.
                The computed regularization loss is added to `layer.losses`.
            constraint: Constraint object to call on the
                variable after any optimizer update,
                or string name of a built-in constraint.
name: String name of the variable. Useful
for debugging purposes.
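        Example (illustrative sketch, typically called from a layer's
        `build()` method; `input_dim` and `self.units` are placeholder names):
        ```python
        self.kernel = self.add_weight(
            shape=(input_dim, self.units),
            initializer="glorot_uniform",
            trainable=True,
            name="kernel",
        )
        ```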
"""
self._check_super_called()
initializer = initializers.get(initializer)
with backend.name_scope(self.name, caller=self):
variable = backend.Variable(
initializer=initializer,
shape=shape,
dtype=dtype or self.variable_dtype,
trainable=trainable,
name=name,
)
# Will be added to layer.losses
variable.regularizer = regularizer
variable.constraint = constraint
self._track_variable(variable)
return variable
@property
def trainable(self):
"""Settable boolean, whether this layer should be trainable or not."""
return self._trainable
@trainable.setter
def trainable(self, value):
"""Sets trainable attribute for the layer and its sublayers.
When this value is changed during training (e.g. with a
`Callback`) you need to call the parent
`Model.make_train_function` with `force=True` in order to
recompile the training graph.
Args:
value: Boolean with the desired state for the layer's trainable
attribute.
"""
value = bool(value)
self._trainable = value
for v in self._trainable_variables:
v.trainable = value
for layer in self._layers:
layer.trainable = value
@property
def variables(self):
"""List of all layer state, including random seeds.
This extends `layer.weights` to include all state used by the layer
including `SeedGenerator`s.
Note that metrics variables are not included here, use
`metrics_variables` to visit all the metric variables.
"""
        # Return all `Variables` associated with the layer, including random
        # seeds but excluding metric variables. Also deduplicate them.
variables = []
seen_ids = set()
for v in self._trainable_variables + self._non_trainable_variables:
if id(v) not in seen_ids:
variables.append(v)
seen_ids.add(id(v))
for sg in self._seed_generators:
variables.append(sg.state)
for layer in self._layers:
for v in layer.variables:
if id(v) not in seen_ids:
variables.append(v)
seen_ids.add(id(v))
return variables
@property
def trainable_variables(self):
"""List of all trainable layer state.
This is equivalent to `layer.trainable_weights`.
"""
if not self.trainable:
return []
return [v for v in self.variables if v.trainable]
@property
def non_trainable_variables(self):
"""List of all non-trainable layer state.
This extends `layer.non_trainable_weights` to include all state used by
the layer including state for metrics and `SeedGenerator`s.
"""
if not self.trainable:
return self.variables
return [v for v in self.variables if not v.trainable]
@property
def weights(self):
"""List of all weight variables of the layer.
        Unlike `layer.variables`, this excludes metric state and random seeds.
"""
        # Return only `Variables` directly owned by this layer and its
        # sub-layers. Also deduplicate them.
weights = []
seen_ids = set()
for w in self._trainable_variables + self._non_trainable_variables:
if id(w) not in seen_ids:
weights.append(w)
seen_ids.add(id(w))
for layer in self._layers:
for w in layer.weights:
if id(w) not in seen_ids:
weights.append(w)
seen_ids.add(id(w))
return weights
@property
def trainable_weights(self):
"""List of all trainable weight variables of the layer.
These are the weights that get updated by the optimizer during training.
"""
if not self.trainable:
return []
return [v for v in self.weights if v.trainable]
@property
def non_trainable_weights(self):
"""List of all non-trainable weight variables of the layer.
These are the weights that should not be updated by the optimizer during
        training. Unlike `layer.non_trainable_variables`, this excludes metric
state and random seeds.
"""
if not self.trainable:
return self.weights
return [v for v in self.weights if not v.trainable]
@property
def metrics_variables(self):
"""List of all metric variables."""
vars = []
for metric in self._metrics:
vars.extend(metric.variables)
for layer in self._layers:
for metric in layer._metrics:
vars.extend(metric.variables)
return vars
def get_weights(self):
"""Return the values of `layer.weights` as a list of NumPy arrays."""
return [v.numpy() for v in self.weights]
def set_weights(self, weights):
"""Sets the values of `layer.weights` from a list of NumPy arrays."""
layer_weights = self.weights
if len(layer_weights) != len(weights):
raise ValueError(
f"You called `set_weights(weights)` on layer '{self.name}' "
f"with a weight list of length {len(weights)}, but the layer "
f"was expecting {len(layer_weights)} weights."
)
for variable, value in zip(layer_weights, weights):
if variable.shape != value.shape:
raise ValueError(
f"Layer {self.name} weight shape {variable.shape} "
"is not compatible with provided weight "
f"shape {value.shape}."
)
variable.assign(value)
@property
def dtype(self):
"""Alias of `layer.variable_dtype`."""
return self.variable_dtype
@property
def compute_dtype(self):
"""The dtype of the computations performed by the layer."""
return self.dtype_policy.compute_dtype
@property
def variable_dtype(self):
"""The dtype of the state (weights) of the layer."""
return self.dtype_policy.variable_dtype
@property
def input_dtype(self):
"""The dtype layer inputs should be converted to."""
return self.dtype_policy.compute_dtype
@property
def supports_masking(self):
"""Whether this layer supports computing a mask using `compute_mask`."""
return self._supports_masking
@supports_masking.setter
def supports_masking(self, value):
self._supports_masking = value
@utils.default
def compute_mask(self, inputs, previous_mask):
return previous_mask
@traceback_utils.filter_traceback
def __call__(self, *args, **kwargs):
self._check_super_called()
self._called = True
#####################################
# 1. Convert any array arguments to tensors of correct dtype.
def maybe_convert(x):
if backend.is_tensor(x):
if (
self.autocast
and backend.is_float_dtype(x.dtype)
and x.dtype != self.input_dtype
):
x = backend.cast(x, dtype=self.input_dtype)
return x
elif isinstance(x, backend.KerasTensor):
if (
self.autocast
and backend.is_float_dtype(x.dtype)
and x.dtype != self.input_dtype
):
x.dtype = self.input_dtype
return x
elif hasattr(x, "__array__"):
return backend.convert_to_tensor(x, dtype=self.input_dtype)
return x
# Used to avoid expensive `tree` operations in the most common case.
if (
kwargs
or len(args) != 1
or not backend.is_tensor(args[0])
or backend.standardize_dtype(args[0].dtype) != self.input_dtype
) and self._convert_input_args:
args = tree.map_structure(maybe_convert, args)
kwargs = tree.map_structure(maybe_convert, kwargs)
##########################################################
# 2. Enforce that only tensors can be passed positionally.
if not self._allow_non_tensor_positional_args:
for arg in tree.flatten(args):
if not isinstance(arg, KerasTensor) and not backend.is_tensor(
arg
):
raise ValueError(
"Only input tensors may be passed as "
"positional arguments. The following argument value "
f"should be passed as a keyword argument: {arg} "
f"(of type {type(arg)})"
)
# Caches info about `call()` signature, args, kwargs.
call_spec = CallSpec(self._call_signature, args, kwargs)
############################################
# 3. Check input spec for 1st positional arg.
# TODO: consider extending this to all args and kwargs.
self._assert_input_compatibility(call_spec.first_arg)
################
# 4. Call build
with backend.name_scope(self.name, caller=self):
self._maybe_build(call_spec)
##########################
# 5. Infer training value
# Training phase for `Layer.call` is set via (in order of priority):
# (1) The `training` argument passed to this `Layer.call`, if not None
# (2) The training argument of an outer `Layer.call`.
        # (3) Any non-None default value for `training` in the call signature
        # (4) False (treating the layer as if it's in inference)
# Maintains info about the `Layer.call` stack
# across nested calls.
call_context = self._get_call_context()
        # This is the value explicitly passed by the user
training = call_spec.user_arguments_dict.get("training", None)
if training is None:
# Wasn't passed explicitly: use context value
training = call_context.training
if training is None:
# Get signature default value
training = call_spec.arguments_dict.get("training", None)
call_context.training = training
if self._call_has_training_arg and training is not None:
# Only populate arg if it has a concrete value
kwargs["training"] = training
##############################
# 6. Populate mask argument(s)
if len(call_spec.tensor_arguments_dict) == 1:
if (
"mask" in call_spec.argument_names
and call_spec.arguments_dict["mask"] is None
):
arg_name = list(call_spec.tensor_arguments_dict.keys())[0]
only_tensor_arg = call_spec.tensor_arguments_dict[arg_name]
mask = tree.map_structure(
lambda x: getattr(x, "_keras_mask", None),
only_tensor_arg,
)
kwargs["mask"] = mask
elif len(call_spec.tensor_arguments_dict) > 1:
for k, v in call_spec.tensor_arguments_dict.items():
expected_mask_arg_name = f"{k}_mask"
if expected_mask_arg_name in call_spec.argument_names:
if call_spec.arguments_dict[expected_mask_arg_name] is None:
mask = tree.map_structure(
lambda x: getattr(x, "_keras_mask", None), v
)
kwargs[expected_mask_arg_name] = mask
####################
# 7. Call the layer.
try:
with backend.name_scope(self.name, caller=self):
current_scope = backend.get_autocast_scope()
new_scope = None
if current_scope is not None:
# Clear or update the current scope if necessary.
if not self.autocast:
new_scope = backend.AutocastScope(None)
elif not backend.is_float_dtype(self.compute_dtype):
# Some preprocessing layers might have a non-float
# dtype, we should not autocast in this case.
new_scope = backend.AutocastScope(None)
elif current_scope.dtype != self.compute_dtype:
new_scope = backend.AutocastScope(self.compute_dtype)
elif self.compute_dtype != self.variable_dtype:
# Enter a new scope if our dtypes are "mixed".
new_scope = backend.AutocastScope(self.compute_dtype)
if new_scope is not None:
with new_scope:
outputs = super().__call__(*args, **kwargs)
else:
outputs = super().__call__(*args, **kwargs)
if not self.built:
self.built = True
# Record activity regularizer loss.
if self.activity_regularizer is not None:
for output in tree.flatten(outputs):
if backend.is_tensor(output):
self.add_loss(self.activity_regularizer(output))
# Set masks on outputs,
# provided only the first positional input arg and its mask.
# TODO: consider extending this to all args and kwargs.
previous_mask = getattr(call_spec.first_arg, "_keras_mask", None)
if self.supports_masking:
self._set_mask_metadata(
call_spec.first_arg, outputs, previous_mask
)
elif previous_mask is not None:
warnings.warn(
f"Layer '{self.name}' (of type {self.__class__.__name__}) "
"was passed an input with a mask attached to it. "
"However, this layer does not support masking and will "
"therefore destroy the mask information. Downstream "
"layers will not see the mask."
)
finally:
# Destroy call context if we created it
self._maybe_reset_call_context()
return outputs
def call(self, *args, **kwargs):
raise NotImplementedError
@traceback_utils.filter_traceback
def stateless_call(
self,
trainable_variables,
non_trainable_variables,
*args,
return_losses=False,
**kwargs,
):
"""Call the layer without any side effects.
Args:
trainable_variables: List of trainable variables of the model.
non_trainable_variables: List of non-trainable variables of the
model.
*args: Positional arguments to be passed to `call()`.
return_losses: If `True`, `stateless_call()` will return the list of
losses created during `call()` as part of its return values.
**kwargs: Keyword arguments to be passed to `call()`.
Returns:
A tuple. By default, returns `(outputs, non_trainable_variables)`.
If `return_losses = True`, then returns
`(outputs, non_trainable_variables, losses)`.
Note: `non_trainable_variables` include not only non-trainable weights
such as `BatchNormalization` statistics, but also RNG seed state
(if the layer uses any random operations, such as dropout),
and `Metric` state (if any metrics are attached to the layer).
These are all elements of the layer's state.
Example:
```python
model = ...
data = ...
trainable_variables = model.trainable_variables
non_trainable_variables = model.non_trainable_variables
# Call the model with zero side effects
outputs, non_trainable_variables = model.stateless_call(
trainable_variables,
non_trainable_variables,
data,
)
# Attach the updated state to the model
# (until you do this, the model is still in its pre-call state).
for ref_var, value in zip(
model.non_trainable_variables, non_trainable_variables
):
ref_var.assign(value)
```
"""
self._check_super_called()
if not self.built:
raise ValueError(
f"To call stateless_call, {self.__class__.__name__} must be "
"built (i.e. its variables must have been already created). "
"You can build it by calling it on some data."
)
if len(trainable_variables) != len(self.trainable_variables):
raise ValueError(
"Argument `trainable_variables` must be a list of tensors "
"corresponding 1:1 to "
f"{self.__class__.__name__}().trainable_variables. "
f"Received list with length {len(trainable_variables)}, "
f"but expected {len(self.trainable_variables)} variables."
)
if len(non_trainable_variables) != len(self.non_trainable_variables):
raise ValueError(
"Argument `non_trainable_variables` must be a list of tensors "
"corresponding 1:1 to "
f"{self.__class__.__name__}().non_trainable_variables. "
f"Received list with length {len(non_trainable_variables)}, "
f"but expected {len(self.non_trainable_variables)} variables."
)
# Gather variable mapping
trainable_mapping = zip(self.trainable_variables, trainable_variables)
non_trainable_mapping = zip(
self.non_trainable_variables, non_trainable_variables
)
mapping = list(trainable_mapping) + list(non_trainable_mapping)
# Call in stateless scope
with backend.StatelessScope(
state_mapping=mapping, collect_losses=return_losses
) as scope:
outputs = self.call(*args, **kwargs)
# Gather updated non-trainable variables
non_trainable_variables = []
for v in self.non_trainable_variables:
new_v = scope.get_current_value(v)
if new_v is not None:
non_trainable_variables.append(new_v)
else:
non_trainable_variables.append(v)
if return_losses:
return outputs, non_trainable_variables, scope.losses[:]
return outputs, non_trainable_variables
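To complement the docstring above, here is a minimal hedged sketch of `stateless_call()` with `return_losses=True`, assuming `keras_core` and NumPy are available; the model, shapes, and variable names are illustrative only.
```python
import numpy as np
from keras_core import layers, models

# Tiny model whose variables exist up front (Input triggers building).
model = models.Sequential([layers.Input(shape=(4,)), layers.Dense(2)])
x = np.random.random((8, 4)).astype("float32")

outputs, non_trainable_vars, losses = model.stateless_call(
    model.trainable_variables,
    model.non_trainable_variables,
    x,
    return_losses=True,  # also collect losses created inside call()
)

# Attach the updated non-trainable state back onto the model.
for ref_var, value in zip(model.non_trainable_variables, non_trainable_vars):
    ref_var.assign(value)

print(len(losses))  # 0 here: this toy model adds no losses in call()
```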
def compute_output_spec(self, *args, **kwargs):
if utils.is_default(self.compute_output_shape):
return super().compute_output_spec(*args, **kwargs)
else:
# Use compute_output_shape() to return the right output spec
call_spec = CallSpec(self._call_signature, args, kwargs)
shapes_dict = get_shapes_dict(call_spec)
shapes_dict = update_shapes_dict_for_target_fn(
self.compute_output_shape,
shapes_dict=shapes_dict,
call_spec=call_spec,
class_name=self.__class__.__name__,
)
output_shape = self.compute_output_shape(**shapes_dict)
if (
isinstance(output_shape, list)
and output_shape
and isinstance(output_shape[0], (int, type(None)))
):
output_shape = tuple(output_shape)
if not isinstance(output_shape, (list, tuple, dict)):
try:
output_shape = tuple(output_shape)
except:
raise ValueError(
"Method `compute_output_shape()` of layer "
f"{self.__class__.__name__} is returning "
"a type that cannot be interpreted as a shape. "
"It should return a shape tuple. "
f"Received: {output_shape}"
)
if (
isinstance(output_shape, tuple)
and output_shape
and isinstance(output_shape[0], (int, type(None)))
):
return KerasTensor(output_shape, dtype=self.compute_dtype)
# Case: nested. Could be a tuple/list of shapes, or a dict of
# shapes. Could be deeply nested.
return map_shape_structure(
lambda s: KerasTensor(s, dtype=self.compute_dtype), output_shape
)
@utils.default
def compute_output_shape(self, *args, **kwargs):
raise NotImplementedError
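As a hedged illustration of the single-argument path handled in `compute_output_spec()` above, a custom layer can return a plain shape tuple and the base class wraps it in a `KerasTensor` during symbolic calls; the layer below is hypothetical and not part of this file.
```python
from keras_core import layers, ops

class TileFeatures(layers.Layer):
    """Hypothetical layer that doubles the size of the last axis."""

    def call(self, inputs):
        return ops.concatenate([inputs, inputs], axis=-1)

    def compute_output_shape(self, input_shape):
        # Single argument: receives the shape of the first call() argument.
        last = input_shape[-1]
        return (*input_shape[:-1], None if last is None else 2 * last)

# Symbolic call: keras_core.Input(shape=(3,)) -> KerasTensor of shape (None, 6).
```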
def add_loss(self, loss):
"""Can be called inside of the `call()` method to add a scalar loss.
Example:
```python
class MyLayer(Layer):
...
def call(self, x):
self.add_loss(ops.sum(x))
return x
```
"""
# Eager only.
losses = tree.flatten(loss)
for x in losses:
if not backend.is_tensor(x):
raise ValueError(
"`add_loss()` can only be called from inside `build()` or "
f"`call()`, on a tensor input. Received invalid value: {x}"
)
if backend.in_stateless_scope():
scope = backend.get_stateless_scope()
if scope.collect_losses:
for x in losses:
scope.add_loss(x)
self._loss_ids.add(id(x))
else:
self._losses.extend(losses)
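A short hedged sketch of the `add_loss()` pattern documented above; the penalty layer and the 0.01 coefficient are illustrative assumptions.
```python
import numpy as np
from keras_core import layers, ops

class ActivityPenalty(layers.Layer):
    """Hypothetical layer that penalizes large activations."""

    def call(self, inputs):
        # Record a scalar loss on the layer (surfaced via `self.losses`).
        self.add_loss(0.01 * ops.sum(ops.square(inputs)))
        return inputs

layer = ActivityPenalty()
_ = layer(np.ones((2, 3), dtype="float32"))
print(layer.losses)  # [scalar penalty tensor]
```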
def _get_own_losses(self):
if backend.in_stateless_scope():
losses = []
scope = backend.get_stateless_scope()
for loss in scope.losses:
if id(loss) in self._loss_ids:
losses.append(loss)
return losses
else:
return self._losses[:]
@property
def losses(self):
"""List of scalar losses added via `add_loss()` during layer call."""
losses = self._get_own_losses()
for layer in self._layers:
losses.extend(layer._get_own_losses())
weight_regularization_losses = []
for v in self.trainable_weights:
if backend.in_stateless_scope():
v = backend.get_stateless_scope().get_current_value(v)
regularizer = getattr(v, "regularizer", None)
if regularizer:
weight_regularization_losses.append(regularizer(v))
losses.extend(weight_regularization_losses)
return losses
def _clear_losses(self):
if backend.in_stateless_scope():
scope = backend.get_stateless_scope()
if scope.collect_losses:
for x in scope.losses:
if id(x) in self._loss_ids:
scope.losses.remove(x)
self._losses.clear()
self._loss_ids.clear()
for layer in self._layers:
layer._clear_losses()
def save_own_variables(self, store):
"""Saves the state of the layer.
You can override this method to take full control of how the state of
the layer is saved upon calling `model.save()`.
Args:
store: Dict where the state of the model will be saved.
"""
all_vars = self._trainable_variables + self._non_trainable_variables
for i, v in enumerate(all_vars):
store[f"{i}"] = v.numpy()
def load_own_variables(self, store):
"""Loads the state of the layer.
You can override this method to take full control of how the state of
the layer is loaded upon calling `keras.models.load_model()`.
Args:
store: Dict from which the state of the model will be loaded.
"""
all_vars = self._trainable_variables + self._non_trainable_variables
if len(store.keys()) != len(all_vars):
if len(all_vars) == 0 and not self.built:
raise ValueError(
f"Layer '{self.name}' was never built "
"and thus it doesn't have any variables. "
f"However the weights file lists {len(store.keys())} "
"variables for this layer.\n"
"In most cases, this error indicates that either:\n\n"
"1. The layer is owned by a parent layer that "
"implements a `build()` method, but calling the "
"parent's `build()` method did NOT create the state of "
f"the child layer '{self.name}'. A `build()` method "
"must create ALL state for the layer, including "
"the state of any children layers.\n\n"
"2. You need to implement "
"the `def build_from_config(self, config)` method "
f"on layer '{self.name}', to specify how to rebuild "
"it during loading. "
"In this case, you might also want to implement the "
"method that generates the build config at saving time, "
"`def get_build_config(self)`. "
"The method `build_from_config()` is meant "
"to create the state "
"of the layer (i.e. its variables) upon deserialization.",
)
raise ValueError(
f"Layer '{self.name}' expected {len(all_vars)} variables, "
"but received "
f"{len(store.keys())} variables during loading. "
f"Expected: {[v.name for v in all_vars]}"
)
for i, v in enumerate(all_vars):
v.assign(store[f"{i}"])
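A hedged sketch of overriding `save_own_variables()`/`load_own_variables()` to use named keys instead of the default integer indices; it assumes `Dense` exposes `kernel`/`bias` attributes as in this codebase.
```python
from keras_core import layers

class NamedStateDense(layers.Dense):
    """Hypothetical Dense subclass that stores weights under named keys."""

    def save_own_variables(self, store):
        store["kernel"] = self.kernel.numpy()
        if self.use_bias:
            store["bias"] = self.bias.numpy()

    def load_own_variables(self, store):
        self.kernel.assign(store["kernel"])
        if self.use_bias:
            self.bias.assign(store["bias"])

layer = NamedStateDense(4)
layer.build((None, 8))
store = {}
layer.save_own_variables(store)   # keys: "kernel" and "bias"
layer.load_own_variables(store)   # round-trips the same state
```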
def _track_variable(self, variable):
if variable.trainable:
self._tracker.add_to_store("trainable_variables", variable)
else:
self._tracker.add_to_store("non_trainable_variables", variable)
def add_metric(self):
# Permanently disabled
raise NotImplementedError
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
"""
if not self.built:
raise ValueError(
"You tried to call `count_params` "
f"on layer '{self.name}', "
"but the layer isn't built. "
"You can build it manually via: "
f"`layer.build(input_shape)`."
)
return summary_utils.count_params(self.weights)
def _maybe_build(self, call_spec):
if self.built:
return
shapes_dict = get_shapes_dict(call_spec)
first_shape = next(iter(shapes_dict.values()), None)
# If the layer has a build method, call it with our input shapes.
if not utils.is_default(self.build):
shapes_dict = update_shapes_dict_for_target_fn(
self.build,
shapes_dict=shapes_dict,
call_spec=call_spec,
class_name=self.__class__.__name__,
)
self.build(**shapes_dict)
# Check input spec again (after build, since self.input_spec
# may have been updated).
self._assert_input_compatibility(call_spec.first_arg)
return
# Otherwise, attempt to build the layer by calling it on symbolic input.
if might_have_unbuilt_state(self):
if len(shapes_dict) == 1:
success = self._build_by_run_for_single_pos_arg(first_shape)
else:
success = self._build_by_run_for_kwargs(shapes_dict)
if not success:
if call_spec.eager:
# Will let the actual eager call do state-building
return
raise ValueError(
f"Layer '{self.name}' looks like it has unbuilt state, but "
"Keras is not able to trace the layer `call()` in order to "
"build it automatically. Possible causes:\n"
"1. The `call()` method of your layer may be crashing. Try "
"to `__call__()` the layer eagerly on some test input "
"first to see if it works. "
"E.g. `x = np.random.random((3, 4)); y = layer(x)`\n"
"2. If the `call()` method is correct, then you may need "
"to implement the `def build(self, input_shape)` method on "
"your layer. It should create all variables used by the "
"layer (e.g. by calling `layer.build()` on all its "
"children layers)."
)
self.build(first_shape)
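Echoing the error message above, here is a hedged sketch of a composite layer whose `build()` creates all of its state, including child layers; the names and sizes are illustrative.
```python
from keras_core import layers

class TwoLayerBlock(layers.Layer):
    """Hypothetical block whose build() creates all child state."""

    def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.dense_1 = layers.Dense(units, activation="relu")
        self.dense_2 = layers.Dense(units)

    def build(self, input_shape):
        # Build every child explicitly so the block has no unbuilt state.
        self.dense_1.build(input_shape)
        self.dense_2.build(self.dense_1.compute_output_shape(input_shape))

    def call(self, inputs):
        return self.dense_2(self.dense_1(inputs))
```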
def _build_by_run(self, *args, **kwargs):
call_spec = CallSpec(self._call_signature, args, kwargs)
shapes_dict = get_shapes_dict(call_spec)
if len(shapes_dict) == 1:
success = self._build_by_run_for_single_pos_arg(
tuple(shapes_dict.values())[0]
)
else:
success = self._build_by_run_for_kwargs(shapes_dict)
return success
def _build_by_run_for_single_pos_arg(self, input_shape):
# Case: all inputs are in the first arg (possibly nested).
input_tensors = map_shape_structure(
lambda s: backend.KerasTensor(s), input_shape
)
try:
backend.compute_output_spec(self.call, input_tensors)
return True
except:
return False
def _build_by_run_for_kwargs(self, shapes_dict):
# Case: inputs were recorded as multiple keyword arguments.
if all(is_shape_tuple(s) for s in shapes_dict.values()):
# Case: all input keyword arguments were plain tensors.
input_tensors = {
# We strip the `_shape` suffix to recover kwarg names.
utils.removesuffix(k, "_shape"): backend.KerasTensor(shape)
for k, shape in shapes_dict.items()
}
try:
backend.compute_output_spec(self.call, **input_tensors)
return True
except:
return False
else:
# Not supported: nested input keyword arguments.
return False
def __repr__(self):
return (
f"<{self.__class__.__name__} "
f"name={self.name}, built={self.built}>"
)
def __str__(self):
return (
f"<{self.__class__.__name__} "
f"name={self.name}, built={self.built}>"
)
def __setattr__(self, name, value):
# Track Variables, Layers, Metrics, SeedGenerators.
name, value = self._setattr_hook(name, value)
if hasattr(self, "_tracker"):
value = self._tracker.track(value)
elif name != "_tracker":
self._initialize_tracker()
return super().__setattr__(name, value)
def _check_super_called(self):
if getattr(self, "_lock", True):
raise RuntimeError(
f"In layer '{self.__class__.__name__}', you forgot to call "
"`super().__init__()` as the first statement "
"in the `__init__()` method. Go add it!"
)
def _assert_input_compatibility(self, arg_0):
if self.input_spec:
input_spec.assert_input_compatibility(
self.input_spec, arg_0, layer_name=self.name
)
def _get_call_context(self):
"""Returns currently active `CallContext`."""
layer_call_ctx = global_state.get_global_attribute("current_call_ctx")
if layer_call_ctx is None:
# Enter new call context.
layer_call_ctx = CallContext(entry_layer=self)
global_state.set_global_attribute(
"current_call_ctx", layer_call_ctx
)
self._clear_losses()
return layer_call_ctx
def _maybe_reset_call_context(self):
layer_call_ctx = global_state.get_global_attribute("current_call_ctx")
if layer_call_ctx is None or layer_call_ctx.entry_layer == self:
global_state.set_global_attribute("current_call_ctx", None)
def _flatten_layers(self, include_self=True, recursive=True):
layers = []
if include_self:
layers.append(self)
seen_object_ids = set()
deque = collections.deque(self._layers)
while deque:
layer = deque.popleft()
if id(layer) in seen_object_ids:
continue
seen_object_ids.add(id(layer))
layers.append(layer)
# Introspect recursively through sublayers.
if recursive:
deque.extendleft(layer._layers)
return layers
def _set_mask_metadata(self, inputs, outputs, previous_mask):
flat_outputs = tree.flatten(outputs)
mask_already_computed = all(
getattr(x, "_keras_mask", None) is not None for x in flat_outputs
)
if mask_already_computed:
return
output_masks = self.compute_mask(inputs, previous_mask)
if output_masks is None:
return
flat_masks = tree.flatten(output_masks)
for tensor, mask in zip(flat_outputs, flat_masks):
if getattr(tensor, "_keras_mask", None) is None:
try:
# Numpy backend does not support masking.
if backend.backend() == "numpy":
warnings.warn(
"The NumPy backend does not support masking at this"
"time. Masks will be ignored."
)
tensor._keras_mask = mask
except AttributeError:
# It's a C type.
pass
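A hedged sketch of a mask-aware layer matching the propagation logic above: it opts into masking, consumes the incoming mask, and explicitly stops it from flowing downstream. The layer is hypothetical.
```python
from keras_core import layers, ops

class MaskedSum(layers.Layer):
    """Hypothetical layer that sums timesteps while respecting a mask."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True  # let Keras pass `_keras_mask` along

    def call(self, inputs, mask=None):
        if mask is not None:
            mask = ops.cast(ops.expand_dims(mask, -1), inputs.dtype)
            inputs = inputs * mask
        return ops.sum(inputs, axis=1)

    def compute_mask(self, inputs, previous_mask):
        # The time axis is reduced away, so no mask is propagated.
        return None
```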
@python_utils.default
def get_config(self):
self._check_super_called()
base_config = super().get_config()
config = {
"trainable": self.trainable,
"dtype": self.dtype_policy.name,
}
return {**base_config, **config}
def is_backend_tensor_or_symbolic(x):
return backend.is_tensor(x) or isinstance(x, backend.KerasTensor)
class CallSpec:
def __init__(self, signature, args, kwargs):
# `training` and `mask` are special kwargs that are always available in
# a layer. If the user passes them to `__call__()` without declaring
# them in the `call()` signature, we remove them so that the remaining
# arguments can still be bound; `call()` does not use `training` in
# that case, so it is safe to drop.
# TODO: If necessary use workaround for `mask`
if "training" in kwargs and "training" not in signature.parameters:
kwargs.pop("training")
bound_args = signature.bind(*args, **kwargs)
else:
bound_args = signature.bind(*args, **kwargs)
self.user_arguments_dict = {
k: v for k, v in bound_args.arguments.items()
}
bound_args.apply_defaults()
arg_dict = {}
arg_names = []
tensor_arg_dict = {}
tensor_args = []
tensor_arg_names = []
nested_tensor_arg_names = []
for name, value in bound_args.arguments.items():
arg_dict[name] = value
arg_names.append(name)
if is_backend_tensor_or_symbolic(value):
tensor_args.append(value)
tensor_arg_names.append(name)
tensor_arg_dict[name] = value
elif tree.is_nested(value):
flat_values = tree.flatten(value)
if all(is_backend_tensor_or_symbolic(x) for x in flat_values):
tensor_args.append(value)
tensor_arg_names.append(name)
tensor_arg_dict[name] = value
nested_tensor_arg_names.append(name)
elif any(is_backend_tensor_or_symbolic(x) for x in flat_values):
raise ValueError(
"In a nested call() argument, "
"you cannot mix tensors and non-tensors. "
"Received invalid mixed argument: "
f"{name}={value}"
)
self.arguments_dict = arg_dict
self.argument_names = arg_names
self.tensor_arguments_dict = tensor_arg_dict
self.tensor_arguments_names = tensor_arg_names
self.nested_tensor_argument_names = nested_tensor_arg_names
self.first_arg = arg_dict[arg_names[0]]
if all(
backend.is_tensor(x) for x in self.tensor_arguments_dict.values()
):
self.eager = True
else:
self.eager = False
def get_arguments_dict(fn, args, kwargs):
"""Return a dict mapping argument names to their values."""
sig = inspect.signature(fn)
bound_args = sig.bind(*args, **kwargs)
arg_dict = {}
for name, value in bound_args.arguments.items():
arg_dict[name] = value
return arg_dict
def get_shapes_dict(call_spec):
"""Convert the call() arguments dict into a dict of input shape arguments.
Example:
```
>>> get_shapes_dict(call_spec)
{"input_a_shape": (2, 3)}
```
"""
shapes_dict = {}
for k, v in call_spec.tensor_arguments_dict.items():
if k == "mask" or k.endswith("_mask"):
# Do not include mask tensors in shapes dict
continue
if k == "kwargs" or k == "args":
# Do not include catch-alls in shapes dict
continue
if k in call_spec.nested_tensor_argument_names:
shapes_dict[f"{k}_shape"] = tree.map_structure(
lambda x: backend.standardize_shape(x.shape), v
)
else:
shapes_dict[f"{k}_shape"] = backend.standardize_shape(v.shape)
return shapes_dict
def update_shapes_dict_for_target_fn(
target_fn,
shapes_dict,
call_spec,
class_name,
):
"""Updates a `shapes_dict` for `build()` or `compute_output_shape()`.
This function will align a dictionary of the shapes of all tensors
passed to `call()` with the signatures of `build()` or
`compute_output_shape()`.
The alignment is as follows:
- If `build()` or `compute_output_shape()` accept only one argument,
forward the shape of the first positional argument from call without
checking any argument names.
- If `build()` or `compute_output_shape()` accept multiple arguments,
enforce that all argument names match a call argument name, e.g.
`foo_shape` would match call argument `foo`.
Returns:
An updated `shapes_dict` that can be used to invoke
`target_fn(**shapes_dict)`.
"""
if utils.is_default(target_fn):
return None
sig = inspect.signature(target_fn)
expected_names = []
for name, param in sig.parameters.items():
if param.kind in (
param.POSITIONAL_OR_KEYWORD,
param.POSITIONAL_ONLY,
param.KEYWORD_ONLY,
):
expected_names.append(name)
# Single arg: don't check names, pass first shape.
if len(expected_names) == 1:
key = expected_names[0]
values = tuple(shapes_dict.values())
if values:
input_shape = values[0]
else:
input_shape = None
return {key: input_shape}
# Multiple args: check that all names line up.
kwargs = {}
for name in expected_names:
method_name = target_fn.__name__
error_preamble = (
f"For a `{method_name}()` method with more than one argument, all "
"arguments should have a `_shape` suffix and match an argument "
f"from `call()`. E.g. `{method_name}(self, foo_shape, bar_shape)` "
)
if not name.endswith("_shape"):
raise ValueError(
f"{error_preamble} For layer '{class_name}', "
f"Received `{method_name}()` argument "
f"`{name}`, which does not end in `_shape`."
)
expected_call_arg = utils.removesuffix(name, "_shape")
if expected_call_arg not in call_spec.arguments_dict:
raise ValueError(
f"{error_preamble} For layer '{class_name}', "
f"received `{method_name}()` argument "
f"`{name}`, but `call()` does not have argument "
f"`{expected_call_arg}`."
)
if name in shapes_dict:
kwargs[name] = shapes_dict[name]
return kwargs
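A hedged sketch of the multi-argument naming rule enforced above: each `build()` argument carries a `_shape` suffix and matches a `call()` argument. The layer is hypothetical.
```python
from keras_core import layers

class PairwiseBias(layers.Layer):
    """Hypothetical layer with one `_shape` argument per call() argument."""

    def build(self, query_shape, value_shape):
        # `query_shape` / `value_shape` match call(query, value) below.
        self.bias = self.add_weight(
            shape=(query_shape[-1],), initializer="zeros", name="bias"
        )

    def call(self, query, value):
        return query + value + self.bias
```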
class CallContext:
def __init__(self, entry_layer):
self.entry_layer = entry_layer
self.training = None
def is_shape_tuple(s):
return isinstance(s, (list, tuple)) and all(
d is None or isinstance(d, int) for d in s
)
def might_have_unbuilt_state(layer):
return any(not lr.built for lr in layer._layers)
| keras-core/keras_core/layers/layer.py/0 | {
"file_path": "keras-core/keras_core/layers/layer.py",
"repo_id": "keras-core",
"token_count": 27511
} | 39 |
from keras_core import constraints
from keras_core import initializers
from keras_core import ops
from keras_core import regularizers
from keras_core.api_export import keras_core_export
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.GroupNormalization")
class GroupNormalization(Layer):
"""Group normalization layer.
Group Normalization divides the channels into groups and computes
within each group the mean and variance for normalization.
Empirically, its accuracy is more stable than batch norm in a wide
range of small batch sizes, if learning rate is adjusted linearly
with batch sizes.
Relation to Layer Normalization:
If the number of groups is set to 1, then this operation becomes nearly
identical to Layer Normalization (see Layer Normalization docs for details).
Relation to Instance Normalization:
If the number of groups is set to the input dimension (number of groups is
equal to number of channels), then this operation becomes identical to
Instance Normalization.
Args:
groups: Integer, the number of groups for Group Normalization. Can be in
the range `[1, N]` where N is the input dimension. The input
dimension must be divisible by the number of groups.
Defaults to 32.
axis: Integer or List/Tuple. The axis or axes to normalize across.
Typically, this is the features axis/axes. The left-out axes are
typically the batch axis/axes. -1 is the last dimension in the
input. Defaults to `-1`.
epsilon: Small float added to variance to avoid dividing by zero.
Defaults to 1e-3.
center: If `True`, add offset of `beta` to normalized tensor.
If `False`, `beta` is ignored. Defaults to `True`.
scale: If `True`, multiply by `gamma`. If `False`, `gamma` is not used.
When the next layer is linear (also e.g. `relu`), this can be
disabled since the scaling will be done by the next layer.
Defaults to `True`.
beta_initializer: Initializer for the beta weight. Defaults to zeros.
gamma_initializer: Initializer for the gamma weight. Defaults to ones.
beta_regularizer: Optional regularizer for the beta weight. None by
default.
gamma_regularizer: Optional regularizer for the gamma weight. None by
default.
beta_constraint: Optional constraint for the beta weight.
None by default.
gamma_constraint: Optional constraint for the gamma weight. None by
default.
**kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).
Input shape:
Arbitrary. Use the keyword argument `input_shape` (tuple of integers,
does not include the samples axis) when using this layer as the first
layer in a model.
Output shape:
Same shape as input.
Reference:
- [Yuxin Wu & Kaiming He, 2018](https://arxiv.org/abs/1803.08494)
"""
def __init__(
self,
groups=32,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer="zeros",
gamma_initializer="ones",
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs,
):
super().__init__(**kwargs)
self.supports_masking = True
self.groups = groups
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
def build(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError(
f"Axis {self.axis} of input tensor should have a defined "
"dimension but the layer received an input with shape "
f"{input_shape}."
)
if self.groups == -1:
self.groups = dim
if dim < self.groups:
raise ValueError(
f"Number of groups ({self.groups}) cannot be more than the "
f"number of channels ({dim})."
)
if dim % self.groups != 0:
raise ValueError(
f"Number of groups ({self.groups}) must be a multiple "
f"of the number of channels ({dim})."
)
self.input_spec = InputSpec(
ndim=len(input_shape), axes={self.axis: dim}
)
if self.scale:
self.gamma = self.add_weight(
shape=(dim,),
name="gamma",
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(
shape=(dim,),
name="beta",
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
)
else:
self.beta = None
super().build(input_shape)
def call(self, inputs):
reshaped_inputs = self._reshape_into_groups(inputs)
normalized_inputs = self._apply_normalization(
reshaped_inputs, inputs.shape
)
return ops.reshape(normalized_inputs, ops.shape(inputs))
def _reshape_into_groups(self, inputs):
input_shape = ops.shape(inputs)
group_shape = list(inputs.shape)
group_shape[0] = -1
for i, e in enumerate(group_shape[1:]):
if e is None:
group_shape[i + 1] = input_shape[i + 1]
group_shape[self.axis] = input_shape[self.axis] // self.groups
group_shape.insert(self.axis, self.groups)
reshaped_inputs = ops.reshape(inputs, group_shape)
return reshaped_inputs
def _apply_normalization(self, reshaped_inputs, input_shape):
group_reduction_axes = list(range(1, len(reshaped_inputs.shape)))
axis = -2 if self.axis == -1 else self.axis - 1
group_reduction_axes.pop(axis)
broadcast_shape = self._create_broadcast_shape(input_shape)
mean, variance = ops.moments(
reshaped_inputs, axes=group_reduction_axes, keepdims=True
)
# Compute the batch normalization.
inv = ops.rsqrt(variance + self.epsilon)
if self.scale:
gamma = ops.reshape(self.gamma, broadcast_shape)
gamma = ops.cast(gamma, reshaped_inputs.dtype)
inv = inv * gamma
res = -mean * inv
if self.center:
beta = ops.reshape(self.beta, broadcast_shape)
beta = ops.cast(beta, reshaped_inputs.dtype)
res = res + beta
normalized_inputs = reshaped_inputs * inv + res
return normalized_inputs
def _create_broadcast_shape(self, input_shape):
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
broadcast_shape.insert(self.axis, self.groups)
return broadcast_shape
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"groups": self.groups,
"axis": self.axis,
"epsilon": self.epsilon,
"center": self.center,
"scale": self.scale,
"beta_initializer": initializers.serialize(self.beta_initializer),
"gamma_initializer": initializers.serialize(self.gamma_initializer),
"beta_regularizer": regularizers.serialize(self.beta_regularizer),
"gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
"beta_constraint": constraints.serialize(self.beta_constraint),
"gamma_constraint": constraints.serialize(self.gamma_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
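A hedged usage sketch of the layer above, assuming NumPy inputs; the shapes are illustrative. As the docstring notes, `groups=1` behaves close to layer normalization and `groups=-1` (one group per channel) matches instance normalization.
```python
import numpy as np
from keras_core import layers

x = np.random.random((2, 8, 8, 32)).astype("float32")

# 32 channels split into 8 groups of 4 channels each.
group_norm = layers.GroupNormalization(groups=8)
y = group_norm(x)
print(y.shape)  # (2, 8, 8, 32)
```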
| keras-core/keras_core/layers/normalization/group_normalization.py/0 | {
"file_path": "keras-core/keras_core/layers/normalization/group_normalization.py",
"repo_id": "keras-core",
"token_count": 3673
} | 40 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.pooling.base_global_pooling import BaseGlobalPooling
@keras_core_export(
[
"keras_core.layers.GlobalAveragePooling2D",
"keras_core.layers.GlobalAvgPool2D",
]
)
class GlobalAveragePooling2D(BaseGlobalPooling):
"""Global average pooling operation for 2D data.
Args:
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
keepdims: A boolean, whether to keep the spatial dimensions or not.
If `keepdims` is `False` (default), the rank of the tensor is
reduced for spatial dimensions. If `keepdims` is `True`, the
spatial dimensions are retained with length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, height, width, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, height, width)`
Output shape:
- If `keepdims=False`:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims=True`:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, 1, 1, channels)`
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, 1, 1)`
Example:
>>> x = np.random.rand(2, 4, 5, 3)
>>> y = keras_core.layers.GlobalAveragePooling2D()(x)
>>> y.shape
(2, 3)
"""
def __init__(self, data_format=None, keepdims=False, **kwargs):
super().__init__(
pool_dimensions=2,
data_format=data_format,
keepdims=keepdims,
**kwargs,
)
def call(self, inputs):
if self.data_format == "channels_last":
return ops.mean(inputs, axis=[1, 2], keepdims=self.keepdims)
return ops.mean(inputs, axis=[2, 3], keepdims=self.keepdims)
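A hedged sketch extending the docstring example with `keepdims=True`; the shapes are illustrative.
```python
import numpy as np
from keras_core import layers

x = np.random.random((2, 4, 5, 3)).astype("float32")

print(layers.GlobalAveragePooling2D()(x).shape)               # (2, 3)
print(layers.GlobalAveragePooling2D(keepdims=True)(x).shape)  # (2, 1, 1, 3)
```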
| keras-core/keras_core/layers/pooling/global_average_pooling2d.py/0 | {
"file_path": "keras-core/keras_core/layers/pooling/global_average_pooling2d.py",
"repo_id": "keras-core",
"token_count": 1092
} | 41 |
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer
from keras_core.utils import argument_validation
from keras_core.utils import numerical_utils
from keras_core.utils.module_utils import tensorflow as tf
@keras_core_export("keras_core.layers.Discretization")
class Discretization(TFDataLayer):
"""A preprocessing layer which buckets continuous features by ranges.
This layer will place each element of its input data into one of several
contiguous ranges and output an integer index indicating which range each
element was placed in.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Input shape:
Any array of dimension 2 or higher.
Output shape:
Same as input shape.
Arguments:
bin_boundaries: A list of bin boundaries.
The leftmost and rightmost bins
will always extend to `-inf` and `inf`,
so `bin_boundaries=[0., 1., 2.]`
generates bins `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`,
and `[2., +inf)`.
If this option is set, `adapt()` should not be called.
num_bins: The integer number of bins to compute.
If this option is set,
`adapt()` should be called to learn the bin boundaries.
epsilon: Error tolerance, typically a small fraction
close to zero (e.g. 0.01). Higher values of epsilon increase
the quantile approximation error, and hence result in more
unequal buckets, but could improve performance
and resource consumption.
output_mode: Specification for the output of the layer.
Values can be `"int"`, `"one_hot"`, `"multi_hot"`, or
`"count"` configuring the layer as follows:
- `"int"`: Return the discretized bin indices directly.
- `"one_hot"`: Encodes each individual element in the
input into an array the same size as `num_bins`,
containing a 1 at the input's bin
index. If the last dimension is size 1, will encode on that
dimension. If the last dimension is not size 1,
will append a new dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a
single array the same size as `num_bins`,
containing a 1 for each bin index
present in the sample.
Treats the last dimension as the sample
dimension, if input shape is `(..., sample_length)`,
output shape will be `(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains
a count of the number of times the bin index appeared
in the sample.
Defaults to `"int"`.
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
and `"count"` output modes. Only supported with TensorFlow
backend. If `True`, returns a `SparseTensor` instead of
a dense `Tensor`. Defaults to `False`.
Examples:
Discretize float values based on provided buckets.
>>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
>>> layer = Discretization(bin_boundaries=[0., 1., 2.])
>>> layer(input)
array([[0, 2, 3, 1],
[1, 3, 2, 1]])
Discretize float values based on a number of buckets to compute.
>>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
>>> layer = Discretization(num_bins=4, epsilon=0.01)
>>> layer.adapt(input)
>>> layer(input)
array([[0, 2, 3, 2],
[1, 3, 3, 1]])
"""
def __init__(
self,
bin_boundaries=None,
num_bins=None,
epsilon=0.01,
output_mode="int",
sparse=False,
dtype=None,
name=None,
):
if dtype is None:
dtype = "int64" if output_mode == "int" else backend.floatx()
super().__init__(name=name, dtype=dtype)
if sparse and backend.backend() != "tensorflow":
raise ValueError(
"`sparse` can only be set to True with the "
"TensorFlow backend."
)
if sparse and output_mode == "int":
raise ValueError(
"`sparse` may only be true if `output_mode` is "
"`'one_hot'`, `'multi_hot'`, or `'count'`. "
f"Received: sparse={sparse} and "
f"output_mode={output_mode}"
)
argument_validation.validate_string_arg(
output_mode,
allowable_strings=(
"int",
"one_hot",
"multi_hot",
"count",
),
caller_name=self.__class__.__name__,
arg_name="output_mode",
)
if num_bins is not None and num_bins < 0:
raise ValueError(
"`num_bins` must be greater than or equal to 0. "
f"Received: `num_bins={num_bins}`"
)
if num_bins is not None and bin_boundaries is not None:
if len(bin_boundaries) != num_bins - 1:
raise ValueError(
"Both `num_bins` and `bin_boundaries` should not be "
f"set. Received: `num_bins={num_bins}` and "
f"`bin_boundaries={bin_boundaries}`"
)
self.input_bin_boundaries = bin_boundaries
self.bin_boundaries = (
bin_boundaries if bin_boundaries is not None else []
)
self.num_bins = num_bins
self.epsilon = epsilon
self.output_mode = output_mode
self.sparse = sparse
if self.bin_boundaries:
self.built = True
self.summary = None
else:
self.summary = np.array([[], []], dtype="float32")
def build(self, input_shape=None):
self.built = True
@property
def input_dtype(self):
return backend.floatx()
def adapt(self, data, steps=None):
"""Computes bin boundaries from quantiles in a input dataset.
Calling `adapt()` on a `Discretization` layer is an alternative to
passing in a `bin_boundaries` argument during construction. A
`Discretization` layer should always be either adapted over a dataset or
passed `bin_boundaries`.
During `adapt()`, the layer will estimate the quantile boundaries of the
input dataset. The number of quantiles can be controlled via the
`num_bins` argument, and the error tolerance for quantile boundaries can
be controlled via the `epsilon` argument.
Arguments:
data: The data to train on. It can be passed either as a
batched `tf.data.Dataset`,
or as a NumPy array.
steps: Integer or `None`.
Total number of steps (batches of samples) to process.
If `data` is a `tf.data.Dataset`, and `steps` is `None`,
`adapt()` will run until the input dataset is exhausted.
When passing an infinitely
repeating dataset, you must specify the `steps` argument. This
argument is not supported with array inputs or list inputs.
"""
if self.input_bin_boundaries is not None:
raise ValueError(
"Cannot adapt a Discretization layer that has been initialized "
"with `bin_boundaries`, use `num_bins` instead."
)
self.reset_state()
if isinstance(data, tf.data.Dataset):
if steps is not None:
data = data.take(steps)
for batch in data:
self.update_state(batch)
else:
self.update_state(data)
self.finalize_state()
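A hedged sketch of `adapt()` over a batched `tf.data.Dataset`, capped with `steps`; it assumes TensorFlow is installed, and the data and shapes are illustrative.
```python
import numpy as np
import tensorflow as tf
from keras_core import layers

dataset = tf.data.Dataset.from_tensor_slices(
    np.random.random((32, 4)).astype("float32")
).batch(8)

discretizer = layers.Discretization(num_bins=4, epsilon=0.01)
discretizer.adapt(dataset, steps=2)  # only the first 2 batches are used
print(discretizer.bin_boundaries)    # three learned boundaries
```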
def update_state(self, data):
data = np.array(data).astype("float32")
summary = summarize(data, self.epsilon)
self.summary = merge_summaries(summary, self.summary, self.epsilon)
def finalize_state(self):
if self.input_bin_boundaries is not None:
return
self.bin_boundaries = get_bin_boundaries(
self.summary, self.num_bins
).tolist()
def reset_state(self):
if self.input_bin_boundaries is not None:
return
self.summary = np.array([[], []], dtype="float32")
def compute_output_spec(self, inputs):
return backend.KerasTensor(shape=inputs.shape, dtype=self.compute_dtype)
def load_own_variables(self, store):
if len(store) == 1:
# Legacy format case
self.summary = store["0"]
return
def call(self, inputs):
indices = self.backend.numpy.digitize(inputs, self.bin_boundaries)
outputs = numerical_utils.encode_categorical_inputs(
indices,
output_mode=self.output_mode,
depth=len(self.bin_boundaries) + 1,
dtype=self.compute_dtype,
count_weights=None,
backend_module=self.backend,
)
return outputs
def get_config(self):
return {
"bin_boundaries": self.bin_boundaries,
"num_bins": self.num_bins,
"epsilon": self.epsilon,
"output_mode": self.output_mode,
"sparse": self.sparse,
"name": self.name,
"dtype": self.dtype,
}
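A hedged sketch of a non-default `output_mode`; the expected shape follows the docstring's description of `"one_hot"` and is illustrative.
```python
import numpy as np
from keras_core import layers

data = np.array([[-1.5, 1.0, 3.4, 0.5], [0.0, 3.0, 1.3, 0.0]])

# One-hot encode each element's bin index (3 boundaries -> depth 4).
one_hot = layers.Discretization(
    bin_boundaries=[0.0, 1.0, 2.0], output_mode="one_hot"
)
print(one_hot(data).shape)  # (2, 4, 4)
```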
def summarize(values, epsilon):
"""Reduce a 1D sequence of values to a summary.
This algorithm is based on numpy.quantiles but modified to allow for
intermediate steps between multiple data sets. It first finds the target
number of bins as the reciprocal of epsilon and then takes the individual
values spaced at appropriate intervals to arrive at that target.
The final step is to return the corresponding counts between those values.
If the target num_bins is larger than the size of values, the whole array is
returned (with weights of 1).
Args:
values: 1D `np.ndarray` to be summarized.
epsilon: A `'float32'` that determines the approximate desired
precision.
Returns:
A 2D `np.ndarray` that is a summary of the inputs. First column is the
interpolated partition values, the second is the weights (counts).
"""
values = np.reshape(values, [-1])
values = np.sort(values)
elements = np.size(values)
num_buckets = 1.0 / epsilon
increment = elements / num_buckets
start = increment
step = max(increment, 1)
boundaries = values[int(start) :: int(step)]
weights = np.ones_like(boundaries)
weights = weights * step
return np.stack([boundaries, weights])
def merge_summaries(prev_summary, next_summary, epsilon):
"""Weighted merge sort of summaries.
Given two summaries of distinct data, this function merges (and compresses)
them to stay within `epsilon` error tolerance.
Args:
prev_summary: 2D `np.ndarray` summary to be merged with `next_summary`.
next_summary: 2D `np.ndarray` summary to be merged with `prev_summary`.
epsilon: A float that determines the approximate desired precision.
Returns:
A 2-D `np.ndarray` that is a merged summary. First column is the
interpolated partition values, the second is the weights (counts).
"""
merged = np.concatenate((prev_summary, next_summary), axis=1)
merged = np.take(merged, np.argsort(merged[0]), axis=1)
return compress_summary(merged, epsilon)
def get_bin_boundaries(summary, num_bins):
return compress_summary(summary, 1.0 / num_bins)[0, :-1]
def compress_summary(summary, epsilon):
"""Compress a summary to within `epsilon` accuracy.
The compression step is needed to keep the summary sizes small after
merging, and also used to return the final target boundaries. It finds the
new bins based on interpolating cumulative weight percentages from the large
summary. Taking the difference of the cumulative weights from the previous
bin's cumulative weight will give the new weight for that bin.
Args:
summary: 2D `np.ndarray` summary to be compressed.
epsilon: A `'float32'` that determines the approximate desired
precision.
Returns:
A 2D `np.ndarray` that is a compressed summary. First column is the
interpolated partition values, the second is the weights (counts).
"""
if summary.shape[1] * epsilon < 1:
return summary
percents = epsilon + np.arange(0.0, 1.0, epsilon)
cum_weights = summary[1].cumsum()
cum_weight_percents = cum_weights / cum_weights[-1]
new_bins = np.interp(percents, cum_weight_percents, summary[0])
cum_weights = np.interp(percents, cum_weight_percents, cum_weights)
new_weights = cum_weights - np.concatenate(
(np.array([0]), cum_weights[:-1])
)
summary = np.stack((new_bins, new_weights))
return summary.astype("float32")
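A hedged standalone sketch of the summary helpers above (assuming `summarize`, `merge_summaries`, and `get_bin_boundaries` are in scope); the data is random and illustrative.
```python
import numpy as np

epsilon = 0.01
values_a = np.random.random(1000).astype("float32")
values_b = np.random.random(1000).astype("float32")

# Summarize each chunk, then merge the summaries within epsilon tolerance.
summary = summarize(values_a, epsilon)
summary = merge_summaries(summarize(values_b, epsilon), summary, epsilon)

boundaries = get_bin_boundaries(summary, num_bins=4)
print(boundaries)  # three boundaries splitting the data into 4 bins
```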
| keras-core/keras_core/layers/preprocessing/discretization.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/discretization.py",
"repo_id": "keras-core",
"token_count": 5631
} | 42 |