text (string, lengths 5 to 261k) | id (string, lengths 16 to 106) | metadata (dict) | __index_level_0__ (int64, 0 to 266)
---|---|---|---|
import collections
import re
from keras.api_export import keras_export
from keras.backend.common import global_state
def auto_name(prefix):
prefix = to_snake_case(prefix)
return uniquify(prefix)
def uniquify(name):
object_name_uids = global_state.get_global_attribute(
"object_name_uids",
default=collections.defaultdict(int),
set_to_default=True,
)
if name in object_name_uids:
unique_name = f"{name}_{object_name_uids[name]}"
else:
unique_name = name
object_name_uids[name] += 1
return unique_name
def to_snake_case(name):
name = re.sub(r"\W+", "", name)
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
name = re.sub("([a-z])([A-Z])", r"\1_\2", name).lower()
return name
@keras_export("keras.backend.get_uid")
def get_uid(prefix=""):
"""Associates a string prefix with an integer counter.
Args:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
>>> get_uid('dense')
1
>>> get_uid('dense')
2
"""
object_name_uids = global_state.get_global_attribute(
"object_name_uids",
default=collections.defaultdict(int),
set_to_default=True,
)
object_name_uids[prefix] += 1
return object_name_uids[prefix]
def reset_uids():
global_state.set_global_attribute(
"object_name_uids", collections.defaultdict(int)
)
def get_object_name(obj):
if hasattr(obj, "name"): # Most Keras objects.
return obj.name
elif hasattr(obj, "__name__"): # Function.
return to_snake_case(obj.__name__)
elif hasattr(obj, "__class__"): # Class instance.
return to_snake_case(obj.__class__.__name__)
return to_snake_case(str(obj))
| keras/keras/utils/naming.py/0 | {
"file_path": "keras/keras/utils/naming.py",
"repo_id": "keras",
"token_count": 776
} | 176 |
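The helpers above back every auto-generated object name in Keras. A minimal standalone sketch of the same counter logic follows; it uses a local `collections.defaultdict` in place of the internal `global_state` module, and the printed names are purely illustrative.

# Hedged sketch: mirrors uniquify() above with a local counter instead of
# keras global_state. to_snake_case("Conv2DTranspose") above would likewise
# yield "conv2d_transpose".
import collections

_name_uids = collections.defaultdict(int)

def sketch_uniquify(name):
    # First use returns the bare name; later uses get a numeric suffix.
    unique = f"{name}_{_name_uids[name]}" if name in _name_uids else name
    _name_uids[name] += 1
    return unique

print(sketch_uniquify("dense"))  # -> "dense"
print(sketch_uniquify("dense"))  # -> "dense_1"
print(sketch_uniquify("dense"))  # -> "dense_2"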
import os
import random
import string
from keras import testing
from keras.utils import text_dataset_utils
class TextDatasetFromDirectoryTest(testing.TestCase):
def _prepare_directory(
self, num_classes=2, nested_dirs=False, count=16, length=20
):
# Get a unique temp directory
temp_dir = self.get_temp_dir()
# Generate paths to class subdirectories
paths = []
for class_index in range(num_classes):
class_directory = f"class_{class_index}"
if nested_dirs:
class_paths = [
class_directory,
os.path.join(class_directory, "subfolder_1"),
os.path.join(class_directory, "subfolder_2"),
os.path.join(
class_directory, "subfolder_1", "sub-subfolder"
),
]
else:
class_paths = [class_directory]
for path in class_paths:
os.mkdir(os.path.join(temp_dir, path))
paths += class_paths
for i in range(count):
path = paths[i % len(paths)]
filename = os.path.join(path, f"text_{i}.txt")
with open(os.path.join(temp_dir, filename), "w") as f:
text = "".join(
[random.choice(string.printable) for _ in range(length)]
)
f.write(text)
return temp_dir
def test_text_dataset_from_directory_standalone(self):
# Test retrieving txt files without labels from a directory and its
# subdirs. Save a few extra files in the parent directory.
directory = self._prepare_directory(count=7, num_classes=2)
for i in range(3):
filename = f"text_{i}.txt"
with open(os.path.join(directory, filename), "w") as f:
text = "".join(
[random.choice(string.printable) for _ in range(20)]
)
f.write(text)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=5, label_mode=None, max_length=10
)
batch = next(iter(dataset))
# We just return the texts, no labels
self.assertEqual(batch.shape, (5,))
self.assertEqual(batch.dtype.name, "string")
# Count samples
batch_count = 0
sample_count = 0
for batch in dataset:
batch_count += 1
sample_count += batch.shape[0]
self.assertEqual(batch_count, 2)
self.assertEqual(sample_count, 10)
def test_text_dataset_from_directory_binary(self):
directory = self._prepare_directory(num_classes=2)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode="int", max_length=10
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8,))
self.assertEqual(batch[0].dtype.name, "string")
self.assertEqual(len(batch[0].numpy()[0]), 10) # Test max_length
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode="binary"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8,))
self.assertEqual(batch[0].dtype.name, "string")
self.assertEqual(batch[1].shape, (8, 1))
self.assertEqual(batch[1].dtype.name, "float32")
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode="categorical"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8,))
self.assertEqual(batch[0].dtype.name, "string")
self.assertEqual(batch[1].shape, (8, 2))
self.assertEqual(batch[1].dtype.name, "float32")
def test_sample_count(self):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode=None
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 15)
def test_text_dataset_from_directory_multiclass(self):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode=None
)
batch = next(iter(dataset))
self.assertEqual(batch.shape, (8,))
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode=None
)
sample_count = 0
iterator = iter(dataset)
for batch in dataset:
sample_count += next(iterator).shape[0]
self.assertEqual(sample_count, 15)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode="int"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8,))
self.assertEqual(batch[0].dtype.name, "string")
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode="categorical"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8,))
self.assertEqual(batch[0].dtype.name, "string")
self.assertEqual(batch[1].shape, (8, 4))
self.assertEqual(batch[1].dtype.name, "float32")
def test_text_dataset_from_directory_validation_split(self):
directory = self._prepare_directory(num_classes=2, count=10)
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=10,
validation_split=0.2,
subset="training",
seed=1337,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8,))
dataset = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=10,
validation_split=0.2,
subset="validation",
seed=1337,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (2,))
(
train_dataset,
val_dataset,
) = text_dataset_utils.text_dataset_from_directory(
directory,
batch_size=10,
validation_split=0.2,
subset="both",
seed=1337,
)
batch = next(iter(train_dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8,))
batch = next(iter(val_dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (2,))
def test_text_dataset_from_directory_manual_labels(self):
directory = self._prepare_directory(num_classes=2, count=2)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, labels=[0, 1], shuffle=False
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertAllClose(batch[1], [0, 1])
def test_text_dataset_from_directory_follow_links(self):
directory = self._prepare_directory(
num_classes=2, count=25, nested_dirs=True
)
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=8, label_mode=None, follow_links=True
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 25)
def test_text_dataset_from_directory_no_files(self):
directory = self._prepare_directory(num_classes=2, count=0)
with self.assertRaisesRegex(ValueError, "No text files found"):
_ = text_dataset_utils.text_dataset_from_directory(directory)
def test_text_dataset_from_directory_errors(self):
directory = self._prepare_directory(num_classes=3, count=5)
with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
_ = text_dataset_utils.text_dataset_from_directory(
directory, labels="other"
)
with self.assertRaisesRegex(
ValueError, "`label_mode` argument must be"
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, label_mode="other"
)
with self.assertRaisesRegex(
ValueError, 'only pass `class_names` if `labels="inferred"`'
):
_ = text_dataset_utils.text_dataset_from_directory(
directory,
labels=[0, 0, 1, 1, 1],
class_names=["class_0", "class_1", "class_2"],
)
with self.assertRaisesRegex(
ValueError,
"Expected the lengths of `labels` to match the number of files",
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, labels=[0, 0, 1, 1]
)
with self.assertRaisesRegex(
ValueError, "`class_names` passed did not match"
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, class_names=["class_0", "wrong_class"]
)
with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
_ = text_dataset_utils.text_dataset_from_directory(
directory, label_mode="binary"
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be between 0 and 1"
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, validation_split=2
)
with self.assertRaisesRegex(
ValueError,
'`subset` must be either "training", "validation" or "both"',
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, validation_split=0.2, subset="other"
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be set"
):
_ = text_dataset_utils.text_dataset_from_directory(
directory, validation_split=0.0, subset="training"
)
with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
_ = text_dataset_utils.text_dataset_from_directory(
directory, validation_split=0.2, subset="training"
)
def test_text_dataset_from_directory_not_batched(self):
directory = self._prepare_directory()
dataset = text_dataset_utils.text_dataset_from_directory(
directory, batch_size=None, label_mode=None, follow_links=True
)
sample = next(iter(dataset))
self.assertEqual(len(sample.shape), 0)
| keras/keras/utils/text_dataset_utils_test.py/0 | {
"file_path": "keras/keras/utils/text_dataset_utils_test.py",
"repo_id": "keras",
"token_count": 5524
} | 177 |
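Outside the test harness, the utility exercised above is typically driven as in the following hedged sketch. The two-class directory layout, file contents, and use of `tempfile` are illustrative assumptions, not part of the test file.

# Hedged usage sketch for text_dataset_from_directory; labels are inferred
# from the sub-directory names, and label_mode="int" yields
# (string_batch, int32_label_batch) tuples.
import os
import tempfile

from keras.utils import text_dataset_utils

root = tempfile.mkdtemp()
for class_name, text in [("class_a", "some text"), ("class_b", "other text")]:
    os.mkdir(os.path.join(root, class_name))
    with open(os.path.join(root, class_name, "sample_0.txt"), "w") as f:
        f.write(text)

dataset = text_dataset_utils.text_dataset_from_directory(
    root, batch_size=2, label_mode="int", shuffle=False
)
for texts, labels in dataset.take(1):
    print(texts.shape, labels.dtype)  # (2,) int32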
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the TF-Keras API, the high-level API of TensorFlow.
Detailed documentation and user guides are available at
[keras.io](https://keras.io).
"""
from tf_keras import applications
from tf_keras import distribute
from tf_keras import layers
from tf_keras import losses
from tf_keras import metrics
from tf_keras import models
from tf_keras import optimizers
from tf_keras.engine.input_layer import Input
from tf_keras.engine.sequential import Sequential
from tf_keras.engine.training import Model
from tf_keras.testing_infra import test_utils
# isort: off
from tensorflow.python import tf2
from tensorflow.python.util.tf_export import keras_export
__version__ = "2.17.0"
keras_export("keras.__version__").export_constant(__name__, "__version__")
| tf-keras/tf_keras/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/__init__.py",
"repo_id": "tf-keras",
"token_count": 400
} | 178 |
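For orientation, the symbols re-exported by this module cover the usual build/compile/fit workflow; a minimal hedged sketch (layer sizes and data are arbitrary):

# Hedged sketch using the re-exported tf_keras API surface.
import numpy as np
import tf_keras as keras

inputs = keras.Input(shape=(4,))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.compile(optimizer="sgd", loss="mse")
model.fit(np.random.rand(8, 4), np.random.rand(8, 1), epochs=1, verbose=0)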
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DenseNet models for TF-Keras.
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
"""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.applications import imagenet_utils
from tf_keras.engine import training
from tf_keras.layers import VersionAwareLayers
from tf_keras.utils import data_utils
from tf_keras.utils import layer_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/densenet/"
)
DENSENET121_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + "densenet121_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET121_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH
+ "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
DENSENET169_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + "densenet169_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET169_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH
+ "densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
DENSENET201_WEIGHT_PATH = (
BASE_WEIGHTS_PATH + "densenet201_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET201_WEIGHT_PATH_NO_TOP = (
BASE_WEIGHTS_PATH
+ "densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
layers = VersionAwareLayers()
def dense_block(x, blocks, name):
"""A dense block.
Args:
x: input tensor.
blocks: integer, the number of building blocks.
name: string, block label.
Returns:
Output tensor for the block.
"""
for i in range(blocks):
x = conv_block(x, 32, name=name + "_block" + str(i + 1))
return x
def transition_block(x, reduction, name):
"""A transition block.
Args:
x: input tensor.
reduction: float, compression rate at transition layers.
name: string, block label.
Returns:
output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + "_bn"
)(x)
x = layers.Activation("relu", name=name + "_relu")(x)
x = layers.Conv2D(
int(backend.int_shape(x)[bn_axis] * reduction),
1,
use_bias=False,
name=name + "_conv",
)(x)
x = layers.AveragePooling2D(2, strides=2, name=name + "_pool")(x)
return x
def conv_block(x, growth_rate, name):
"""A building block for a dense block.
Args:
x: input tensor.
growth_rate: float, growth rate at dense layers.
name: string, block label.
Returns:
Output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + "_0_bn"
)(x)
x1 = layers.Activation("relu", name=name + "_0_relu")(x1)
x1 = layers.Conv2D(
4 * growth_rate, 1, use_bias=False, name=name + "_1_conv"
)(x1)
x1 = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=name + "_1_bn"
)(x1)
x1 = layers.Activation("relu", name=name + "_1_relu")(x1)
x1 = layers.Conv2D(
growth_rate, 3, padding="same", use_bias=False, name=name + "_2_conv"
)(x1)
x = layers.Concatenate(axis=bn_axis, name=name + "_concat")([x, x1])
return x
def DenseNet(
blocks,
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the DenseNet architecture.
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
This function returns a TF-Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each TF-Keras Application expects a specific kind of input
preprocessing. For DenseNet, call
`tf.keras.applications.densenet.preprocess_input` on your inputs before
    passing them to the model. `densenet.preprocess_input` scales pixels
    between 0 and 1 and then normalizes each channel with respect to the
    ImageNet dataset statistics.
Args:
blocks: numbers of building blocks for the four dense layers.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional TF-Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
        or `(3, 224, 224)` (with `'channels_first'` data format)).
        It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded."
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
'If using `weights` as `"imagenet"` with `include_top`'
" as true, `classes` should be 1000"
)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
x = layers.Conv2D(64, 7, strides=2, use_bias=False, name="conv1/conv")(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name="conv1/bn"
)(x)
x = layers.Activation("relu", name="conv1/relu")(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(3, strides=2, name="pool1")(x)
x = dense_block(x, blocks[0], name="conv2")
x = transition_block(x, 0.5, name="pool2")
x = dense_block(x, blocks[1], name="conv3")
x = transition_block(x, 0.5, name="pool3")
x = dense_block(x, blocks[2], name="conv4")
x = transition_block(x, 0.5, name="pool4")
x = dense_block(x, blocks[3], name="conv5")
x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name="bn")(x)
x = layers.Activation("relu", name="relu")(x)
if include_top:
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes, activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
if blocks == [6, 12, 24, 16]:
model = training.Model(inputs, x, name="densenet121")
elif blocks == [6, 12, 32, 32]:
model = training.Model(inputs, x, name="densenet169")
elif blocks == [6, 12, 48, 32]:
model = training.Model(inputs, x, name="densenet201")
else:
model = training.Model(inputs, x, name="densenet")
# Load weights.
if weights == "imagenet":
if include_top:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
"densenet121_weights_tf_dim_ordering_tf_kernels.h5",
DENSENET121_WEIGHT_PATH,
cache_subdir="models",
file_hash="9d60b8095a5708f2dcce2bca79d332c7",
)
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
"densenet169_weights_tf_dim_ordering_tf_kernels.h5",
DENSENET169_WEIGHT_PATH,
cache_subdir="models",
file_hash="d699b8f76981ab1b30698df4c175e90b",
)
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
"densenet201_weights_tf_dim_ordering_tf_kernels.h5",
DENSENET201_WEIGHT_PATH,
cache_subdir="models",
file_hash="1ceb130c1ea1b78c3bf6114dbdfd8807",
)
else:
if blocks == [6, 12, 24, 16]:
weights_path = data_utils.get_file(
"densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5",
DENSENET121_WEIGHT_PATH_NO_TOP,
cache_subdir="models",
file_hash="30ee3e1110167f948a6b9946edeeb738",
)
elif blocks == [6, 12, 32, 32]:
weights_path = data_utils.get_file(
"densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5",
DENSENET169_WEIGHT_PATH_NO_TOP,
cache_subdir="models",
file_hash="b8c4d4c20dd625c148057b9ff1c1176b",
)
elif blocks == [6, 12, 48, 32]:
weights_path = data_utils.get_file(
"densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5",
DENSENET201_WEIGHT_PATH_NO_TOP,
cache_subdir="models",
file_hash="c13680b51ded0fb44dff2d8f86ac8bb1",
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export(
"keras.applications.densenet.DenseNet121", "keras.applications.DenseNet121"
)
def DenseNet121(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the Densenet121 architecture."""
return DenseNet(
[6, 12, 24, 16],
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation,
)
@keras_export(
"keras.applications.densenet.DenseNet169", "keras.applications.DenseNet169"
)
def DenseNet169(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the Densenet169 architecture."""
return DenseNet(
[6, 12, 32, 32],
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation,
)
@keras_export(
"keras.applications.densenet.DenseNet201", "keras.applications.DenseNet201"
)
def DenseNet201(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the Densenet201 architecture."""
return DenseNet(
[6, 12, 48, 32],
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation,
)
@keras_export("keras.applications.densenet.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="torch"
)
@keras_export("keras.applications.densenet.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Densely Connected Convolutional Networks](
https://arxiv.org/abs/1608.06993) (CVPR 2017)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your TF-Keras config at `~/.keras/keras.json`.
Note: each TF-Keras Application expects a specific kind of input
preprocessing. For DenseNet, call
`tf.keras.applications.densenet.preprocess_input` on your inputs before
passing them to the model.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional TF-Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
      or `(3, 224, 224)` (with `'channels_first'` data format)).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A TF-Keras model instance.
"""
setattr(DenseNet121, "__doc__", DenseNet121.__doc__ + DOC)
setattr(DenseNet169, "__doc__", DenseNet169.__doc__ + DOC)
setattr(DenseNet201, "__doc__", DenseNet201.__doc__ + DOC)
| tf-keras/tf_keras/applications/densenet.py/0 | {
"file_path": "tf-keras/tf_keras/applications/densenet.py",
"repo_id": "tf-keras",
"token_count": 7495
} | 179 |
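A typical end-to-end call for these models looks like the hedged sketch below. It uses `weights=None` and random pixels so it runs without downloading the pretrained checkpoints; with `weights="imagenet"` one would keep `classes=1000` and pass the predictions through `decode_predictions`.

# Hedged usage sketch for DenseNet121; random input, so the predictions
# themselves are meaningless.
import numpy as np

from tf_keras.applications import densenet

model = densenet.DenseNet121(weights=None, classes=10)
images = np.random.rand(1, 224, 224, 3).astype("float32") * 255.0
x = densenet.preprocess_input(images)  # "torch"-mode scaling/normalization
preds = model.predict(x)
print(preds.shape)  # (1, 10)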
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Antirectifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.benchmarks import benchmark_util
class AntirectifierBenchmark(tf.test.Benchmark):
"""Benchmarks for Antirectifier using `tf.test.Benchmark`."""
def __init__(self):
super().__init__()
(self.x_train, self.y_train), _ = keras.datasets.mnist.load_data()
self.x_train = self.x_train.reshape(-1, 784)
self.x_train = self.x_train.astype("float32") / 255
def _build_model(self):
"""Model from https://keras.io/examples/keras_recipes/antirectifier/."""
model = keras.Sequential(
[
keras.Input(shape=(784,)),
keras.layers.Dense(256),
Antirectifier(),
keras.layers.Dense(256),
Antirectifier(),
keras.layers.Dropout(0.5),
keras.layers.Dense(10),
]
)
return model
    # In each benchmark test, the required arguments for the
    # method `measure_performance` include:
    #   x: Input data; may be NumPy arrays or data loaded from tfds.
    #   y: Target data. If `x` is a dataset or generator instance,
    #      `y` should not be specified.
    #   loss: Loss function for the model.
    #   optimizer: Optimizer for the model.
    # See the `measure_performance()` method of `benchmark_util` for
    # more details.
def benchmark_antirectifier_bs_128(self):
"""Measure performance with batch_size=128."""
batch_size = 128
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
optimizer="rmsprop",
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["sparse_categorical_accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"antirectifier", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_antirectifier_bs_256(self):
"""Measure performance with batch_size=256."""
batch_size = 256
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
optimizer="rmsprop",
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["sparse_categorical_accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"antirectifier", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_antirectifier_bs_512(self):
"""Measure performance with batch_size=512."""
batch_size = 512
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
optimizer="rmsprop",
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["sparse_categorical_accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"antirectifier", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_antirectifier_bs_512_gpu_2(self):
"""Measure performance with batch_size=512, gpu=2 and
distribution_strategy=`mirrored`.
"""
batch_size = 512
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
num_gpus=2,
distribution_strategy="mirrored",
optimizer="rmsprop",
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["sparse_categorical_accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"antirectifier", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
class Antirectifier(keras.layers.Layer):
"""Build simple custom layer."""
def __init__(self, initializer="he_normal", **kwargs):
super().__init__(**kwargs)
self.initializer = keras.initializers.get(initializer)
def build(self, input_shape):
output_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(output_dim * 2, output_dim),
initializer=self.initializer,
name="kernel",
trainable=True,
)
def call(self, inputs):
inputs -= tf.reduce_mean(inputs, axis=-1, keepdims=True)
pos = tf.nn.relu(inputs)
neg = tf.nn.relu(-inputs)
concatenated = tf.concat([pos, neg], axis=-1)
mixed = tf.matmul(concatenated, self.kernel)
return mixed
def get_config(self):
# Implement get_config to enable serialization. This is optional.
base_config = super().get_config()
config = {"initializer": keras.initializers.serialize(self.initializer)}
return dict(list(base_config.items()) + list(config.items()))
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py/0 | {
"file_path": "tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py",
"repo_id": "tf-keras",
"token_count": 2836
} | 180 |
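The `Antirectifier.call()` above keeps both the positive and the negative half of the centered signal instead of discarding one, doubling the feature dimension. A small hedged sketch of that transform on a toy tensor (values are arbitrary):

# Hedged sketch of the antirectifier transform: center, split into
# positive/negative ReLU halves, concatenate along the last axis.
import tensorflow.compat.v2 as tf

x = tf.constant([[1.0, -2.0, 3.0, -4.0]])
x = x - tf.reduce_mean(x, axis=-1, keepdims=True)  # [[1.5, -1.5, 3.5, -3.5]]
pos = tf.nn.relu(x)
neg = tf.nn.relu(-x)
print(tf.concat([pos, neg], axis=-1).numpy())
# [[1.5 0.  3.5 0.  0.  1.5 0.  3.5]]  -- shape (1, 8)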
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark tests for TF-Keras optimizers."""
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.benchmarks import benchmark_util
from tf_keras.optimizers.legacy import adam
# isort: off
from tensorflow.python.platform.benchmark import (
ParameterizedBenchmark,
)
def bidirect_imdb_lstm_config():
"""Bidirectional LSTM model and IMDB data."""
def model_fn():
inputs = keras.Input(shape=(None,), dtype="int32")
x = keras.layers.Embedding(20000, 128)(inputs)
x = keras.layers.Bidirectional(
keras.layers.LSTM(64, return_sequences=True)
)(x)
x = keras.layers.Bidirectional(keras.layers.LSTM(64))(x)
outputs = keras.layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
return model
(x_train, y_train), _ = keras.datasets.imdb.load_data(num_words=20000)
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=200)
return model_fn, x_train, y_train
class KerasOptimizerBenchmark(
tf.test.Benchmark, metaclass=ParameterizedBenchmark
):
"""Keras optimizer benchmarks."""
    # Each benchmark test is parameterized by a tuple whose first element
    # is the benchmark (optimizer) name.
_benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu(
[
("Adam", keras.optimizers.Adam(), 10),
("NonFusedAdam", adam.NonFusedAdam(), 10),
]
)
def benchmark_optimizer(self, optimizer, num_iters):
"""Optimizer benchmark with Bidirectional LSTM model on IMDB data.
Args:
optimizer: The optimizer instance to be benchmarked.
num_iters: The number of iterations to run for performance
measurement.
"""
model, train_x, train_y = bidirect_imdb_lstm_config()
metrics, wall_time, extras = benchmark_util.measure_performance(
model,
x=train_x,
y=train_y,
batch_size=512,
optimizer=optimizer,
loss="binary_crossentropy",
metrics=["accuracy"],
)
name = benchmark_util.get_benchmark_name(self._get_name())
metadata = {
"implementation": name[0],
"model_name": "optimizers",
"parameters": "lstm.512",
}
extras.update(metadata)
self.report_benchmark(
iters=num_iters, wall_time=wall_time, metrics=metrics, extras=extras
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/benchmarks/optimizer_benchmarks_test.py/0 | {
"file_path": "tf-keras/tf_keras/benchmarks/optimizer_benchmarks_test.py",
"repo_id": "tf-keras",
"token_count": 1297
} | 181 |
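Each generated benchmark above ultimately compiles and fits the bidirectional-LSTM model with the optimizer under test. The hedged, scaled-down sketch below substitutes tiny random integer sequences for the IMDB download; all sizes are arbitrary.

# Hedged sketch: same shape of workload as bidirect_imdb_lstm_config(),
# shrunk so it runs quickly and offline.
import numpy as np
import tf_keras as keras

inputs = keras.Input(shape=(None,), dtype="int32")
x = keras.layers.Embedding(20000, 8)(inputs)
x = keras.layers.Bidirectional(keras.layers.LSTM(4, return_sequences=True))(x)
x = keras.layers.Bidirectional(keras.layers.LSTM(4))(x)
outputs = keras.layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)

x_train = np.random.randint(0, 20000, size=(16, 20))
y_train = np.random.randint(0, 2, size=(16, 1))
model.compile(optimizer=keras.optimizers.Adam(), loss="binary_crossentropy",
              metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=8, epochs=1, verbose=0)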
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras weights constraints."""
import math
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import constraints
from tf_keras.testing_infra import test_combinations
def get_test_values():
return [0.1, 0.5, 3, 8, 1e-7]
def get_example_array():
np.random.seed(3537)
example_array = np.random.random((100, 100)) * 100.0 - 50.0
example_array[0, 0] = 0.0 # 0 could possibly cause trouble
return example_array
def get_example_kernel(width):
np.random.seed(3537)
example_array = np.random.rand(width, width, 2, 2)
return example_array
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class KerasConstraintsTest(tf.test.TestCase):
def test_serialization(self):
all_activations = ["max_norm", "non_neg", "unit_norm", "min_max_norm"]
for name in all_activations:
fn = constraints.get(name)
ref_fn = getattr(constraints, name)()
assert fn.__class__ == ref_fn.__class__
config = constraints.serialize(fn)
fn = constraints.deserialize(config)
assert fn.__class__ == ref_fn.__class__
def test_max_norm(self):
array = get_example_array()
for m in get_test_values():
norm_instance = constraints.max_norm(m)
normed = norm_instance(backend.variable(array))
assert np.all(backend.eval(normed) < m)
# a more explicit example
norm_instance = constraints.max_norm(2.0)
x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
x_normed_target = np.array(
[
[0, 0, 0],
[1.0, 0, 0],
[2.0, 0, 0],
[2.0 / np.sqrt(3), 2.0 / np.sqrt(3), 2.0 / np.sqrt(3)],
]
).T
x_normed_actual = backend.eval(norm_instance(backend.variable(x)))
self.assertAllClose(x_normed_actual, x_normed_target, rtol=1e-05)
def test_non_neg(self):
non_neg_instance = constraints.non_neg()
normed = non_neg_instance(backend.variable(get_example_array()))
assert np.all(np.min(backend.eval(normed), axis=1) == 0.0)
def test_unit_norm(self):
unit_norm_instance = constraints.unit_norm()
normalized = unit_norm_instance(backend.variable(get_example_array()))
norm_of_normalized = np.sqrt(
np.sum(backend.eval(normalized) ** 2, axis=0)
)
# In the unit norm constraint, it should be equal to 1.
difference = norm_of_normalized - 1.0
largest_difference = np.max(np.abs(difference))
assert np.abs(largest_difference) < 10e-5
def test_min_max_norm(self):
array = get_example_array()
for m in get_test_values():
norm_instance = constraints.min_max_norm(
min_value=m, max_value=m * 2
)
normed = norm_instance(backend.variable(array))
value = backend.eval(normed)
l2 = np.sqrt(np.sum(np.square(value), axis=0))
assert not l2[l2 < m]
assert not l2[l2 > m * 2 + 1e-5]
def test_conv2d_radial_constraint(self):
for width in (3, 4, 5, 6):
array = get_example_kernel(width)
norm_instance = constraints.radial_constraint()
normed = norm_instance(backend.variable(array))
value = backend.eval(normed)
assert np.all(value.shape == array.shape)
assert np.all(value[0:, 0, 0, 0] == value[-1:, 0, 0, 0])
assert len(set(value[..., 0, 0].flatten())) == math.ceil(
float(width) / 2
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/constraints_test.py/0 | {
"file_path": "tf-keras/tf_keras/constraints_test.py",
"repo_id": "tf-keras",
"token_count": 1939
} | 182 |
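In normal use these constraints are attached to layer weights rather than called directly; the hedged sketch below (arbitrary layer sizes) shows `max_norm` as a `kernel_constraint`, which rescales the kernel after each optimizer step.

# Hedged sketch: after training, every column of the Dense kernel has an
# L2 norm of at most 2.0 because of the max_norm constraint.
import numpy as np
import tf_keras as keras
from tf_keras import constraints

model = keras.Sequential(
    [
        keras.layers.Dense(
            4, kernel_constraint=constraints.max_norm(2.0), input_shape=(3,)
        )
    ]
)
model.compile(optimizer="sgd", loss="mse")
model.fit(np.random.rand(8, 3), np.random.rand(8, 4), epochs=1, verbose=0)
print(np.linalg.norm(model.layers[0].kernel.numpy(), axis=0))  # all <= 2.0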
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom Training Loop correctness test."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import optimizers
from tf_keras.applications import resnet_v2
from tf_keras.datasets import fashion_mnist
from tf_keras.distribute import optimizer_combinations
from tf_keras.distribute import strategy_combinations
from tf_keras.testing_infra import test_utils
# isort: off
from tensorflow.python.ops.losses import losses_impl
_NUM_SAMPLES = 66
_BATCH_SIZE = 32
_RANDOM_SEED = 1337
_NUM_EPOCHS = 2
_STEPS_PER_EPOCH = 2
class MaybeStrategyScope:
"""Provides a context allowing no distribution strategy."""
def __init__(self, strategy):
self._strategy = strategy
self._scope = None
def __enter__(self):
if self._strategy:
self._scope = self._strategy.scope()
self._scope.__enter__()
def __exit__(self, exc_type, value, traceback):
if self._strategy:
self._scope.__exit__(exc_type, value, traceback)
self._scope = None
def get_model(sync_batchnorm=False):
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation="relu", input_shape=(1,)))
model.add(
keras.layers.Dense(
10,
activation="relu",
kernel_regularizer=keras.regularizers.l2(1e-4),
)
)
if sync_batchnorm:
model.add(keras.layers.BatchNormalization(synchronized=True))
else:
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(10, activation="relu"))
model.add(keras.layers.Dense(1))
return model
def get_data():
x_train = np.random.rand(_NUM_SAMPLES, 1)
y_train = 3 * x_train
x_train = x_train.astype("float32")
y_train = y_train.astype("float32")
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.batch(_BATCH_SIZE)
return train_dataset
def compute_loss(labels, logits, reg_losses):
pred_loss = keras.losses.mean_squared_error(labels, logits)
scaled_loss = tf.nn.compute_average_loss(
pred_loss, global_batch_size=_BATCH_SIZE
)
l2_loss = tf.nn.scale_regularization_loss(reg_losses)
return scaled_loss + l2_loss
def iteration_inside_func(
initial_weights,
dataset,
optimizer_fn,
iteration_type,
strategy=None,
sync_batchnorm=None,
jit_compile=False,
):
"""Helper function to test iterating over data inside a tf.function."""
with MaybeStrategyScope(strategy):
if strategy and sync_batchnorm:
model = get_model(sync_batchnorm)
else:
model = get_model()
model.set_weights(initial_weights)
optimizer = optimizer_fn()
training_accuracy = keras.metrics.CategoricalAccuracy(
"training_accuracy", dtype=tf.float32
)
@tf.function
def train_epoch(dist_input):
"""Training StepFn."""
@tf.function(jit_compile=jit_compile)
def step_fn(inputs):
samples, labels = inputs
with tf.GradientTape() as tape:
logits = model(samples)
loss = compute_loss(labels, logits, model.losses)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
training_accuracy.update_state(labels, logits)
return loss
total_loss = 0.0
num_batches = 0
if iteration_type == "dataset":
for x in dist_input:
if strategy:
per_replica_losses = strategy.run(step_fn, args=(x,))
total_loss += strategy.reduce(
tf.distribute.ReduceOp.SUM,
per_replica_losses,
axis=None,
)
else:
total_loss += step_fn(x)
num_batches += 1
else:
iterator = iter(dist_input)
for _ in range(_STEPS_PER_EPOCH):
if strategy:
per_replica_losses = strategy.run(
step_fn, args=(next(iterator),)
)
total_loss += strategy.reduce(
tf.distribute.ReduceOp.SUM,
per_replica_losses,
axis=None,
)
else:
total_loss += step_fn(next(iterator))
num_batches += 1
return total_loss / tf.cast(num_batches, dtype=tf.float32)
if strategy:
dataset = strategy.experimental_distribute_dataset(dataset)
for _ in range(_NUM_EPOCHS):
loss = train_epoch(dataset)
return (model.get_weights(), loss, training_accuracy.result())
def iteration_outside_func(
initial_weights,
dataset,
optimizer_fn,
iteration_type,
strategy=None,
sync_batchnorm=None,
jit_compile=False,
):
"""Helper function to test iterating over data outside a tf.function."""
with MaybeStrategyScope(strategy):
model = get_model(sync_batchnorm=sync_batchnorm)
model.set_weights(initial_weights)
optimizer = optimizer_fn()
training_accuracy = keras.metrics.CategoricalAccuracy(
"training_accuracy", dtype=tf.float32
)
@tf.function
def train_step(dist_inputs):
"""Training StepFn."""
@tf.function(jit_compile=jit_compile)
def step_fn(inputs):
samples, labels = inputs
with tf.GradientTape() as tape:
logits = model(samples)
loss = compute_loss(labels, logits, model.losses)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
training_accuracy.update_state(labels, logits)
return loss
if strategy:
per_replica_losses = strategy.run(step_fn, args=(dist_inputs,))
return strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
)
else:
return step_fn(dist_inputs)
if strategy:
dataset = strategy.experimental_distribute_dataset(dataset)
total_loss = 0.0
num_batches = 0
if iteration_type == "dataset":
for _ in range(_NUM_EPOCHS):
for x in dataset:
total_loss += train_step(x)
num_batches += 1
else:
for _ in range(_NUM_EPOCHS):
iterator = iter(dataset)
for _ in range(_STEPS_PER_EPOCH):
total_loss += train_step(next(iterator))
num_batches += 1
return (
model.get_weights(),
total_loss / tf.cast(num_batches, dtype=tf.float32),
training_accuracy.result(),
)
@test_utils.run_v2_only
class TestDistributionStrategyDnnCorrectness(
tf.test.TestCase, parameterized.TestCase
):
"""Test custom training loop correctness with a simple DNN model."""
def setUp(self):
super().setUp()
np.random.seed(_RANDOM_SEED)
tf.compat.v1.set_random_seed(_RANDOM_SEED)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=strategy_combinations.all_strategies,
optimizer_fn=optimizer_combinations.optimizers_v2,
mode=["eager"],
iteration_type=["iterator", "dataset"],
inside_func=[False, True],
sync_batchnorm=[True, False],
jit_compile=[False],
)
+ tf.__internal__.test.combinations.combine(
distribution=strategy_combinations.multiworker_strategies,
optimizer_fn=[
optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
optimizer_combinations.adagrad_optimizer_keras_v2_fn,
optimizer_combinations.adam_experimental_fn,
],
mode=["eager"],
iteration_type=["iterator", "dataset"],
inside_func=[False, True],
sync_batchnorm=[True, False],
jit_compile=[False],
)
+ tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy_gpu,
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
optimizer_fn=[
optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
optimizer_combinations.adagrad_optimizer_keras_v2_fn,
],
mode=["eager"],
iteration_type=["iterator", "dataset"],
inside_func=[False, True],
sync_batchnorm=[True, False],
jit_compile=[True],
)
)
def test_dnn_correctness_minus_tpus(
self,
distribution,
optimizer_fn,
iteration_type,
inside_func,
sync_batchnorm,
jit_compile,
):
# TODO(anjs): Identify why this particular V1 optimizer needs a higher
# tol.
if (
"FtrlV1" in optimizer_fn._name
and "TPU" in type(distribution).__name__
):
self.skipTest("Reduced tolerance of the order of 1e-1 required.")
self.dnn_correctness(
distribution,
optimizer_fn,
iteration_type,
inside_func,
sync_batchnorm,
jit_compile,
)
def dnn_correctness(
self,
distribution,
optimizer_fn,
iteration_type,
inside_func,
sync_batchnorm=None,
jit_compile=False,
):
model = get_model(sync_batchnorm)
initial_weights = model.get_weights()
dataset = get_data()
if inside_func:
iteration_func = iteration_inside_func
else:
iteration_func = iteration_outside_func
wts_with_ds, loss_with_ds, acc_with_ds = iteration_func(
initial_weights,
dataset,
optimizer_fn,
iteration_type,
strategy=distribution,
sync_batchnorm=sync_batchnorm,
jit_compile=jit_compile,
)
wts, loss, acc = iteration_func(
initial_weights,
dataset,
optimizer_fn,
iteration_type,
sync_batchnorm=sync_batchnorm,
jit_compile=False,
)
self.assertAllClose(wts, wts_with_ds, atol=1e-3, rtol=1e-3)
self.assertAllClose(loss, loss_with_ds, atol=1e-3, rtol=1e-3)
self.assertAllClose(acc, acc_with_ds, atol=1e-3, rtol=1e-3)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
],
mode=["eager"],
)
)
def test_fused_batch_norm_uneven_batch(self, distribution):
"""Test that fused BN works when the last device gets empty data.
Adapted from
https://www.tensorflow.org/tutorials/distribute/custom_training
but using ResNet, which uses fused batchnorm, as the model.
Arguments:
distribution: distribute test configuration
"""
self.skipTest("TODO(b/234354008): Requires fetching data from network.")
(train_images, train_labels), _ = fashion_mnist.load_data()
# add channel dimension to make 2D data into 3D, since some ops of the
# model require it.
train_images = train_images[..., None]
train_images = train_images / np.float32(255)
# Padding images because ResNet requires a minimal shape of (32, 32)
padded_train_images = np.concatenate(
[
np.zeros((len(train_images), 2, 28, 1)),
train_images,
np.zeros((len(train_images), 2, 28, 1)),
],
axis=1,
)
padded_train_images = np.concatenate(
[
np.zeros((len(train_images), 32, 2, 1)),
padded_train_images,
np.zeros((len(train_images), 32, 2, 1)),
],
axis=2,
)
buffer_size = len(train_images)
global_batch_size = distribution.num_replicas_in_sync
num_samples = global_batch_size - 1
epochs = 2
        # Keep only the first `num_samples` images, so that the last GPU
        # receives an empty batch.
padded_train_images = padded_train_images[:num_samples]
train_labels = train_labels[:num_samples]
train_dataset = (
tf.data.Dataset.from_tensor_slices(
(padded_train_images, train_labels)
)
.shuffle(buffer_size)
.batch(global_batch_size)
)
train_dist_dataset = distribution.experimental_distribute_dataset(
train_dataset
)
def create_model():
inputs = keras.Input((32, 32, 1))
preprocessed = keras.layers.Conv2D(3, (1, 1))(
inputs
) # ResNet requires 3 channels
features = resnet_v2.ResNet50V2(
include_top=False,
input_tensor=preprocessed,
pooling="avg",
weights=None,
).output
return keras.Model(inputs, features)
with distribution.scope():
# Set reduction to `none` so we can do the reduction afterwards and
# divide by global batch size.
loss_object = keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=losses_impl.Reduction.NONE
)
def compute_resnet_loss(labels, predictions):
per_example_loss = loss_object(labels, predictions)
return tf.nn.compute_average_loss(
per_example_loss, global_batch_size=global_batch_size
)
model = create_model()
optimizer = optimizers.adam_legacy.Adam()
def train_step(inputs):
images, labels = inputs
with tf.GradientTape() as tape:
predictions = model(images, training=True)
loss = compute_resnet_loss(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
@tf.function
def distributed_train_step(dataset_inputs):
per_replica_losses = distribution.run(
train_step, args=(dataset_inputs,)
)
return distribution.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
)
for epoch in range(epochs):
# Train loop
total_loss = 0.0
num_batches = 0
for x in train_dist_dataset:
total_loss += distributed_train_step(x)
num_batches += 1
train_loss = total_loss / num_batches
print(f"Epoch {epoch+1}, Loss: {train_loss}")
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main()
| tf-keras/tf_keras/distribute/ctl_correctness_test.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/ctl_correctness_test.py",
"repo_id": "tf-keras",
"token_count": 8157
} | 183 |
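The `compute_loss` helper above divides each replica's summed per-example loss by the global batch size, so that SUM-reducing across replicas reproduces the ordinary single-device mean. A small hedged sketch of that arithmetic with made-up numbers:

# Hedged sketch of the scaling behind tf.nn.compute_average_loss: a global
# batch of 4 split across 2 "replicas" sums back to the plain mean.
import tensorflow.compat.v2 as tf

per_example_loss = tf.constant([1.0, 3.0, 5.0, 7.0])
full_batch_mean = tf.reduce_mean(per_example_loss)  # 4.0

replica_0 = tf.nn.compute_average_loss(per_example_loss[:2], global_batch_size=4)
replica_1 = tf.nn.compute_average_loss(per_example_loss[2:], global_batch_size=4)
print(float(replica_0 + replica_1), float(full_batch_mean))  # 4.0 4.0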
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness test for tf.keras Embedding models using DistributionStrategy."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.distribute import keras_correctness_test_base
from tf_keras.optimizers.legacy import (
gradient_descent as gradient_descent_keras,
)
class DistributionStrategyEmbeddingModelCorrectnessTest(
keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase # noqa: E501
):
def get_model(
self,
max_words=10,
initial_weights=None,
distribution=None,
input_shapes=None,
):
del input_shapes
with keras_correctness_test_base.MaybeDistributionScope(distribution):
word_ids = keras.layers.Input(
shape=(max_words,), dtype=np.int32, name="words"
)
word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(
word_ids
)
if self.use_distributed_dense:
word_embed = keras.layers.TimeDistributed(
keras.layers.Dense(4)
)(word_embed)
avg = keras.layers.GlobalAveragePooling1D()(word_embed)
preds = keras.layers.Dense(2, activation="softmax")(avg)
model = keras.Model(inputs=[word_ids], outputs=[preds])
if initial_weights:
model.set_weights(initial_weights)
model.compile(
optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
return model
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.test_combinations_for_embedding_model()
+ keras_correctness_test_base.multi_worker_mirrored_eager()
)
def test_embedding_model_correctness(
self, distribution, use_numpy, use_validation_data
):
self.use_distributed_dense = False
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.test_combinations_for_embedding_model()
+ keras_correctness_test_base.multi_worker_mirrored_eager()
)
def test_embedding_time_distributed_model_correctness(
self, distribution, use_numpy, use_validation_data
):
self.use_distributed_dense = True
self.run_correctness_test(distribution, use_numpy, use_validation_data)
class DistributionStrategySiameseEmbeddingModelCorrectnessTest(
keras_correctness_test_base.TestDistributionStrategyEmbeddingModelCorrectnessBase # noqa: E501
):
def get_model(
self,
max_words=10,
initial_weights=None,
distribution=None,
input_shapes=None,
):
del input_shapes
with keras_correctness_test_base.MaybeDistributionScope(distribution):
word_ids_a = keras.layers.Input(
shape=(max_words,), dtype=np.int32, name="words_a"
)
word_ids_b = keras.layers.Input(
shape=(max_words,), dtype=np.int32, name="words_b"
)
def submodel(embedding, word_ids):
word_embed = embedding(word_ids)
rep = keras.layers.GlobalAveragePooling1D()(word_embed)
return keras.Model(inputs=[word_ids], outputs=[rep])
word_embed = keras.layers.Embedding(
input_dim=20,
output_dim=10,
input_length=max_words,
embeddings_initializer=keras.initializers.RandomUniform(0, 1),
)
a_rep = submodel(word_embed, word_ids_a).outputs[0]
b_rep = submodel(word_embed, word_ids_b).outputs[0]
sim = keras.layers.Dot(axes=1, normalize=True)([a_rep, b_rep])
model = keras.Model(inputs=[word_ids_a, word_ids_b], outputs=[sim])
if initial_weights:
model.set_weights(initial_weights)
# TODO(b/130808953): Switch back to the V1 optimizer after
# global_step is made mirrored.
model.compile(
optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
loss="mse",
metrics=["mse"],
)
return model
    def get_data(
        self,
        count=(
            keras_correctness_test_base._GLOBAL_BATCH_SIZE
            * keras_correctness_test_base._EVAL_STEPS
        ),
        min_words=5,
        max_words=10,
        max_word_id=19,
        num_classes=2,
    ):
        features_a, labels_a, _ = super().get_data(
            count, min_words, max_words, max_word_id, num_classes
        )
        features_b, labels_b, _ = super().get_data(
            count, min_words, max_words, max_word_id, num_classes
        )
        y_train = np.zeros((count, 1), dtype=np.float32)
        y_train[labels_a == labels_b] = 1.0
        y_train[labels_a != labels_b] = -1.0
        # TODO(b/123360757): Add tests for using list as inputs for multi-input
        # models.
        x_train = {
            "words_a": features_a,
            "words_b": features_b,
        }
        x_predict = x_train
        return x_train, y_train, x_predict
    @tf.__internal__.distribute.combinations.generate(
        keras_correctness_test_base.test_combinations_for_embedding_model()
        + keras_correctness_test_base.multi_worker_mirrored_eager()
    )
    def test_siamese_embedding_model_correctness(
        self, distribution, use_numpy, use_validation_data
    ):
        self.run_correctness_test(distribution, use_numpy, use_validation_data)
if __name__ == "__main__":
    tf.__internal__.distribute.multi_process_runner.test_main()
| tf-keras/tf_keras/distribute/keras_embedding_model_correctness_test.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/keras_embedding_model_correctness_test.py",
"repo_id": "tf-keras",
"token_count": 2973
} | 184 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras callbacks in multi-worker training with TF2."""
import json
import os
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import callbacks
from tf_keras.distribute import distributed_file_utils
from tf_keras.distribute import multi_worker_testing_utils
def checkpoint_exists(filepath):
"""Returns whether the checkpoint `filepath` refers to exists."""
if filepath.endswith(".h5"):
return tf.io.gfile.exists(filepath)
tf_saved_model_exists = tf.io.gfile.exists(filepath)
tf_weights_only_checkpoint_exists = tf.io.gfile.exists(filepath + ".index")
return tf_saved_model_exists or tf_weights_only_checkpoint_exists
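# For illustration (paths are hypothetical): an HDF5 checkpoint such as
# "/tmp/ckpt.h5" is checked as a plain file, while a TF-format checkpoint
# passed as "/tmp/ckpt" is detected either as a SavedModel path or via the
# "/tmp/ckpt.index" file written when only weights are saved.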
def _model_setup(test_obj, file_format):
"""Set up a MNIST TF-Keras model for testing purposes.
This function builds a MNIST TF-Keras model and returns relevant information
for testing.
Args:
test_obj: The `TestCase` testing object.
file_format: File format for checkpoints. 'tf' or 'h5'.
Returns:
A tuple of (model, saving_filepath, train_ds, steps) where train_ds is
the training dataset.
"""
batch_size = 64
steps = 2
with tf.distribute.MultiWorkerMirroredStrategy().scope():
# TODO(b/142509827): In rare cases this errors out at C++ level with the
# "Connect failed" error message.
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps
)
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
# Pass saving_filepath from the parent thread to ensure every worker has the
# same filepath to save.
saving_filepath = os.path.join(
test_obj.get_temp_dir(), "checkpoint." + file_format
)
return model, saving_filepath, train_ds, steps
def get_tf_config_task():
return json.loads(os.environ["TF_CONFIG"])["task"]
def get_tf_config_cluster_spec():
return json.loads(os.environ["TF_CONFIG"])["cluster"]
def get_task_type():
return get_tf_config_task()["type"]
def get_task_index():
return get_tf_config_task()["index"]
def is_chief():
return (
"chief" not in get_tf_config_cluster_spec()
and get_task_type() == "worker"
and get_task_index() == 0
)
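# Illustration with a hypothetical TF_CONFIG: given
#   {"cluster": {"worker": ["host0:12345", "host1:12345"]},
#    "task": {"type": "worker", "index": 0}}
# there is no dedicated "chief" job, so worker 0 acts as chief and
# `is_chief()` returns True; on every other worker it returns False.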
class KerasCallbackMultiProcessTest(parameterized.TestCase, tf.test.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=["eager"],
file_format=["h5", "tf"],
save_weights_only=[True, False],
)
)
def test_model_checkpoint_saves_on_chief_but_not_otherwise(
self, file_format, mode, save_weights_only
):
def proc_model_checkpoint_saves_on_chief_but_not_otherwise(
test_obj, file_format
):
model, saving_filepath, train_ds, steps = _model_setup(
test_obj, file_format
)
num_epoch = 2
extension = os.path.splitext(saving_filepath)[1]
            # Incorporate type/index information and thread id in
            # saving_filepath to ensure every worker has a unique path. Note
            # that in a normal use case the saving_filepath will be the same
            # for all workers, but we use different ones here just to test
            # that the chief saves the checkpoint while non-chief workers
            # don't.
task_config = get_tf_config_task()
saving_filepath = os.path.join(
test_obj.get_temp_dir(),
"checkpoint_%s_%d%s"
% (task_config["type"], task_config["index"], extension),
)
# The saving_filepath shouldn't exist at the beginning (as it's
# unique).
test_obj.assertFalse(checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
validation_data=train_ds,
validation_steps=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath,
save_weights_only=save_weights_only,
)
],
)
# If it's chief, the model should be saved; if not, the model
# shouldn't.
test_obj.assertEqual(checkpoint_exists(saving_filepath), is_chief())
# If it's chief, the model should be saved (`write_filepath` should
# simply return `saving_filepath`); if not, i.e. for non-chief
# workers, the temporary path generated by `write_filepath` should
# no longer contain the checkpoint that has been deleted.
test_obj.assertEqual(
checkpoint_exists(
distributed_file_utils.write_filepath(
saving_filepath, model._distribution_strategy
)
),
is_chief(),
)
tf.__internal__.distribute.multi_process_runner.run(
proc_model_checkpoint_saves_on_chief_but_not_otherwise,
cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self, file_format),
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(mode=["eager"])
)
def test_model_checkpoint_works_with_same_file_path(self, mode):
def proc_model_checkpoint_works_with_same_file_path(
test_obj, saving_filepath
):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 2
# The saving_filepath shouldn't exist at the beginning (as it's
# unique).
test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.ModelCheckpoint(filepath=saving_filepath)],
)
test_obj.assertTrue(tf.io.gfile.exists(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), "checkpoint")
tf.__internal__.distribute.multi_process_runner.run(
proc_model_checkpoint_works_with_same_file_path,
cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self, saving_filepath),
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(mode=["eager"])
)
def test_backupandrestore_checkpoint_works_with_interruption(self, mode):
class InterruptingCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
if epoch == 2:
raise RuntimeError("Interrupting!")
class AssertCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
# the interruption happened on epoch 2 as specified in
# InterruptingCallback, so the initial epoch after restart will
# begin at 2.
assert epoch > 1
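        # Minimal sketch of the pattern exercised below (the backup directory
        # name is illustrative): wrapping `fit` with
        #   callbacks.BackupAndRestore(backup_dir="/tmp/backup")
        # checkpoints the training state at each epoch, so rerunning the same
        # `fit` call after an interruption resumes from the last completed
        # epoch instead of starting over at epoch 0.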
def proc_model_checkpoint_works_with_same_file_path(
test_obj, saving_filepath
):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 4
# The saving_filepath shouldn't exist at the beginning (as it's
# unique).
test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
bar_dir = os.path.join(os.path.dirname(saving_filepath), "backup")
try:
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(filepath=saving_filepath),
callbacks.BackupAndRestore(backup_dir=bar_dir),
InterruptingCallback(),
],
)
except RuntimeError as e:
if "Interrupting!" not in str(e):
raise
tf.__internal__.distribute.multi_process_runner.get_barrier().wait()
backup_filepath = os.path.join(bar_dir, "chief", "checkpoint")
test_obj.assertTrue(tf.io.gfile.exists(backup_filepath))
test_obj.assertTrue(tf.io.gfile.exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(filepath=saving_filepath),
callbacks.BackupAndRestore(backup_dir=bar_dir),
AssertCallback(),
],
)
tf.__internal__.distribute.multi_process_runner.get_barrier().wait()
test_obj.assertFalse(tf.io.gfile.exists(backup_filepath))
test_obj.assertTrue(tf.io.gfile.exists(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), "checkpoint")
tf.__internal__.distribute.multi_process_runner.run(
proc_model_checkpoint_works_with_same_file_path,
cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self, saving_filepath),
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(mode=["eager"])
)
def test_profiler_saves_on_both_chief_and_non_chief(self, mode):
def proc_profiler_saves_on_both_chief_and_non_chief(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 2
task_config = get_tf_config_task()
saving_filepath = os.path.join(
test_obj.get_temp_dir(),
"logfile_%s_%d" % (task_config["type"], task_config["index"]),
)
# The saving_filepath shouldn't exist at the beginning (as it's
# unique).
test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.TensorBoard(
log_dir=saving_filepath, profile_batch=[2, 4]
)
],
)
# Profiler dir should be created on both chief and non-chief node
profiler_dir_path = os.path.join(
saving_filepath, "plugins", "profile"
)
test_obj.assertTrue(tf.io.gfile.exists(profiler_dir_path))
tf.__internal__.distribute.multi_process_runner.run(
proc_profiler_saves_on_both_chief_and_non_chief,
cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self,),
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(mode=["eager"])
)
def test_tensorboard_saves_on_chief_but_not_otherwise(self, mode):
def proc_tensorboard_saves_on_chief_but_not_otherwise(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 2
            # Incorporate type/index information and thread id in
            # saving_filepath to ensure every worker has a unique path. Note
            # that in a normal use case the saving_filepath will be the same
            # for all workers, but we use different ones here just to test
            # that the chief saves summaries while non-chief workers don't.
task_config = get_tf_config_task()
saving_filepath = os.path.join(
test_obj.get_temp_dir(),
"logfile_%s_%d" % (task_config["type"], task_config["index"]),
)
# The saving_filepath shouldn't exist at the beginning (as it's
# unique).
test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
# disabling profiler by setting profile_batch to zero
callbacks=[
callbacks.TensorBoard(
log_dir=saving_filepath, profile_batch=0
)
],
)
# If it's chief, the summaries should be saved in the filepath; if
# not, the directory should be empty (although created). Using
# `file_io.list_directory()` since the directory may be created at
# this point.
test_obj.assertEqual(
bool(tf.io.gfile.listdir(saving_filepath)), is_chief()
)
tf.__internal__.distribute.multi_process_runner.run(
proc_tensorboard_saves_on_chief_but_not_otherwise,
cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self,),
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(mode=["eager"])
)
def test_tensorboard_can_still_save_to_temp_even_if_it_exists(self, mode):
def proc_tensorboard_can_still_save_to_temp_even_if_it_exists(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 2
saving_filepath = os.path.join(
test_obj.get_temp_dir(),
f"logfile_{get_tf_config_task()['type']}",
)
saving_filepath_for_temp = os.path.join(
saving_filepath, "workertemp_1"
)
os.mkdir(saving_filepath)
os.mkdir(saving_filepath_for_temp)
            # Verifies that even if `saving_filepath_for_temp` exists,
            # TensorBoard can still save to a temporary directory.
test_obj.assertTrue(tf.io.gfile.exists(saving_filepath_for_temp))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)],
)
tf.__internal__.distribute.multi_process_runner.run(
proc_tensorboard_can_still_save_to_temp_even_if_it_exists,
cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self,),
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(mode=["eager"])
)
def test_tensorboard_works_with_same_file_path(self, mode):
def proc_tensorboard_works_with_same_file_path(
test_obj, saving_filepath
):
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
num_epoch = 2
# The saving_filepath shouldn't exist at the beginning (as it's
# unique).
test_obj.assertFalse(tf.io.gfile.exists(saving_filepath))
tf.__internal__.distribute.multi_process_runner.get_barrier().wait()
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)],
)
tf.__internal__.distribute.multi_process_runner.get_barrier().wait()
test_obj.assertTrue(tf.io.gfile.listdir(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), "logfile")
tf.__internal__.distribute.multi_process_runner.run(
proc_tensorboard_works_with_same_file_path,
cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self, saving_filepath),
)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(mode=["eager"])
)
def test_early_stopping(self, mode):
def proc_early_stopping(test_obj):
class EpochCounterCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs):
self.last_epoch = epoch
model, _, train_ds, steps = _model_setup(test_obj, file_format="")
epoch_counter_cbk = EpochCounterCallback()
cbks = [
callbacks.EarlyStopping(
monitor="loss", min_delta=0.05, patience=1, verbose=1
),
epoch_counter_cbk,
]
            # Empirically, `model.fit()` is expected to terminate around the
            # 22nd epoch. We assert that it stops before the 50th epoch to
            # avoid flakiness and keep the test predictable.
model.fit(
x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks
)
test_obj.assertLess(epoch_counter_cbk.last_epoch, 50)
tf.__internal__.distribute.multi_process_runner.run(
proc_early_stopping,
cluster_spec=tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501
num_workers=2
),
args=(self,),
)
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main()
| tf-keras/tf_keras/distribute/multi_worker_callback_tf2_test.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/multi_worker_callback_tf2_test.py",
"repo_id": "tf-keras",
"token_count": 8973
} | 185 |
# This package contains all the DTensor related TF-Keras components.
# Since DTensor is not a public API yet, all the DTensor related change
# can't be exposed to public yet.
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
# copybara:uncomment_begin(google-only)
# load(
# "//third_party/tensorflow/dtensor:build_defs.bzl",
# "dtensor_test",
# )
# copybara:uncomment_end
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = [
"//learning/brain/distribute/experimental/auto_distribute:__pkg__",
"//learning/brain/distribute/python:__subpackages__",
"//learning/brain/experimental/dtensor/models:__subpackages__",
"//tf_keras:friends",
],
licenses = ["notice"],
)
py_library(
name = "dtensor",
srcs = ["__init__.py"],
deps = [
"//:expect_tensorflow_installed",
],
)
tf_py_test(
name = "initializers_test",
srcs = ["initializers_test.py"],
shard_count = 4,
deps = [
":dtensor",
":test_util",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/initializers",
"//tf_keras/utils:tf_utils",
],
)
tf_py_test(
name = "layers_test",
srcs = ["layers_test.py"],
shard_count = 4,
tags = ["no_oss"],
deps = [
":dtensor",
":test_util",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/layers",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "layout_map",
srcs = ["layout_map.py"],
deps = [
":dtensor",
":lazy_variable",
":utils",
"//tf_keras/engine:base_layer",
],
)
tf_py_test(
name = "layout_map_test",
srcs = ["layout_map_test.py"],
tags = ["no_oss"],
deps = [
":dtensor",
":layout_map",
":test_util",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/layers",
"//tf_keras/models",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "integration_test_utils",
srcs = ["integration_test_utils.py"],
deps = [
":dtensor",
":layout_map",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras:losses",
"//tf_keras/datasets",
"//tf_keras/layers",
"//tf_keras/models",
"//tf_keras/utils:np_utils",
],
)
tf_py_test(
name = "metrics_test",
srcs = ["metrics_test.py"],
shard_count = 4,
deps = [
":dtensor",
":test_util",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/metrics",
"//tf_keras/utils:tf_utils",
],
)
# copybara:uncomment_begin(google-only)
# dtensor_test(
# name = "mnist_model_test",
# srcs = ["mnist_model_test.py"],
# env = {
# "CUDA_MODULE_LOADING": "LAZY",
# "TF_GPU_ALLOCATOR": "cuda_malloc_async",
# },
# tags = [
# "no_oss",
# "requires-net:external",
# ],
# deps = [
# ":dtensor",
# ":integration_test_utils",
# ":layout_map",
# ":test_util",
# "//:expect_numpy_installed",
# "//:expect_tensorflow_installed",
# "//tf_keras:backend",
# "//tf_keras/optimizers",
# "//tf_keras/utils:tf_utils",
# ],
# )
# copybara:uncomment_end
tf_py_test(
name = "optimizers_test",
srcs = ["optimizers_test.py"],
deps = [
":dtensor",
":layout_map",
":test_util",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras:losses",
"//tf_keras/layers",
"//tf_keras/models",
"//tf_keras/optimizers",
],
)
py_library(
name = "lazy_variable",
srcs = ["lazy_variable.py"],
deps = [
"//:expect_tensorflow_installed",
],
)
py_library(
name = "utils",
srcs = ["utils.py"],
deps = [
":dtensor",
"//:expect_tensorflow_installed",
],
)
tf_py_test(
name = "utils_test",
srcs = ["utils_test.py"],
deps = [
":dtensor",
":test_util",
":utils",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/layers",
],
)
py_library(
name = "test_util",
srcs = ["test_util.py"],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
],
)
tf_py_test(
name = "save_load_test",
srcs = ["save_load_test.py"],
deps = [
":dtensor",
":layout_map",
":test_util",
"//tf_keras",
"//tf_keras:backend",
"//tf_keras/layers",
"//tf_keras/models",
"//tf_keras/utils:tf_utils",
],
)
# copybara:uncomment_begin(google-only)
# dtensor_test(
# name = "strategy_integration_test",
# srcs = ["strategy_integration_test.py"],
# shard_count = {
# "CPU": 2,
# "GPU": 4,
# "TPU": 2,
# },
# tags = ["no_oss"],
# deps = [
# ":integration_test_utils",
# ":test_util",
# "//:expect_absl_installed", # absl/testing:parameterized
# "//:expect_numpy_installed",
# "//:expect_tensorflow_installed",
# "//tf_keras:backend",
# "//tf_keras/mixed_precision:mixed_precision_experimental",
# "//tf_keras/optimizers",
# "//tf_keras/utils:tf_utils",
# "//third_party/tensorflow/dtensor/python/tests:test_util",
# "//third_party/tensorflow/python/distribute/experimental:mirrored_strategy",
# ],
# )
# copybara:uncomment_end
| tf-keras/tf_keras/dtensor/BUILD/0 | {
"file_path": "tf-keras/tf_keras/dtensor/BUILD",
"repo_id": "tf-keras",
"token_count": 3094
} | 186 |
# Description:
# Contains the TF-Keras engine API (internal TensorFlow version).
# Placeholder: load unaliased py_library
# buildifier: disable=same-origin-load
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
# buildifier: disable=same-origin-load
load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
# TODO(scottzhu): Remove non-keras deps from TF.
default_visibility = ["//tf_keras:friends"],
licenses = ["notice"],
)
py_library(
name = "engine",
srcs = [
"__init__.py",
"compile_utils.py",
"functional.py",
"partial_batch_padding_handler.py",
"saving.py",
"sequential.py",
"training.py",
"training_arrays_v1.py",
"training_distributed_v1.py",
"training_eager_v1.py",
"training_generator_v1.py",
"training_utils.py",
"training_utils_v1.py",
"training_v1.py",
],
srcs_version = "PY3",
deps = [
":base_layer",
":base_preprocessing_layer",
":data_adapter",
":functional_utils",
":input_layer",
":input_spec",
":keras_tensor",
":node",
"//:expect_h5py_installed",
"//:expect_tensorboard_installed",
"//:expect_tensorflow_installed",
"//:expect_yaml_installed",
"//tf_keras:activations",
"//tf_keras:backend",
"//tf_keras:callbacks",
"//tf_keras:callbacks_v1",
"//tf_keras:constraints",
"//tf_keras:losses",
"//tf_keras:regularizers",
"//tf_keras/distribute",
"//tf_keras/distribute:distribute_coordinator_utils",
"//tf_keras/dtensor:layout_map",
"//tf_keras/export:export_lib",
"//tf_keras/initializers",
"//tf_keras/metrics",
"//tf_keras/mixed_precision:autocast_variable",
"//tf_keras/mixed_precision:loss_scale_optimizer",
"//tf_keras/mixed_precision:policy",
"//tf_keras/optimizers",
"//tf_keras/saving",
"//tf_keras/utils:engine_utils",
"//tf_keras/utils:metrics_utils",
"//tf_keras/utils:mode_keys",
"//tf_keras/utils:steps_per_execution_tuning",
"//tf_keras/utils:tf_utils",
"//tf_keras/utils:version_utils",
],
)
py_library(
name = "base_layer_utils",
srcs = ["base_layer_utils.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/dtensor",
"//tf_keras/utils:tf_inspect",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "base_layer",
srcs = [
"base_layer.py",
"base_layer_v1.py",
],
srcs_version = "PY3",
deps = [
":base_layer_utils",
":input_spec",
":node",
"//:expect_numpy_installed",
"//tf_keras:backend",
"//tf_keras:constraints",
"//tf_keras/initializers",
# TODO(keras-team): Fix the circular deps between layer and metrics.
# "//tf_keras/metrics",
"//tf_keras:regularizers",
"//tf_keras/dtensor:lazy_variable",
"//tf_keras/mixed_precision:autocast_variable",
"//tf_keras/mixed_precision:loss_scale_optimizer",
"//tf_keras/mixed_precision:policy",
"//tf_keras/saving",
"//tf_keras/utils:generic_utils",
"//tf_keras/utils:layer_utils",
"//tf_keras/utils:object_identity",
"//tf_keras/utils:tf_utils",
"//tf_keras/utils:traceback_utils",
"//tf_keras/utils:version_utils",
"//:expect_tensorflow_installed",
],
)
py_library(
name = "input_layer",
srcs = ["input_layer.py"],
deps = [
":base_layer",
":keras_tensor",
":node",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/distribute",
"//tf_keras/saving",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "functional_utils",
srcs = ["functional_utils.py"],
deps = [
":input_layer",
":keras_tensor",
":node",
"//:expect_tensorflow_installed",
],
)
py_library(
name = "data_adapter",
srcs = ["data_adapter.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/distribute",
"//tf_keras/utils:dataset_creator",
"//tf_keras/utils:engine_utils",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "input_spec",
srcs = ["input_spec.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras:backend",
],
)
py_library(
name = "keras_tensor",
srcs = ["keras_tensor.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/utils:object_identity",
],
)
py_library(
name = "base_preprocessing_layer",
srcs = [
"base_preprocessing_layer.py",
],
srcs_version = "PY3",
deps = [
":base_layer",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
],
)
py_library(
name = "node",
srcs = ["node.py"],
srcs_version = "PY3",
deps = [
":base_layer_utils",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/utils:tf_utils",
],
)
tf_py_test(
name = "base_layer_utils_test",
srcs = ["base_layer_utils_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
":base_layer_utils",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras:backend",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "data_adapter_test",
size = "medium",
srcs = ["data_adapter_test.py"],
python_version = "PY3",
shard_count = 4,
tags = [
"no_oss_py38", # TODO(b/150615192)
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
":data_adapter",
"//:expect_numpy_installed",
"//:expect_pandas_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "base_preprocessing_layer_test",
size = "medium",
srcs = ["base_preprocessing_layer_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
":base_preprocessing_layer",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "functional_utils_test",
size = "medium",
srcs = ["functional_utils_test.py"],
python_version = "PY3",
deps = [
":functional_utils",
":input_layer",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/layers",
"//tf_keras/models",
"//tf_keras/testing_infra:test_combinations",
],
)
cuda_py_test(
name = "training_gpu_test",
size = "small",
srcs = ["training_gpu_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
":engine",
":input_layer",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/layers/convolutional",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
],
)
tf_py_test(
name = "correctness_test",
size = "medium",
srcs = ["correctness_test.py"],
python_version = "PY3",
shard_count = 2,
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
"notsan",
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "keras_tensor_test",
size = "small",
srcs = ["keras_tensor_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "ragged_keras_tensor_test",
size = "small",
srcs = ["ragged_keras_tensor_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "input_spec_test",
size = "small",
srcs = ["input_spec_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
],
)
tf_py_test(
name = "training_test",
size = "large", # Resolves timeout in OSS build.
srcs = ["training_test.py"],
python_version = "PY3",
shard_count = 20,
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
"notsan",
],
deps = [
":engine",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras:backend",
"//tf_keras:callbacks",
"//tf_keras:losses",
"//tf_keras/layers",
"//tf_keras/metrics",
"//tf_keras/mixed_precision:policy",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
"//tf_keras/utils:data_utils",
"//tf_keras/utils:np_utils",
],
)
tf_py_test(
name = "compile_utils_test",
size = "medium",
srcs = ["compile_utils_test.py"],
tags = [
"nomac", # TODO(b/146226927)
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "training_dataset_test",
size = "medium",
srcs = ["training_dataset_test.py"],
python_version = "PY3",
shard_count = 4,
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "training_arrays_test",
size = "medium",
srcs = ["training_arrays_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/layers",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "training_generator_test",
size = "medium",
srcs = ["training_generator_test.py"],
python_version = "PY3",
shard_count = 6,
tags = [
"noasan", # TODO(b/132183295): Re-enable this.
"nomac", # TODO(b/140193633): Re-enable this.
"notsan",
],
deps = [
":engine",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras:losses",
"//tf_keras/layers",
"//tf_keras/metrics",
"//tf_keras/optimizers/legacy:optimizers",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
"//tf_keras/utils:data_utils",
],
)
tf_py_test(
name = "training_integration_test",
size = "medium",
srcs = ["training_integration_test.py"],
python_version = "PY3",
shard_count = 30,
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "feature_columns_integration_test",
size = "medium",
srcs = ["feature_columns_integration_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
"notsan",
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "training_eager_test",
size = "medium",
srcs = ["training_eager_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
"notsan",
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "training_utils_v1_test",
size = "medium",
srcs = ["training_utils_v1_test.py"],
python_version = "PY3",
tags = [
"no_oss", # TODO(b/135021748) re-enable
"notsan",
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "input_layer_test",
size = "medium",
srcs = ["input_layer_test.py"],
python_version = "PY3",
shard_count = 3,
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
":base_layer",
":engine",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
"//tf_keras/utils:layer_utils",
],
)
tf_py_test(
name = "functional_test",
size = "medium",
srcs = ["functional_test.py"],
python_version = "PY3",
shard_count = 8,
tags = [
"no-internal-py3",
"no_rocm",
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
":base_layer",
":engine",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras:backend",
"//tf_keras/initializers",
"//tf_keras/layers",
"//tf_keras/models",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
"//tf_keras/utils:layer_utils",
"//tf_keras/utils:tf_utils",
],
)
tf_py_test(
name = "node_test",
size = "medium",
srcs = ["node_test.py"],
python_version = "PY3",
shard_count = 3,
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
":base_layer",
":engine",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
"//tf_keras/utils:layer_utils",
],
)
tf_py_test(
name = "base_layer_test",
size = "medium",
srcs = ["base_layer_test.py"],
python_version = "PY3",
shard_count = 8,
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
":base_layer",
":engine",
"//:expect_numpy_installed",
"//:expect_tensorboard_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras:backend",
"//tf_keras:regularizers",
"//tf_keras/layers",
"//tf_keras/legacy_tf_layers:core",
"//tf_keras/mixed_precision:policy",
"//tf_keras/optimizers/legacy:optimizers",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
"//tf_keras/utils:tf_utils",
],
)
tf_py_test(
name = "control_flow_test",
size = "medium",
srcs = ["control_flow_test.py"],
python_version = "PY3",
shard_count = 8,
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "sequential_test",
size = "medium",
srcs = ["sequential_test.py"],
python_version = "PY3",
shard_count = 4,
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "deferred_sequential_test",
size = "medium",
srcs = ["deferred_sequential_test.py"],
python_version = "PY3",
tags = [
"nomac", # TODO(mihaimaruseac): b/127695564
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
| tf-keras/tf_keras/engine/BUILD/0 | {
"file_path": "tf-keras/tf_keras/engine/BUILD",
"repo_id": "tf-keras",
"token_count": 9367
} | 187 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to Feature Columns integration."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras import metrics as metrics_module
from tf_keras.feature_column import dense_features as df
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import np_utils
class TestDNNModel(keras.models.Model):
def __init__(self, feature_columns, units, name=None, **kwargs):
super().__init__(name=name, **kwargs)
self._input_layer = df.DenseFeatures(
feature_columns, name="input_layer"
)
self._dense_layer = keras.layers.Dense(units, name="dense_layer")
def call(self, features):
net = self._input_layer(features)
net = self._dense_layer(net)
return net
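# Usage sketch for the model above (feature values are illustrative): it
# consumes a dict of feature tensors keyed by column name, e.g.
#   cols = [tf.feature_column.numeric_column("a")]
#   TestDNNModel(cols, units=20)({"a": np.ones((4, 1))})  # -> shape (4, 20)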
class FeatureColumnsIntegrationTest(test_combinations.TestCase):
"""Most Sequential model API tests are covered in `training_test.py`."""
@test_combinations.run_all_keras_modes
def test_sequential_model(self):
columns = [tf.feature_column.numeric_column("a")]
model = keras.models.Sequential(
[
df.DenseFeatures(columns),
keras.layers.Dense(64, activation="relu"),
keras.layers.Dense(20, activation="softmax"),
]
)
model.compile(
optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"],
run_eagerly=test_utils.should_run_eagerly(),
)
x = {"a": np.random.random((10, 1))}
y = np.random.randint(20, size=(10, 1))
y = np_utils.to_categorical(y, num_classes=20)
model.fit(x, y, epochs=1, batch_size=5)
model.fit(x, y, epochs=1, batch_size=5)
model.evaluate(x, y, batch_size=5)
model.predict(x, batch_size=5)
@test_combinations.run_all_keras_modes
def test_sequential_model_with_ds_input(self):
columns = [tf.feature_column.numeric_column("a")]
model = keras.models.Sequential(
[
df.DenseFeatures(columns),
keras.layers.Dense(64, activation="relu"),
keras.layers.Dense(20, activation="softmax"),
]
)
model.compile(
optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"],
run_eagerly=test_utils.should_run_eagerly(),
)
y = np.random.randint(20, size=(100, 1))
y = np_utils.to_categorical(y, num_classes=20)
x = {"a": np.random.random((100, 1))}
ds1 = tf.data.Dataset.from_tensor_slices(x)
ds2 = tf.data.Dataset.from_tensor_slices(y)
ds = tf.data.Dataset.zip((ds1, ds2)).batch(5)
model.fit(ds, steps_per_epoch=1)
model.fit(ds, steps_per_epoch=1)
model.evaluate(ds, steps=1)
model.predict(ds, steps=1)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_sequential_model_with_crossed_column(self):
feature_columns = []
age_buckets = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("age"),
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65],
)
feature_columns.append(age_buckets)
# indicator cols
thal = tf.feature_column.categorical_column_with_vocabulary_list(
"thal", ["fixed", "normal", "reversible"]
)
crossed_feature = tf.feature_column.crossed_column(
[age_buckets, thal], hash_bucket_size=1000
)
crossed_feature = tf.feature_column.indicator_column(crossed_feature)
feature_columns.append(crossed_feature)
feature_layer = df.DenseFeatures(feature_columns)
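        # For reference: the crossed column hashes each (age bucket, thal)
        # pair into one of 1000 buckets and the indicator column one-hot
        # encodes the result, so `feature_layer` turns the raw feature dict
        # into a single wide dense tensor for the stack below.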
model = keras.models.Sequential(
[
feature_layer,
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(1, activation="sigmoid"),
]
)
age_data = np.random.randint(10, 100, size=100)
thal_data = np.random.choice(
["fixed", "normal", "reversible"], size=100
)
inp_x = {"age": age_data, "thal": thal_data}
inp_y = np.random.randint(0, 1, size=100)
ds = tf.data.Dataset.from_tensor_slices((inp_x, inp_y)).batch(5)
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"],
)
model.fit(ds, epochs=1)
model.fit(ds, epochs=1)
model.evaluate(ds)
model.predict(ds)
@test_combinations.run_all_keras_modes
def test_subclassed_model_with_feature_columns(self):
col_a = tf.feature_column.numeric_column("a")
col_b = tf.feature_column.numeric_column("b")
dnn_model = TestDNNModel([col_a, col_b], 20)
dnn_model.compile(
optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"],
run_eagerly=test_utils.should_run_eagerly(),
)
x = {"a": np.random.random((10, 1)), "b": np.random.random((10, 1))}
y = np.random.randint(20, size=(10, 1))
y = np_utils.to_categorical(y, num_classes=20)
dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
dnn_model.evaluate(x=x, y=y, batch_size=5)
dnn_model.predict(x=x, batch_size=5)
@test_combinations.run_all_keras_modes
def test_subclassed_model_with_feature_columns_with_ds_input(self):
col_a = tf.feature_column.numeric_column("a")
col_b = tf.feature_column.numeric_column("b")
dnn_model = TestDNNModel([col_a, col_b], 20)
dnn_model.compile(
optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"],
run_eagerly=test_utils.should_run_eagerly(),
)
y = np.random.randint(20, size=(100, 1))
y = np_utils.to_categorical(y, num_classes=20)
x = {"a": np.random.random((100, 1)), "b": np.random.random((100, 1))}
ds1 = tf.data.Dataset.from_tensor_slices(x)
ds2 = tf.data.Dataset.from_tensor_slices(y)
ds = tf.data.Dataset.zip((ds1, ds2)).batch(5)
dnn_model.fit(ds, steps_per_epoch=1)
dnn_model.fit(ds, steps_per_epoch=1)
dnn_model.evaluate(ds, steps=1)
dnn_model.predict(ds, steps=1)
# TODO(kaftan) seems to throw an error when enabled.
@test_combinations.run_all_keras_modes
def DISABLED_test_function_model_feature_layer_input(self):
col_a = tf.feature_column.numeric_column("a")
col_b = tf.feature_column.numeric_column("b")
feature_layer = df.DenseFeatures([col_a, col_b], name="fc")
dense = keras.layers.Dense(4)
# This seems problematic.... We probably need something for
# DenseFeatures the way Input is for InputLayer.
output = dense(feature_layer)
model = keras.models.Model([feature_layer], [output])
optimizer = "rmsprop"
loss = "mse"
loss_weights = [1.0, 0.5]
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), "mae"],
loss_weights=loss_weights,
)
data = ({"a": np.arange(10), "b": np.arange(10)}, np.arange(10, 20))
model.fit(*data, epochs=1)
# TODO(kaftan) seems to throw an error when enabled.
@test_combinations.run_all_keras_modes
def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
col_a = tf.feature_column.numeric_column("a")
col_b = tf.feature_column.numeric_column("b")
col_c = tf.feature_column.numeric_column("c")
fc1 = df.DenseFeatures([col_a, col_b], name="fc1")
fc2 = df.DenseFeatures([col_b, col_c], name="fc2")
dense = keras.layers.Dense(4)
# This seems problematic.... We probably need something for
# DenseFeatures the way Input is for InputLayer.
output = dense(fc1) + dense(fc2)
model = keras.models.Model([fc1, fc2], [output])
optimizer = "rmsprop"
loss = "mse"
loss_weights = [1.0, 0.5]
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), "mae"],
loss_weights=loss_weights,
)
data_list = (
[
{"a": np.arange(10), "b": np.arange(10)},
{"b": np.arange(10), "c": np.arange(10)},
],
np.arange(10, 100),
)
model.fit(*data_list, epochs=1)
data_bloated_list = (
[
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)},
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)},
],
np.arange(10, 100),
)
model.fit(*data_bloated_list, epochs=1)
data_dict = (
{
"fc1": {"a": np.arange(10), "b": np.arange(10)},
"fc2": {"b": np.arange(10), "c": np.arange(10)},
},
np.arange(10, 100),
)
model.fit(*data_dict, epochs=1)
data_bloated_dict = (
{
"fc1": {
"a": np.arange(10),
"b": np.arange(10),
"c": np.arange(10),
},
"fc2": {
"a": np.arange(10),
"b": np.arange(10),
"c": np.arange(10),
},
},
np.arange(10, 100),
)
model.fit(*data_bloated_dict, epochs=1)
@test_combinations.run_all_keras_modes
def test_string_input(self):
x = {
"age": np.random.random((1024, 1)),
"cabin": np.array(["a"] * 1024),
}
y = np.random.randint(2, size=(1024, 1))
ds1 = tf.data.Dataset.from_tensor_slices(x)
ds2 = tf.data.Dataset.from_tensor_slices(y)
dataset = tf.data.Dataset.zip((ds1, ds2)).batch(4)
categorical_cols = [
tf.feature_column.categorical_column_with_hash_bucket("cabin", 10)
]
feature_cols = [tf.feature_column.numeric_column("age")] + [
tf.feature_column.indicator_column(cc) for cc in categorical_cols
]
layers = [
df.DenseFeatures(feature_cols),
keras.layers.Dense(128),
keras.layers.Dense(1),
]
model = keras.models.Sequential(layers)
model.compile(optimizer="sgd", loss=keras.losses.BinaryCrossentropy())
model.fit(dataset)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/engine/feature_columns_integration_test.py/0 | {
"file_path": "tf-keras/tf_keras/engine/feature_columns_integration_test.py",
"repo_id": "tf-keras",
"token_count": 5747
} | 188 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Home of the `Sequential` model."""
import copy
import tensorflow.compat.v2 as tf
from tf_keras import layers as layer_module
from tf_keras.engine import base_layer
from tf_keras.engine import functional
from tf_keras.engine import input_layer
from tf_keras.engine import training
from tf_keras.engine import training_utils
from tf_keras.saving import serialization_lib
from tf_keras.saving.legacy import serialization as legacy_serialization
from tf_keras.saving.legacy.saved_model import model_serialization
from tf_keras.utils import generic_utils
from tf_keras.utils import layer_utils
from tf_keras.utils import tf_inspect
from tf_keras.utils import tf_utils
from tf_keras.utils import traceback_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
SINGLE_LAYER_OUTPUT_ERROR_MSG = (
"All layers in a Sequential model should have "
"a single output tensor. For multi-output "
"layers, use the functional API."
)
@keras_export("keras.Sequential", "keras.models.Sequential")
class Sequential(functional.Functional):
"""`Sequential` groups a linear stack of layers into a `tf.keras.Model`.
`Sequential` provides training and inference features on this model.
Examples:
```python
model = tf.keras.Sequential()
model.add(tf.keras.Input(shape=(16,)))
model.add(tf.keras.layers.Dense(8))
# Note that you can also omit the initial `Input`.
# In that case the model doesn't have any weights until the first call
# to a training/evaluation method (since it isn't yet built):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8))
model.add(tf.keras.layers.Dense(4))
# model.weights not created yet
# Whereas if you specify an `Input`, the model gets built
# continuously as you are adding layers:
model = tf.keras.Sequential()
model.add(tf.keras.Input(shape=(16,)))
model.add(tf.keras.layers.Dense(4))
len(model.weights)
# Returns "2"
# When using the delayed-build pattern (no input shape specified), you can
# choose to manually build your model by calling
# `build(batch_input_shape)`:
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8))
model.add(tf.keras.layers.Dense(4))
model.build((None, 16))
len(model.weights)
# Returns "4"
# Note that when using the delayed-build pattern (no input shape specified),
# the model gets built the first time you call `fit`, `eval`, or `predict`,
# or the first time you call the model on some input data.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8))
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
# This builds the model for the first time:
model.fit(x, y, batch_size=32, epochs=10)
```
"""
@tf.__internal__.tracking.no_automatic_dependency_tracking
@traceback_utils.filter_traceback
def __init__(self, layers=None, name=None):
"""Creates a `Sequential` model instance.
Args:
layers: Optional list of layers to add to the model.
name: Optional name for the model.
"""
# Skip the init in FunctionalModel since model doesn't have input/output
# yet
super(functional.Functional, self).__init__(name=name, autocast=False)
self.supports_masking = True
self._compute_output_and_mask_jointly = True
self._auto_track_sub_layers = False
self._inferred_input_shape = None
self._has_explicit_input_shape = False
self._input_dtype = None
self._layer_call_argspecs = {}
self._created_nodes = set()
# Flag that indicate whether the sequential network topology has been
# created. It is false when there isn't any layer, or the layers don't
# have an input shape.
self._graph_initialized = False
# Unfortunately some Sequential models using custom layers or
# FeatureColumn layers have multiple inputs. This is fundamentally
# incompatible with most of the Sequential API, and we have to disable a
# number of features for such models.
self._use_legacy_deferred_behavior = False
# Add to the model any layers passed to the constructor.
if layers:
if not isinstance(layers, (list, tuple)):
layers = [layers]
for layer in layers:
self.add(layer)
@property
def layers(self):
# Historically, `sequential.layers` only returns layers that were added
# via `add`, and omits the auto-generated `InputLayer` that comes at the
# bottom of the stack.
# `Trackable` manages the `_layers` attributes and does filtering
# over it.
layers = super().layers
if layers and isinstance(layers[0], input_layer.InputLayer):
return layers[1:]
return layers[:]
@tf.__internal__.tracking.no_automatic_dependency_tracking
@traceback_utils.filter_traceback
def add(self, layer):
"""Adds a layer instance on top of the layer stack.
Args:
layer: layer instance.
Raises:
TypeError: If `layer` is not a layer instance.
ValueError: In case the `layer` argument does not
know its input shape.
ValueError: In case the `layer` argument has
multiple output tensors, or is already connected
somewhere else (forbidden in `Sequential` models).
"""
        # If we are passed a TF-Keras tensor created by keras.Input(), we can
        # extract the input layer from its keras history and use that
        # without any loss of generality.
if hasattr(layer, "_keras_history"):
origin_layer = layer._keras_history[0]
if isinstance(origin_layer, input_layer.InputLayer):
layer = origin_layer
if isinstance(layer, tf.Module):
if not isinstance(layer, base_layer.Layer):
layer = functional.ModuleWrapper(layer)
else:
raise TypeError(
"The added layer must be an instance of class Layer. "
f"Received: layer={layer} of type {type(layer)}."
)
tf_utils.assert_no_legacy_layers([layer])
if not self._is_layer_name_unique(layer):
raise ValueError(
"All layers added to a Sequential model "
f'should have unique names. Name "{layer.name}" is already '
"the name of a layer in this model. Update the `name` argument "
"to pass a unique name."
)
self.built = False
set_inputs = False
self._maybe_create_attribute("_self_tracked_trackables", [])
if not self._self_tracked_trackables:
if isinstance(layer, input_layer.InputLayer):
# Case where the user passes an Input or InputLayer layer via
# `add`.
set_inputs = True
else:
batch_shape, dtype = training_utils.get_input_shape_and_dtype(
layer
)
if batch_shape:
# Instantiate an input layer.
x = input_layer.Input(
batch_shape=batch_shape,
dtype=dtype,
name=layer.name + "_input",
)
# This will build the current layer
# and create the node connecting the current layer
# to the input layer we just created.
layer(x)
set_inputs = True
if set_inputs:
outputs = tf.nest.flatten(layer._inbound_nodes[-1].outputs)
if len(outputs) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = outputs
self.inputs = layer_utils.get_source_inputs(self.outputs[0])
self.built = True
self._has_explicit_input_shape = True
elif self.outputs:
# If the model is being built continuously on top of an input layer:
# refresh its output.
output_tensor = layer(self.outputs[0])
if len(tf.nest.flatten(output_tensor)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = [output_tensor]
self.built = True
if set_inputs or self._graph_initialized:
self._init_graph_network(self.inputs, self.outputs)
self._graph_initialized = True
else:
self._self_tracked_trackables.append(layer)
self._handle_deferred_layer_dependencies([layer])
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
@tf.__internal__.tracking.no_automatic_dependency_tracking
@traceback_utils.filter_traceback
def pop(self):
"""Removes the last layer in the model.
Raises:
TypeError: if there are no layers in the model.
"""
if not self.layers:
raise TypeError("There are no layers in the model.")
layer = self._self_tracked_trackables.pop()
self._layer_call_argspecs.pop(layer)
if not self.layers:
self.outputs = None
self.inputs = None
self.built = False
self._inferred_input_shape = None
self._has_explicit_input_shape = False
self._graph_initialized = False
elif self._graph_initialized:
self.layers[-1]._outbound_nodes = []
self.outputs = [self.layers[-1].output]
self._init_graph_network(self.inputs, self.outputs)
self.built = True
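    # Usage sketch (layer sizes are arbitrary):
    #   model = tf.keras.Sequential(
    #       [tf.keras.layers.Dense(8), tf.keras.layers.Dense(4)])
    #   model.pop()        # removes the trailing Dense(4) layer
    #   len(model.layers)  # -> 1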
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _build_graph_network_for_inferred_shape(
self, input_shape, input_dtype=None
):
if input_shape is None or not self.layers:
return
if (
not tf.__internal__.tf2.enabled()
or not tf.compat.v1.executing_eagerly_outside_functions()
):
# This behavior is disabled in V1 or when eager execution is
# disabled.
return
if (
not self._has_explicit_input_shape
and not self._use_legacy_deferred_behavior
):
# Determine whether the input shape is novel, i.e. whether the model
# should be rebuilt.
input_shape = tuple(input_shape)
if self._inferred_input_shape is None:
new_shape = input_shape
else:
new_shape = relax_input_shape(
self._inferred_input_shape, input_shape
)
if (
new_shape is not None
and new_shape != self._inferred_input_shape
):
# A novel shape has been received: we need to rebuild the model.
# In case we are inside a graph function, we step out of it.
with tf.init_scope():
inputs = input_layer.Input(
batch_shape=new_shape,
dtype=input_dtype,
name=self.layers[0].name + "_input",
)
layer_input = inputs
created_nodes = set()
for layer in self.layers:
# Clear nodes previously created via this method. This
# prevents node accumulation and ensures that e.g.
# `layer.output` is always connected to `model.inputs`
# (this is important e.g. for the feature extraction use
# case). We don't just do `layer._inbound_nodes = []`
# in order not to break shared layers added to
# Sequential models (which is technically illegal as per
# the `add()` docstring, but wasn't previously
# disabled).
clear_previously_created_nodes(
layer, self._created_nodes
)
try:
# Create Functional API connection by calling the
# current layer
layer_output = layer(layer_input)
except: # noqa: E722
# Functional API calls may fail for a number of
# reasons: 1) The layer may be buggy. In this case
# it will be easier for the user to debug if we fail
# on the first call on concrete data, instead of our
# own call on a symbolic input. 2) The layer is
# dynamic (graph-incompatible) and hasn't overridden
# `compute_output_shape`. In this case, it is
# impossible to build a graph network. 3) The layer
# is otherwise incompatible with the Functional API
# (e.g. this is the case for some probabilistic
# layers that rely on hacks and that do not return
# tensors). In all these cases, we should avoid
# creating a graph network (or we simply can't).
self._use_legacy_deferred_behavior = True
return
if len(tf.nest.flatten(layer_output)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
# Keep track of nodes just created above
track_nodes_created_by_last_call(layer, created_nodes)
layer_input = layer_output
outputs = layer_output
self._created_nodes = created_nodes
try:
# Initialize a graph Network. This call will never fail
# for stack of valid TF-Keras layers. However some users
# have layers that are fundamentally incompatible with
# the Functional API, which do not return tensors. In
# this case, we fall back to the legacy deferred
# behavior.
# TODO(fchollet): consider raising here, as we should
# not be supporting such layers.
self._init_graph_network(inputs, outputs)
self._graph_initialized = True
except: # noqa: E722
self._use_legacy_deferred_behavior = True
self._inferred_input_shape = new_shape
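    # Example of the relaxation above (shapes are illustrative): a first call
    # with inputs of shape (32, 16) infers (32, 16); a later call with
    # (64, 16) relaxes the mismatching batch dimension to None, so the graph
    # network is rebuilt once for (None, 16) and reused for both batch sizes.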
@generic_utils.default
def build(self, input_shape=None):
if self._graph_initialized:
self._init_graph_network(self.inputs, self.outputs)
else:
if input_shape is None:
raise ValueError("You must provide an `input_shape` argument.")
self._build_graph_network_for_inferred_shape(input_shape)
if not self.built:
input_shape = tuple(input_shape)
self._build_input_shape = input_shape
super().build(input_shape)
self.built = True
def call(self, inputs, training=None, mask=None):
# If applicable, update the static input shape of the model.
if not self._has_explicit_input_shape:
if not tf.is_tensor(inputs) and not isinstance(inputs, tf.Tensor):
# This is a Sequential with multiple inputs. This is technically
# an invalid use case of Sequential, but we tolerate it for
# backwards compatibility.
self._use_legacy_deferred_behavior = True
self._build_input_shape = tf.nest.map_structure(
_get_shape_tuple, inputs
)
else:
self._build_graph_network_for_inferred_shape(
inputs.shape, inputs.dtype
)
if self._graph_initialized:
if not self.built:
self._init_graph_network(self.inputs, self.outputs)
return super().call(inputs, training=training, mask=mask)
outputs = inputs # handle the corner case where self.layers is empty
for layer in self.layers:
# During each iteration, `inputs` are the inputs to `layer`, and
# `outputs` are the outputs of `layer` applied to `inputs`. At the
# end of each iteration `inputs` is set to `outputs` to prepare for
# the next layer.
kwargs = {}
argspec = self._layer_call_argspecs[layer].args
if "mask" in argspec:
kwargs["mask"] = mask
if "training" in argspec:
kwargs["training"] = training
outputs = layer(inputs, **kwargs)
inputs = outputs
def _get_mask_from_keras_tensor(kt):
return getattr(kt, "_keras_mask", None)
mask = tf.nest.map_structure(_get_mask_from_keras_tensor, outputs)
return outputs
def compute_output_shape(self, input_shape):
shape = input_shape
for layer in self.layers:
shape = layer.compute_output_shape(shape)
return shape
def compute_mask(self, inputs, mask):
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
outputs = self.call(inputs, mask=mask)
return getattr(outputs, "_keras_mask", None)
def get_config(self):
layer_configs = []
serialize_obj_fn = serialization_lib.serialize_keras_object
if getattr(self, "use_legacy_config", None):
serialize_obj_fn = legacy_serialization.serialize_keras_object
for layer in super().layers:
# `super().layers` include the InputLayer if available (it is
# filtered out of `self.layers`). Note that
# `self._self_tracked_trackables` is managed by the tracking
# infrastructure and should not be used.
layer_configs.append(serialize_obj_fn(layer))
config = training.Model.get_config(self)
config["name"] = self.name
config["layers"] = copy.deepcopy(layer_configs)
if not self._is_graph_network and self._build_input_shape is not None:
config["build_input_shape"] = self._build_input_shape
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if "name" in config:
name = config["name"]
build_input_shape = config.get("build_input_shape")
layer_configs = config["layers"]
else:
name = None
layer_configs = config
model = cls(name=name)
for layer_config in layer_configs:
use_legacy_format = "module" not in layer_config
layer = layer_module.deserialize(
layer_config,
custom_objects=custom_objects,
use_legacy_format=use_legacy_format,
)
model.add(layer)
if (
not model.inputs
and build_input_shape
and isinstance(build_input_shape, (tuple, list))
):
model.build(build_input_shape)
return model
@property
def input_spec(self):
if hasattr(self, "_manual_input_spec"):
return self._manual_input_spec
if self._has_explicit_input_shape:
return super().input_spec
return None
@input_spec.setter
def input_spec(self, value):
self._manual_input_spec = value
@property
def _trackable_saved_model_saver(self):
return model_serialization.SequentialSavedModelSaver(self)
def _is_layer_name_unique(self, layer):
for ref_layer in self.layers:
if layer.name == ref_layer.name and ref_layer is not layer:
return False
return True
def _assert_weights_created(self):
if self._graph_initialized:
return
# When the graph has not been initialized, use the Model's
# implementation to check if the weights have been created.
super(functional.Functional, self)._assert_weights_created()
def _get_shape_tuple(t):
if hasattr(t, "shape"):
shape = t.shape
if isinstance(shape, tuple):
return shape
if shape.rank is not None:
return tuple(shape.as_list())
return None
return None
def relax_input_shape(shape_1, shape_2):
if shape_1 is None or shape_2 is None:
return None
if len(shape_1) != len(shape_2):
return None
return tuple(None if d1 != d2 else d1 for d1, d2 in zip(shape_1, shape_2))
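# Illustrative note (added, not part of the original module): this helper
# keeps dimensions on which the two shapes agree and relaxes mismatched ones
# to None. For example, relax_input_shape((None, 32, 3), (None, 64, 3))
# returns (None, None, 3), while shapes of different rank yield None.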
def clear_previously_created_nodes(layer, created_nodes):
"""Remove nodes from `created_nodes` from the layer's inbound_nodes."""
for node in layer._inbound_nodes:
prev_layers = node.inbound_layers
for prev_layer in tf.nest.flatten(prev_layers):
prev_layer._outbound_nodes = [
n for n in prev_layer._outbound_nodes if n not in created_nodes
]
layer._inbound_nodes = [
n for n in layer._inbound_nodes if n not in created_nodes
]
def track_nodes_created_by_last_call(layer, created_nodes):
"""Adds to `created_nodes` the nodes created by the last call to `layer`."""
if not layer._inbound_nodes:
return
created_nodes.add(layer._inbound_nodes[-1])
prev_layers = layer._inbound_nodes[-1].inbound_layers
for prev_layer in tf.nest.flatten(prev_layers):
if prev_layer._outbound_nodes:
created_nodes.add(prev_layer._outbound_nodes[-1])
| tf-keras/tf_keras/engine/sequential.py/0 | {
"file_path": "tf-keras/tf_keras/engine/sequential.py",
"repo_id": "tf-keras",
"token_count": 10446
} | 189 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training utility functions."""
import functools
import multiprocessing.pool
import time
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import backend
from tf_keras.engine import keras_tensor
from tf_keras.engine import training_utils_v1
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# isort: off
from tensorflow.python.platform import tf_logging as logging
class ModelInputsTest(tf.test.TestCase):
def test_single_thing(self):
a = np.ones(10)
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(["input_1"], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tf.is_tensor(vals))
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.assertEqual(1, len(vals))
self.assertTrue(tf.is_tensor(vals[0]))
self.assertEqual(backend.floatx(), vals[0].dtype)
def test_single_thing_eager(self):
if not tf.executing_eagerly():
self.skipTest("Run in eager mode only.")
a = np.ones(10, dtype=np.int32)
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(["input_1"], model_inputs.get_input_names())
val = model_inputs.get_symbolic_inputs()
self.assertIsInstance(val, keras_tensor.KerasTensor)
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.assertEqual(1, len(vals))
self.assertIsInstance(vals[0], keras_tensor.KerasTensor)
self.assertEqual(tf.int32, vals[0].dtype)
def test_list(self):
a = [np.ones(10), np.ones(20)]
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(["input_1", "input_2"], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tf.is_tensor(vals[0]))
self.assertTrue(tf.is_tensor(vals[1]))
def test_list_eager(self):
if not tf.executing_eagerly():
self.skipTest("Run in eager mode only.")
a = [np.ones(10), np.ones(20)]
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(["input_1", "input_2"], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertIsInstance(vals[0], keras_tensor.KerasTensor)
self.assertIsInstance(vals[1], keras_tensor.KerasTensor)
def test_dict(self):
a = {"b": np.ones(10), "a": np.ones(20)}
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(["a", "b"], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tf.is_tensor(vals["a"]))
self.assertTrue(tf.is_tensor(vals["b"]))
def test_dict_eager(self):
if not tf.executing_eagerly():
self.skipTest("Run in eager mode only.")
a = {"b": np.ones(10), "a": np.ones(20)}
model_inputs = training_utils_v1.ModelInputs(a)
self.assertEqual(["a", "b"], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertIsInstance(vals["a"], keras_tensor.KerasTensor)
self.assertIsInstance(vals["b"], keras_tensor.KerasTensor)
class DatasetUtilsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("Batch", lambda: tf.data.Dataset.range(5).batch(2)),
("Cache", lambda: tf.data.Dataset.range(5).cache()),
(
"Concatenate",
lambda: tf.data.Dataset.range(5).concatenate(
tf.data.Dataset.range(5)
),
),
(
"FlatMap",
lambda: tf.data.Dataset.range(5).flat_map(
lambda _: tf.data.Dataset.from_tensors(0)
),
),
(
"FlatMap_Shuffle",
lambda: tf.data.Dataset.range(5).flat_map(
lambda _: tf.data.Dataset.from_tensors(0).shuffle(1)
),
True,
),
("Filter", lambda: tf.data.Dataset.range(5).filter(lambda _: True)),
(
"FixedLengthRecordDatasetV2",
lambda: tf.data.FixedLengthRecordDataset([], 42),
),
("FromTensors", lambda: tf.data.Dataset.from_tensors(0)),
(
"FromTensorSlices",
lambda: tf.data.Dataset.from_tensor_slices([0, 0, 0]),
),
(
"Interleave",
lambda: tf.data.Dataset.range(5).interleave(
lambda _: tf.data.Dataset.from_tensors(0), cycle_length=1
),
),
(
"Interleave_Shuffle",
lambda: tf.data.Dataset.range(5).interleave(
lambda _: tf.data.Dataset.from_tensors(0).shuffle(1),
cycle_length=1,
),
True,
),
("Map", lambda: tf.data.Dataset.range(5).map(lambda x: x)),
(
"Options",
lambda: tf.data.Dataset.range(5).with_options(tf.data.Options()),
),
("PaddedBatch", lambda: tf.data.Dataset.range(5).padded_batch(2, [])),
(
"ParallelInterleave",
lambda: tf.data.Dataset.range(5).interleave(
lambda _: tf.data.Dataset.from_tensors(0),
cycle_length=1,
num_parallel_calls=1,
),
),
(
"ParallelMap",
lambda: tf.data.Dataset.range(5).map(
lambda x: x, num_parallel_calls=1
),
),
("Prefetch", lambda: tf.data.Dataset.range(5).prefetch(1)),
("Range", lambda: tf.data.Dataset.range(0)),
("Repeat", lambda: tf.data.Dataset.range(0).repeat(0)),
("Shuffle", lambda: tf.data.Dataset.range(5).shuffle(1), True),
("Skip", lambda: tf.data.Dataset.range(5).skip(2)),
("Take", lambda: tf.data.Dataset.range(5).take(2)),
("TextLineDataset", lambda: tf.data.TextLineDataset([])),
("TFRecordDataset", lambda: tf.data.TFRecordDataset([])),
("Window", lambda: tf.data.Dataset.range(5).window(2)),
("Zip", lambda: tf.data.Dataset.zip(tf.data.Dataset.range(5))),
)
def test_verify_dataset_shuffled(self, dataset_fn, expect_shuffled=False):
dataset = dataset_fn()
if not expect_shuffled:
with tf.compat.v1.test.mock.patch.object(
logging, "warning"
) as mock_log:
shuffled = training_utils_v1.verify_dataset_shuffled(dataset)
self.assertRegex(
str(mock_log.call_args),
"input dataset `x` is not shuffled.",
)
self.assertFalse(shuffled)
else:
self.assertTrue(training_utils_v1.verify_dataset_shuffled(dataset))
class StandardizeWeightsTest(test_combinations.TestCase):
def test_sample_weights(self):
y = np.array([0, 1, 0, 0, 2])
sample_weights = np.array([0.5, 1.0, 1.0, 0.0, 2.0])
weights = training_utils_v1.standardize_weights(y, sample_weights)
self.assertAllClose(weights, sample_weights)
def test_class_weights(self):
y = np.array([0, 1, 0, 0, 2])
class_weights = {0: 0.5, 1: 1.0, 2: 1.5}
weights = training_utils_v1.standardize_weights(
y, class_weight=class_weights
)
self.assertAllClose(weights, np.array([0.5, 1.0, 0.5, 0.5, 1.5]))
def test_sample_weights_and_class_weights(self):
y = np.array([0, 1, 0, 0, 2])
sample_weights = np.array([0.5, 1.0, 1.0, 0.0, 2.0])
class_weights = {0: 0.5, 1: 1.0, 2: 1.5}
weights = training_utils_v1.standardize_weights(
y, sample_weights, class_weights
)
expected = sample_weights * np.array([0.5, 1.0, 0.5, 0.5, 1.5])
self.assertAllClose(weights, expected)
def test_dataset_with_class_weight(self):
model = test_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile("rmsprop", "mse")
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
class_weight_np = np.array([0.25, 0.25, 0.25, 0.25])
class_weight = dict(enumerate(class_weight_np))
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=1,
class_weight=class_weight,
)
class MonitoredPool(multiprocessing.pool.ThreadPool):
def __init__(self, *args, **kwargs):
self._apply_counter = 0
self._func_wrapper = None
super().__init__(*args, **kwargs)
def apply_async(self, func, *args, **kwargs):
self._apply_counter += 1
if self._func_wrapper:
func = self._func_wrapper(func)
return super().apply_async(func, *args, **kwargs)
def add_sleep(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
time.sleep(1.0)
return f(*args, **kwargs)
return wrapped
def cause_error(f):
@functools.wraps(f)
def wrapped(batch_element, batch_start, batch_end, is_finished):
# Induce a TypeError during assignment.
return f(None, None, None, is_finished)
return wrapped
_TEST_DATA = np.array(
(
(3, 1, 3, 1, 2, 0, 3, 3, 1, 2),
(0, 1, 2, 1, 3, 0, 0, 1, 3, 0),
(3, 2, 1, 1, 1, 1, 1, 3, 2, 3),
(2, 2, 0, 1, 0, 3, 3, 2, 1, 1),
(3, 0, 3, 3, 3, 2, 1, 0, 0, 1),
(1, 0, 3, 3, 3, 2, 1, 2, 3, 1),
)
)
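# Note (added for clarity, not part of the original test module): _TEST_DATA
# is a 6x10 integer array, so np.array_split(_TEST_DATA, 4) in the
# aggregation tests below produces row batches of sizes 2, 2, 1 and 1.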
class AggregationTest(test_combinations.TestCase):
def setUp(self):
super().setUp()
self._old_pool = training_utils_v1._COPY_POOL
self._old_threshold = (
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD
)
self._old_timeout = training_utils_v1.SliceAggregator._MAX_COPY_SECONDS
training_utils_v1._COPY_POOL = MonitoredPool(
training_utils_v1._COPY_THREADS
)
def tearDown(self):
super().tearDown()
training_utils_v1._COPY_POOL = self._old_pool
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = (
self._old_threshold
)
training_utils_v1.SliceAggregator._MAX_COPY_SECONDS = self._old_timeout
def _run_with_steps(self):
aggregator = training_utils_v1.OutputsAggregator(use_steps=True)
for i, batch in enumerate(np.array_split(_TEST_DATA, 4)):
if i == 0:
aggregator.create(batch)
aggregator.aggregate(batch)
assert len(aggregator.results) == 1
assert isinstance(
aggregator.results[0], training_utils_v1.ConcatAggregator
)
aggregator.finalize()
return aggregator.results
def _run_without_steps(self):
aggregator = training_utils_v1.OutputsAggregator(
use_steps=False, num_samples=6
)
batch_start = 0
for i, batch in enumerate(np.array_split(_TEST_DATA, 4)):
if i == 0:
aggregator.create(batch)
batch_end = batch_start + batch.shape[0]
aggregator.aggregate(batch, batch_start, batch_end)
batch_start = batch_end
assert len(aggregator.results) == 1
assert isinstance(
aggregator.results[0], training_utils_v1.SliceAggregator
)
aggregator.finalize()
return aggregator.results
def test_with_steps(self):
self.assertAllEqual(self._run_with_steps(), _TEST_DATA)
def test_without_steps(self):
self.assertAllEqual(self._run_without_steps(), _TEST_DATA)
def test_nested_aggregation(self):
aggregator = training_utils_v1.OutputsAggregator(
use_steps=False, num_samples=6
)
batches = np.array_split(_TEST_DATA, 4)
batch_start = 0
for i, batch in enumerate(zip(batches, batches)):
if i == 0:
aggregator.create(batch)
batch_end = batch_start + batch[0].shape[0]
aggregator.aggregate(batch, batch_start, batch_end)
batch_start = batch_end
assert len(aggregator.results) == 2
aggregator.finalize()
self.assertAllEqual(aggregator.results, (_TEST_DATA, _TEST_DATA))
def test_concat_single_batch(self):
aggregator = training_utils_v1.OutputsAggregator(use_steps=True)
data = _TEST_DATA.copy()
aggregator.create(data)
assert len(aggregator.results) == 1
assert isinstance(
aggregator.results[0], training_utils_v1.ConcatAggregator
)
aggregator.aggregate(data)
aggregator.finalize()
assert aggregator.results is data # No copy.
def test_slice_single_batch(self):
aggregator = training_utils_v1.OutputsAggregator(
use_steps=False, num_samples=6
)
data = _TEST_DATA.copy()
aggregator.create(data)
assert len(aggregator.results) == 1
assert isinstance(
aggregator.results[0], training_utils_v1.SliceAggregator
)
aggregator.aggregate(data, 0, 6)
aggregator.finalize()
assert aggregator.results is data # No copy.
def test_async_copy(self):
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
self.assertAllEqual(self._run_without_steps(), _TEST_DATA)
# Two of the four batches will have 20 elements and two will have 10.
self.assertEqual(training_utils_v1._COPY_POOL._apply_counter, 2)
def test_async_copy_timeout(self):
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
training_utils_v1.SliceAggregator._MAX_COPY_SECONDS = 0.1
training_utils_v1._COPY_POOL._func_wrapper = add_sleep
with self.assertRaisesRegex(ValueError, "Timed out waiting for copy"):
self._run_without_steps()
def test_async_copy_reraise(self):
training_utils_v1.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
training_utils_v1.SliceAggregator._MAX_COPY_SECONDS = 1.0
training_utils_v1._COPY_POOL._func_wrapper = cause_error
with self.assertRaisesRegex(TypeError, "NoneType"):
self._run_without_steps()
class CompositeTensorTestUtils(test_combinations.TestCase):
def test_is_composite(self):
# Validate that all composite tensor and value types return true.
self.assertTrue(
training_utils_v1.is_composite_or_composite_value(
tf.SparseTensor([[0, 0]], [1], [1, 1])
)
)
self.assertTrue(
training_utils_v1.is_composite_or_composite_value(
tf.compat.v1.SparseTensorValue([[0, 0]], [1], [1, 1])
)
)
self.assertTrue(
training_utils_v1.is_composite_or_composite_value(
tf.RaggedTensor.from_row_splits(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64)
)
)
)
self.assertTrue(
training_utils_v1.is_composite_or_composite_value(
tf.compat.v1.ragged.RaggedTensorValue(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64)
)
)
)
# Test that numpy arrays and tensors return false.
self.assertFalse(
training_utils_v1.is_composite_or_composite_value(
np.ndarray([0, 1])
)
)
self.assertFalse(
training_utils_v1.is_composite_or_composite_value(
tf.convert_to_tensor([3, 1])
)
)
def test_sparse_concatenation(self):
tensor_1 = tf.SparseTensor([[0, 0]], [1], [1, 1])
tensor_2 = tf.SparseTensor([[0, 0]], [2], [1, 1])
concatenated_tensor = training_utils_v1._append_composite_tensor(
tensor_1, tensor_2
)
evaluated_tensor = self.evaluate(concatenated_tensor)
self.assertAllEqual(evaluated_tensor.indices, [[0, 0], [1, 0]])
self.assertAllEqual(evaluated_tensor.values, [1, 2])
self.assertAllEqual(evaluated_tensor.dense_shape, [2, 1])
def test_sparse_value_concatenation(self):
tensor_1 = tf.compat.v1.SparseTensorValue([[0, 0]], [1], [1, 1])
tensor_2 = tf.compat.v1.SparseTensorValue([[0, 0]], [2], [1, 1])
concatenated_tensor = training_utils_v1._append_composite_tensor(
tensor_1, tensor_2
)
self.assertAllEqual(concatenated_tensor.indices, [[0, 0], [1, 0]])
self.assertAllEqual(concatenated_tensor.values, [1, 2])
self.assertAllEqual(concatenated_tensor.dense_shape, [2, 1])
def test_ragged_concatenation(self):
tensor_1 = tf.RaggedTensor.from_row_splits(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64)
)
tensor_2 = tf.RaggedTensor.from_row_splits(
np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64)
)
concatenated_tensor = training_utils_v1._append_composite_tensor(
tensor_1, tensor_2
)
evaluated_tensor = self.evaluate(concatenated_tensor)
self.assertAllEqual(evaluated_tensor.values, [0, 1, 2, 3, 4, 5])
self.assertAllEqual(evaluated_tensor.row_splits, [0, 1, 3, 5, 6])
def test_ragged_value_concatenation(self):
tensor_1 = tf.compat.v1.ragged.RaggedTensorValue(
np.array([0, 1, 2]), np.array([0, 1, 3], dtype=np.int64)
)
tensor_2 = tf.compat.v1.ragged.RaggedTensorValue(
np.array([3, 4, 5]), np.array([0, 2, 3], dtype=np.int64)
)
concatenated_tensor = training_utils_v1._append_composite_tensor(
tensor_1, tensor_2
)
self.assertAllEqual(concatenated_tensor.values, [0, 1, 2, 3, 4, 5])
self.assertAllEqual(concatenated_tensor.row_splits, [0, 1, 3, 5, 6])
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/engine/training_utils_v1_test.py/0 | {
"file_path": "tf-keras/tf_keras/engine/training_utils_v1_test.py",
"repo_id": "tf-keras",
"token_count": 9273
} | 190 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for sequence feature columns with SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.feature_column import dense_features
from tf_keras.feature_column import sequence_feature_column as ksfc
from tf_keras.layers import merging
from tf_keras.layers.rnn import base_rnn
from tf_keras.layers.rnn import simple_rnn
# isort: off
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
class SequenceFeatureColumnIntegrationTest(tf.test.TestCase):
def _make_sequence_example(self):
example = example_pb2.SequenceExample()
example.context.feature["int_ctx"].int64_list.value.extend([5])
example.context.feature["float_ctx"].float_list.value.extend([123.6])
for val in range(0, 10, 2):
feat = feature_pb2.Feature()
feat.int64_list.value.extend([val] * val)
example.feature_lists.feature_list["int_list"].feature.extend(
[feat]
)
for val in range(1, 11, 2):
feat = feature_pb2.Feature()
feat.bytes_list.value.extend([tf.compat.as_bytes(str(val))] * val)
example.feature_lists.feature_list["str_list"].feature.extend(
[feat]
)
return example
def _build_feature_columns(self):
col = tf.feature_column.categorical_column_with_identity(
"int_ctx", num_buckets=100
)
ctx_cols = [
tf.feature_column.embedding_column(col, dimension=10),
tf.feature_column.numeric_column("float_ctx"),
]
identity_col = (
tf.feature_column.sequence_categorical_column_with_identity(
"int_list", num_buckets=10
)
)
bucket_col = (
tf.feature_column.sequence_categorical_column_with_hash_bucket(
"bytes_list", hash_bucket_size=100
)
)
seq_cols = [
tf.feature_column.embedding_column(identity_col, dimension=10),
tf.feature_column.embedding_column(bucket_col, dimension=20),
]
return ctx_cols, seq_cols
def test_sequence_example_into_input_layer(self):
examples = [_make_sequence_example().SerializeToString()] * 100
ctx_cols, seq_cols = self._build_feature_columns()
def _parse_example(example):
ctx, seq = tf.io.parse_single_sequence_example(
example,
context_features=tf.feature_column.make_parse_example_spec(
ctx_cols
),
sequence_features=tf.feature_column.make_parse_example_spec(
seq_cols
),
)
ctx.update(seq)
return ctx
ds = tf.data.Dataset.from_tensor_slices(examples)
ds = ds.map(_parse_example)
ds = ds.batch(20)
# Test on a single batch
features = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()
# Tile the context features across the sequence features
sequence_input_layer = ksfc.SequenceFeatures(seq_cols)
seq_input, _ = sequence_input_layer(features)
dense_input_layer = dense_features.DenseFeatures(ctx_cols)
ctx_input = dense_input_layer(features)
ctx_input = backend.repeat(ctx_input, tf.shape(seq_input)[1])
concatenated_input = merging.concatenate([seq_input, ctx_input])
rnn_layer = base_rnn.RNN(simple_rnn.SimpleRNNCell(10))
output = rnn_layer(concatenated_input)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
features_r = sess.run(features)
self.assertAllEqual(features_r["int_list"].dense_shape, [20, 3, 6])
output_r = sess.run(output)
self.assertAllEqual(output_r.shape, [20, 10])
@tf_test_utils.run_deprecated_v1
def test_shared_sequence_non_sequence_into_input_layer(self):
non_seq = tf.feature_column.categorical_column_with_identity(
"non_seq", num_buckets=10
)
seq = tf.feature_column.sequence_categorical_column_with_identity(
"seq", num_buckets=10
)
shared_non_seq, shared_seq = tf.feature_column.shared_embeddings(
[non_seq, seq],
dimension=4,
combiner="sum",
initializer=tf.ones_initializer(),
shared_embedding_collection_name="shared",
)
seq = tf.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=[0, 1, 2],
dense_shape=[2, 2],
)
non_seq = tf.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=[0, 1, 2],
dense_shape=[2, 2],
)
features = {"seq": seq, "non_seq": non_seq}
# Tile the context features across the sequence features
seq_input, seq_length = ksfc.SequenceFeatures([shared_seq])(features)
non_seq_input = dense_features.DenseFeatures([shared_non_seq])(features)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_seq, output_seq_length, output_non_seq = sess.run(
[seq_input, seq_length, non_seq_input]
)
self.assertAllEqual(
output_seq,
[[[1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [0, 0, 0, 0]]],
)
self.assertAllEqual(output_seq_length, [2, 1])
self.assertAllEqual(output_non_seq, [[2, 2, 2, 2], [1, 1, 1, 1]])
_SEQ_EX_PROTO = """
context {
feature {
key: "float_ctx"
value {
float_list {
value: 123.6
}
}
}
feature {
key: "int_ctx"
value {
int64_list {
value: 5
}
}
}
}
feature_lists {
feature_list {
key: "bytes_list"
value {
feature {
bytes_list {
value: "a"
}
}
feature {
bytes_list {
value: "b"
value: "c"
}
}
feature {
bytes_list {
value: "d"
value: "e"
value: "f"
value: "g"
}
}
}
}
feature_list {
key: "float_list"
value {
feature {
float_list {
value: 1.0
}
}
feature {
float_list {
value: 3.0
value: 3.0
value: 3.0
}
}
feature {
float_list {
value: 5.0
value: 5.0
value: 5.0
value: 5.0
value: 5.0
}
}
}
}
feature_list {
key: "int_list"
value {
feature {
int64_list {
value: 2
value: 2
}
}
feature {
int64_list {
value: 4
value: 4
value: 4
value: 4
}
}
feature {
int64_list {
value: 6
value: 6
value: 6
value: 6
value: 6
value: 6
}
}
}
}
}
"""
def _make_sequence_example():
example = example_pb2.SequenceExample()
return text_format.Parse(_SEQ_EX_PROTO, example)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/feature_column/sequence_feature_column_integration_test.py/0 | {
"file_path": "tf-keras/tf_keras/feature_column/sequence_feature_column_integration_test.py",
"repo_id": "tf-keras",
"token_count": 4009
} | 191 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import functools
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
def _jvp(f, primals, tangents):
"""Compute the jacobian of `f` at `primals` multiplied by `tangents`."""
with tf.autodiff.ForwardAccumulator(primals, tangents) as acc:
primals_out = f(*primals)
return primals_out, acc.jvp(
primals_out, unconnected_gradients=tf.UnconnectedGradients.ZERO
)
def _jacfwd(f, primals):
"""Compute the jacobian of `f` at `primals` using forward-mode autodiff."""
jac_flat = []
flat_primals = tf.nest.flatten(primals)
tangent_mask = [tf.zeros_like(primal) for primal in flat_primals]
for primal_index, primal in enumerate(flat_primals):
primal_vector = tf.reshape(primal, [-1])
primal_vector_length = tf.size(primal_vector)
jac_columns = []
for element_index in tf.range(primal_vector_length):
mask = tf.one_hot(element_index, primal_vector_length)
tangent_mask[primal_index] = tf.reshape(mask, tf.shape(primal))
jac_columns.append(
tf.nest.map_structure(
functools.partial(tf.reshape, shape=[-1]),
_jvp(
f,
primals,
tf.nest.pack_sequence_as(primals, tangent_mask),
)[1],
)
)
jac_flat.append(tf.stack(jac_columns, axis=1))
tangent_mask[primal_index] = tf.zeros_like(primal)
return tf.nest.pack_sequence_as(primals, jac_flat)
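# Illustrative sketch (assumed example, not part of the original test module):
# for f = lambda x: tf.stack([x[0] ** 2, x[0] * x[1]]) and
# primals = [tf.constant([3.0, 2.0])], _jacfwd builds the Jacobian one
# one-hot tangent (one column) at a time, giving [[6., 0.], [2., 3.]].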
def _grad(f, argnums=0):
"""Return a function which computes the gradient of `f`."""
def _f(*params):
with tf.GradientTape() as tape:
tape.watch(params)
primals_out = f(*params)
return tape.gradient(
primals_out,
params[argnums],
unconnected_gradients=tf.UnconnectedGradients.ZERO,
)
return _f
def _hvp(f, primals, tangents):
"""Compute a forward-over-back Hessian-vector product."""
with tf.autodiff.ForwardAccumulator(primals, tangents) as acc:
with tf.GradientTape() as tape:
tape.watch(primals)
f_out = f(*primals)
f_out.shape.assert_is_compatible_with([])
return acc.jvp(tape.gradient(f_out, primals))
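# Illustrative sketch (assumed example, not part of the original test module):
#   f = lambda x: tf.reduce_sum(x ** 3)     # scalar-valued, as required
#   primals = [tf.constant([1.0, 2.0])]
#   tangents = [tf.constant([1.0, 0.0])]
#   _hvp(f, primals, tangents)  # -> [[6., 0.]], since the Hessian of
#                               #    sum(x**3) is diag(6 * x) = diag([6., 12.])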
def _vectorize_parameters(f, params, use_pfor, dtype):
"""Loop over `params`, providing a one-hot mask to `f` for each."""
parameter_sizes = [tf.size(param) for param in params]
total_size = tf.math.add_n(parameter_sizes)
def _wrapper(index):
full_onehot = tf.one_hot(index, total_size)
split_onehot = tf.split(full_onehot, parameter_sizes)
tangents = [
tf.reshape(v, tf.shape(param))
for param, v in zip(params, split_onehot)
]
return f(tangents)
if use_pfor:
return tf.vectorized_map(_wrapper, tf.range(total_size))
else:
return tf.map_fn(_wrapper, tf.range(total_size), dtype)
def _forward_over_back_hessian(f, params, use_pfor, dtype=None):
"""Computes the full Hessian matrix for the scalar-valued f(*params).
Args:
f: A function taking `params` and returning a scalar.
params: A possibly nested structure of tensors.
use_pfor: If true, uses `tf.vectorized_map` calls instead of looping.
dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes
(e.g. `tf.float32`) matching the structure of `f`'s returns.
Returns:
A possibly nested structure of matrix slices corresponding to `params`.
Each slice has shape [P, p_s] where `p_s` is the number of parameters
(`tf.size`) in the corresponding element of `params` and `P` is the total
number of parameters (`sum_s(p_s)`). The full matrix can be obtained by
concatenating along the second axis.
"""
return _vectorize_parameters(
functools.partial(_hvp, f, params),
params,
use_pfor=use_pfor,
dtype=dtype,
)
def _test_gradients(
testcase, f, primals, order, delta=1e-3, rtol=1e-2, atol=1e-6
):
"""Tests forward/backward jacobians of `f`'s [0, `order`)-order
gradients."""
if order < 1:
raise ValueError(
f"`order` should be a positive integer, got '{order}'."
)
if order > 1:
_test_gradients(
testcase=testcase,
f=_grad(f),
primals=primals,
order=order - 1,
delta=delta,
rtol=rtol,
atol=atol,
)
sym_jac_back, num_jac = tf.test.compute_gradient(f, primals, delta=delta)
testcase.assertAllClose(num_jac, sym_jac_back, rtol=rtol, atol=atol)
sym_jac_fwd = _jacfwd(f, primals)
testcase.assertAllClose(num_jac, sym_jac_fwd, rtol=rtol, atol=atol)
# And the symbolic computations should be much closer.
testcase.assertAllClose(sym_jac_back, sym_jac_fwd)
class ForwardpropTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
[
("Dense", [[0.1]], functools.partial(keras.layers.Dense, 5)),
(
"Conv2D",
np.reshape(
np.arange(start=-1.0, stop=1.0, step=2.0 / (1 * 2 * 4 * 4)),
[1, 2, 4, 4],
),
functools.partial(keras.layers.Conv2D, 2, 2),
1e-3,
),
]
)
def testKerasLayers(self, value, op_fn, atol=1e-6):
layer = op_fn()
input_value = tf.constant(value, dtype=tf.float32)
layer.build(input_value.shape)
# Make sure the test is deterministic by avoiding random variable
# initialization.
for v in layer.trainable_variables:
v.assign(
tf.reshape(
tf.range(
-1.0,
1.0,
2.0 / tf.size(v, out_type=tf.float32),
dtype=tf.float32,
),
v.shape,
)
)
_test_gradients(
self,
layer,
[input_value],
atol=atol,
# These are linear, so second-order is pretty boring.
order=2,
)
@parameterized.named_parameters(
[
(
"NonFused",
[[0.1], [0.2], [-0.3]],
functools.partial(keras.layers.BatchNormalization, fused=False),
),
(
"Fused",
[[[[0.1, 2.0]]], [[[0.2, -3.0]]], [[[-0.3, 4.0]]]],
functools.partial(keras.layers.BatchNormalization, fused=True),
),
]
)
def testBatchNorm(self, value, op_fn):
for training in [True, False]:
layer = op_fn()
input_value = tf.constant(value, dtype=tf.float32)
layer.build(input_value.shape)
_test_gradients(
self,
functools.partial(layer, training=training),
[input_value],
order=2,
atol=1e-3,
)
@parameterized.named_parameters(
[
(
"NonFused",
[[0.1], [0.2], [-0.3]],
functools.partial(keras.layers.BatchNormalization, fused=False),
),
(
"Fused",
[[[[0.1, 2.0]]], [[[0.2, -3.0]]], [[[-0.3, 4.0]]]],
functools.partial(keras.layers.BatchNormalization, fused=True),
),
]
)
def testBatchNormLayerParamGrads(self, value, op_fn):
for training in [True, False]:
layer = op_fn()
with tf.GradientTape() as tape:
input_value = tf.constant(value, dtype=tf.float32)
tape.watch(input_value)
output = layer(input_value, training=training)
jac_back = tape.jacobian(
output, [input_value] + layer.trainable_variables
)
jac_forward = _jacfwd(
lambda *args: layer(args[0], training=training),
[input_value] + layer.trainable_variables,
)
for backward, forward in zip(jac_back, jac_forward):
forward = tf.reshape(forward, tf.shape(backward))
self.assertAllClose(backward, forward)
@parameterized.named_parameters(
[("Function", tf.function), ("NoFunction", lambda f: f)]
)
def testVariablesHVP(self, decorator):
class _Model(tf.Module):
def __init__(self):
self._first_dense = keras.layers.Dense(18)
self._conv = keras.layers.Conv2D(2, 2)
self._norm = keras.layers.BatchNormalization()
self._second_dense = keras.layers.Dense(1)
def __call__(self, x):
x = self._first_dense(x)
x = tf.nn.relu(x)
x = self._norm(x)
x = tf.nn.relu(self._conv(tf.reshape(x, [-1, 2, 3, 3])))
return self._second_dense(x)
model = _Model()
def _loss():
input_value = tf.constant([[-0.5, 1.0], [0.5, -1.0]])
target = tf.constant([[-1.0], [2.0]])
return tf.math.reduce_sum((model(input_value) - target) ** 2.0)
@decorator
def _compute_hvps():
with tf.GradientTape() as tape:
loss = _loss()
vector = tape.gradient(loss, model.trainable_variables)
variable_input_fn = lambda unused_variables: _loss()
(forward_over_back_hvp,) = _hvp(
variable_input_fn, [model.trainable_variables], [vector]
)
with tf.GradientTape(persistent=True) as tape:
tape.watch(model.trainable_variables)
loss = _loss()
first_grads = tape.gradient(loss, model.trainable_variables)
back_over_back_hvp = tape.gradient(
first_grads, model.trainable_variables, output_gradients=vector
)
return forward_over_back_hvp, back_over_back_hvp
self.assertAllClose(*_compute_hvps(), rtol=1e-5, atol=1e-5)
def testEmbeddingLayerInFunction(self):
class M(keras.Model):
def __init__(self):
super().__init__()
self.embed = keras.layers.Embedding(5, 1)
self.proj = keras.layers.Dense(1)
@tf.function
def call(self, x):
return self.proj(self.embed(x))
model = M()
model(tf.zeros([3, 3], dtype=tf.int32))
parameters = model.embed.variables
tangents = [tf.ones_like(v) for v in parameters]
with tf.autodiff.ForwardAccumulator(parameters, tangents):
# Note that forwardprop runs alongside the original computation.
# This test is just checking that it doesn't crash; correctness is
# tested in core TF.
model(tf.zeros([3, 3], dtype=tf.int32))
class HessianTests(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters([("PFor", True), ("MapFn", False)])
def testHessianOfVariables(self, use_pfor):
model = keras.layers.Dense(1)
model.build([None, 2])
def _loss(*unused_args):
input_value = tf.constant([[-0.5, 1.0], [0.5, -1.0]])
target = tf.constant([[-1.0], [2.0]])
return tf.math.reduce_sum((model(input_value) - target) ** 2.0)
kernel_hess, bias_hess = _forward_over_back_hessian(
_loss,
[model.kernel, model.bias],
use_pfor=use_pfor,
dtype=[tf.float32, tf.float32],
)
# 3 total parameters, the whole hessian is the 3x3 concatenation
self.assertEqual([3, 2, 1], kernel_hess.shape)
self.assertEqual([3, 1], bias_hess.shape)
full_hessian = tf.concat(
[tf.reshape(kernel_hess, [3, 2]), bias_hess], axis=1
)
# The full Hessian should be symmetric.
self.assertAllClose(full_hessian, tf.transpose(full_hessian))
if __name__ == "__main__":
if tf.__internal__.tf2.enabled():
tf.test.main()
| tf-keras/tf_keras/integration_test/forwardprop_test.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/forwardprop_test.py",
"repo_id": "tf-keras",
"token_count": 6538
} | 192 |
"""RetinaNet object detection model.
Adapted from https://keras.io/examples/vision/retinanet/
"""
import tensorflow as tf
from tensorflow import keras
from tf_keras.integration_test.models.input_spec import InputSpec
from tf_keras.saving import serialization_lib
NUM_CLASSES = 10
IMG_SIZE = (224, 224)
def get_data_spec(batch_size):
return (
InputSpec((batch_size,) + IMG_SIZE + (3,)),
InputSpec((batch_size, 9441, 5)),
)
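# Note (inferred, not part of the original file): the 9441 rows in the target
# spec correspond to 9 anchors per location over FPN levels P3-P7 for a
# 224x224 input: 9 * (28**2 + 14**2 + 7**2 + 4**2 + 2**2) = 9 * 1049 = 9441.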
def get_input_preprocessor():
return None
def get_backbone():
backbone = keras.applications.ResNet50(
include_top=False,
input_shape=[None, None, 3],
weights=None,
)
c3_output, c4_output, c5_output = [
backbone.get_layer(layer_name).output
for layer_name in [
"conv3_block4_out",
"conv4_block6_out",
"conv5_block3_out",
]
]
return keras.Model(
inputs=[backbone.inputs], outputs=[c3_output, c4_output, c5_output]
)
class FeaturePyramid(keras.layers.Layer):
def __init__(self, backbone=None, **kwargs):
super().__init__(name="FeaturePyramid", **kwargs)
self.backbone = backbone if backbone else get_backbone()
self.conv_c3_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c4_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c5_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c3_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c4_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c5_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c6_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.conv_c7_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.upsample_2x = keras.layers.UpSampling2D(2)
def call(self, images, training=False):
c3_output, c4_output, c5_output = self.backbone(
images, training=training
)
p3_output = self.conv_c3_1x1(c3_output)
p4_output = self.conv_c4_1x1(c4_output)
p5_output = self.conv_c5_1x1(c5_output)
p4_output = p4_output + self.upsample_2x(p5_output)
p3_output = p3_output + self.upsample_2x(p4_output)
p3_output = self.conv_c3_3x3(p3_output)
p4_output = self.conv_c4_3x3(p4_output)
p5_output = self.conv_c5_3x3(p5_output)
p6_output = self.conv_c6_3x3(c5_output)
p7_output = self.conv_c7_3x3(tf.nn.relu(p6_output))
return p3_output, p4_output, p5_output, p6_output, p7_output
def build_head(output_filters, bias_init):
head = keras.Sequential([keras.Input(shape=[None, None, 256])])
kernel_init = tf.initializers.RandomNormal(0.0, 0.01)
for _ in range(4):
head.add(
keras.layers.Conv2D(
256, 3, padding="same", kernel_initializer=kernel_init
)
)
head.add(keras.layers.ReLU())
head.add(
keras.layers.Conv2D(
output_filters,
3,
1,
padding="same",
kernel_initializer=kernel_init,
bias_initializer=bias_init,
)
)
return head
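# Note (inferred from the call sites below, not part of the original file):
# the classification head is built with 9 * num_classes output filters and
# the box head with 9 * 4, i.e. one prediction set per anchor per location.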
class RetinaNet(keras.Model):
def __init__(self, num_classes, backbone=None, **kwargs):
super().__init__(name="RetinaNet", **kwargs)
self.fpn = FeaturePyramid(backbone)
self.num_classes = num_classes
prior_probability = keras.initializers.Constant(
-tf.math.log((1 - 0.01) / 0.01)
)
self.cls_head = build_head(9 * num_classes, prior_probability)
self.box_head = build_head(9 * 4, "zeros")
def call(self, image, training=False):
features = self.fpn(image, training=training)
N = tf.shape(image)[0]
cls_outputs = []
box_outputs = []
for feature in features:
box_outputs.append(tf.reshape(self.box_head(feature), [N, -1, 4]))
cls_outputs.append(
tf.reshape(self.cls_head(feature), [N, -1, self.num_classes])
)
cls_outputs = tf.concat(cls_outputs, axis=1)
box_outputs = tf.concat(box_outputs, axis=1)
return tf.concat([box_outputs, cls_outputs], axis=-1)
def get_config(self):
return {
"num_classes": self.num_classes,
"backbone": self.fpn.backbone,
}
@classmethod
def from_config(cls, config):
backbone = serialization_lib.deserialize_keras_object(
config.pop("backbone")
)
num_classes = config["num_classes"]
retinanet = cls(num_classes=num_classes, backbone=backbone)
retinanet(tf.zeros((1, 32, 32, 3))) # Build model
return retinanet
class RetinaNetBoxLoss(keras.losses.Loss):
def __init__(self, delta):
super().__init__(reduction="none", name="RetinaNetBoxLoss")
self._delta = delta
def call(self, y_true, y_pred):
difference = y_true - y_pred
absolute_difference = tf.abs(difference)
squared_difference = difference**2
loss = tf.where(
tf.less(absolute_difference, self._delta),
0.5 * squared_difference,
absolute_difference - 0.5,
)
return tf.reduce_sum(loss, axis=-1)
def get_config(self):
return {"delta": self._delta}
class RetinaNetClassificationLoss(keras.losses.Loss):
def __init__(self, alpha, gamma):
super().__init__(reduction="none", name="RetinaNetClassificationLoss")
self._alpha = alpha
self._gamma = gamma
def call(self, y_true, y_pred):
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
labels=y_true, logits=y_pred
)
probs = tf.nn.sigmoid(y_pred)
alpha = tf.where(
tf.equal(y_true, 1.0), self._alpha, (1.0 - self._alpha)
)
pt = tf.where(tf.equal(y_true, 1.0), probs, 1 - probs)
loss = alpha * tf.pow(1.0 - pt, self._gamma) * cross_entropy
return tf.reduce_sum(loss, axis=-1)
def get_config(self):
return {"alpha": self._alpha, "gamma": self._gamma}
class RetinaNetLoss(keras.losses.Loss):
def __init__(self, num_classes=80, alpha=0.25, gamma=2.0, delta=1.0):
super().__init__(reduction="auto", name="RetinaNetLoss")
self._clf_loss = RetinaNetClassificationLoss(alpha, gamma)
self._box_loss = RetinaNetBoxLoss(delta)
self._num_classes = num_classes
self._alpha = alpha
self._gamma = gamma
self._delta = delta
def call(self, y_true, y_pred):
y_pred = tf.cast(y_pred, dtype=tf.float32)
box_labels = y_true[:, :, :4]
box_predictions = y_pred[:, :, :4]
cls_labels = tf.one_hot(
tf.cast(y_true[:, :, 4], dtype=tf.int32),
depth=self._num_classes,
dtype=tf.float32,
)
cls_predictions = y_pred[:, :, 4:]
positive_mask = tf.cast(
tf.greater(y_true[:, :, 4], -1.0), dtype=tf.float32
)
ignore_mask = tf.cast(tf.equal(y_true[:, :, 4], -2.0), dtype=tf.float32)
clf_loss = self._clf_loss(cls_labels, cls_predictions)
box_loss = self._box_loss(box_labels, box_predictions)
clf_loss = tf.where(tf.equal(ignore_mask, 1.0), 0.0, clf_loss)
box_loss = tf.where(tf.equal(positive_mask, 1.0), box_loss, 0.0)
normalizer = tf.reduce_sum(positive_mask, axis=-1)
clf_loss = tf.math.divide_no_nan(
tf.reduce_sum(clf_loss, axis=-1), normalizer
)
box_loss = tf.math.divide_no_nan(
tf.reduce_sum(box_loss, axis=-1), normalizer
)
loss = clf_loss + box_loss
return loss
def get_config(self):
return {
"num_classes": self._num_classes,
"alpha": self._alpha,
"gamma": self._gamma,
"delta": self._delta,
}
def get_model(
build=False, compile=False, jit_compile=False, include_preprocessing=True
):
resnet50_backbone = get_backbone()
loss_fn = RetinaNetLoss(NUM_CLASSES)
model = RetinaNet(NUM_CLASSES, resnet50_backbone)
if compile:
learning_rates = [2.5e-06, 0.000625, 0.00125, 0.0025, 0.00025, 2.5e-05]
learning_rate_boundaries = [125, 250, 500, 240000, 360000]
learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=learning_rate_boundaries, values=learning_rates
)
optimizer = keras.optimizers.SGD(
learning_rate=learning_rate_fn, momentum=0.9
)
model.compile(
loss=loss_fn, optimizer=optimizer, jit_compile=jit_compile
)
return model
def get_custom_objects():
return {
"RetinaNetLoss": RetinaNetLoss,
"RetinaNetClassificationLoss": RetinaNetClassificationLoss,
"RetinaNetBoxLoss": RetinaNetBoxLoss,
"RetinaNet": RetinaNet,
"FeaturePyramid": FeaturePyramid,
}
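# Illustrative usage sketch (assumed, not part of the original file):
#   model = get_model(compile=True)
#   images = tf.zeros((2,) + IMG_SIZE + (3,))
#   preds = model(images)  # shape [2, 9441, 4 + NUM_CLASSES], i.e. [2, 9441, 14]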
| tf-keras/tf_keras/integration_test/models/retinanet.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/models/retinanet.py",
"repo_id": "tf-keras",
"token_count": 4437
} | 193 |
"""Test Model.fit with a PyMetric."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import Sequential
from tf_keras import layers
from tf_keras import losses
from tf_keras import metrics
from tf_keras.testing_infra import test_combinations
def get_dataset(num_batches=5, batch_size=2):
x = tf.random.uniform((num_batches * batch_size, 100))
y = tf.random.uniform((num_batches * batch_size, 2))
dataset = (
tf.data.Dataset.from_tensor_slices((x, y))
.prefetch(batch_size * 2)
.batch(batch_size)
)
return dataset
class CountingPyMetric(metrics.PyMetric):
"""A test-only PyMetric which simply counts how many results it's seen."""
def update_state(self, y_true, y_pred, sample_weight=None):
self.y_pred.append(y_pred)
def reset_state(self):
self.y_pred = []
def result(self):
return len(self.y_pred)
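# Illustrative behaviour sketch (added, not part of the original test): after
# N calls to update_state(...), result() returns N; reset_state() clears the
# stored predictions so result() returns 0 again.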
class PyMetricTest(test_combinations.TestCase):
@parameterized.named_parameters(("eager", True), ("graph", False))
def test_fit(self, run_eagerly):
num_batches = 5
dataset = get_dataset(num_batches=num_batches)
counting_metric = CountingPyMetric()
model = Sequential(layers.Dense(2))
model.compile(
loss=losses.BinaryCrossentropy(),
metrics=[counting_metric],
run_eagerly=run_eagerly,
)
model.fit(dataset, epochs=1)
self.assertEqual(counting_metric.result(), num_batches)
@parameterized.named_parameters(("eager", True), ("graph", False))
def test_evaluate(self, run_eagerly):
num_batches = 5
dataset = get_dataset(num_batches=num_batches)
model = Sequential(layers.Dense(2))
model.compile(
loss=losses.BinaryCrossentropy(),
metrics=[CountingPyMetric()],
run_eagerly=run_eagerly,
)
loss, count = model.evaluate(dataset)
self.assertEqual(count, num_batches)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/integration_test/py_metric_test.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/py_metric_test.py",
"repo_id": "tf-keras",
"token_count": 904
} | 194 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exponential Linear Unit activation layer."""
from tf_keras import backend
from tf_keras.engine.base_layer import Layer
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.ELU")
class ELU(Layer):
"""Exponential Linear Unit.
It follows:
```
f(x) = alpha * (exp(x) - 1.) for x < 0
f(x) = x for x >= 0
```
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Args:
alpha: Scale for the negative factor.
"""
def __init__(self, alpha=1.0, **kwargs):
super().__init__(**kwargs)
if alpha is None:
raise ValueError(
"Alpha of an ELU layer cannot be None, expecting a float. "
f"Received: {alpha}"
)
self.supports_masking = True
self.alpha = backend.cast_to_floatx(alpha)
def call(self, inputs):
return backend.elu(inputs, self.alpha)
def get_config(self):
config = {"alpha": float(self.alpha)}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
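# Illustrative usage sketch (added, not part of the original module):
#   layer = ELU(alpha=1.0)
#   layer(tf.constant([-1.0, 0.0, 2.0]))
#   # -> approximately [-0.632, 0.0, 2.0], since elu(-1) = alpha * (exp(-1) - 1)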
| tf-keras/tf_keras/layers/activation/elu.py/0 | {
"file_path": "tf-keras/tf_keras/layers/activation/elu.py",
"repo_id": "tf-keras",
"token_count": 775
} | 195 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Attention layer that can be used in sequence DNN/CNN models.
This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2.
Attention is formed by three tensors: Query, Key and Value.
"""
import tensorflow.compat.v2 as tf
from tf_keras.layers.attention.base_dense_attention import BaseDenseAttention
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Attention")
class Attention(BaseDenseAttention):
"""Dot-product attention layer, a.k.a. Luong-style attention.
Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor
of shape `[batch_size, Tv, dim]` and `key` tensor of shape
`[batch_size, Tv, dim]`. The calculation follows the steps:
1. Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot
product: `scores = tf.matmul(query, key, transpose_b=True)`.
2. Use scores to calculate a distribution with shape
`[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
3. Use `distribution` to create a linear combination of `value` with
shape `[batch_size, Tq, dim]`:
`return tf.matmul(distribution, value)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to 0.0.
score_mode: Function to use to compute attention scores, one of
`{"dot", "concat"}`. `"dot"` refers to the dot product between the
query and key vectors. `"concat"` refers to the hyperbolic tangent
of the concatenation of the query and key vectors.
Call arguments:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
return_attention_scores: bool, if `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past.
Defaults to `False`.
Output:
Attention outputs of shape `[batch_size, Tq, dim]`.
[Optional] Attention scores after masking and softmax with shape
`[batch_size, Tq, Tv]`.
The meaning of `query`, `value` and `key` depend on the application. In the
case of text similarity, for example, `query` is the sequence embeddings of
the first piece of text and `value` is the sequence embeddings of the second
piece of text. `key` is usually the same tensor as `value`.
Here is a code example for using `Attention` in a CNN+Attention network:
```python
# Variable-length int sequences.
query_input = tf.keras.Input(shape=(None,), dtype='int32')
value_input = tf.keras.Input(shape=(None,), dtype='int32')
# Embedding lookup.
token_embedding = tf.keras.layers.Embedding(input_dim=1000, output_dim=64)
# Query embeddings of shape [batch_size, Tq, dimension].
query_embeddings = token_embedding(query_input)
# Value embeddings of shape [batch_size, Tv, dimension].
value_embeddings = token_embedding(value_input)
# CNN layer.
cnn_layer = tf.keras.layers.Conv1D(
filters=100,
kernel_size=4,
# Use 'same' padding so outputs have the same shape as inputs.
padding='same')
# Query encoding of shape [batch_size, Tq, filters].
query_seq_encoding = cnn_layer(query_embeddings)
# Value encoding of shape [batch_size, Tv, filters].
value_seq_encoding = cnn_layer(value_embeddings)
# Query-value attention of shape [batch_size, Tq, filters].
query_value_attention_seq = tf.keras.layers.Attention()(
[query_seq_encoding, value_seq_encoding])
# Reduce over the sequence axis to produce encodings of shape
# [batch_size, filters].
query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
query_seq_encoding)
query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
query_value_attention_seq)
# Concatenate query and document encodings to produce a DNN input layer.
input_layer = tf.keras.layers.Concatenate()(
[query_encoding, query_value_attention])
# Add DNN layers, and create Model.
# ...
```
"""
def __init__(self, use_scale=False, score_mode="dot", **kwargs):
super().__init__(**kwargs)
self.use_scale = use_scale
self.score_mode = score_mode
if self.score_mode not in ["dot", "concat"]:
raise ValueError(
f"Received: score_mode={score_mode}. Acceptable values "
'are: ["dot", "concat"]'
)
def build(self, input_shape):
"""Creates variable when `use_scale` is True or `score_mode` is
`concat`."""
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=(),
initializer="ones",
dtype=self.dtype,
trainable=True,
)
else:
self.scale = None
if self.score_mode == "concat":
self.concat_score_weight = self.add_weight(
name="concat_score_weight",
shape=(),
initializer="ones",
dtype=self.dtype,
trainable=True,
)
else:
self.concat_score_weight = None
super().build(input_shape)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a query-key dot product.
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
if self.score_mode == "dot":
scores = tf.matmul(query, key, transpose_b=True)
if self.scale is not None:
scores *= self.scale
elif self.score_mode == "concat":
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = tf.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = tf.expand_dims(key, axis=-3)
if self.scale is not None:
scores = self.concat_score_weight * tf.reduce_sum(
tf.tanh(self.scale * (q_reshaped + k_reshaped)), axis=-1
)
else:
scores = self.concat_score_weight * tf.reduce_sum(
tf.tanh(q_reshaped + k_reshaped), axis=-1
)
return scores
def get_config(self):
config = {"use_scale": self.use_scale, "score_mode": self.score_mode}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
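# Illustrative shape check (assumed usage, not part of the original module):
#   query = tf.random.normal((2, 4, 8))   # [batch_size, Tq, dim]
#   value = tf.random.normal((2, 6, 8))   # [batch_size, Tv, dim]
#   Attention()([query, value]).shape     # -> (2, 4, 8)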
| tf-keras/tf_keras/layers/attention/attention.py/0 | {
"file_path": "tf-keras/tf_keras/layers/attention/attention.py",
"repo_id": "tf-keras",
"token_count": 3610
} | 196 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras 3D transposed convolution layer (sometimes called deconvolution)."""
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.dtensor import utils
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.convolutional.conv3d import Conv3D
from tf_keras.utils import conv_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.layers.Conv3DTranspose", "keras.layers.Convolution3DTranspose"
)
class Conv3DTranspose(Conv3D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3
channels if `data_format="channels_last"`.
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth, height
and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros
evenly to the left/right or up/down of the input such that output has
the same height/width dimension as the input.
output_padding: An integer or tuple/list of 3 integers,
specifying the amount of padding along the depth, height, and
width.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, depth, height, width)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to 'channels_last'.
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see `keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector
(see `keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector
(see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")
(see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix
(see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector
(see `keras.constraints`).
Input shape:
5D tensor with shape:
`(batch_size, channels, depth, rows, cols)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch_size, depth, rows, cols, channels)` if
data_format='channels_last'.
Output shape:
5D tensor with shape:
`(batch_size, filters, new_depth, new_rows, new_cols)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch_size, new_depth, new_rows, new_cols, filters)` if
data_format='channels_last'.
`depth` and `rows` and `cols` values might have changed due to padding.
If `output_padding` is specified::
```
new_depth = ((depth - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
output_padding[0])
new_rows = ((rows - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
output_padding[1])
new_cols = ((cols - 1) * strides[2] + kernel_size[2] - 2 * padding[2] +
output_padding[2])
```
Returns:
A tensor of rank 5 representing
`activation(conv3dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
@utils.allow_initializer_layout
def __init__(
self,
filters,
kernel_size,
strides=(1, 1, 1),
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs,
)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 3, "output_padding", allow_zero=True
)
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError(
"Strides must be greater than output padding. "
f"Received strides={self.strides}, "
f"output_padding={self.output_padding}."
)
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if len(input_shape) != 5:
raise ValueError(
"Inputs should have rank 5. "
f"Received input_shape={input_shape}."
)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError(
"The channel dimension of the inputs "
"to `Conv3DTranspose` should be defined. "
f"The input_shape received is {input_shape}, "
f"where axis {channel_axis} (0-based) "
"is the channel dimension, which found to be `None`."
)
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})
self.kernel = self.add_weight(
"kernel",
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype,
)
if self.use_bias:
self.bias = self.add_weight(
"bias",
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype,
)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = tf.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == "channels_first":
d_axis, h_axis, w_axis = 2, 3, 4
else:
d_axis, h_axis, w_axis = 1, 2, 3
depth = inputs_shape[d_axis]
height = inputs_shape[h_axis]
width = inputs_shape[w_axis]
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_depth = conv_utils.deconv_output_length(
depth,
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d,
)
out_height = conv_utils.deconv_output_length(
height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
)
out_width = conv_utils.deconv_output_length(
width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
)
if self.data_format == "channels_first":
output_shape = (
batch_size,
self.filters,
out_depth,
out_height,
out_width,
)
strides = (1, 1, stride_d, stride_h, stride_w)
else:
output_shape = (
batch_size,
out_depth,
out_height,
out_width,
self.filters,
)
strides = (1, stride_d, stride_h, stride_w, 1)
output_shape_tensor = tf.stack(output_shape)
outputs = tf.nn.conv3d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides,
data_format=conv_utils.convert_data_format(
self.data_format, ndim=5
),
padding=self.padding.upper(),
)
if not tf.executing_eagerly() and inputs.shape.rank:
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = tf.nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(
self.data_format, ndim=4
),
)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == "channels_first":
c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4
else:
c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[d_axis] = conv_utils.deconv_output_length(
output_shape[d_axis],
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d,
)
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
)
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
)
return tf.TensorShape(output_shape)
def get_config(self):
config = super().get_config()
config.pop("dilation_rate")
config["output_padding"] = self.output_padding
return config
# Alias
Convolution3DTranspose = Conv3DTranspose
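# Illustrative sketch of the output-shape arithmetic documented above
# (hypothetical shapes; with `padding="valid"` and no `output_padding`,
# each spatial dimension grows as (dim - 1) * stride + kernel_size).
if __name__ == "__main__":
    layer = Conv3DTranspose(filters=2, kernel_size=3, strides=2)
    # A 4x4x4 input volume grows to (4 - 1) * 2 + 3 = 9 per spatial axis.
    print(layer.compute_output_shape((None, 4, 4, 4, 1)))
    # -> (None, 9, 9, 9, 2)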
| tf-keras/tf_keras/layers/convolutional/conv3d_transpose.py/0 | {
"file_path": "tf-keras/tf_keras/layers/convolutional/conv3d_transpose.py",
"repo_id": "tf-keras",
"token_count": 6682
} | 197 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding layer."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.dtensor import utils
from tf_keras.engine import base_layer_utils
from tf_keras.engine.base_layer import Layer
from tf_keras.saving.serialization_lib import deserialize_keras_object
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Embedding")
class Embedding(Layer):
"""Turns positive integers (indexes) into dense vectors of fixed size.
e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`
This layer can only be used on positive integer inputs of a fixed range. The
`tf.keras.layers.TextVectorization`, `tf.keras.layers.StringLookup`,
and `tf.keras.layers.IntegerLookup` preprocessing layers can help prepare
inputs for an `Embedding` layer.
This layer accepts `tf.Tensor`, `tf.RaggedTensor` and `tf.SparseTensor`
input.
Example:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Embedding(1000, 64, input_length=10))
>>> # The model will take as input an integer matrix of size (batch,
>>> # input_length), and the largest integer (i.e. word index) in the input
>>> # should be no larger than 999 (vocabulary size).
>>> # Now model.output_shape is (None, 10, 64), where `None` is the batch
>>> # dimension.
>>> input_array = np.random.randint(1000, size=(32, 10))
>>> model.compile('rmsprop', 'mse')
>>> output_array = model.predict(input_array)
>>> print(output_array.shape)
(32, 10, 64)
Args:
input_dim: Integer. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: Integer. Dimension of the dense embedding.
embeddings_initializer: Initializer for the `embeddings`
matrix (see `keras.initializers`).
embeddings_regularizer: Regularizer function applied to
the `embeddings` matrix (see `keras.regularizers`).
embeddings_constraint: Constraint function applied to
the `embeddings` matrix (see `keras.constraints`).
mask_zero: Boolean, whether or not the input value 0 is a special
"padding" value that should be masked out. This is useful when using
recurrent layers which may take variable length input. If this is
`True`, then all subsequent layers in the model need to support masking
or an exception will be raised. If mask_zero is set to True, as a
consequence, index 0 cannot be used in the vocabulary (input_dim should
equal size of vocabulary + 1).
input_length: Length of input sequences, when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
sparse: If True, calling this layer returns a `tf.SparseTensor`. If False,
the layer returns a dense `tf.Tensor`. For an entry with no features in
a sparse tensor (entry with value 0), the embedding vector of index 0 is
returned by default.
Input shape:
2D tensor with shape: `(batch_size, input_length)`.
Output shape:
3D tensor with shape: `(batch_size, input_length, output_dim)`.
**Note on variable placement:**
By default, if a GPU is available, the embedding matrix will be placed on
the GPU. This achieves the best performance, but it might cause issues:
- You may be using an optimizer that does not support sparse GPU kernels.
In this case you will see an error upon training your model.
- Your embedding matrix may be too large to fit on your GPU. In this case
you will see an Out Of Memory (OOM) error.
In such cases, you should place the embedding matrix on the CPU memory.
You can do so with a device scope, as such:
```python
with tf.device('cpu:0'):
embedding_layer = Embedding(...)
embedding_layer.build()
```
The pre-built `embedding_layer` instance can then be added to a `Sequential`
model (e.g. `model.add(embedding_layer)`), called in a Functional model
(e.g. `x = embedding_layer(x)`), or used in a subclassed model.
"""
@utils.allow_initializer_layout
def __init__(
self,
input_dim,
output_dim,
embeddings_initializer="uniform",
embeddings_regularizer=None,
activity_regularizer=None,
embeddings_constraint=None,
mask_zero=False,
input_length=None,
sparse=False,
**kwargs,
):
if "input_shape" not in kwargs:
if input_length:
kwargs["input_shape"] = (input_length,)
else:
kwargs["input_shape"] = (None,)
if input_dim <= 0 or output_dim <= 0:
raise ValueError(
"Both `input_dim` and `output_dim` should be positive, "
f"Received input_dim = {input_dim} "
f"and output_dim = {output_dim}"
)
if (
not base_layer_utils.v2_dtype_behavior_enabled()
and "dtype" not in kwargs
):
# In TF1, the dtype defaults to the input dtype which is typically
# int32, so explicitly set it to floatx
kwargs["dtype"] = backend.floatx()
        # We set autocast to False, as we do not want to cast floating-point
# inputs to self.dtype. In call(), we cast to int32, and casting to
# self.dtype before casting to int32 might cause the int32 values to be
# different due to a loss of precision.
kwargs["autocast"] = False
use_one_hot_matmul = kwargs.pop("use_one_hot_matmul", False)
super().__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.embeddings_initializer = initializers.get(embeddings_initializer)
self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.embeddings_constraint = constraints.get(embeddings_constraint)
self.mask_zero = mask_zero
self.supports_masking = mask_zero
self.input_length = input_length
self.sparse = sparse
if self.sparse and self.mask_zero:
raise ValueError(
"`mask_zero` cannot be enabled when "
"`tf.keras.layers.Embedding` is used with `tf.SparseTensor` "
"input."
)
# Make this flag private and do not serialize it for now.
# It will be part of the public API after further testing.
self._use_one_hot_matmul = use_one_hot_matmul
@tf_utils.shape_type_conversion
def build(self, input_shape=None):
self.embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer,
name="embeddings",
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint,
experimental_autocast=False,
)
self.built = True
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return tf.not_equal(inputs, 0)
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.input_length is None:
return input_shape + (self.output_dim,)
else:
# input_length can be tuple if input is 3D or higher
if isinstance(self.input_length, (list, tuple)):
in_lens = list(self.input_length)
else:
in_lens = [self.input_length]
if len(in_lens) != len(input_shape) - 1:
raise ValueError(
f'"input_length" is {self.input_length}, but received '
f"input has shape {input_shape}"
)
else:
for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
if s1 is not None and s2 is not None and s1 != s2:
raise ValueError(
f'"input_length" is {self.input_length}, but '
f"received input has shape {input_shape}"
)
elif s1 is None:
in_lens[i] = s2
return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)
def call(self, inputs):
dtype = backend.dtype(inputs)
if dtype != "int32" and dtype != "int64":
inputs = tf.cast(inputs, "int32")
if isinstance(inputs, tf.sparse.SparseTensor):
if self.sparse:
# get sparse embedding values
embedding_values = tf.nn.embedding_lookup(
params=self.embeddings, ids=inputs.values
)
embedding_values = tf.reshape(embedding_values, [-1])
# get sparse embedding indices
indices_values_embed_axis = tf.range(self.output_dim)
repeat_times = [inputs.indices.shape[0]]
indices_values_embed_axis = tf.expand_dims(
tf.tile(indices_values_embed_axis, repeat_times), -1
)
indices_values_embed_axis = tf.cast(
indices_values_embed_axis, dtype=tf.int64
)
current_indices = tf.repeat(
inputs.indices, [self.output_dim], axis=0
)
new_indices = tf.concat(
[current_indices, indices_values_embed_axis], 1
)
new_shape = tf.concat(
[tf.cast(inputs.shape, dtype=tf.int64), [self.output_dim]],
axis=-1,
)
out = tf.SparseTensor(
indices=new_indices,
values=embedding_values,
dense_shape=new_shape,
)
else:
sparse_inputs_expanded = tf.sparse.expand_dims(inputs, axis=-1)
out = tf.nn.safe_embedding_lookup_sparse(
embedding_weights=self.embeddings,
sparse_ids=sparse_inputs_expanded,
default_id=0,
)
elif self._use_one_hot_matmul:
# Note that we change the dtype of the one_hot to be same as the
# weight tensor, since the input data are usually ints, and weights
# are floats. The nn.embedding_lookup support ids as ints, but
# the one_hot matmul need both inputs and weights to be same dtype.
one_hot_data = tf.one_hot(
inputs, depth=self.input_dim, dtype=self.dtype
)
out = tf.matmul(one_hot_data, self.embeddings)
else:
out = tf.nn.embedding_lookup(self.embeddings, inputs)
if self.sparse and not isinstance(out, tf.SparseTensor):
out = tf.sparse.from_dense(out)
if (
self._dtype_policy.compute_dtype
!= self._dtype_policy.variable_dtype
):
# Instead of casting the variable as in most layers, cast the
# output, as this is mathematically equivalent but is faster.
out = tf.cast(out, self._dtype_policy.compute_dtype)
return out
def get_config(self):
config = {
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"embeddings_initializer": initializers.serialize(
self.embeddings_initializer
),
"embeddings_regularizer": regularizers.serialize(
self.embeddings_regularizer
),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"embeddings_constraint": constraints.serialize(
self.embeddings_constraint
),
"mask_zero": self.mask_zero,
"input_length": self.input_length,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
# Handles deserialization of tensors passed as dimension arguments
if isinstance(config["input_dim"], dict):
config["input_dim"] = deserialize_keras_object(config["input_dim"])
if isinstance(config["output_dim"], dict):
config["output_dim"] = deserialize_keras_object(
config["output_dim"]
)
return cls(**config)
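# Illustrative sketch of the `mask_zero` behaviour described in the class
# docstring (vocabulary size and token ids are arbitrary example values).
if __name__ == "__main__":
    layer = Embedding(input_dim=10, output_dim=4, mask_zero=True)
    ids = tf.constant([[1, 2, 0, 0]])  # trailing zeros act as padding
    print(layer(ids).shape)            # (1, 4, 4)
    print(layer.compute_mask(ids))     # [[True, True, False, False]]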
| tf-keras/tf_keras/layers/core/embedding.py/0 | {
"file_path": "tf-keras/tf_keras/layers/core/embedding.py",
"repo_id": "tf-keras",
"token_count": 5977
} | 198 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer that computes the minimum (element-wise) of several inputs."""
import tensorflow.compat.v2 as tf
from tf_keras.layers.merging.base_merge import _Merge
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Minimum")
class Minimum(_Merge):
"""Layer that computes the minimum (element-wise) a list of inputs.
It takes as input a list of tensors, all of the same shape, and returns
a single tensor (also of the same shape).
>>> tf.keras.layers.Minimum()([np.arange(5).reshape(5, 1),
... np.arange(5, 10).reshape(5, 1)])
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[0],
[1],
[2],
[3],
[4]])>
>>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> minned = tf.keras.layers.Minimum()([x1, x2])
>>> minned.shape
TensorShape([5, 8])
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = tf.minimum(output, inputs[i])
return output
@keras_export("keras.layers.minimum")
def minimum(inputs, **kwargs):
"""Functional interface to the `Minimum` layer.
Args:
inputs: A list of input tensors.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise minimum of the inputs.
"""
return Minimum(**kwargs)(inputs)
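# Illustrative sketch of the functional interface (example tensors only).
if __name__ == "__main__":
    out = minimum([tf.constant([[1.0, 5.0]]), tf.constant([[3.0, 2.0]])])
    print(out.numpy())  # [[1. 2.]]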
| tf-keras/tf_keras/layers/merging/minimum.py/0 | {
"file_path": "tf-keras/tf_keras/layers/merging/minimum.py",
"repo_id": "tf-keras",
"token_count": 820
} | 199 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit Normalization layer."""
import tensorflow.compat.v2 as tf
from tf_keras.engine import base_layer
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.UnitNormalization", v1=[])
class UnitNormalization(base_layer.Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has a L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = tf.constant(np.arange(6).reshape(2, 3), dtype=tf.float32)
>>> normalized_data = tf.keras.layers.UnitNormalization()(data)
>>> print(tf.reduce_sum(normalized_data[0, :] ** 2).numpy())
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
def build(self, input_shape):
self.axis = tf_utils.validate_axis(self.axis, input_shape)
def call(self, inputs):
inputs = tf.cast(inputs, self.compute_dtype)
return tf.linalg.l2_normalize(inputs, axis=self.axis)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
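# Illustrative sketch: the layer's output matches dividing each row by its
# own L2 norm (example values chosen so the row norms are 5 and 10).
if __name__ == "__main__":
    data = tf.constant([[3.0, 4.0], [6.0, 8.0]])
    normalized = UnitNormalization()(data)
    manual = data / tf.norm(data, axis=-1, keepdims=True)
    print(normalized.numpy())  # [[0.6 0.8] [0.6 0.8]]
    print(float(tf.reduce_max(tf.abs(normalized - manual))))  # ~0.0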
| tf-keras/tf_keras/layers/normalization/unit_normalization.py/0 | {
"file_path": "tf-keras/tf_keras/layers/normalization/unit_normalization.py",
"repo_id": "tf-keras",
"token_count": 961
} | 200 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of categorical hash columns with dense
inputs."""
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.preprocessing import hashing
from tf_keras.layers.preprocessing.benchmarks import (
feature_column_benchmark as fc_bm,
)
# isort: off
from tensorflow.python.eager.def_function import (
function as tf_function,
)
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
"""Benchmark a variable-length embedding."""
# Data and constants.
num_buckets = 10000
vocab = fc_bm.create_vocabulary(32768)
data = fc_bm.create_string_data(
max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.0
)
# TF-Keras implementation
model = keras.Sequential()
model.add(keras.Input(shape=(max_length,), name="data", dtype=tf.string))
model.add(hashing.Hashing(num_buckets))
# FC implementation
fc = tf.feature_column.sequence_categorical_column_with_hash_bucket(
"data", num_buckets
)
# Wrap the FC implementation in a tf.function for a fair comparison
@tf_function()
def fc_fn(tensors):
fc.transform_feature(
tf.__internal__.feature_column.FeatureTransformationCache(tensors),
None,
)
# Benchmark runs
keras_data = {
"data": data.to_tensor(default_value="", shape=(batch_size, max_length))
}
k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
fc_data = {
"data": data.to_tensor(default_value="", shape=(batch_size, max_length))
}
fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
"""Benchmark the layer forward pass."""
def benchmark_layer(self):
for batch in BATCH_SIZES:
name = f"hash|dense|batch_{batch}"
k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
self.report(name, k_time, f_time, NUM_REPEATS)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/benchmarks/category_hash_dense_benchmark.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/category_hash_dense_benchmark.py",
"repo_id": "tf-keras",
"token_count": 1053
} | 201 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for TF-Keras text vectorization preprocessing layer's adapt method.
"""
import os
import random
import string
import time
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.preprocessing import index_lookup
# tensor_gen creates random sequences of two-character ASCII tokens (both
# lowercase and uppercase). The number of unique strings is ~2,700.
def tensor_gen(batch, num_elements):
data = []
for _ in range(batch):
batch_element = []
for _ in range(num_elements - 1):
tok = "".join(random.choice(string.ascii_letters) for i in range(2))
batch_element.append(tok)
batch_element.append("") # Explicitly test the empty string.
data.append(batch_element)
return tf.constant(data)
def get_vocab():
vocab = list(
set([a + b for a in string.ascii_letters for b in string.ascii_letters])
)
vocab.sort()
return vocab
# This class uses TestCase for get_temp_dir().
class BenchmarkLookup(tf.test.Benchmark):
"""Benchmark the index lookup layer's forward pass."""
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with tf.io.gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def run_numpy_implementation(self, data, vocab):
"""Test the python implementation."""
input_t = keras.Input(shape=(), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="OOV",
dtype=tf.string,
)
out_t = layer(input_t)
model = keras.Model(input_t, out_t)
num_repeats = 5
starts = []
ends = []
_ = model(data)
for _ in range(num_repeats):
starts.append(time.time())
out = model(data)
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts))
return avg_time, out
def bm_adapt_implementation(self, num_elements, batch_size):
"""Test the KPL adapt implementation."""
vocab = get_vocab()
vocab_file = self._write_to_temp_file("vocab", vocab)
vocabulary_initializer = tf.lookup.TextFileInitializer(
filename=vocab_file,
key_dtype=tf.string,
key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
value_dtype=tf.int64,
value_index=tf.lookup.TextFileIndex.LINE_NUMBER,
value_index_offset=2,
)
input_t = keras.Input(shape=(), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocabulary_initializer,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="OOV",
dtype=tf.string,
)
out_t = layer(input_t)
model = keras.Model(input_t, out_t)
num_repeats = 5
starts = []
ends = []
data = tensor_gen(batch_size, num_elements)
_ = model(data)
for _ in range(num_repeats):
starts.append(time.time())
_ = model(data)
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts))
baseline, _ = self.run_numpy_implementation(data, vocab)
extras = {
"numpy implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100,
}
name = "index_lookup_forward|%s_elements|batch_%s" % (
num_elements,
batch_size,
)
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name
)
def benchmark_vocab_size_by_batch(self):
for tensor_size in [100, 1000, 10000]:
for batch in [1, 16, 2048]:
self.bm_adapt_implementation(tensor_size, batch)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/benchmarks/index_lookup_forward_benchmark.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/index_lookup_forward_benchmark.py",
"repo_id": "tf-keras",
"token_count": 2191
} | 202 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing layers."""
import functools
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.engine import sequential
from tf_keras.layers.preprocessing import image_preprocessing
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# isort: off
from tensorflow.python.ops import stateless_random_ops
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class ResizingTest(test_combinations.TestCase):
def _run_test(self, kwargs, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs.update({"height": expected_height, "width": expected_width})
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.Resizing,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(
None,
expected_height,
expected_width,
channels,
),
)
@parameterized.named_parameters(
("down_sample_bilinear_2_by_2", {"interpolation": "bilinear"}, 2, 2),
("down_sample_bilinear_3_by_2", {"interpolation": "bilinear"}, 3, 2),
("down_sample_nearest_2_by_2", {"interpolation": "nearest"}, 2, 2),
("down_sample_nearest_3_by_2", {"interpolation": "nearest"}, 3, 2),
("down_sample_area_2_by_2", {"interpolation": "area"}, 2, 2),
("down_sample_area_3_by_2", {"interpolation": "area"}, 3, 2),
(
"down_sample_crop_to_aspect_ratio_3_by_2",
{
"interpolation": "bilinear",
"crop_to_aspect_ratio": True,
},
3,
2,
),
)
def test_down_sampling(self, kwargs, expected_height, expected_width):
self._run_test(kwargs, expected_height, expected_width)
@parameterized.named_parameters(
("up_sample_bilinear_10_by_12", {"interpolation": "bilinear"}, 10, 12),
("up_sample_bilinear_12_by_12", {"interpolation": "bilinear"}, 12, 12),
("up_sample_nearest_10_by_12", {"interpolation": "nearest"}, 10, 12),
("up_sample_nearest_12_by_12", {"interpolation": "nearest"}, 12, 12),
("up_sample_area_10_by_12", {"interpolation": "area"}, 10, 12),
("up_sample_area_12_by_12", {"interpolation": "area"}, 12, 12),
(
"up_sample_crop_to_aspect_ratio_12_by_14",
{
"interpolation": "bilinear",
"crop_to_aspect_ratio": True,
},
12,
14,
),
)
def test_up_sampling(self, kwargs, expected_height, expected_width):
self._run_test(kwargs, expected_height, expected_width)
def test_down_sampling_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(
dtype
)
layer = image_preprocessing.Resizing(
height=2, width=2, interpolation="nearest"
)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray([[5, 7], [13, 15]]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_up_sampling_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(
dtype
)
layer = image_preprocessing.Resizing(
height=4, width=4, interpolation="nearest"
)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray(
[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]
).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 4, 4, 1))
self.assertAllEqual(expected_output, output_image)
@parameterized.named_parameters(
("reshape_bilinear_10_by_4", {"interpolation": "bilinear"}, 10, 4)
)
def test_reshaping(self, kwargs, expected_height, expected_width):
self._run_test(kwargs, expected_height, expected_width)
def test_invalid_interpolation(self):
with self.assertRaises(NotImplementedError):
image_preprocessing.Resizing(5, 5, "invalid_interpolation")
def test_config_with_custom_name(self):
layer = image_preprocessing.Resizing(5, 5, name="image_preproc")
config = layer.get_config()
layer_1 = image_preprocessing.Resizing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_crop_to_aspect_ratio(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(
"float32"
)
layer = image_preprocessing.Resizing(
4, 2, crop_to_aspect_ratio=True
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[1, 2],
[5, 6],
[9, 10],
[13, 14],
]
).astype("float32")
expected_output = np.reshape(expected_output, (1, 4, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (4, 4, 1)).astype(
"float32"
)
layer = image_preprocessing.Resizing(2, 2, interpolation="nearest")
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 7],
[13, 15],
]
).astype("float32")
expected_output = np.reshape(expected_output, (2, 2, 1))
self.assertAllEqual(expected_output, output_image)
@parameterized.named_parameters(
("crop_to_aspect_ratio_false", False),
("crop_to_aspect_ratio_true", True),
)
def test_ragged_image(self, crop_to_aspect_ratio):
with test_utils.use_gpu():
inputs = tf.ragged.constant(
[
np.ones((8, 8, 1)),
np.ones((8, 4, 1)),
np.ones((4, 8, 1)),
np.ones((2, 2, 1)),
],
dtype="float32",
)
layer = image_preprocessing.Resizing(
2,
2,
interpolation="nearest",
crop_to_aspect_ratio=crop_to_aspect_ratio,
)
outputs = layer(inputs)
expected_output = [
[[[1.0], [1.0]], [[1.0], [1.0]]],
[[[1.0], [1.0]], [[1.0], [1.0]]],
[[[1.0], [1.0]], [[1.0], [1.0]]],
[[[1.0], [1.0]], [[1.0], [1.0]]],
]
self.assertIsInstance(outputs, tf.Tensor)
self.assertNotIsInstance(outputs, tf.RaggedTensor)
self.assertAllEqual(expected_output, outputs)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.Resizing(2, 2)
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.Resizing(2, 2, dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@parameterized.named_parameters(
("batch_crop_to_aspect_ratio", True, True),
("batch_dont_crop_to_aspect_ratio", False, True),
("single_sample_crop_to_aspect_ratio", True, False),
("single_sample_dont_crop_to_aspect_ratio", False, False),
)
def test_static_shape_inference(self, crop_to_aspect_ratio, batch):
channels = 3
input_height = 8
input_width = 8
target_height = 4
target_width = 6
layer = image_preprocessing.Resizing(
target_height,
target_width,
crop_to_aspect_ratio=crop_to_aspect_ratio,
)
unit_test = self
@tf.function
def tf_function(img):
unit_test.assertListEqual(
[input_height, input_width, channels], img.shape.as_list()[-3:]
)
img = layer(img)
unit_test.assertListEqual(
[target_height, target_width, channels],
img.shape.as_list()[-3:],
)
return img
with test_utils.use_gpu():
if batch:
input_shape = (2, input_height, input_width, channels)
else:
input_shape = (input_height, input_width, channels)
img_data = np.random.random(size=input_shape).astype("float32")
tf_function(img_data)
def get_numpy_center_crop(images, expected_height, expected_width):
orig_height = images.shape[1]
orig_width = images.shape[2]
height_start = int((orig_height - expected_height) / 2)
width_start = int((orig_width - expected_width) / 2)
height_end = height_start + expected_height
width_end = width_start + expected_width
return images[:, height_start:height_end, width_start:width_end, :]
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class CenterCropTest(test_combinations.TestCase):
def _run_test(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {"height": expected_height, "width": expected_width}
input_images = np.random.random(
(num_samples, orig_height, orig_width, channels)
).astype(np.float32)
expected_output = get_numpy_center_crop(
input_images, expected_height, expected_width
)
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.CenterCrop,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
input_data=input_images,
expected_output=expected_output,
expected_output_shape=(
None,
expected_height,
expected_width,
channels,
),
)
@parameterized.named_parameters(
("center_crop_3_by_4", 3, 4), ("center_crop_3_by_2", 3, 2)
)
def test_center_crop_aligned(self, expected_height, expected_width):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(
("center_crop_4_by_5", 4, 5), ("center_crop_4_by_3", 4, 3)
)
def test_center_crop_mis_aligned(self, expected_height, expected_width):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(
("center_crop_4_by_6", 4, 6), ("center_crop_3_by_2", 3, 2)
)
def test_center_crop_half_mis_aligned(
self, expected_height, expected_width
):
self._run_test(expected_height, expected_width)
def test_input_smaller_than_crop_box(self):
np.random.seed(1337)
height, width = 10, 8
inp = np.random.random((12, 3, 3, 3))
with test_utils.use_gpu():
layer = image_preprocessing.CenterCrop(height, width)
actual_output = layer(inp)
# In this case, output should equal resizing
# with crop_to_aspect ratio.
resize_layer = image_preprocessing.Resizing(
height, width, crop_to_aspect_ratio=True
)
expected_output = resize_layer(inp)
self.assertAllEqual(expected_output, actual_output)
def test_config_with_custom_name(self):
layer = image_preprocessing.CenterCrop(5, 5, name="image_preproc")
config = layer.get_config()
layer_1 = image_preprocessing.CenterCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (4, 4, 1)).astype(
"float32"
)
layer = image_preprocessing.CenterCrop(2, 2)
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 6],
[9, 10],
]
).astype("float32")
expected_output = np.reshape(expected_output, (2, 2, 1))
self.assertAllEqual(expected_output, output_image)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.CenterCrop(2, 2)
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.CenterCrop(2, 2, dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomCropTest(test_combinations.TestCase):
def _run_test(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {"height": expected_height, "width": expected_width}
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.RandomCrop,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(
None,
expected_height,
expected_width,
channels,
),
)
def test_input_smaller_than_crop_box(self):
np.random.seed(1337)
height, width = 10, 8
inp = np.random.random((12, 3, 3, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp)
# In this case, output should equal resizing
# with crop_to_aspect ratio.
resize_layer = image_preprocessing.Resizing(
height, width, crop_to_aspect_ratio=True
)
expected_output = resize_layer(inp)
self.assertAllEqual(expected_output, actual_output)
def test_training_with_mock(self):
np.random.seed(1337)
height, width = 3, 4
height_offset = np.random.randint(low=0, high=3)
width_offset = np.random.randint(low=0, high=5)
mock_offset = [height_offset, width_offset]
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator,
"random_uniform",
return_value=mock_offset,
):
inp = np.random.random((12, 5, 8, 3))
actual_output = layer(inp, training=True)
expected_output = inp[
:,
height_offset : (height_offset + height),
width_offset : (width_offset + width),
:,
]
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(
("random_crop_4_by_6", 4, 6), ("random_crop_3_by_2", 3, 2)
)
def test_random_crop_output_shape(self, expected_height, expected_width):
self._run_test(expected_height, expected_width)
def test_random_crop_full_height(self):
self._run_test(5, 2)
def test_random_crop_full_width(self):
self._run_test(3, 8)
def test_random_crop_full(self):
np.random.seed(1337)
height, width = 8, 16
inp = np.random.random((12, 8, 16, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=False)
self.assertAllClose(inp, actual_output)
def test_predicting_with_mock_longer_height(self):
np.random.seed(1337)
height, width = 3, 3
inp = np.random.random((12, 10, 6, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=False)
resized_inp = tf.image.resize(inp, size=[5, 3])
expected_output = resized_inp[:, 1:4, :, :]
self.assertAllClose(expected_output, actual_output)
def test_predicting_with_mock_longer_width(self):
np.random.seed(1337)
height, width = 4, 6
inp = np.random.random((12, 8, 16, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=False)
resized_inp = tf.image.resize(inp, size=[4, 8])
expected_output = resized_inp[:, :, 1:7, :]
self.assertAllClose(expected_output, actual_output)
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomCrop(5, 5, name="image_preproc")
config = layer.get_config()
layer_1 = image_preprocessing.RandomCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
np.random.seed(1337)
inp = np.random.random((16, 16, 3))
mock_offset = [2, 2]
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(8, 8)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator,
"random_uniform",
return_value=mock_offset,
):
actual_output = layer(inp, training=True)
self.assertAllClose(inp[2:10, 2:10, :], actual_output)
@test_utils.run_v2_only
def test_uint8_input(self):
inputs = keras.Input((128, 128, 3), batch_size=2, dtype=tf.uint8)
layer = image_preprocessing.RandomCrop(64, 64)
self.assertAllEqual(layer(inputs).dtype, "float32")
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.RandomCrop(2, 2)
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.RandomCrop(2, 2, dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
class RescalingTest(test_combinations.TestCase):
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_rescaling_base(self):
kwargs = {"scale": 1.0 / 127.5, "offset": -1.0}
test_utils.layer_test(
image_preprocessing.Rescaling,
kwargs=kwargs,
input_shape=(2, 5, 6, 3),
expected_output_shape=(None, 5, 6, 3),
)
@test_utils.run_v2_only
def test_rescaling_correctness_float(self):
layer = image_preprocessing.Rescaling(scale=1.0 / 127.5, offset=-1.0)
inputs = tf.random.uniform((2, 4, 5, 3))
outputs = layer(inputs)
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1.0 / 127.5) - 1)
@test_utils.run_v2_only
def test_rescaling_correctness_int(self):
layer = image_preprocessing.Rescaling(scale=1.0 / 127.5, offset=-1)
inputs = tf.random.uniform((2, 4, 5, 3), 0, 100, dtype="int32")
outputs = layer(inputs)
self.assertEqual(outputs.dtype.name, "float32")
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1.0 / 127.5) - 1)
def test_config_with_custom_name(self):
layer = image_preprocessing.Rescaling(0.5, name="rescaling")
config = layer.get_config()
layer_1 = image_preprocessing.Rescaling.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_unbatched_image(self):
layer = image_preprocessing.Rescaling(scale=1.0 / 127.5, offset=-1)
inputs = tf.random.uniform((4, 5, 3))
outputs = layer(inputs)
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1.0 / 127.5) - 1)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.Rescaling(0.5)
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.Rescaling(0.5, dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomFlipTest(test_combinations.TestCase):
def _run_test(self, mode, expected_output=None, mock_random=None):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
if mock_random is None:
mock_random = [1 for _ in range(num_samples)]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
inp = np.random.random((num_samples, orig_height, orig_width, channels))
if expected_output is None:
expected_output = inp
if mode == "horizontal" or mode == "horizontal_and_vertical":
expected_output = np.flip(expected_output, axis=2)
if mode == "vertical" or mode == "horizontal_and_vertical":
expected_output = np.flip(expected_output, axis=1)
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
"stateless_random_uniform",
return_value=mock_random,
):
with test_utils.use_gpu():
layer = image_preprocessing.RandomFlip(mode)
actual_output = layer(inp, training=True)
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(
("random_flip_horizontal", "horizontal"),
("random_flip_vertical", "vertical"),
("random_flip_both", "horizontal_and_vertical"),
)
def test_random_flip(self, mode):
self._run_test(mode)
def test_random_flip_horizontal_half(self):
np.random.seed(1337)
mock_random = [1, 0]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images.copy()
expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=1)
self._run_test("horizontal", expected_output, mock_random)
def test_random_flip_vertical_half(self):
np.random.seed(1337)
mock_random = [1, 0]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images.copy()
expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=0)
self._run_test("vertical", expected_output, mock_random)
def test_random_flip_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomFlip()
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
def test_random_flip_default(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = np.flip(np.flip(input_images, axis=1), axis=2)
mock_random = [1, 1]
mock_random = np.reshape(mock_random, [2, 1, 1, 1])
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
"stateless_random_uniform",
return_value=mock_random,
):
with self.cached_session():
layer = image_preprocessing.RandomFlip()
actual_output = layer(input_images, training=True)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomFlip(name="image_preproc")
config = layer.get_config()
layer_1 = image_preprocessing.RandomFlip.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_random_flip_unbatched_image(self):
input_image = np.random.random((4, 4, 1)).astype(np.float32)
expected_output = np.flip(input_image, axis=0)
# mock_random = np.reshape([0.], [1, 1, 1])
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
"stateless_random_uniform",
return_value=0.0,
):
with self.cached_session():
layer = image_preprocessing.RandomFlip("vertical")
actual_output = layer(input_image, training=True)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.RandomFlip()
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.RandomFlip(dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomContrastTest(test_combinations.TestCase):
def _run_test(self, lower, upper, expected_output=None, mock_random=None):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
if mock_random is None:
mock_random = 0.2
inp = np.random.random((num_samples, orig_height, orig_width, channels))
if expected_output is None:
# reduce mean on height.
inp_mean = np.mean(inp, axis=1, keepdims=True)
# reduce mean on width.
inp_mean = np.mean(inp_mean, axis=2, keepdims=True)
expected_output = (inp - inp_mean) * mock_random + inp_mean
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
"stateless_random_uniform",
return_value=mock_random,
):
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast((lower, upper))
actual_output = layer(inp, training=True)
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(
("random_contrast_2_by_5", 0.2, 0.5),
("random_contrast_2_by_13", 0.2, 1.3),
("random_contrast_5_by_2", 0.5, 0.2),
("random_contrast_10_by_10", 1.0, 1.0),
)
def test_random_contrast(self, lower, upper):
self._run_test(lower, upper)
@parameterized.named_parameters(
("random_contrast_amplitude_2", 0.2),
("random_contrast_amplitude_5", 0.5),
)
def test_random_contrast_amplitude(self, amplitude):
input_images = np.random.random((2, 5, 8, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast(amplitude)
layer(input_images)
def test_random_contrast_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast((0.1, 0.2))
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
def test_random_contrast_int_dtype(self):
input_images = np.random.randint(low=0, high=255, size=(2, 5, 8, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast((0.1, 0.2))
layer(input_images)
def test_random_contrast_invalid_bounds(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((-0.1, 0.5))
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((1.1, 0.5))
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((0.1, -0.2))
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomContrast(
(0.5, 0.6), name="image_preproc"
)
config = layer.get_config()
layer_1 = image_preprocessing.RandomContrast.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_output_value_clip(self):
input_images = np.random.random((5, 8, 3)).astype(np.float32) * 255.0
# Give a factor range [1.0, 11.0] so that
# it will produce large contrast.
layer = image_preprocessing.RandomContrast((0.0, 10.0))
output = layer(input_images)
self.assertLessEqual(tf.reduce_max(output), 255.0)
self.assertGreaterEqual(tf.reduce_min(output), 0.0)
def test_unbatched_image(self):
np.random.seed(1337)
mock_random = 0.2
inp = np.random.random((4, 4, 1))
inp_mean = np.mean(inp, axis=0, keepdims=True)
inp_mean = np.mean(inp_mean, axis=1, keepdims=True)
expected_output = (inp - inp_mean) * mock_random + inp_mean
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
"stateless_random_uniform",
return_value=mock_random,
):
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast((0.2, 0.5))
actual_output = layer(inp, training=True)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.RandomContrast((0.5, 0.6))
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.RandomContrast((0.5, 0.6), dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomBrightnessTest(test_combinations.TestCase):
def test_factor_input_validation(self):
with self.assertRaisesRegex(ValueError, r"in the range \[-1.0, 1.0\]"):
image_preprocessing.RandomBrightness(2.0)
with self.assertRaisesRegex(ValueError, "list of two numbers"):
image_preprocessing.RandomBrightness([1.0])
with self.assertRaisesRegex(ValueError, "should be a number"):
image_preprocessing.RandomBrightness("one")
def test_factor_normalize(self):
layer = image_preprocessing.RandomBrightness(1.0)
self.assertEqual(layer._factor, [-1.0, 1.0])
layer = image_preprocessing.RandomBrightness((0.5, 0.3))
self.assertEqual(layer._factor, [0.3, 0.5])
layer = image_preprocessing.RandomBrightness(-0.2)
self.assertEqual(layer._factor, [-0.2, 0.2])
@test_utils.run_v2_only
def test_output_value_range(self):
# Always scale up to 255
layer = image_preprocessing.RandomBrightness([1.0, 1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
output_min = tf.math.reduce_min(output)
output_max = tf.math.reduce_max(output)
self.assertEqual(output_min, 255)
self.assertEqual(output_max, 255)
# Always scale down to 0
layer = image_preprocessing.RandomBrightness([-1.0, -1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
output_min = tf.math.reduce_min(output)
output_max = tf.math.reduce_max(output)
self.assertEqual(output_min, 0)
self.assertEqual(output_max, 0)
def test_output(self):
        # Always scale up, by a random amount between 0 and 255
layer = image_preprocessing.RandomBrightness([0, 1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
diff = output - inputs
self.assertGreaterEqual(tf.math.reduce_min(diff), 0)
self.assertGreater(tf.math.reduce_mean(diff), 0)
        # Always scale down, by a random amount between 0 and 255
layer = image_preprocessing.RandomBrightness([-1.0, 0.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
diff = output - inputs
self.assertLessEqual(tf.math.reduce_max(diff), 0)
self.assertLess(tf.math.reduce_mean(diff), 0)
@test_utils.run_v2_only
def test_scale_output(self):
layer = image_preprocessing.RandomBrightness([0, 1.0], seed=1337)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
# Create a new layer with same seed but different value range
layer2 = image_preprocessing.RandomBrightness(
[0, 1.0], value_range=[0, 1], seed=1337
)
inputs2 = inputs / 255.0
output2 = layer2(inputs2)
        # Make sure the outputs are the same, just scaled by 255
self.assertAllClose(output, output2 * 255.0)
def test_different_adjustment_within_batch(self):
layer = image_preprocessing.RandomBrightness([0.2, 0.3])
inputs = np.zeros(shape=(2, 10, 10, 3)) # 2 images with all zeros
output = layer(inputs)
diff = output - inputs
        # Make sure the two images get different adjustments
self.assertNotAllClose(diff[0], diff[1])
        # Make sure all pixels within the same image get the same adjustment
image1 = output[0]
        # The mean pixel value over width and height should equal any
        # individual pixel in the image.
self.assertAllClose(
tf.reduce_mean(image1), image1[0, 0, 0], rtol=1e-5, atol=1e-5
)
def test_inference(self):
layer = image_preprocessing.RandomBrightness([0, 1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs, training=False)
self.assertAllClose(inputs, output)
@test_utils.run_v2_only
def test_dtype(self):
layer = image_preprocessing.RandomBrightness([0, 1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
self.assertEqual(output.dtype, tf.float32)
layer = image_preprocessing.RandomBrightness([0, 1.0], dtype="uint8")
output = layer(inputs)
self.assertEqual(output.dtype, tf.uint8)
def test_seed(self):
layer = image_preprocessing.RandomBrightness([0, 1.0], seed=1337)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output_1 = layer(inputs)
layer2 = image_preprocessing.RandomBrightness([0, 1.0], seed=1337)
output_2 = layer2(inputs)
self.assertAllClose(output_1, output_2)
def test_config(self):
layer = image_preprocessing.RandomBrightness(
[0, 1.0], value_range=[0.0, 1.0], seed=1337
)
config = layer.get_config()
self.assertEqual(config["factor"], [0.0, 1.0])
self.assertEqual(config["value_range"], [0.0, 1.0])
self.assertEqual(config["seed"], 1337)
reconstructed_layer = image_preprocessing.RandomBrightness.from_config(
config
)
self.assertEqual(reconstructed_layer._factor, layer._factor)
self.assertEqual(reconstructed_layer._value_range, layer._value_range)
self.assertEqual(reconstructed_layer._seed, layer._seed)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomTranslationTest(test_combinations.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {"height_factor": height_factor, "width_factor": width_factor}
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.RandomTranslation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels),
)
@parameterized.named_parameters(
("random_translate_4_by_6", 0.4, 0.6),
("random_translate_3_by_2", 0.3, 0.2),
("random_translate_tuple_factor", (-0.5, 0.4), (0.2, 0.3)),
)
def test_random_translation(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_translation_up_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-0.2, -0.2), width_factor=0.0
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[20, 21, 22, 23, 24],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_up_numeric_constant(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-0.2, -0.2),
width_factor=0.0,
fill_mode="constant",
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[0, 0, 0, 0, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_down_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
# Shifting by .2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(0.2, 0.2), width_factor=0.0
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_asymmetric_size_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (1, 8, 2, 1)).astype(
dtype
)
                # Shifting by .5 * 8 = 4 pixels.
layer = image_preprocessing.RandomTranslation(
height_factor=(0.5, 0.5), width_factor=0.0
)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray(
[
[6, 7],
[4, 5],
[2, 3],
[0, 1],
[0, 1],
[2, 3],
[4, 5],
[6, 7],
]
).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 8, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_down_numeric_constant(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
                # Shifting by .2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(0.2, 0.2),
width_factor=0.0,
fill_mode="constant",
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[0, 0, 0, 0, 0],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_left_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
                # Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=0.0, width_factor=(-0.2, -0.2)
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[1, 2, 3, 4, 4],
[6, 7, 8, 9, 9],
[11, 12, 13, 14, 14],
[16, 17, 18, 19, 19],
[21, 22, 23, 24, 24],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_left_numeric_constant(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(
dtype
)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=0.0,
width_factor=(-0.2, -0.2),
fill_mode="constant",
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[1, 2, 3, 4, 0],
[6, 7, 8, 9, 0],
[11, 12, 13, 14, 0],
[16, 17, 18, 19, 0],
[21, 22, 23, 24, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomTranslation(0.5, 0.5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomTranslation(
0.5, 0.6, name="image_preproc"
)
config = layer.get_config()
layer_1 = image_preprocessing.RandomTranslation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(
np.int64
)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-0.2, -0.2), width_factor=0.0
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[20, 21, 22, 23, 24],
]
).astype(np.int64)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllEqual(expected_output, output_image)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.RandomTranslation(0.5, 0.6)
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.RandomTranslation(0.5, 0.6, dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomTransformTest(test_combinations.TestCase):
def _run_random_transform_with_mock(
self,
transform_matrix,
expected_output,
mode,
fill_value=0.0,
interpolation="bilinear",
):
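        # Each row of `transform_matrix` is a projective transform
        # [a0, a1, a2, b0, b1, b2, c0, c1] that maps an *output* pixel (x, y)
        # to an *input* pixel ((a0*x + a1*y + a2)/k, (b0*x + b1*y + b2)/k),
        # with k = c0*x + c1*y + 1. For example, [1, 0, 0, 0, 1, -1, 0, 0]
        # samples input (x, y - 1), shifting the image content down by one
        # pixel.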
inp = np.arange(15).reshape((1, 5, 3, 1)).astype(np.float32)
with self.cached_session():
output = image_preprocessing.transform(
inp,
transform_matrix,
fill_mode=mode,
fill_value=fill_value,
interpolation=interpolation,
)
self.assertAllClose(expected_output, output)
def test_random_translation_reflect(self):
# reflected output is (dcba|abcd|dcba)
# Test down shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "reflect"
)
# Test up shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11.0],
[12.0, 13.0, 14.0],
[12.0, 13.0, 14.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "reflect"
)
# Test left shift by 1.
# reflected output is (dcba|abcd|dcba)
# pyformat: disable
expected_output = (
np.asarray(
[
[1.0, 2.0, 2.0],
[4.0, 5.0, 5.0],
[7.0, 8.0, 8.0],
[10.0, 11.0, 11.0],
[13.0, 14.0, 14.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "reflect"
)
# Test right shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[0.0, 0.0, 1.0],
[3.0, 3.0, 4],
[6.0, 6.0, 7.0],
[9.0, 9.0, 10.0],
[12.0, 12.0, 13.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "reflect"
)
def test_random_translation_wrap(self):
        # wrapped output is (abcd|abcd|abcd)
# Test down shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[12.0, 13.0, 14.0],
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "wrap"
)
# Test up shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11.0],
[12.0, 13.0, 14.0],
[0.0, 1.0, 2.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "wrap"
)
# Test left shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[1.0, 2.0, 0.0],
[4.0, 5.0, 3.0],
[7.0, 8.0, 6.0],
[10.0, 11.0, 9.0],
[13.0, 14.0, 12.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "wrap"
)
# Test right shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[2.0, 0.0, 1.0],
[5.0, 3.0, 4],
[8.0, 6.0, 7.0],
[11.0, 9.0, 10.0],
[14.0, 12.0, 13.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "wrap"
)
def test_random_translation_nearest(self):
# nearest output is (aaaa|abcd|dddd)
# Test down shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "nearest"
)
# Test up shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11.0],
[12.0, 13.0, 14.0],
[12.0, 13.0, 14.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "nearest"
)
# Test left shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[1.0, 2.0, 2.0],
[4.0, 5.0, 5.0],
[7.0, 8.0, 8.0],
[10.0, 11.0, 11.0],
[13.0, 14.0, 14.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "nearest"
)
# Test right shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[0.0, 0.0, 1.0],
[3.0, 3.0, 4],
[6.0, 6.0, 7.0],
[9.0, 9.0, 10.0],
[12.0, 12.0, 13.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "nearest"
)
def test_random_translation_constant_0(self):
# constant output is (0000|abcd|0000)
# Test down shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[0.0, 0.0, 0.0],
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "constant"
)
# Test up shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11.0],
[12.0, 13.0, 14.0],
[0.0, 0.0, 0.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "constant"
)
# Test left shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[1.0, 2.0, 0.0],
[4.0, 5.0, 0.0],
[7.0, 8.0, 0.0],
[10.0, 11.0, 0.0],
[13.0, 14.0, 0.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "constant"
)
# Test right shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[0.0, 0.0, 1.0],
[0.0, 3.0, 4],
[0.0, 6.0, 7.0],
[0.0, 9.0, 10.0],
[0.0, 12.0, 13.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "constant"
)
def test_random_translation_constant_1(self):
with tf.compat.forward_compatibility_horizon(2020, 8, 6):
# constant output is (1111|abcd|1111)
# Test down shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[1.0, 1.0, 1.0],
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "constant", fill_value=1.0
)
# Test up shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11.0],
[12.0, 13.0, 14.0],
[1.0, 1.0, 1.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "constant", fill_value=1.0
)
# Test left shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[1.0, 2.0, 1.0],
[4.0, 5.0, 1.0],
[7.0, 8.0, 1.0],
[10.0, 11.0, 1.0],
[13.0, 14.0, 1.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "constant", fill_value=1.0
)
# Test right shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[1.0, 0.0, 1.0],
[1.0, 3.0, 4],
[1.0, 6.0, 7.0],
[1.0, 9.0, 10.0],
[1.0, 12.0, 13.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix, expected_output, "constant", fill_value=1.0
)
def test_random_translation_nearest_interpolation(self):
        # constant fill with nearest interpolation: output is (0000|abcd|0000)
# Test down shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[0.0, 0.0, 0.0],
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode="constant",
interpolation="nearest",
)
# Test up shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[3.0, 4.0, 5.0],
[6.0, 7.0, 8],
[9.0, 10.0, 11.0],
[12.0, 13.0, 14.0],
[0.0, 0.0, 0.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode="constant",
interpolation="nearest",
)
# Test left shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[1.0, 2.0, 0.0],
[4.0, 5.0, 0.0],
[7.0, 8.0, 0.0],
[10.0, 11.0, 0.0],
[13.0, 14.0, 0.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode="constant",
interpolation="nearest",
)
# Test right shift by 1.
# pyformat: disable
expected_output = (
np.asarray(
[
[0.0, 0.0, 1.0],
[0.0, 3.0, 4],
[0.0, 6.0, 7.0],
[0.0, 9.0, 10.0],
[0.0, 12.0, 13.0],
]
)
.reshape((1, 5, 3, 1))
.astype(np.float32)
)
# pyformat: enable
transform_matrix = np.asarray(
[[1.0, 0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
)
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode="constant",
interpolation="nearest",
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomRotationTest(test_combinations.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {"factor": factor}
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.RandomRotation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels),
)
@parameterized.named_parameters(
("random_rotate_4", 0.4),
("random_rotate_3", 0.3),
("random_rotate_tuple_factor", (-0.5, 0.4)),
)
def test_random_rotation(self, factor):
self._run_test(factor)
def test_random_rotation_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomRotation(0.5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
def test_distribution_strategy(self):
"""Tests that RandomRotation can be created within DistStrats."""
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
with test_utils.use_gpu():
strat = tf.distribute.MirroredStrategy(devices=["cpu", "gpu"])
with strat.scope():
layer = image_preprocessing.RandomRotation(0.5)
output = strat.run(lambda: layer(input_images, training=True))
values = output.values
self.assertAllEqual(2, len(values))
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomRotation(0.5, name="image_preproc")
config = layer.get_config()
layer_1 = image_preprocessing.RandomRotation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(
np.float32
)
# 180 rotation.
layer = image_preprocessing.RandomRotation(factor=(0.5, 0.5))
output_image = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).astype(np.float32)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllClose(expected_output, output_image)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.RandomRotation(0.5)
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.RandomRotation(0.5, dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomZoomTest(test_combinations.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {"height_factor": height_factor, "width_factor": width_factor}
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.RandomZoom,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels),
)
@parameterized.named_parameters(
("random_zoom_4_by_6", -0.4, -0.6),
("random_zoom_2_by_3", -0.2, -0.3),
("random_zoom_tuple_factor", (-0.4, -0.5), (-0.2, -0.3)),
)
def test_random_zoom_in(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
@parameterized.named_parameters(
("random_zoom_4_by_6", 0.4, 0.6),
("random_zoom_2_by_3", 0.2, 0.3),
("random_zoom_tuple_factor", (0.4, 0.5), (0.2, 0.3)),
)
def test_random_zoom_out(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_zoom_in_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(
dtype
)
layer = image_preprocessing.RandomZoom(
(-0.5, -0.5), (-0.5, -0.5), interpolation="nearest"
)
output_image = layer(np.expand_dims(input_image, axis=0))
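                # Negative factors zoom in: (-0.5, -0.5) crops to the central
                # half of the image and resizes it back up, so with nearest
                # interpolation neighboring output pixels repeat, as below.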
expected_output = np.asarray(
[
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(
dtype
)
layer = image_preprocessing.RandomZoom(
(0.5, 0.5),
(0.8, 0.8),
fill_mode="constant",
interpolation="nearest",
)
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray(
[
[0, 0, 0, 0, 0],
[0, 5, 7, 9, 0],
[0, 10, 12, 14, 0],
[0, 20, 22, 24, 0],
[0, 0, 0, 0, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric_preserve_aspect_ratio(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(
dtype
)
layer = image_preprocessing.RandomZoom(
(0.5, 0.5), fill_mode="constant", interpolation="nearest"
)
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray(
[
[0, 0, 0, 0, 0],
[0, 6, 7, 9, 0],
[0, 11, 12, 14, 0],
[0, 21, 22, 24, 0],
[0, 0, 0, 0, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomZoom(0.5, 0.5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomZoom(0.5, 0.6, name="image_preproc")
config = layer.get_config()
layer_1 = image_preprocessing.RandomZoom.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(
np.int64
)
layer = image_preprocessing.RandomZoom(
(-0.5, -0.5), (-0.5, -0.5), interpolation="nearest"
)
output_image = layer(input_image)
expected_output = np.asarray(
[
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]
).astype(np.int64)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllEqual(expected_output, output_image)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.RandomZoom(0.5, 0.5)
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.RandomZoom(0.5, 0.5, dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomHeightTest(test_combinations.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with test_utils.use_gpu():
img = np.random.random(
(num_samples, orig_height, orig_width, channels)
)
layer = image_preprocessing.RandomHeight(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[2], 8)
self.assertEqual(img_out.shape[3], 3)
@parameterized.named_parameters(
("random_height_4_by_6", (0.4, 0.6)),
("random_height_3_by_2", (-0.3, 0.2)),
("random_height_3", 0.3),
)
def test_random_height_basic(self, factor):
self._run_test(factor)
def test_valid_random_height(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0.6
with test_utils.use_gpu():
img = np.random.random((12, 5, 8, 3))
layer = image_preprocessing.RandomHeight(0.4)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator,
"random_uniform",
return_value=mock_factor,
):
img_out = layer(img, training=True)
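                # With the mocked height factor of 0.6, the 5-row input is
                # resized to 5 * 0.6 = 3 rows.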
self.assertEqual(img_out.shape[1], 3)
def test_random_height_longer_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 6), (2, 3, 1)).astype(
dtype
)
layer = image_preprocessing.RandomHeight(factor=(1.0, 1.0))
                # The return dtype of RandomHeight is float32 unless
                # `interpolation` is set to
                # `ResizeMethod.NEAREST_NEIGHBOR`; cast the layer output
                # to the desired dtype.
output_image = tf.cast(
layer(np.expand_dims(input_image, axis=0)), dtype=dtype
)
# pyformat: disable
expected_output = np.asarray(
[
[0, 1, 2],
[0.75, 1.75, 2.75],
[2.25, 3.25, 4.25],
[3, 4, 5],
]
).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 4, 3, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_height_shorter_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 8), (4, 2, 1)).astype(
dtype
)
layer = image_preprocessing.RandomHeight(
factor=(-0.5, -0.5), interpolation="nearest"
)
output_image = layer(np.expand_dims(input_image, axis=0))
# pyformat: disable
expected_output = np.asarray([[2, 3], [6, 7]]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_height_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomHeight((-1.5, 0.4))
def test_random_height_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomHeight(0.5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomHeight(0.5, name="image_preproc")
config = layer.get_config()
layer_1 = image_preprocessing.RandomHeight.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0.6
with test_utils.use_gpu():
img = np.random.random((5, 8, 3))
layer = image_preprocessing.RandomHeight(0.4)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator,
"random_uniform",
return_value=mock_factor,
):
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 3)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.RandomHeight(0.2)
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.RandomHeight(0.2, dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomWidthTest(test_combinations.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with test_utils.use_gpu():
img = np.random.random(
(num_samples, orig_height, orig_width, channels)
)
layer = image_preprocessing.RandomWidth(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[1], 5)
self.assertEqual(img_out.shape[3], 3)
@parameterized.named_parameters(
("random_width_4_by_6", (0.4, 0.6)),
("random_width_3_by_2", (-0.3, 0.2)),
("random_width_3", 0.3),
)
def test_random_width_basic(self, factor):
self._run_test(factor)
def test_valid_random_width(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0.6
with test_utils.use_gpu():
img = np.random.random((12, 8, 5, 3))
layer = image_preprocessing.RandomWidth(0.4)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator,
"random_uniform",
return_value=mock_factor,
):
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[2], 3)
def test_random_width_longer_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 6), (3, 2, 1)).astype(
dtype
)
layer = image_preprocessing.RandomWidth(factor=(1.0, 1.0))
                # The return dtype of RandomWidth is float32 unless
                # `interpolation` is set to
                # `ResizeMethod.NEAREST_NEIGHBOR`; cast the layer output
                # to the desired dtype.
output_image = tf.cast(
layer(np.expand_dims(input_image, axis=0)), dtype=dtype
)
# pyformat: disable
expected_output = np.asarray(
[[0, 0.25, 0.75, 1], [2, 2.25, 2.75, 3], [4, 4.25, 4.75, 5]]
).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 3, 4, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_width_shorter_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 8), (2, 4, 1)).astype(
dtype
)
layer = image_preprocessing.RandomWidth(
factor=(-0.5, -0.5), interpolation="nearest"
)
output_image = layer(np.expand_dims(input_image, axis=0))
# pyformat: disable
expected_output = np.asarray([[1, 3], [5, 7]]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_width_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomWidth((-1.5, 0.4))
def test_random_width_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomWidth(0.5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomWidth(0.5, name="image_preproc")
config = layer.get_config()
layer_1 = image_preprocessing.RandomWidth.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0.6
with test_utils.use_gpu():
img = np.random.random((8, 5, 3))
layer = image_preprocessing.RandomWidth(0.4)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator,
"random_uniform",
return_value=mock_factor,
):
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[1], 3)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = image_preprocessing.RandomWidth(0.2)
self.assertAllEqual(layer(inputs).dtype, "float32")
layer = image_preprocessing.RandomWidth(0.2, dtype="uint8")
self.assertAllEqual(layer(inputs).dtype, "uint8")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class LearningPhaseTest(test_combinations.TestCase):
def test_plain_call(self):
layer = image_preprocessing.RandomWidth(0.5, seed=123)
shape = (12, 12, 3)
img = np.random.random((12,) + shape)
out = layer(img) # Defaults to training=True
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = layer(img, training=True)
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = layer(img, training=False)
self.assertEqual(tuple(int(i) for i in out.shape[1:]), shape)
def test_call_in_container(self):
layer1 = image_preprocessing.RandomWidth(0.5, seed=123)
layer2 = image_preprocessing.RandomHeight(0.5, seed=123)
seq = sequential.Sequential([layer1, layer2])
shape = (12, 12, 3)
img = np.random.random((12,) + shape)
out = seq(img) # Defaults to training=True
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = seq(img, training=True)
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = seq(img, training=False)
self.assertEqual(tuple(int(i) for i in out.shape[1:]), shape)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class DeterminismTest(test_combinations.TestCase):
@parameterized.named_parameters(
("random_flip", image_preprocessing.RandomFlip),
(
"random_contrast",
functools.partial(image_preprocessing.RandomContrast, factor=1.0),
),
(
"random_crop",
functools.partial(
image_preprocessing.RandomCrop, height=2, width=2
),
),
(
"random_translation",
functools.partial(image_preprocessing.RandomTranslation, 0.3, 0.2),
),
(
"random_rotation",
functools.partial(image_preprocessing.RandomRotation, 0.5),
),
("random_zoom", functools.partial(image_preprocessing.RandomZoom, 0.2)),
(
"random_height",
functools.partial(image_preprocessing.RandomHeight, 0.4),
),
(
"random_width",
functools.partial(image_preprocessing.RandomWidth, 0.3),
),
)
def test_seed_constructor_arg(self, layer_cls):
input_image = np.random.random((2, 5, 8, 3)).astype(np.float32)
layer1 = layer_cls(seed=0.0)
layer2 = layer_cls(seed=0.0)
layer1_output = layer1(input_image)
layer2_output = layer2(input_image)
self.assertAllClose(
layer1_output.numpy().tolist(), layer2_output.numpy().tolist()
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/image_preprocessing_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/image_preprocessing_test.py",
"repo_id": "tf-keras",
"token_count": 47831
} | 203 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras text vectorization preprocessing layer."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.layers.preprocessing import preprocessing_test_utils
from tf_keras.layers.preprocessing import string_lookup
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
def _get_end_to_end_test_cases():
test_cases = (
{
"testcase_name": "test_strings_soft_vocab_cap",
# Create an array where 'earth' is the most frequent term, followed
# by 'wind', then 'and', then 'fire'. This ensures that the vocab
# accumulator is sorting by frequency.
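            # With the default settings, adapt() reserves index 0 for OOV
            # tokens and assigns indices 1..4 by descending frequency
            # (earth, wind, and, fire), which is what `expected_output`
            # below encodes ('michigan' -> 0).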
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
["wind"],
["and"],
["fire"],
["fire"],
["and"],
["earth"],
["michigan"],
]
),
"kwargs": {
"max_tokens": None,
},
"expected_output": [[1], [2], [3], [4], [4], [3], [1], [0]],
"input_dtype": tf.string,
},
)
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class StringLookupLayerTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
@parameterized.named_parameters(*_get_end_to_end_test_cases())
def test_layer_end_to_end_with_adapt(
self,
vocab_data,
input_data,
kwargs,
use_dataset,
expected_output,
input_dtype,
):
cls = string_lookup.StringLookup
expected_output_dtype = tf.int64
input_shape = input_data.shape
if use_dataset:
# TF-Keras APIs expect batched datasets.
# TODO(rachelim): `model.predict` predicts the result on each
# dataset batch separately, then tries to concatenate the results
# together. When the results have different shapes on the non-concat
# axis (which can happen in the output_mode = INT case for
# StringLookup), the concatenation fails. In real use cases, this
# may not be an issue because users are likely to pipe the
# preprocessing layer into other keras layers instead of predicting
# it directly. A workaround for these unit tests is to have the
# dataset only contain one batch, so no concatenation needs to
# happen with the result. For consistency with numpy input, we
# should make `predict` join differently shaped results together
# sensibly, with 0 padding.
input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
input_shape[0]
)
vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(
input_shape[0]
)
output_data = test_utils.layer_test(
cls,
kwargs=kwargs,
input_shape=input_shape,
input_data=input_data,
input_dtype=input_dtype,
expected_output_dtype=expected_output_dtype,
validate_training=False,
adapt_data=vocab_data,
)
self.assertAllClose(expected_output, output_data)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class StringLookupVocabularyTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with tf.io.gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def test_int_output_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_explicit_vocab_with_special_tokens(self):
vocab_data = ["", "[UNK]", "earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data, mask_token="")
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_no_oov(self):
vocab_data = ["earth", "wind", "and", "fire"]
valid_input = np.array(
[["earth", "wind", "and", "fire"], ["fire", "and", "earth", ""]]
)
invalid_input = np.array(
[
["earth", "wind", "and", "michigan"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, mask_token="", num_oov_indices=0
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(valid_input)
self.assertAllEqual(expected_output, output_data)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "found OOV values.*michigan"
):
_ = model.predict(invalid_input)
def test_no_vocab(self):
with self.assertRaisesRegex(
RuntimeError, "you must set the layer's vocabulary"
):
layer = string_lookup.StringLookup(output_mode="binary")
layer([["a"]])
def test_one_hot_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(["earth", "wind", "and", "fire", "michigan"])
expected_output = [
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
]
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="one_hot"
)
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_multi_hot_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[0, 1, 1, 1, 1], [1, 1, 0, 1, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="multi_hot"
)
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_count_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "earth", "fire", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[0, 2, 0, 0, 2], [1, 1, 0, 1, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="count"
)
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_sparse_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="multi_hot", sparse=True
)
res = layer(input_data)
        self.assertEqual(res.__class__.__name__, "SparseKerasTensor")
def test_get_vocab_returns_str(self):
vocab_data = ["earth", "wind", "and", "fire"]
expected_vocab = ["[UNK]", "earth", "wind", "and", "fire"]
layer = string_lookup.StringLookup(vocabulary=vocab_data)
layer_vocab = layer.get_vocabulary()
self.assertAllEqual(expected_vocab, layer_vocab)
self.assertIsInstance(layer_vocab[0], str)
inverse_layer = string_lookup.StringLookup(
vocabulary=layer.get_vocabulary(), invert=True
)
layer_vocab = inverse_layer.get_vocabulary()
self.assertAllEqual(expected_vocab, layer_vocab)
self.assertIsInstance(layer_vocab[0], str)
def test_int_output_explicit_vocab_from_file(self):
vocab_list = ["earth", "wind", "and", "fire"]
vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_explicit_vocab_from_file_via_setter(self):
vocab_list = ["earth", "wind", "and", "fire"]
vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup()
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_non_unique_vocab_fails(self):
vocab_data = ["earth", "wind", "and", "fire", "fire"]
with self.assertRaisesRegex(ValueError, ".*repeated term.*fire.*"):
_ = string_lookup.StringLookup(vocabulary=vocab_data)
def test_non_unique_vocab_from_file_fails(self):
vocab_list = ["earth", "wind", "and", "fire", "earth"]
vocab_path = self._write_to_temp_file("repeat_vocab_file", vocab_list)
with self.assertRaisesRegex(
tf.errors.FailedPreconditionError,
"HashTable has different value for same key.*earth",
):
_ = string_lookup.StringLookup(vocabulary=vocab_path)
def test_inverse_layer(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
expected_output = np.array(
[["earth", "wind", "and", "fire"], ["fire", "and", "earth", ""]]
)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, invert=True, mask_token=""
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_inverse_layer_from_file(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 0]])
expected_output = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"],
]
)
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
layer = string_lookup.StringLookup(vocabulary=vocab_path, invert=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_inverse_layer_from_file_with_mask(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
expected_output = np.array(
[["earth", "wind", "and", "fire"], ["fire", "and", "earth", "[M]"]]
)
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
layer = string_lookup.StringLookup(
vocabulary=vocab_path, invert=True, mask_token="[M]"
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_forward_backward_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"],
]
)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
invert_layer = string_lookup.StringLookup(
vocabulary=vocab_data, invert=True
)
int_data = layer(input_data)
out_data = invert_layer(int_data)
model = keras.Model(inputs=input_data, outputs=out_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_forward_backward_adapted_vocab(self):
adapt_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"],
]
)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup()
layer.adapt(adapt_data)
invert_layer = string_lookup.StringLookup(
vocabulary=layer.get_vocabulary(), invert=True
)
int_data = layer(input_data)
out_data = invert_layer(int_data)
model = keras.Model(inputs=input_data, outputs=out_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_ragged_string_input_multi_bucket(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = tf.ragged.constant(
[["earth", "wind", "fire"], ["fire", "and", "earth", "ohio"]]
)
expected_output = [[2, 3, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)
layer = string_lookup.StringLookup(num_oov_indices=2)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_tensor_vocab(self):
vocab_data = ["[UNK]", "wind", "and", "fire"]
vocab_tensor = tf.constant(vocab_data)
layer = string_lookup.StringLookup(vocabulary=vocab_tensor)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(vocab_data, returned_vocab)
self.assertAllEqual(layer.vocabulary_size(), 4)
fn = tf.function(lambda: layer.set_vocabulary(vocab_tensor))
with self.assertRaisesRegex(
RuntimeError, "Cannot set a tensor vocabulary"
):
fn()
@test_utils.run_v2_only()
def test_saving_v3(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(["earth", "wind", "and", "fire"])
# First, with a static vocabulary.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
output = layer(input_data)
model = keras.Model(inputs=input_data, outputs=output)
ref_output = model.predict(input_array)
temp_dir = self.get_temp_dir()
model_path = os.path.join(temp_dir, "mymodel.keras")
model.save(model_path, save_format="keras_v3")
model = keras.models.load_model(model_path)
output = model.predict(input_array)
self.assertAllEqual(output, ref_output)
# Second, with adapt().
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup()
layer.adapt(vocab_data)
output = layer(input_data)
model = keras.Model(inputs=input_data, outputs=output)
ref_output = model.predict(input_array)
model.save(model_path, save_format="keras_v3", overwrite=True)
model = keras.models.load_model(model_path)
output = model.predict(input_array)
self.assertAllEqual(output, ref_output)
# Test TF-IDF + adapt().
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = string_lookup.StringLookup(output_mode="tf_idf")
layer.adapt(vocab_data)
output = layer(input_data)
model = keras.Model(inputs=input_data, outputs=output)
ref_output = model.predict(input_array)
model.save(model_path, save_format="keras_v3", overwrite=True)
model = keras.models.load_model(model_path)
output = model.predict(input_array)
self.assertAllEqual(output, ref_output)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/string_lookup_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/string_lookup_test.py",
"repo_id": "tf-keras",
"token_count": 9863
} | 204 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the SpatialDropout1D layer."""
import tensorflow.compat.v2 as tf
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.regularization.dropout import Dropout
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.SpatialDropout1D")
class SpatialDropout1D(Dropout):
"""Spatial 1D version of Dropout.
    This version performs the same function as Dropout; however, it drops
entire 1D feature maps instead of individual elements. If adjacent frames
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout1D will help promote independence
between feature maps and should be used instead.
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
Call arguments:
inputs: A 3D tensor.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Input shape:
3D tensor with shape: `(samples, timesteps, channels)`
Output shape: Same as input.
References: - [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
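    Example (a minimal usage sketch; the dropped values are random, but the
    output shape always matches the input):
    >>> inputs = tf.random.normal([2, 10, 8])
    >>> layer = tf.keras.layers.SpatialDropout1D(0.5)
    >>> outputs = layer(inputs, training=True)
    >>> outputs.shape
    TensorShape([2, 10, 8])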
"""
def __init__(self, rate, **kwargs):
super().__init__(rate, **kwargs)
self.input_spec = InputSpec(ndim=3)
def _get_noise_shape(self, inputs):
input_shape = tf.shape(inputs)
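        # Broadcast the dropout mask over the timestep axis: for each sample
        # and channel, either every timestep is kept or every timestep is
        # dropped, i.e. whole 1D feature maps are dropped together.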
noise_shape = (input_shape[0], 1, input_shape[2])
return noise_shape
| tf-keras/tf_keras/layers/regularization/spatial_dropout1d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/regularization/spatial_dropout1d.py",
"repo_id": "tf-keras",
"token_count": 730
} | 205 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Reshape layer."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.engine.base_layer import Layer
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Reshape")
class Reshape(Layer):
"""Layer that reshapes inputs into the given shape.
Input shape:
Arbitrary, although all dimensions in the input shape must be known/fixed.
Use the keyword argument `input_shape` (tuple of integers, does not
include the samples/batch size axis) when using this layer as the first
layer in a model.
Output shape:
`(batch_size,) + target_shape`
Example:
>>> # as first layer in a Sequential model
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Reshape((3, 4), input_shape=(12,)))
>>> # model.output_shape == (None, 3, 4), `None` is the batch size.
>>> model.output_shape
(None, 3, 4)
>>> # as intermediate layer in a Sequential model
>>> model.add(tf.keras.layers.Reshape((6, 2)))
>>> model.output_shape
(None, 6, 2)
>>> # also supports shape inference using `-1` as dimension
>>> model.add(tf.keras.layers.Reshape((-1, 2, 2)))
>>> model.output_shape
(None, 3, 2, 2)
"""
def __init__(self, target_shape, **kwargs):
"""Creates a `tf.keras.layers.Reshape` layer instance.
Args:
target_shape: Target shape. Tuple of integers, does not include the
samples dimension (batch size).
**kwargs: Any additional layer keyword arguments.
"""
super().__init__(**kwargs)
self.target_shape = tuple(target_shape)
def _fix_unknown_dimension(self, input_shape, output_shape):
"""Find and replace a missing dimension in an output shape.
This is a near direct port of the internal Numpy function
`_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`
Args:
input_shape: Shape of array being reshaped
output_shape: Desired shape of the array with at most a single -1
which indicates a dimension that should be derived from the input
shape.
Returns:
The new output shape with a -1 replaced with its computed value.
Raises:
ValueError: If the total array size of the output_shape is
different than the input_shape, or more than one unknown dimension
is specified.
"""
output_shape = list(output_shape)
msg = (
"total size of new array must be unchanged, "
"input_shape = {}, output_shape = {}".format(
input_shape, output_shape
)
)
known, unknown = 1, None
for index, dim in enumerate(output_shape):
if dim < 0:
if unknown is None:
unknown = index
else:
raise ValueError(
"There must be at most one unknown dimension in "
f"output_shape. Received: output_shape={output_shape}."
)
else:
known *= dim
original = np.prod(input_shape, dtype=int)
if unknown is not None:
if known == 0 or original % known != 0:
raise ValueError(msg)
output_shape[unknown] = original // known
elif original != known:
raise ValueError(msg)
return output_shape
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if None in input_shape[1:]:
output_shape = [input_shape[0]]
# input shape (partially) unknown? replace -1's with None's
output_shape += tuple(
s if s != -1 else None for s in self.target_shape
)
else:
output_shape = [input_shape[0]]
output_shape += self._fix_unknown_dimension(
input_shape[1:], self.target_shape
)
return tf.TensorShape(output_shape)
def call(self, inputs):
result = tf.reshape(inputs, (tf.shape(inputs)[0],) + self.target_shape)
if not tf.executing_eagerly():
            # Set the static shape for the result since it might be lost
            # during array_ops reshape, e.g., some `None` dim in the result
            # could be inferred.
result.set_shape(self.compute_output_shape(inputs.shape))
return result
def get_config(self):
config = {"target_shape": self.target_shape}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| tf-keras/tf_keras/layers/reshaping/reshape.py/0 | {
"file_path": "tf-keras/tf_keras/layers/reshaping/reshape.py",
"repo_id": "tf-keras",
"token_count": 2193
} | 206 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for recurrent layers."""
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.rnn import rnn_utils
from tf_keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin
from tf_keras.layers.rnn.stacked_rnn_cells import StackedRNNCells
from tf_keras.saving import serialization_lib
from tf_keras.saving.legacy.saved_model import layer_serialization
from tf_keras.utils import generic_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export("keras.layers.RNN")
class RNN(base_layer.Layer):
"""Base class for recurrent layers.
See
[the TF-Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
Args:
cell: A RNN cell instance or a list of RNN cell instances.
A RNN cell is a class that has:
- A `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- A `state_size` attribute. This can be a single integer
(single state) in which case it is the size of the recurrent
state. This can also be a list/tuple of integers (one size per state).
The `state_size` can also be TensorShape or tuple/list of
TensorShape, to represent high dimension state.
        - An `output_size` attribute. This can be a single integer or a
          TensorShape, which represents the shape of the output. For backward
          compatibility, if this attribute is not available for the
          cell, the value will be inferred from the first element of the
          `state_size`.
- A `get_initial_state(inputs=None, batch_size=None, dtype=None)`
method that creates a tensor meant to be fed to `call()` as the
initial state, if the user didn't specify any initial state via other
means. The returned initial state should have a shape of
[batch_size, cell.state_size]. The cell might choose to create a
tensor full of zeros, or full of other values based on the cell's
implementation.
`inputs` is the input tensor to the RNN layer, which should
contain the batch size as its shape[0], and also dtype. Note that
the shape[0] might be `None` during the graph construction. Either
the `inputs` or the pair of `batch_size` and `dtype` are provided.
`batch_size` is a scalar tensor that represents the batch size
of the inputs. `dtype` is `tf.DType` that represents the dtype of
the inputs.
For backward compatibility, if this method is not implemented
by the cell, the RNN layer will create a zero filled tensor with the
size of [batch_size, cell.state_size].
In the case that `cell` is a list of RNN cell instances, the cells
will be stacked on top of each other in the RNN, resulting in an
efficient stacked RNN.
return_sequences: Boolean (default `False`). Whether to return the last
output in the output sequence, or the full sequence.
return_state: Boolean (default `False`). Whether to return the last state
in addition to the output.
go_backwards: Boolean (default `False`).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default `False`). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default `False`).
If True, the network will be unrolled, else a symbolic loop will be
used. Unrolling can speed-up a RNN, although it tends to be more
memory-intensive. Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean (default `False`).
Whether the output should use zeros for the masked timesteps. Note that
this field is only used when `return_sequences` is True and mask is
        provided. It can be useful if you want to reuse the raw output sequence
        of the RNN without interference from the masked timesteps, e.g., when
        merging bidirectional RNNs.
Call arguments:
inputs: Input tensor.
mask: Binary tensor of shape `[batch_size, timesteps]` indicating whether
a given timestep should be masked. An individual `True` entry indicates
that the corresponding timestep should be utilized, while a `False`
entry indicates that the corresponding timestep should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is for use with cells that use dropout.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
constants: List of constant tensors to be passed to the cell at each
timestep.
Input shape:
N-D tensor with shape `[batch_size, timesteps, ...]` or
`[timesteps, batch_size, ...]` when time_major is True.
Output shape:
- If `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `[batch_size, state_size]`, where `state_size` could
be a high dimension tensor shape.
- If `return_sequences`: N-D tensor with shape
`[batch_size, timesteps, output_size]`, where `output_size` could
be a high dimension tensor shape, or
`[timesteps, batch_size, output_size]` when `time_major` is True.
- Else, N-D tensor with shape `[batch_size, output_size]`, where
`output_size` could be a high dimension tensor shape.
Masking:
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [tf.keras.layers.Embedding] layer with the `mask_zero` parameter
set to `True`.
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- Specify `stateful=True` in the layer constructor.
- Specify a fixed batch size for your model, by passing
If sequential model:
`batch_input_shape=(...)` to the first layer in your model.
Else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- Specify `shuffle=False` when calling `fit()`.
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
Note on passing external constants to RNNs:
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
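      For example, such a cell would simply accept the extra keyword in its
      call signature, e.g. `def call(self, inputs, states, constants):`
      (an illustrative sketch, not a complete cell implementation).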
Examples:
```python
from tf_keras.layers import RNN
from tf_keras import backend
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = backend.dot(inputs, self.kernel)
output = h + backend.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
def __init__(
self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
**kwargs,
):
if isinstance(cell, (list, tuple)):
cell = StackedRNNCells(cell)
if "call" not in dir(cell):
raise ValueError(
"Argument `cell` should have a `call` method. "
f"The RNN was passed: cell={cell}"
)
if "state_size" not in dir(cell):
raise ValueError(
"The RNN cell should have a `state_size` attribute "
"(tuple of integers, one integer per RNN state). "
f"Received: cell={cell}"
)
# If True, the output for masked timestep will be zeros, whereas in the
# False case, output from previous timestep is returned for masked
# timestep.
self.zero_output_for_mask = kwargs.pop("zero_output_for_mask", False)
if "input_shape" not in kwargs and (
"input_dim" in kwargs or "input_length" in kwargs
):
input_shape = (
kwargs.pop("input_length", None),
kwargs.pop("input_dim", None),
)
kwargs["input_shape"] = input_shape
super().__init__(**kwargs)
self.cell = cell
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.time_major = time_major
self.supports_masking = True
        # The input shape is not known yet; it could have nested tensor
        # inputs, in which case the input spec will be a list of specs for the
        # nested inputs, with the same structure as the input.
self.input_spec = None
self.state_spec = None
self._states = None
self.constants_spec = None
self._num_constants = 0
if stateful:
if tf.distribute.has_strategy():
raise ValueError(
"Stateful RNNs (created with `stateful=True`) "
"are not yet supported with tf.distribute.Strategy."
)
@property
def _use_input_spec_as_call_signature(self):
if self.unroll:
# When the RNN layer is unrolled, the time step shape cannot be
# unknown. The input spec does not define the time step (because
# this layer can be called with any time step value, as long as it
# is not None), so it cannot be used as the call function signature
# when saving to SavedModel.
return False
return super()._use_input_spec_as_call_signature
@property
def states(self):
if self._states is None:
state = tf.nest.map_structure(lambda _: None, self.cell.state_size)
return state if tf.nest.is_nested(self.cell.state_size) else [state]
return self._states
@states.setter
# Automatic tracking catches "self._states" which adds an extra weight and
# breaks HDF5 checkpoints.
@tf.__internal__.tracking.no_automatic_dependency_tracking
def states(self, states):
self._states = states
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from
# numpy inputs.
try:
input_shape = tf.TensorShape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
input_shape = tf.nest.flatten(input_shape)[0]
batch = input_shape[0]
time_step = input_shape[1]
if self.time_major:
batch, time_step = time_step, batch
if rnn_utils.is_multiple_state(self.cell.state_size):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
def _get_output_shape(flat_output_size):
output_dim = tf.TensorShape(flat_output_size).as_list()
if self.return_sequences:
if self.time_major:
output_shape = tf.TensorShape(
[time_step, batch] + output_dim
)
else:
output_shape = tf.TensorShape(
[batch, time_step] + output_dim
)
else:
output_shape = tf.TensorShape([batch] + output_dim)
return output_shape
if getattr(self.cell, "output_size", None) is not None:
# cell.output_size could be nested structure.
output_shape = tf.nest.flatten(
tf.nest.map_structure(_get_output_shape, self.cell.output_size)
)
output_shape = (
output_shape[0] if len(output_shape) == 1 else output_shape
)
else:
# Note that state_size[0] could be a tensor_shape or int.
output_shape = _get_output_shape(state_size[0])
if self.return_state:
def _get_state_shape(flat_state):
state_shape = [batch] + tf.TensorShape(flat_state).as_list()
return tf.TensorShape(state_shape)
state_shape = tf.nest.map_structure(_get_state_shape, state_size)
return generic_utils.to_list(output_shape) + tf.nest.flatten(
state_shape
)
else:
return output_shape
def compute_mask(self, inputs, mask):
# Time step masks must be the same for each input.
# This is because the mask for an RNN is of size [batch, time_steps, 1],
# and specifies which time steps should be skipped, and a time step
# must be skipped for all inputs.
# TODO(scottzhu): Should we accept multiple different masks?
mask = tf.nest.flatten(mask)[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
        # The input_shape here could be a nested structure.
        # Convert the TensorShape(s) to plain shape tuples here. The input
        # could be a single tensor, or a nested structure of tensors.
def get_input_spec(shape):
"""Convert input shape to InputSpec."""
if isinstance(shape, tf.TensorShape):
input_spec_shape = shape.as_list()
else:
input_spec_shape = list(shape)
batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)
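            # Relax the batch dimension (unless the layer is stateful, which
            # requires a fixed batch size) and the timestep dimension so that
            # variable batch sizes and sequence lengths are accepted.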
if not self.stateful:
input_spec_shape[batch_index] = None
input_spec_shape[time_step_index] = None
return InputSpec(shape=tuple(input_spec_shape))
def get_step_input_shape(shape):
if isinstance(shape, tf.TensorShape):
shape = tuple(shape.as_list())
# remove the timestep from the input_shape
return shape[1:] if self.time_major else (shape[0],) + shape[2:]
def get_state_spec(shape):
state_spec_shape = tf.TensorShape(shape).as_list()
# append batch dim
state_spec_shape = [None] + state_spec_shape
return InputSpec(shape=tuple(state_spec_shape))
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from
# numpy inputs.
try:
input_shape = tf.TensorShape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
pass
if not tf.nest.is_nested(input_shape):
            # This indicates that there is only one input.
if self.input_spec is not None:
self.input_spec[0] = get_input_spec(input_shape)
else:
self.input_spec = [get_input_spec(input_shape)]
step_input_shape = get_step_input_shape(input_shape)
else:
if self.input_spec is not None:
self.input_spec[0] = tf.nest.map_structure(
get_input_spec, input_shape
)
else:
self.input_spec = generic_utils.to_list(
tf.nest.map_structure(get_input_spec, input_shape)
)
step_input_shape = tf.nest.map_structure(
get_step_input_shape, input_shape
)
# allow cell (if layer) to build before we set or validate state_spec.
if isinstance(self.cell, base_layer.Layer) and not self.cell.built:
with backend.name_scope(self.cell.name):
self.cell.build(step_input_shape)
self.cell.built = True
# set or validate state_spec
if rnn_utils.is_multiple_state(self.cell.state_size):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
self._validate_state_spec(state_size, self.state_spec)
else:
if tf.nest.is_nested(state_size):
self.state_spec = tf.nest.map_structure(
get_state_spec, state_size
)
else:
self.state_spec = [
InputSpec(shape=[None] + tf.TensorShape(dim).as_list())
for dim in state_size
]
# ensure the generated state_spec is correct.
self._validate_state_spec(state_size, self.state_spec)
if self.stateful:
self.reset_states()
super().build(input_shape)
@staticmethod
def _validate_state_spec(cell_state_sizes, init_state_specs):
"""Validate the state spec between the initial_state and the state_size.
Args:
cell_state_sizes: list, the `state_size` attribute from the cell.
init_state_specs: list, the `state_spec` from the initial_state that
is passed in `call()`.
Raises:
ValueError: When initial state spec is not compatible with the state
size.
"""
validation_error = ValueError(
"An `initial_state` was passed that is not compatible with "
"`cell.state_size`. Received `state_spec`={}; "
"however `cell.state_size` is "
"{}".format(init_state_specs, cell_state_sizes)
)
flat_cell_state_sizes = tf.nest.flatten(cell_state_sizes)
flat_state_specs = tf.nest.flatten(init_state_specs)
if len(flat_cell_state_sizes) != len(flat_state_specs):
raise validation_error
for cell_state_spec, cell_state_size in zip(
flat_state_specs, flat_cell_state_sizes
):
if not tf.TensorShape(
# Ignore the first axis for init_state which is for batch
cell_state_spec.shape[1:]
).is_compatible_with(tf.TensorShape(cell_state_size)):
raise validation_error
@doc_controls.do_not_doc_inheritable
def get_initial_state(self, inputs):
get_initial_state_fn = getattr(self.cell, "get_initial_state", None)
if tf.nest.is_nested(inputs):
            # The inputs are nested sequences. Use the first element in the
            # sequence to get the batch size and dtype.
inputs = tf.nest.flatten(inputs)[0]
input_shape = tf.shape(inputs)
batch_size = input_shape[1] if self.time_major else input_shape[0]
dtype = inputs.dtype
if get_initial_state_fn:
init_state = get_initial_state_fn(
inputs=None, batch_size=batch_size, dtype=dtype
)
else:
init_state = rnn_utils.generate_zero_filled_state(
batch_size, self.cell.state_size, dtype
)
# TF-Keras RNN expect the states in a list, even if it's a single state
# tensor.
if not tf.nest.is_nested(init_state):
init_state = [init_state]
        # Force the state to be a list in case it is a namedtuple, e.g.
        # LSTMStateTuple.
return list(init_state)
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = rnn_utils.standardize_args(
inputs, initial_state, constants, self._num_constants
)
if initial_state is None and constants is None:
return super().__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
additional_inputs += initial_state
self.state_spec = tf.nest.map_structure(
lambda s: InputSpec(shape=backend.int_shape(s)), initial_state
)
additional_specs += self.state_spec
if constants is not None:
additional_inputs += constants
self.constants_spec = [
InputSpec(shape=backend.int_shape(constant))
for constant in constants
]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# additional_inputs can be empty if initial_state or constants are
# provided but empty (e.g. the cell is stateless).
flat_additional_inputs = tf.nest.flatten(additional_inputs)
is_keras_tensor = (
backend.is_keras_tensor(flat_additional_inputs[0])
if flat_additional_inputs
else True
)
for tensor in flat_additional_inputs:
if backend.is_keras_tensor(tensor) != is_keras_tensor:
raise ValueError(
"The initial state or constants of an RNN layer cannot be "
"specified via a mix of TF-Keras tensors and non-Keras "
'tensors (a "Keras tensor" is a tensor that was returned '
"by a TF-Keras layer or by `Input` during Functional "
"model construction). Received: "
f"initial_state={initial_state}, constants={constants}"
)
if is_keras_tensor:
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
if self.built:
# Keep the input_spec since it has been populated in build()
# method.
full_input_spec = self.input_spec + additional_specs
else:
# The original input_spec is None since there could be a nested
# tensor input. Update the input_spec to match the inputs.
full_input_spec = (
generic_utils.to_list(
tf.nest.map_structure(lambda _: None, inputs)
)
+ additional_specs
)
# Perform the call with temporarily replaced input_spec
self.input_spec = full_input_spec
output = super().__call__(full_input, **kwargs)
            # Remove the additional_specs from the input spec and keep the
            # rest. It is important to keep them since the input spec was
            # populated by build(), and will be reused in the stateful=True
            # case.
self.input_spec = self.input_spec[: -len(additional_specs)]
return output
else:
if initial_state is not None:
kwargs["initial_state"] = initial_state
if constants is not None:
kwargs["constants"] = constants
return super().__call__(inputs, **kwargs)
def call(
self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None,
):
# The input should be dense, padded with zeros. If a ragged input is fed
# into the layer, it is padded and the row lengths are used for masking.
inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)
is_ragged_input = row_lengths is not None
self._validate_args_if_ragged(is_ragged_input, mask)
inputs, initial_state, constants = self._process_inputs(
inputs, initial_state, constants
)
self._maybe_reset_cell_dropout_mask(self.cell)
if isinstance(self.cell, StackedRNNCells):
for cell in self.cell.cells:
self._maybe_reset_cell_dropout_mask(cell)
if mask is not None:
# Time step masks must be the same for each input.
# TODO(scottzhu): Should we accept multiple different masks?
mask = tf.nest.flatten(mask)[0]
if tf.nest.is_nested(inputs):
# In the case of nested input, use the first element for shape
# check.
input_shape = backend.int_shape(tf.nest.flatten(inputs)[0])
else:
input_shape = backend.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if self.unroll and timesteps is None:
raise ValueError(
"Cannot unroll a RNN if the "
"time dimension is undefined. \n"
"- If using a Sequential model, "
"specify the time dimension by passing "
"an `input_shape` or `batch_input_shape` "
"argument to your first layer. If your "
"first layer is an Embedding, you can "
"also use the `input_length` argument.\n"
"- If using the functional API, specify "
"the time dimension by passing a `shape` "
"or `batch_shape` argument to your Input layer."
)
kwargs = {}
if generic_utils.has_arg(self.cell.call, "training"):
kwargs["training"] = training
# TF RNN cells expect single tensor as state instead of list wrapped
# tensor.
is_tf_rnn_cell = getattr(self.cell, "_is_tf_rnn_cell", None) is not None
# Use the __call__ function for callable objects, eg layers, so that it
# will have the proper name scopes for the ops, etc.
cell_call_fn = (
self.cell.__call__ if callable(self.cell) else self.cell.call
)
if constants:
if not generic_utils.has_arg(self.cell.call, "constants"):
raise ValueError(
f"RNN cell {self.cell} does not support constants. "
f"Received: constants={constants}"
)
def step(inputs, states):
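                # The constants are appended to the state list by
                # `backend.rnn`; split them back out before calling the cell.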
constants = states[-self._num_constants :]
states = states[: -self._num_constants]
states = (
states[0] if len(states) == 1 and is_tf_rnn_cell else states
)
output, new_states = cell_call_fn(
inputs, states, constants=constants, **kwargs
)
if not tf.nest.is_nested(new_states):
new_states = [new_states]
return output, new_states
else:
def step(inputs, states):
states = (
states[0] if len(states) == 1 and is_tf_rnn_cell else states
)
output, new_states = cell_call_fn(inputs, states, **kwargs)
if not tf.nest.is_nested(new_states):
new_states = [new_states]
return output, new_states
last_output, outputs, states = backend.rnn(
step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=row_lengths if row_lengths is not None else timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask,
return_all_outputs=self.return_sequences,
)
if self.stateful:
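            # Persist the final states into the layer's state variables so
            # that the next batch starts from them.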
updates = [
tf.compat.v1.assign(
self_state, tf.cast(state, self_state.dtype)
)
for self_state, state in zip(
tf.nest.flatten(self.states), tf.nest.flatten(states)
)
]
self.add_update(updates)
if self.return_sequences:
output = backend.maybe_convert_to_ragged(
is_ragged_input,
outputs,
row_lengths,
go_backwards=self.go_backwards,
)
else:
output = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return generic_utils.to_list(output) + states
else:
return output
def _process_inputs(self, inputs, initial_state, constants):
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, collections.abc.Sequence) and not isinstance(
inputs, tuple
):
# get initial_state from full input spec
            # as they could be copied to multiple GPUs.
if not self._num_constants:
initial_state = inputs[1:]
else:
initial_state = inputs[1 : -self._num_constants]
constants = inputs[-self._num_constants :]
if len(initial_state) == 0:
initial_state = None
inputs = inputs[0]
if self.stateful:
if initial_state is not None:
                # When the layer is stateful and initial_state is provided,
                # check if the recorded state is the same as the default value
                # (zeros). Use the recorded state if it is not.
non_zero_count = tf.add_n(
[
tf.math.count_nonzero(s)
for s in tf.nest.flatten(self.states)
]
)
# Set strict = True to keep the original structure of the state.
initial_state = tf.compat.v1.cond(
non_zero_count > 0,
true_fn=lambda: self.states,
false_fn=lambda: initial_state,
strict=True,
)
else:
initial_state = self.states
initial_state = tf.nest.map_structure(
                # When the layer has an inferred dtype, use the dtype from the
                # cell.
lambda v: tf.cast(
v, self.compute_dtype or self.cell.compute_dtype
),
initial_state,
)
elif initial_state is None:
initial_state = self.get_initial_state(inputs)
if len(initial_state) != len(self.states):
raise ValueError(
f"Layer has {len(self.states)} "
f"states but was passed {len(initial_state)} initial "
f"states. Received: initial_state={initial_state}"
)
return inputs, initial_state, constants
def _validate_args_if_ragged(self, is_ragged_input, mask):
if not is_ragged_input:
return
if mask is not None:
raise ValueError(
f"The mask that was passed in was {mask}, which "
"cannot be applied to RaggedTensor inputs. Please "
"make sure that there is no mask injected by upstream "
"layers."
)
if self.unroll:
raise ValueError(
"The input received contains RaggedTensors and does "
"not support unrolling. Disable unrolling by passing "
"`unroll=False` in the RNN Layer constructor."
)
def _maybe_reset_cell_dropout_mask(self, cell):
if isinstance(cell, DropoutRNNCellMixin):
cell.reset_dropout_mask()
cell.reset_recurrent_dropout_mask()
def reset_states(self, states=None):
"""Reset the recorded states for the stateful RNN layer.
Can only be used when RNN layer is constructed with `stateful` = `True`.
Args:
          states: Numpy arrays that contain the values for the initial state,
            which will be fed to the cell at the first time step. When the
            value is None, a zero-filled numpy array will be created based on
            the cell state size.
Raises:
AttributeError: When the RNN layer is not stateful.
ValueError: When the batch size of the RNN layer is unknown.
ValueError: When the input numpy array is not compatible with the RNN
layer state, either size wise or dtype wise.
"""
if not self.stateful:
raise AttributeError("Layer must be stateful.")
spec_shape = None
if self.input_spec is not None:
spec_shape = tf.nest.flatten(self.input_spec[0])[0].shape
if spec_shape is None:
            # It is possible for the spec shape to be None, e.g. when
            # constructing an RNN with a custom cell, or for standard RNN
            # layers (LSTM/GRU) where we only know the input is 3-dimensional,
            # but not its full shape spec, before build().
batch_size = None
else:
batch_size = spec_shape[1] if self.time_major else spec_shape[0]
if not batch_size:
raise ValueError(
"If a RNN is stateful, it needs to know "
"its batch size. Specify the batch size "
"of your input tensors: \n"
"- If using a Sequential model, "
"specify the batch size by passing "
"a `batch_input_shape` "
"argument to your first layer.\n"
"- If using the functional API, specify "
"the batch size by passing a "
"`batch_shape` argument to your Input layer."
)
# initialize state if None
if tf.nest.flatten(self.states)[0] is None:
if getattr(self.cell, "get_initial_state", None):
flat_init_state_values = tf.nest.flatten(
self.cell.get_initial_state(
inputs=None,
batch_size=batch_size,
# Use variable_dtype instead of compute_dtype, since the
# state is stored in a variable
dtype=self.variable_dtype or backend.floatx(),
)
)
else:
flat_init_state_values = tf.nest.flatten(
rnn_utils.generate_zero_filled_state(
batch_size,
self.cell.state_size,
self.variable_dtype or backend.floatx(),
)
)
flat_states_variables = tf.nest.map_structure(
lambda v: backend.variable(v, v.dtype), flat_init_state_values
)
self.states = tf.nest.pack_sequence_as(
self.cell.state_size, flat_states_variables
)
if not tf.nest.is_nested(self.states):
self.states = [self.states]
elif states is None:
for state, size in zip(
tf.nest.flatten(self.states),
tf.nest.flatten(self.cell.state_size),
):
backend.set_value(
state,
np.zeros([batch_size] + tf.TensorShape(size).as_list()),
)
else:
flat_states = tf.nest.flatten(self.states)
flat_input_states = tf.nest.flatten(states)
if len(flat_input_states) != len(flat_states):
raise ValueError(
f"Layer {self.name} expects {len(flat_states)} "
f"states, but it received {len(flat_input_states)} "
f"state values. States received: {states}"
)
set_value_tuples = []
for i, (value, state) in enumerate(
zip(flat_input_states, flat_states)
):
if value.shape != state.shape:
raise ValueError(
f"State {i} is incompatible with layer {self.name}: "
f"expected shape={(batch_size, state)} "
f"but found shape={value.shape}"
)
set_value_tuples.append((state, value))
backend.batch_set_value(set_value_tuples)
def get_config(self):
config = {
"return_sequences": self.return_sequences,
"return_state": self.return_state,
"go_backwards": self.go_backwards,
"stateful": self.stateful,
"unroll": self.unroll,
"time_major": self.time_major,
}
if self._num_constants:
config["num_constants"] = self._num_constants
if self.zero_output_for_mask:
config["zero_output_for_mask"] = self.zero_output_for_mask
config["cell"] = serialization_lib.serialize_keras_object(self.cell)
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tf_keras.layers import deserialize as deserialize_layer
cell = deserialize_layer(
config.pop("cell"), custom_objects=custom_objects
)
num_constants = config.pop("num_constants", 0)
layer = cls(cell, **config)
layer._num_constants = num_constants
return layer
@property
def _trackable_saved_model_saver(self):
return layer_serialization.RNNSavedModelSaver(self)
| tf-keras/tf_keras/layers/rnn/base_rnn.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/base_rnn.py",
"repo_id": "tf-keras",
"token_count": 19125
} | 207 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gated Recurrent Unit layer."""
import uuid
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import backend
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.engine import base_layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.rnn import gru_lstm_utils
from tf_keras.layers.rnn import rnn_utils
from tf_keras.layers.rnn.base_rnn import RNN
from tf_keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
RECURRENT_DROPOUT_WARNING_MSG = (
"RNN `implementation=2` is not supported when `recurrent_dropout` is set. "
"Using `implementation=1`."
)
@keras_export("keras.layers.GRUCell", v1=[])
class GRUCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
"""Cell class for the GRU layer.
See
[the TF-Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
This class processes one step within the whole time sequence input, whereas
    `tf.keras.layers.GRU` processes the whole sequence.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4))
>>> output = rnn(inputs)
>>> print(output.shape)
(32, 4)
>>> rnn = tf.keras.layers.RNN(
... tf.keras.layers.GRUCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_sequence_output, final_state = rnn(inputs)
>>> print(whole_sequence_output.shape)
(32, 10, 4)
>>> print(final_state.shape)
(32, 4)
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and cuDNN compatible).
Call arguments:
inputs: A 2D tensor, with shape of `[batch, feature]`.
states: A 2D tensor with shape of `[batch, units]`, which is the state
        from the previous time step. For timestep 0, the initial state provided
        by the user will be fed to the cell.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
reset_after=True,
**kwargs,
):
if units <= 0:
raise ValueError(
"Received an invalid value for argument `units`, "
f"expected a positive integer, got {units}."
)
# By default use cached variable under v2 mode, see b/143699808.
if tf.compat.v1.executing_eagerly_outside_functions():
self._enable_caching_device = kwargs.pop(
"enable_caching_device", True
)
else:
self._enable_caching_device = kwargs.pop(
"enable_caching_device", False
)
super().__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1.0, max(0.0, dropout))
self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
implementation = kwargs.pop("implementation", 2)
if self.recurrent_dropout != 0 and implementation != 1:
logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
self.implementation = 1
else:
self.implementation = implementation
self.reset_after = reset_after
self.state_size = self.units
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
super().build(input_shape)
input_dim = input_shape[-1]
default_caching_device = rnn_utils.caching_device(self)
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name="kernel",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
caching_device=default_caching_device,
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name="recurrent_kernel",
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
caching_device=default_caching_device,
)
if self.use_bias:
if not self.reset_after:
bias_shape = (3 * self.units,)
else:
# separate biases for input and recurrent kernels
# Note: the shape is intentionally different from CuDNNGRU
# biases `(2 * 3 * self.units,)`, so that we can distinguish the
# classes when loading and converting saved weights.
bias_shape = (2, 3 * self.units)
self.bias = self.add_weight(
shape=bias_shape,
name="bias",
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
caching_device=default_caching_device,
)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = (
states[0] if tf.nest.is_nested(states) else states
) # previous memory
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=3
)
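        # Both implementations below compute the standard GRU update:
        #   z  = recurrent_activation(x_z + recurrent_z)   (update gate)
        #   r  = recurrent_activation(x_r + recurrent_r)   (reset gate)
        #   hh = activation(x_h + recurrent_h)             (candidate state)
        #   h  = z * h_tm1 + (1 - z) * hh
        # where the reset gate `r` scales the recurrent term of `hh` either
        # after the matrix multiplication (`reset_after=True`) or before it.
        # Implementation 1 projects each gate separately; implementation 2
        # fuses the projections into larger matmuls.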
if self.use_bias:
if not self.reset_after:
input_bias, recurrent_bias = self.bias, None
else:
input_bias, recurrent_bias = tf.unstack(self.bias)
if self.implementation == 1:
if 0.0 < self.dropout < 1.0:
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = backend.dot(inputs_z, self.kernel[:, : self.units])
x_r = backend.dot(
inputs_r, self.kernel[:, self.units : self.units * 2]
)
x_h = backend.dot(inputs_h, self.kernel[:, self.units * 2 :])
if self.use_bias:
x_z = backend.bias_add(x_z, input_bias[: self.units])
x_r = backend.bias_add(
x_r, input_bias[self.units : self.units * 2]
)
x_h = backend.bias_add(x_h, input_bias[self.units * 2 :])
if 0.0 < self.recurrent_dropout < 1.0:
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
recurrent_z = backend.dot(
h_tm1_z, self.recurrent_kernel[:, : self.units]
)
recurrent_r = backend.dot(
h_tm1_r, self.recurrent_kernel[:, self.units : self.units * 2]
)
if self.reset_after and self.use_bias:
recurrent_z = backend.bias_add(
recurrent_z, recurrent_bias[: self.units]
)
recurrent_r = backend.bias_add(
recurrent_r, recurrent_bias[self.units : self.units * 2]
)
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
# reset gate applied after/before matrix multiplication
if self.reset_after:
recurrent_h = backend.dot(
h_tm1_h, self.recurrent_kernel[:, self.units * 2 :]
)
if self.use_bias:
recurrent_h = backend.bias_add(
recurrent_h, recurrent_bias[self.units * 2 :]
)
recurrent_h = r * recurrent_h
else:
recurrent_h = backend.dot(
r * h_tm1_h, self.recurrent_kernel[:, self.units * 2 :]
)
hh = self.activation(x_h + recurrent_h)
else:
if 0.0 < self.dropout < 1.0:
inputs = inputs * dp_mask[0]
# inputs projected by all gate matrices at once
matrix_x = backend.dot(inputs, self.kernel)
if self.use_bias:
# biases: bias_z_i, bias_r_i, bias_h_i
matrix_x = backend.bias_add(matrix_x, input_bias)
x_z, x_r, x_h = tf.split(matrix_x, 3, axis=-1)
if self.reset_after:
# hidden state projected by all gate matrices at once
matrix_inner = backend.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
matrix_inner = backend.bias_add(
matrix_inner, recurrent_bias
)
else:
# hidden state projected separately for update/reset and new
matrix_inner = backend.dot(
h_tm1, self.recurrent_kernel[:, : 2 * self.units]
)
recurrent_z, recurrent_r, recurrent_h = tf.split(
matrix_inner, [self.units, self.units, -1], axis=-1
)
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
if self.reset_after:
recurrent_h = r * recurrent_h
else:
recurrent_h = backend.dot(
r * h_tm1, self.recurrent_kernel[:, 2 * self.units :]
)
hh = self.activation(x_h + recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
new_state = [h] if tf.nest.is_nested(states) else h
return h, new_state
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
"implementation": self.implementation,
"reset_after": self.reset_after,
}
config.update(rnn_utils.config_for_enable_caching_device(self))
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return rnn_utils.generate_zero_filled_state_for_cell(
self, inputs, batch_size, dtype
)
@keras_export("keras.layers.GRU", v1=[])
class GRU(DropoutRNNCellMixin, RNN, base_layer.BaseRandomLayer):
"""Gated Recurrent Unit - Cho et al. 2014.
See
[the TF-Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or pure-TensorFlow)
to maximize the performance. If a GPU is available and all
the arguments to the layer meet the requirement of the cuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation.
The requirements to use the cuDNN implementation are:
1. `activation` == `tanh`
2. `recurrent_activation` == `sigmoid`
3. `recurrent_dropout` == 0
4. `unroll` is `False`
5. `use_bias` is `True`
6. `reset_after` is `True`
7. Inputs, if use masking, are strictly right-padded.
8. Eager execution is enabled in the outermost context.
There are two variants of the GRU implementation. The default one is based
on [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to
hidden state before matrix multiplication. The other one is based on
[original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.
The second variant is compatible with CuDNNGRU (GPU-only) and allows
inference on CPU. Thus it has separate biases for `kernel` and
`recurrent_kernel`. To use this variant, set `reset_after=True` and
`recurrent_activation='sigmoid'`.
For example:
>>> inputs = tf.random.normal([32, 10, 8])
>>> gru = tf.keras.layers.GRU(4)
>>> output = gru(inputs)
>>> print(output.shape)
(32, 4)
>>> gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True)
>>> whole_sequence_output, final_state = gru(inputs)
>>> print(whole_sequence_output.shape)
(32, 10, 4)
>>> print(final_state.shape)
(32, 4)
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: sigmoid (`sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition to the
output. Default: `False`.
go_backwards: Boolean (default `False`).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`[timesteps, batch, feature]`, whereas in the False case, it will be
`[batch, timesteps, feature]`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and cuDNN compatible).
Call arguments:
inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
mask: Binary tensor of shape `[samples, timesteps]` indicating whether
a given timestep should be masked (optional).
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the
corresponding timestep should be ignored. Defaults to `None`.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used (optional). Defaults to `None`.
initial_state: List of initial state tensors to be passed to the first
call of the cell (optional, `None` causes creation
of zero-filled initial state tensors). Defaults to `None`.
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
reset_after=True,
**kwargs,
):
# return_runtime is a flag for testing, which shows the real backend
# implementation chosen by grappler in graph mode.
self._return_runtime = kwargs.pop("return_runtime", False)
implementation = kwargs.pop("implementation", 2)
if implementation == 0:
logging.warning(
"`implementation=0` has been deprecated, "
"and now defaults to `implementation=2`."
"Please update your layer call."
)
if "enable_caching_device" in kwargs:
cell_kwargs = {
"enable_caching_device": kwargs.pop("enable_caching_device")
}
else:
cell_kwargs = {}
cell = GRUCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
reset_after=reset_after,
dtype=kwargs.get("dtype"),
trainable=kwargs.get("trainable", True),
name="gru_cell",
**cell_kwargs,
)
super().__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
time_major=time_major,
**kwargs,
)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
# GPU kernel uses following setting by default and not configurable.
self._could_use_gpu_kernel = (
self.activation in (activations.tanh, tf.tanh)
and self.recurrent_activation in (activations.sigmoid, tf.sigmoid)
and recurrent_dropout == 0
and not unroll
and use_bias
and reset_after
and tf.compat.v1.executing_eagerly_outside_functions()
)
if tf.config.list_logical_devices("GPU"):
            # Only show the message when there is a GPU available; the user
            # will not care about cuDNN if there isn't any GPU.
if self._could_use_gpu_kernel:
logging.debug(gru_lstm_utils.CUDNN_AVAILABLE_MSG % self.name)
else:
logging.warning(
gru_lstm_utils.CUDNN_NOT_AVAILABLE_MSG % self.name
)
if gru_lstm_utils.use_new_gru_lstm_impl():
self._defun_wrapper = gru_lstm_utils.DefunWrapper(
time_major, go_backwards, "gru"
)
def call(self, inputs, mask=None, training=None, initial_state=None):
# The input should be dense, padded with zeros. If a ragged input is fed
# into the layer, it is padded and the row lengths are used for masking.
inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)
is_ragged_input = row_lengths is not None
self._validate_args_if_ragged(is_ragged_input, mask)
# GRU does not support constants. Ignore it during process.
inputs, initial_state, _ = self._process_inputs(
inputs, initial_state, None
)
if isinstance(mask, list):
mask = mask[0]
input_shape = backend.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if not self._could_use_gpu_kernel:
kwargs = {"training": training}
self._maybe_reset_cell_dropout_mask(self.cell)
def step(cell_inputs, cell_states):
return self.cell(cell_inputs, cell_states, **kwargs)
last_output, outputs, states = backend.rnn(
step,
inputs,
initial_state,
constants=None,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=row_lengths
if row_lengths is not None
else timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask,
return_all_outputs=self.return_sequences,
)
            # This is a dummy tensor for testing purposes.
runtime = gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_UNKNOWN)
else:
last_output, outputs, runtime, states = self._defun_gru_call(
inputs, initial_state, training, mask, row_lengths
)
if self.stateful:
updates = [
tf.compat.v1.assign(
self.states[0], tf.cast(states[0], self.states[0].dtype)
)
]
self.add_update(updates)
if self.return_sequences:
output = backend.maybe_convert_to_ragged(
is_ragged_input,
outputs,
row_lengths,
go_backwards=self.go_backwards,
)
else:
output = last_output
if self.return_state:
return [output] + list(states)
elif self._return_runtime:
return output, runtime
else:
return output
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
@property
def reset_after(self):
return self.cell.reset_after
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
"implementation": self.implementation,
"reset_after": self.reset_after,
}
config.update(rnn_utils.config_for_enable_caching_device(self.cell))
base_config = super().get_config()
del base_config["cell"]
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if "implementation" in config and config["implementation"] == 0:
config["implementation"] = 1
return cls(**config)
def _defun_gru_call(
self, inputs, initial_state, training, mask, sequence_lengths
):
        # Use the new defun approach for backend implementation swap. Note
        # that different implementations need to have the same function
        # signature, e.g. the tensor parameters need to have the same shapes
        # and dtypes.
self.reset_dropout_mask()
dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
if dropout_mask is not None:
inputs = inputs * dropout_mask[0]
if gru_lstm_utils.use_new_gru_lstm_impl():
gru_kwargs = {
"inputs": inputs,
"init_h": gru_lstm_utils.read_variable_value(initial_state[0]),
"kernel": gru_lstm_utils.read_variable_value(self.cell.kernel),
"recurrent_kernel": gru_lstm_utils.read_variable_value(
self.cell.recurrent_kernel
),
"bias": gru_lstm_utils.read_variable_value(self.cell.bias),
"mask": mask,
"time_major": self.time_major,
"go_backwards": self.go_backwards,
"sequence_lengths": sequence_lengths,
"zero_output_for_mask": self.zero_output_for_mask,
}
(
last_output,
outputs,
new_h,
runtime,
) = self._defun_wrapper.defun_layer(**gru_kwargs)
else:
gpu_gru_kwargs = {
"inputs": inputs,
"init_h": gru_lstm_utils.read_variable_value(initial_state[0]),
"kernel": gru_lstm_utils.read_variable_value(self.cell.kernel),
"recurrent_kernel": gru_lstm_utils.read_variable_value(
self.cell.recurrent_kernel
),
"bias": gru_lstm_utils.read_variable_value(self.cell.bias),
"mask": mask,
"time_major": self.time_major,
"go_backwards": self.go_backwards,
"sequence_lengths": sequence_lengths,
"return_sequences": self.return_sequences,
}
normal_gru_kwargs = gpu_gru_kwargs.copy()
normal_gru_kwargs.update(
{
"zero_output_for_mask": self.zero_output_for_mask,
}
)
if tf.executing_eagerly():
device_type = gru_lstm_utils.get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is
# available.
(
device_type == gru_lstm_utils.GPU_DEVICE_NAME
or (
device_type is None
and tf.config.list_logical_devices("GPU")
)
)
and (
gru_lstm_utils.is_cudnn_supported_inputs(
mask, self.time_major, sequence_lengths
)
)
)
                # Under eager context, check the device placement and prefer
                # the GPU implementation when a GPU is available.
if can_use_gpu:
last_output, outputs, new_h, runtime = gpu_gru(
**gpu_gru_kwargs
)
else:
last_output, outputs, new_h, runtime = standard_gru(
**normal_gru_kwargs
)
else:
(
last_output,
outputs,
new_h,
runtime,
) = gru_with_backend_selection(**normal_gru_kwargs)
states = [new_h]
return last_output, outputs, runtime, states
def standard_gru(
inputs,
init_h,
kernel,
recurrent_kernel,
bias,
mask,
time_major,
go_backwards,
sequence_lengths,
zero_output_for_mask,
return_sequences,
):
"""GRU with standard kernel implementation.
This implementation can be run on all types of hardware.
    This implementation lifts out all the layer weights and makes them
    function parameters. It has the same number of tensor input params as the
    cuDNN counterpart. The RNN step logic has been simplified, e.g. dropout
    and masking are removed since the cuDNN implementation does not support
    them.
Args:
inputs: Input tensor of GRU layer.
init_h: Initial state tensor for the cell output.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. The bias contains
the combined input_bias and recurrent_bias.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked. An individual `True` entry indicates
that the corresponding timestep should be utilized, while a `False`
entry indicates that the corresponding timestep should be ignored.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable
length input, such as ragged tensors. If the input has a fixed timestep
size, this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
return_sequences: Boolean. If True, return the recurrent outputs for all
timesteps in the sequence. If False, only return the output for the
last timestep (which consumes less memory).
Returns:
last_output: output tensor for the last timestep, which has shape
[batch, units].
outputs:
- If `return_sequences=True`: output tensor for all timesteps,
which has shape [batch, time, units].
- Else, a tensor equal to `last_output` with shape [batch, 1, units]
state_0: the cell output, which has same shape as init_h.
      runtime: Constant string tensor indicating the real runtime hardware.
        This value is for testing purposes only.
"""
input_shape = backend.int_shape(inputs)
timesteps = input_shape[0] if time_major else input_shape[1]
input_bias, recurrent_bias = tf.unstack(bias)
def step(cell_inputs, cell_states):
"""Step function that will be used by TF-Keras RNN backend."""
h_tm1 = cell_states[0]
# inputs projected by all gate matrices at once
matrix_x = backend.dot(cell_inputs, kernel)
matrix_x = backend.bias_add(matrix_x, input_bias)
x_z, x_r, x_h = tf.split(matrix_x, 3, axis=1)
# hidden state projected by all gate matrices at once
matrix_inner = backend.dot(h_tm1, recurrent_kernel)
matrix_inner = backend.bias_add(matrix_inner, recurrent_bias)
recurrent_z, recurrent_r, recurrent_h = tf.split(
matrix_inner, 3, axis=1
)
z = tf.sigmoid(x_z + recurrent_z)
r = tf.sigmoid(x_r + recurrent_r)
hh = tf.tanh(x_h + r * recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
return h, [h]
last_output, outputs, new_states = backend.rnn(
step,
inputs,
[init_h],
constants=None,
unroll=False,
time_major=time_major,
mask=mask,
go_backwards=go_backwards,
input_length=sequence_lengths
if sequence_lengths is not None
else timesteps,
zero_output_for_mask=zero_output_for_mask,
return_all_outputs=return_sequences,
)
return (
last_output,
outputs,
new_states[0],
gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_CPU),
)
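# A minimal NumPy sketch of the single-timestep update performed by the
# `step` function above (reset_after convention: the recurrent bias is added
# before the reset gate is applied). The shapes are assumptions for
# illustration only: x is [batch, features], h_tm1 is [batch, units], kernel
# is [features, 3 * units], recurrent_kernel is [units, 3 * units], and each
# bias is [3 * units].
def _numpy_gru_step_sketch(
    x, h_tm1, kernel, recurrent_kernel, input_bias, recurrent_bias
):
    import numpy as np
    def sigmoid(a):
        return 1.0 / (1.0 + np.exp(-a))
    # Project the inputs and the previous state by all gate matrices at once.
    matrix_x = x @ kernel + input_bias
    matrix_inner = h_tm1 @ recurrent_kernel + recurrent_bias
    x_z, x_r, x_h = np.split(matrix_x, 3, axis=-1)
    rec_z, rec_r, rec_h = np.split(matrix_inner, 3, axis=-1)
    z = sigmoid(x_z + rec_z)  # update gate
    r = sigmoid(x_r + rec_r)  # reset gate
    hh = np.tanh(x_h + r * rec_h)  # candidate state
    return z * h_tm1 + (1 - z) * hh  # new hidden state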
def gpu_gru(
inputs,
init_h,
kernel,
recurrent_kernel,
bias,
mask,
time_major,
go_backwards,
sequence_lengths,
return_sequences,
):
"""GRU with cuDNN implementation which is only available for GPU."""
if mask is not None:
sequence_lengths = gru_lstm_utils.calculate_sequence_by_mask(
mask, time_major
)
if not time_major and sequence_lengths is None:
inputs = tf.transpose(inputs, perm=(1, 0, 2))
seq_axis, batch_axis = (0, 1)
else:
seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
# For init_h, cuDNN expects one more dim of num_layers before or after batch
# dim for time major or batch major inputs respectively
init_h = tf.expand_dims(init_h, axis=seq_axis)
weights = tf.split(kernel, 3, axis=1)
weights += tf.split(recurrent_kernel, 3, axis=1)
    # Note that the bias was initialized as shape (2, 3 * units); flatten it
    # into (6 * units).
bias = tf.split(backend.flatten(bias), 6)
if tf.sysconfig.get_build_info()["is_cuda_build"]:
        # Note that the gate order for cuDNN is different from the canonical
        # format. The canonical format is [z, r, h], whereas cuDNN is
        # [r, z, h]. The swap needs to be done for the kernel,
        # recurrent_kernel, input_bias, and recurrent_bias.
        # z is the update gate weights.
        # r is the reset gate weights.
        # h is the candidate ("new") state weights.
weights[0], weights[1] = weights[1], weights[0]
weights[3], weights[4] = weights[4], weights[3]
bias[0], bias[1] = bias[1], bias[0]
bias[3], bias[4] = bias[4], bias[3]
params = gru_lstm_utils.canonical_to_params(
weights=weights,
biases=bias,
shape=tf.constant([-1]),
transpose_weights=True,
)
if sequence_lengths is not None:
if go_backwards:
# Three reversals are required. E.g.,
            # normal input = [1, 2, 3, 0, 0] # where the 0s need to be masked
# reversed_input_to_cudnn = [3, 2, 1, 0, 0]
# output_from_cudnn = [6, 5, 4, 0, 0]
            # expected_output = [0, 0, 6, 5, 4]
inputs = tf.reverse_sequence(
inputs,
sequence_lengths,
seq_axis=seq_axis,
batch_axis=batch_axis,
)
outputs, h, _, _, _ = tf.raw_ops.CudnnRNNV3(
input=inputs,
input_h=init_h,
input_c=0,
params=params,
is_training=True,
rnn_mode="gru",
sequence_lengths=sequence_lengths,
time_major=time_major,
)
if go_backwards:
outputs = tf.reverse_sequence(
outputs,
sequence_lengths,
seq_axis=seq_axis,
batch_axis=batch_axis,
)
outputs = tf.reverse(outputs, axis=[seq_axis])
else:
if go_backwards:
            # Reverse axis 0 since the input is already converted to time
            # major.
inputs = tf.reverse(inputs, axis=[0])
outputs, h, _, _ = tf.raw_ops.CudnnRNN(
input=inputs,
input_h=init_h,
input_c=0,
params=params,
is_training=True,
rnn_mode="gru",
)
last_output = outputs[-1]
if not time_major and sequence_lengths is None and return_sequences:
outputs = tf.transpose(outputs, perm=[1, 0, 2])
h = tf.squeeze(h, axis=seq_axis)
    # In the case of variable-length input, the cuDNN kernel fills zeros for
    # the output, whereas the default TF-Keras behavior is to carry over the
    # output from t-1, so that in the return_sequences=False case the user
    # gets the final effective output instead of just 0s at the last
    # timestep. To mimic the default TF-Keras behavior, we copy the final h
    # state as the last_output, since it is numerically the same as the output.
if sequence_lengths is not None:
last_output = h
# Match CPU return format
if not return_sequences:
outputs = tf.expand_dims(last_output, axis=0 if time_major else 1)
return (
last_output,
outputs,
h,
gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_GPU),
)
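# A small NumPy sketch of the canonical -> cuDNN gate reordering performed
# above: the canonical TF-Keras layout packs the gates as [z, r, h] while
# cuDNN expects [r, z, h], so the first two blocks of the kernel, the
# recurrent kernel, and both bias halves are swapped. The argument shapes
# mirror the GRU cell weights and are assumptions for illustration only.
def _numpy_cudnn_gate_reorder_sketch(kernel, recurrent_kernel, bias):
    import numpy as np
    weights = np.split(kernel, 3, axis=1)  # canonical [z, r, h]
    weights += np.split(recurrent_kernel, 3, axis=1)
    biases = np.split(np.reshape(bias, [-1]), 6)  # input bias + recurrent bias
    # Swap the z and r blocks to obtain the cuDNN order [r, z, h].
    weights[0], weights[1] = weights[1], weights[0]
    weights[3], weights[4] = weights[4], weights[3]
    biases[0], biases[1] = biases[1], biases[0]
    biases[3], biases[4] = biases[4], biases[3]
    return weights, biases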
def gru_with_backend_selection(
inputs,
init_h,
kernel,
recurrent_kernel,
bias,
mask,
time_major,
go_backwards,
sequence_lengths,
zero_output_for_mask,
return_sequences,
):
"""Call the GRU with optimized backend kernel selection.
    Under the hood, this function will create two TF functions: one with the
    most generic kernel, which can run on all device conditions, and a second
    one with the cuDNN-specific kernel, which can only run on GPU.
    The first function is called with the normal GRU params, while the second
    function is not called but is only registered in the graph. Grappler will
    do the proper graph rewrite and swap in the optimized TF function based
    on the device placement.
Args:
inputs: Input tensor of GRU layer.
init_h: Initial state tensor for the cell output.
kernel: Weights for cell kernel.
recurrent_kernel: Weights for cell recurrent kernel.
bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
      mask: Boolean tensor for masking out the steps within the sequence.
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the
corresponding timestep should be ignored.
time_major: Boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
sequence_lengths: The lengths of all sequences coming from a variable
length input, such as ragged tensors. If the input has a fixed timestep
size, this should be None.
zero_output_for_mask: Boolean, whether to output zero for masked timestep.
return_sequences: Boolean. If True, return the recurrent outputs for all
timesteps in the sequence. If False, only return the output for the
last timestep (which consumes less memory).
Returns:
List of output tensors, same as standard_gru.
"""
params = {
"inputs": inputs,
"init_h": init_h,
"kernel": kernel,
"recurrent_kernel": recurrent_kernel,
"bias": bias,
"mask": mask,
"time_major": time_major,
"go_backwards": go_backwards,
"sequence_lengths": sequence_lengths,
"zero_output_for_mask": zero_output_for_mask,
"return_sequences": return_sequences,
}
def gpu_gru_with_fallback(
inputs,
init_h,
kernel,
recurrent_kernel,
bias,
mask,
time_major,
go_backwards,
sequence_lengths,
zero_output_for_mask,
return_sequences,
):
"""Use cuDNN kernel when mask is none or strictly right padded."""
def cudnn_gru_fn():
return gpu_gru(
inputs=inputs,
init_h=init_h,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
return_sequences=return_sequences,
)
def standard_gru_fn():
return standard_gru(
inputs=inputs,
init_h=init_h,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
mask=mask,
time_major=time_major,
go_backwards=go_backwards,
sequence_lengths=sequence_lengths,
zero_output_for_mask=zero_output_for_mask,
return_sequences=return_sequences,
)
return tf.__internal__.smart_cond.smart_cond(
gru_lstm_utils.is_cudnn_supported_inputs(
mask, time_major, sequence_lengths
),
true_fn=cudnn_gru_fn,
false_fn=standard_gru_fn,
)
if gru_lstm_utils.use_new_gru_lstm_impl():
# Chooses the implementation dynamically based on the running device.
(
last_output,
outputs,
new_h,
runtime,
) = tf.__internal__.execute_fn_for_device(
{
gru_lstm_utils.CPU_DEVICE_NAME: lambda: standard_gru(**params),
gru_lstm_utils.GPU_DEVICE_NAME: lambda: gpu_gru_with_fallback(
**params
),
},
lambda: standard_gru(**params),
)
else:
# Each time a `tf.function` is called, we will give it a unique
# identifiable API name, so that Grappler won't get confused when it
# sees multiple GRU layers added into same graph, and it will be able
# to pair up the different implementations across them.
api_name = "gru_" + str(uuid.uuid4())
supportive_attribute = {
"time_major": time_major,
"go_backwards": go_backwards,
}
defun_standard_gru = gru_lstm_utils.generate_defun_backend(
api_name,
gru_lstm_utils.CPU_DEVICE_NAME,
standard_gru,
supportive_attribute,
)
defun_gpu_gru = gru_lstm_utils.generate_defun_backend(
api_name,
gru_lstm_utils.GPU_DEVICE_NAME,
gpu_gru_with_fallback,
supportive_attribute,
)
# Call the normal GRU impl and register the cuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, runtime = defun_standard_gru(**params)
gru_lstm_utils.function_register(defun_gpu_gru, **params)
return last_output, outputs, new_h, runtime
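# A minimal usage sketch of the call arguments documented on the `GRU` layer
# above (`mask` and `initial_state`); the batch size, sequence length, and
# feature sizes below are assumptions for illustration only.
def _example_gru_call_arguments():
    import numpy as np
    import tensorflow as tf
    inputs = tf.random.normal([32, 10, 8])
    # Mask out the last two timesteps of every sample.
    mask = tf.constant(
        np.concatenate(
            [np.ones((32, 8), dtype=bool), np.zeros((32, 2), dtype=bool)],
            axis=1,
        )
    )
    initial_state = tf.zeros([32, 4])
    gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True)
    whole_sequence_output, final_state = gru(
        inputs, mask=mask, initial_state=initial_state
    )
    # whole_sequence_output has shape (32, 10, 4), final_state (32, 4).
    return whole_sequence_output, final_state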
| tf-keras/tf_keras/layers/rnn/gru.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/gru.py",
"repo_id": "tf-keras",
"token_count": 22906
} | 208 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper allowing a stack of RNN cells to behave as a single cell."""
import functools
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.layers.rnn import rnn_utils
from tf_keras.saving import serialization_lib
from tf_keras.utils import generic_utils
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.StackedRNNCells")
class StackedRNNCells(base_layer.Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Args:
cells: List of RNN cell instances.
Examples:
```python
batch_size = 3
sentence_max_length = 5
n_features = 2
new_shape = (batch_size, sentence_max_length, n_features)
x = tf.constant(np.reshape(np.arange(30), new_shape), dtype = tf.float32)
rnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)]
stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)
lstm_layer = tf.keras.layers.RNN(stacked_lstm)
result = lstm_layer(x)
```
"""
def __init__(self, cells, **kwargs):
for cell in cells:
if "call" not in dir(cell):
raise ValueError(
"All cells must have a `call` method. "
f"Received cell without a `call` method: {cell}"
)
if "state_size" not in dir(cell):
raise ValueError(
"All cells must have a `state_size` attribute. "
f"Received cell without a `state_size`: {cell}"
)
self.cells = cells
        # reverse_state_order determines whether the state size will be in
        # the reverse order of the cells' states. Users might want to set
        # this to True to keep the existing behavior. This is only useful
        # when using RNN(return_state=True), since the states will be
        # returned in the same order as state_size.
self.reverse_state_order = kwargs.pop("reverse_state_order", False)
if self.reverse_state_order:
logging.warning(
"reverse_state_order=True in StackedRNNCells will soon "
"be deprecated. Please update the code to work with the "
"natural order of states if you rely on the RNN states, "
"eg RNN(return_state=True)."
)
super().__init__(**kwargs)
@property
def state_size(self):
return tuple(
c.state_size
for c in (
self.cells[::-1] if self.reverse_state_order else self.cells
)
)
@property
def output_size(self):
if getattr(self.cells[-1], "output_size", None) is not None:
return self.cells[-1].output_size
elif rnn_utils.is_multiple_state(self.cells[-1].state_size):
return self.cells[-1].state_size[0]
else:
return self.cells[-1].state_size
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
initial_states = []
for cell in (
self.cells[::-1] if self.reverse_state_order else self.cells
):
get_initial_state_fn = getattr(cell, "get_initial_state", None)
if get_initial_state_fn:
initial_states.append(
get_initial_state_fn(
inputs=inputs, batch_size=batch_size, dtype=dtype
)
)
else:
initial_states.append(
rnn_utils.generate_zero_filled_state_for_cell(
cell, inputs, batch_size, dtype
)
)
return tuple(initial_states)
def call(self, inputs, states, constants=None, training=None, **kwargs):
# Recover per-cell states.
state_size = (
self.state_size[::-1]
if self.reverse_state_order
else self.state_size
)
nested_states = tf.nest.pack_sequence_as(
state_size, tf.nest.flatten(states)
)
# Call the cells in order and store the returned states.
new_nested_states = []
for cell, states in zip(self.cells, nested_states):
states = states if tf.nest.is_nested(states) else [states]
# TF cell does not wrap the state into list when there is only one
# state.
is_tf_rnn_cell = getattr(cell, "_is_tf_rnn_cell", None) is not None
states = (
states[0] if len(states) == 1 and is_tf_rnn_cell else states
)
if generic_utils.has_arg(cell.call, "training"):
kwargs["training"] = training
else:
kwargs.pop("training", None)
# Use the __call__ function for callable objects, eg layers, so that
# it will have the proper name scopes for the ops, etc.
cell_call_fn = cell.__call__ if callable(cell) else cell.call
if generic_utils.has_arg(cell.call, "constants"):
inputs, states = cell_call_fn(
inputs, states, constants=constants, **kwargs
)
else:
inputs, states = cell_call_fn(inputs, states, **kwargs)
new_nested_states.append(states)
return inputs, tf.nest.pack_sequence_as(
state_size, tf.nest.flatten(new_nested_states)
)
@tf_utils.shape_type_conversion
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
def get_batch_input_shape(batch_size, dim):
shape = tf.TensorShape(dim).as_list()
return tuple([batch_size] + shape)
for cell in self.cells:
if isinstance(cell, base_layer.Layer) and not cell.built:
with backend.name_scope(cell.name):
cell.build(input_shape)
cell.built = True
if getattr(cell, "output_size", None) is not None:
output_dim = cell.output_size
elif rnn_utils.is_multiple_state(cell.state_size):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
batch_size = tf.nest.flatten(input_shape)[0]
if tf.nest.is_nested(output_dim):
input_shape = tf.nest.map_structure(
functools.partial(get_batch_input_shape, batch_size),
output_dim,
)
input_shape = tuple(input_shape)
else:
input_shape = tuple(
[batch_size] + tf.TensorShape(output_dim).as_list()
)
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append(serialization_lib.serialize_keras_object(cell))
config = {"cells": cells}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tf_keras.layers import deserialize as deserialize_layer
cells = []
for cell_config in config.pop("cells"):
cells.append(
deserialize_layer(cell_config, custom_objects=custom_objects)
)
return cls(cells, **config)
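# A short usage sketch of the wrapper above: cells of different sizes can be
# stacked, state_size follows the natural cell order, and output_size comes
# from the last cell. The cell sizes and input shape are assumptions for
# illustration only.
def _example_stacked_rnn_cells():
    import tensorflow as tf
    cells = [tf.keras.layers.GRUCell(8), tf.keras.layers.GRUCell(4)]
    stacked = tf.keras.layers.StackedRNNCells(cells)
    print(stacked.state_size)  # (8, 4) -- natural cell order
    print(stacked.output_size)  # 4 -- taken from the last cell
    layer = tf.keras.layers.RNN(stacked, return_state=True)
    output_and_states = layer(tf.zeros([2, 5, 3]))
    # [output (2, 4), state of cell 0 (2, 8), state of cell 1 (2, 4)]
    return output_and_states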
| tf-keras/tf_keras/layers/rnn/stacked_rnn_cells.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/stacked_rnn_cells.py",
"repo_id": "tf-keras",
"token_count": 3816
} | 209 |
"""Tests for migration_utils."""
import tensorflow as tf
from tf_keras.legacy_tf_layers import migration_utils
class DeterministicRandomTestToolTest(tf.test.TestCase):
def test_constant_mode_no_seed(self):
"""Test random tensor generation consistancy in constant mode.
Verify that the random tensor generated without using the seed is
consistant between graph and eager mode
"""
        # Generate three random tensors to show how the stateful random
        # number generation matches between sessions and eager execution.
random_tool = migration_utils.DeterministicRandomTestTool()
with random_tool.scope():
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
a = tf.compat.v1.random.uniform(shape=(3, 1))
# adding additional computation/ops to the graph and ensuring
                # consistent random number generation
a = a * 3
b = tf.compat.v1.random.uniform(shape=(3, 3))
b = b * 3
c = tf.compat.v1.random.uniform(shape=(3, 3))
c = c * 3
graph_a, graph_b, graph_c = sess.run([a, b, c])
a = tf.compat.v2.random.uniform(shape=(3, 1))
a = a * 3
b = tf.compat.v2.random.uniform(shape=(3, 3))
b = b * 3
c = tf.compat.v2.random.uniform(shape=(3, 3))
c = c * 3
# validate that the generated random tensors match
self.assertAllClose(graph_a, a)
self.assertAllClose(graph_b, b)
self.assertAllClose(graph_c, c)
# In constant mode, because b and c were generated with the same seed
# within the same scope and have the same shape, they will have exactly
# the same values.
# validate that b and c are the same, also graph_b and graph_c
self.assertAllClose(b, c)
self.assertAllClose(graph_b, graph_c)
def test_constant_mode_seed_argument(self):
"""Test random tensor generation consistancy in constant mode.
Verify that the random tensor generated by setting the global seeed
in the args is consistant between graph and eager mode.
"""
random_tool = migration_utils.DeterministicRandomTestTool()
with random_tool.scope():
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
# adding additional computation/ops to the graph and ensuring
                # consistent random number generation
a = tf.compat.v1.random.uniform(shape=(3, 1), seed=1234)
a = a * 3
b = tf.compat.v1.random.uniform(shape=(3, 3), seed=1234)
b = b * 3
graph_a, graph_b = sess.run([a, b])
a = tf.compat.v2.random.uniform(shape=(3, 1), seed=1234)
a = a * 3
b = tf.compat.v2.random.uniform(shape=(3, 3), seed=1234)
b = b * 3
# validate that the generated random tensors match
self.assertAllClose(graph_a, a)
self.assertAllClose(graph_b, b)
def test_num_rand_ops(self):
"""Test random tensor generation consistancy in num_random_ops mode.
Verify that the random tensor generated without using the seed is
consistant between graph and eager mode.
Random tensor generated should be different based on random ops ordering
"""
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops"
)
with random_tool.scope():
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
# adding additional computation/ops to the graph and ensuring
                # consistent random number generation
a = tf.compat.v1.random.uniform(shape=(3, 1))
a = a * 3
b = tf.compat.v1.random.uniform(shape=(3, 3))
b = b * 3
c = tf.compat.v1.random.uniform(shape=(3, 3))
c = c * 3
graph_a, graph_b, graph_c = sess.run([a, b, c])
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops"
)
with random_tool.scope():
a = tf.compat.v2.random.uniform(shape=(3, 1))
a = a * 3
b = tf.compat.v2.random.uniform(shape=(3, 3))
b = b * 3
c = tf.compat.v2.random.uniform(shape=(3, 3))
c = c * 3
# validate that the generated random tensors match
self.assertAllClose(graph_a, a)
self.assertAllClose(graph_b, b)
self.assertAllClose(graph_c, c)
# validate that the tensors differ based on ops ordering
self.assertNotAllClose(b, c)
self.assertNotAllClose(graph_b, graph_c)
def test_num_rand_ops_program_order(self):
"""Test random tensor generation consistancy in num_random_ops mode.
validate that in this mode random number generation is sensitive to
program order, so the generated random tesnors should not match.
"""
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops"
)
with random_tool.scope():
a = tf.random.uniform(shape=(3, 1))
# adding additional computation/ops to the graph and ensuring
            # consistent random number generation
a = a * 3
b = tf.random.uniform(shape=(3, 3))
b = b * 3
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops"
)
with random_tool.scope():
b_prime = tf.random.uniform(shape=(3, 3))
# adding additional computation/ops to the graph and ensuring
            # consistent random number generation
b_prime = b_prime * 3
a_prime = tf.random.uniform(shape=(3, 1))
a_prime = a_prime * 3
# validate that the tensors are different
self.assertNotAllClose(a, a_prime)
self.assertNotAllClose(b, b_prime)
def test_num_rand_ops_operation_seed(self):
"""Test random tensor generation consistancy in num_random_ops mode.
validate if random number generation match across two different program
orders.
"""
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops"
)
with random_tool.scope():
# operation seed = 0
a = tf.random.uniform(shape=(3, 1))
a = a * 3
# operation seed = 1
b = tf.random.uniform(shape=(3, 3))
b = b * 3
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops"
)
with random_tool.scope():
random_tool.operation_seed = 1
b_prime = tf.random.uniform(shape=(3, 3))
b_prime = b_prime * 3
random_tool.operation_seed = 0
a_prime = tf.random.uniform(shape=(3, 1))
a_prime = a_prime * 3
self.assertAllClose(a, a_prime)
self.assertAllClose(b, b_prime)
def test_num_rand_ops_disallow_repeated_ops_seed(self):
"""Test random tensor generation consistancy in num_random_ops mode.
validate if DeterministicRandomTestTool disallows reusing already-used
operation seeds.
"""
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops"
)
with random_tool.scope():
random_tool.operation_seed = 1
b_prime = tf.random.uniform(shape=(3, 3))
b_prime = b_prime * 3
random_tool.operation_seed = 0
a_prime = tf.random.uniform(shape=(3, 1))
a_prime = a_prime * 3
error_string = "An exception should have been raised before this"
try:
tf.random.uniform(shape=(3, 1))
raise RuntimeError(error_string)
except ValueError as err:
err_raised = err
self.assertNotEqual(err_raised, error_string)
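# A condensed usage sketch of the tool exercised by the tests above: wrapping
# two code paths in scopes created with the same mode makes their stateful
# random ops line up, so tensors generated in the same op order match. The
# shapes are assumptions for illustration only.
def _example_deterministic_random_tool_usage():
    tool = migration_utils.DeterministicRandomTestTool(mode="num_random_ops")
    with tool.scope():
        a = tf.random.uniform(shape=(3, 1))  # operation seed 0
        b = tf.random.uniform(shape=(3, 3))  # operation seed 1
    tool2 = migration_utils.DeterministicRandomTestTool(mode="num_random_ops")
    with tool2.scope():
        a2 = tf.random.uniform(shape=(3, 1))  # same op order -> same values
        b2 = tf.random.uniform(shape=(3, 3))
    return (a, a2), (b, b2)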
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/legacy_tf_layers/migration_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/legacy_tf_layers/migration_utils_test.py",
"repo_id": "tf-keras",
"token_count": 3839
} | 210 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for confusion metrics."""
import json
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tensorflow.python.platform import tf_logging
from tf_keras import backend
from tf_keras import layers
from tf_keras import metrics
from tf_keras import models
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import metrics_utils
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class FalsePositivesTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
fp_obj = metrics.FalsePositives(name="my_fp", thresholds=[0.4, 0.9])
self.assertEqual(fp_obj.name, "my_fp")
self.assertLen(fp_obj.variables, 1)
self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
self.assertEqual(fp_obj2.name, "my_fp")
self.assertLen(fp_obj2.variables, 1)
self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose(7.0, result)
def test_weighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = tf.constant((1.0, 1.5, 2.0, 2.5))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(14.0, self.evaluate(result))
def test_unweighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))
y_pred = tf.constant(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = tf.constant(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose([7.0, 4.0, 2.0], result)
def test_weighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))
y_pred = tf.constant(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = tf.constant(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
sample_weight = (
(1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0),
(5.0, 15.0, 10.0, 0),
)
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([125.0, 42.0, 12.0], self.evaluate(result))
def test_threshold_limit(self):
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
):
metrics.FalsePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[None\]",
):
metrics.FalsePositives(thresholds=[None])
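# A small NumPy sketch of what the thresholded FalsePositives metric in the
# tests above counts: for each threshold t, the number of entries with
# y_pred > t while y_true == 0. With the values from
# test_unweighted_with_thresholds this reproduces the expected [7.0, 4.0, 2.0].
def _numpy_false_positives_sketch():
    y_pred = np.array(
        [
            [0.9, 0.2, 0.8, 0.1],
            [0.2, 0.9, 0.7, 0.6],
            [0.1, 0.2, 0.4, 0.3],
            [0.0, 1.0, 0.7, 0.3],
        ]
    )
    y_true = np.array(
        [[0, 1, 1, 0], [1, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]]
    )
    thresholds = [0.15, 0.5, 0.85]
    return [float(np.sum((y_pred > t) & (y_true == 0))) for t in thresholds]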
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class FalseNegativesTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
fn_obj = metrics.FalseNegatives(name="my_fn", thresholds=[0.4, 0.9])
self.assertEqual(fn_obj.name, "my_fn")
self.assertLen(fn_obj.variables, 1)
self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
self.assertEqual(fn_obj2.name, "my_fn")
self.assertLen(fn_obj2.variables, 1)
self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(tf.compat.v1.variables_initializer(fn_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose(3.0, result)
def test_weighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(tf.compat.v1.variables_initializer(fn_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = tf.constant((1.0, 1.5, 2.0, 2.5))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(5.0, self.evaluate(result))
def test_unweighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(tf.compat.v1.variables_initializer(fn_obj.variables))
y_pred = tf.constant(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = tf.constant(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose([1.0, 4.0, 6.0], result)
def test_weighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(tf.compat.v1.variables_initializer(fn_obj.variables))
y_pred = tf.constant(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = tf.constant(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([4.0, 16.0, 23.0], self.evaluate(result))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class TrueNegativesTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name="my_tn", thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, "my_tn")
self.assertLen(tn_obj.variables, 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, "my_tn")
self.assertLen(tn_obj2.variables, 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(tf.compat.v1.variables_initializer(tn_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose(3.0, result)
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(tf.compat.v1.variables_initializer(tn_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = tf.constant((1.0, 1.5, 2.0, 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4.0, self.evaluate(result))
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(tf.compat.v1.variables_initializer(tn_obj.variables))
y_pred = tf.constant(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = tf.constant(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose([2.0, 5.0, 7.0], result)
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(tf.compat.v1.variables_initializer(tn_obj.variables))
y_pred = tf.constant(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = tf.constant(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5.0, 15.0, 23.0], self.evaluate(result))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class TruePositivesTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name="my_tp", thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, "my_tp")
self.assertLen(tp_obj.variables, 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, "my_tp")
self.assertLen(tp_obj2.variables, 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(tf.compat.v1.variables_initializer(tp_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose(7.0, result)
def test_weighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(tf.compat.v1.variables_initializer(tp_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = tf.constant((1.0, 1.5, 2.0, 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(12.0, self.evaluate(result))
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(tf.compat.v1.variables_initializer(tp_obj.variables))
y_pred = tf.constant(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = tf.constant(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose([6.0, 3.0, 1.0], result)
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(tf.compat.v1.variables_initializer(tp_obj.variables))
y_pred = tf.constant(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = tf.constant(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
result = tp_obj(y_true, y_pred, sample_weight=37.0)
self.assertAllClose([222.0, 111.0, 37.0], self.evaluate(result))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class PrecisionTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
p_obj = metrics.Precision(
name="my_precision", thresholds=[0.4, 0.9], top_k=15, class_id=12
)
self.assertEqual(p_obj.name, "my_precision")
self.assertLen(p_obj.variables, 2)
self.assertEqual(
[v.name for v in p_obj.variables],
["true_positives:0", "false_positives:0"],
)
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
self.assertEqual(p_obj.top_k, 15)
self.assertEqual(p_obj.class_id, 12)
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, "my_precision")
self.assertLen(p_obj2.variables, 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
self.assertEqual(p_obj2.top_k, 15)
self.assertEqual(p_obj2.class_id, 12)
def test_value_is_idempotent(self):
p_obj = metrics.Precision(thresholds=[0.3, 0.72])
y_pred = tf.random.uniform(shape=(10, 3))
y_true = tf.random.uniform(shape=(10, 3))
update_op = p_obj.update_state(y_true, y_pred)
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(p_obj.result())
for _ in range(10):
self.assertArrayNear(
initial_precision, self.evaluate(p_obj.result()), 1e-3
)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = tf.constant([1, 0, 1, 0], shape=(1, 4))
y_true = tf.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = tf.constant(inputs)
y_true = tf.constant(1 - inputs)
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = tf.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
result = p_obj(
y_true,
y_pred,
sample_weight=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]),
)
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = tf.constant([0, 0, 0, 0])
y_true = tf.constant([0, 0, 0, 0])
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = tf.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = tf.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.0], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.0])
y_true = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = tf.constant([[1, 0], [0.6, 0]], shape=(2, 2), dtype=tf.float32)
weights = tf.constant([[4, 0], [3, 1]], shape=(2, 2), dtype=tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.0
weighted_positives = (0 + 3.0) + (4.0 + 0.0)
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear(
[expected_precision, 0], self.evaluate(result), 1e-3
)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.0])
y_true = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = tf.constant([[1, 0], [0.6, 0]], shape=(2, 2), dtype=tf.float32)
weights = tf.constant([[4, 0], [3, 1]], shape=(2, 2), dtype=tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.0) + (0 + 3.0)
weighted_positives = ((0 + 3.0) + (4.0 + 0.0)) + (
(0 + 3.0) + (4.0 + 0.0)
)
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear(
[expected_precision, 0], self.evaluate(p_obj.result()), 1e-3
)
def test_unweighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred = tf.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1.0 / 3, self.evaluate(result))
def test_weighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred1 = tf.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = tf.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
self.evaluate(
p_obj(
y_true1, y_pred1, sample_weight=tf.constant([[1, 4, 2, 3, 5]])
)
)
y_pred2 = tf.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = tf.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = p_obj(y_true2, y_pred2, sample_weight=tf.constant(3))
tp = (2 + 5) + (3 + 3)
predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
expected_precision = tp / predicted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_unweighted_class_id(self):
p_obj = metrics.Precision(class_id=2)
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
y_pred = tf.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = tf.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = tf.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_class_id(self):
p_obj = metrics.Precision(class_id=2, top_k=2)
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
y_pred = tf.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = tf.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_threshold(self):
p_obj = metrics.Precision(thresholds=0.7, top_k=2)
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
y_pred = tf.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 1], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class RecallTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
r_obj = metrics.Recall(
name="my_recall", thresholds=[0.4, 0.9], top_k=15, class_id=12
)
self.assertEqual(r_obj.name, "my_recall")
self.assertLen(r_obj.variables, 2)
self.assertEqual(
[v.name for v in r_obj.variables],
["true_positives:0", "false_negatives:0"],
)
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
self.assertEqual(r_obj.top_k, 15)
self.assertEqual(r_obj.class_id, 12)
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, "my_recall")
self.assertLen(r_obj2.variables, 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
self.assertEqual(r_obj2.top_k, 15)
self.assertEqual(r_obj2.class_id, 12)
def test_value_is_idempotent(self):
r_obj = metrics.Recall(thresholds=[0.3, 0.72])
y_pred = tf.random.uniform(shape=(10, 3))
y_true = tf.random.uniform(shape=(10, 3))
update_op = r_obj.update_state(y_true, y_pred)
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(r_obj.result())
for _ in range(10):
self.assertArrayNear(
initial_recall, self.evaluate(r_obj.result()), 1e-3
)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = tf.constant([1, 0, 1, 0], shape=(1, 4))
y_true = tf.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = tf.constant(inputs)
y_true = tf.constant(1 - inputs)
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = tf.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
result = r_obj(
y_true,
y_pred,
sample_weight=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]),
)
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = tf.constant([0, 0, 0, 0])
y_true = tf.constant([0, 0, 0, 0])
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = tf.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = tf.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.0], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.0])
y_true = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = tf.constant([[1, 0], [0.6, 0]], shape=(2, 2), dtype=tf.float32)
weights = tf.constant([[1, 4], [3, 2]], shape=(2, 2), dtype=tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.0
weighted_positives = (0 + 3.0) + (4.0 + 0.0)
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.0])
y_true = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = tf.constant([[1, 0], [0.6, 0]], shape=(2, 2), dtype=tf.float32)
weights = tf.constant([[1, 4], [3, 2]], shape=(2, 2), dtype=tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.0) + (0 + 3.0)
weighted_positives = ((0 + 3.0) + (4.0 + 0.0)) + (
(0 + 3.0) + (4.0 + 0.0)
)
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear(
[expected_recall, 0], self.evaluate(r_obj.result()), 1e-3
)
def test_unweighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred = tf.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
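        # With top_k=3 the predictions at indices {0, 2, 4} count as positive;
        # of the two positive labels (indices 1 and 2), only index 2 is
        # captured, so recall is 0.5.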
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_weighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred1 = tf.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = tf.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
self.evaluate(
r_obj(
y_true1, y_pred1, sample_weight=tf.constant([[1, 4, 2, 3, 5]])
)
)
y_pred2 = tf.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = tf.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = r_obj(y_true2, y_pred2, sample_weight=tf.constant(3))
tp = (2 + 5) + (3 + 3)
positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
expected_recall = tp / positives
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_unweighted_class_id(self):
r_obj = metrics.Recall(class_id=2)
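        # Only predictions and labels at class index 2 contribute to the
        # counts below.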
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
y_pred = tf.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = tf.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
y_pred = tf.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_class_id(self):
r_obj = metrics.Recall(class_id=2, top_k=2)
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
y_pred = tf.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = tf.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
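        # Class 2's score (0.9) is not among the top-2 predictions here, so
        # the positive label at index 2 becomes a false negative and recall
        # drops to 0.5.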
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_threshold(self):
r_obj = metrics.Recall(thresholds=0.7, top_k=2)
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
y_pred = tf.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = tf.constant([1, 1, 1, 0, 1], shape=(1, 5))
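        # Only index 1 (0.8) passes both the 0.7 threshold and the top-2
        # filter; the other three positive labels are false negatives, so
        # recall is 0.25.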
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.25, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(3, self.evaluate(r_obj.false_negatives))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class SensitivityAtSpecificityTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4,
num_thresholds=100,
class_id=12,
name="sensitivity_at_specificity_1",
)
self.assertEqual(s_obj.name, "sensitivity_at_specificity_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(
s_obj.get_config()
)
self.assertEqual(s_obj2.name, "sensitivity_at_specificity_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
        self.assertEqual(s_obj2.class_id, 12)
def test_value_is_idempotent(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
y_pred = tf.random.uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
y_true = tf.random.uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_sensitivity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(
initial_sensitivity, self.evaluate(s_obj.result()), 1e-3
)
def test_unweighted_all_correct(self):
with self.test_session():
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = tf.constant(inputs, dtype=tf.float32)
y_true = tf.constant(inputs)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
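        # For thresholds between 0.3 and 0.4, only the negative scored 0.4 is
        # predicted positive (specificity 0.8), and four of the five positives
        # are recovered, so the expected sensitivity is 0.8.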
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.constant(label_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.8, self.evaluate(result))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.constant(label_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
def test_unweighted_class_id(self):
        s_obj = metrics.SensitivityAtSpecificity(0.4, class_id=2)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
y_pred = tf.transpose([pred_values] * 3)
y_true = tf.one_hot(label_values, depth=3)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([tf.bool, tf.int32, tf.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.cast(label_values, dtype=label_dtype)
weights = tf.constant(weight_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, self.evaluate(result))
def test_invalid_specificity(self):
with self.assertRaisesRegex(
ValueError, r"`specificity` must be in the range \[0, 1\]."
):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class SpecificityAtSensitivityTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4,
num_thresholds=100,
class_id=12,
name="specificity_at_sensitivity_1",
)
self.assertEqual(s_obj.name, "specificity_at_sensitivity_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(
s_obj.get_config()
)
self.assertEqual(s_obj2.name, "specificity_at_sensitivity_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
        self.assertEqual(s_obj2.class_id, 12)
def test_value_is_idempotent(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
y_pred = tf.random.uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
y_true = tf.random.uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_specificity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(
initial_specificity, self.evaluate(s_obj.result()), 1e-3
)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = tf.constant(inputs, dtype=tf.float32)
y_true = tf.constant(inputs)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(1.0)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.constant(label_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.2, self.evaluate(result))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.constant(label_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
def test_unweighted_class_id(self):
s_obj = metrics.SpecificityAtSensitivity(0.4, class_id=2)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
y_pred = tf.transpose([pred_values] * 3)
y_true = tf.one_hot(label_values, depth=3)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([tf.bool, tf.int32, tf.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.cast(label_values, dtype=label_dtype)
weights = tf.constant(weight_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_invalid_sensitivity(self):
with self.assertRaisesRegex(
ValueError, r"`sensitivity` must be in the range \[0, 1\]."
):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class PrecisionAtRecallTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.PrecisionAtRecall(
0.4, num_thresholds=100, class_id=12, name="precision_at_recall_1"
)
self.assertEqual(s_obj.name, "precision_at_recall_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.recall, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.PrecisionAtRecall.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, "precision_at_recall_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.recall, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
        self.assertEqual(s_obj2.class_id, 12)
def test_value_is_idempotent(self):
s_obj = metrics.PrecisionAtRecall(0.7)
y_pred = tf.random.uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
y_true = tf.random.uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(
initial_precision, self.evaluate(s_obj.result()), 1e-3
)
def test_unweighted_all_correct(self):
s_obj = metrics.PrecisionAtRecall(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = tf.constant(inputs, dtype=tf.float32)
y_true = tf.constant(inputs)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_recall(self):
s_obj = metrics.PrecisionAtRecall(0.8)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.constant(label_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
# For 0.5 < decision threshold < 0.6.
self.assertAlmostEqual(2.0 / 3, self.evaluate(result))
def test_unweighted_low_recall(self):
s_obj = metrics.PrecisionAtRecall(0.6)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.constant(label_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
# For 0.2 < decision threshold < 0.5.
self.assertAlmostEqual(0.75, self.evaluate(result))
def test_unweighted_class_id(self):
s_obj = metrics.PrecisionAtRecall(0.6, class_id=2)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
y_pred = tf.transpose([pred_values] * 3)
y_true = tf.one_hot(label_values, depth=3)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
# For 0.2 < decision threshold < 0.5.
self.assertAlmostEqual(0.75, self.evaluate(result))
@parameterized.parameters([tf.bool, tf.int32, tf.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.PrecisionAtRecall(7.0 / 8)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [2, 1, 2, 1, 2, 1, 2, 2, 1, 2]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.cast(label_values, dtype=label_dtype)
weights = tf.constant(weight_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
# For 0.0 < decision threshold < 0.2.
self.assertAlmostEqual(0.7, self.evaluate(result))
    def test_invalid_recall(self):
with self.assertRaisesRegex(
ValueError, r"`recall` must be in the range \[0, 1\]."
):
metrics.PrecisionAtRecall(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.PrecisionAtRecall(0.4, num_thresholds=-1)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class RecallAtPrecisionTest(tf.test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.RecallAtPrecision(
0.4, num_thresholds=100, class_id=12, name="recall_at_precision_1"
)
self.assertEqual(s_obj.name, "recall_at_precision_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.precision, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.RecallAtPrecision.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, "recall_at_precision_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.precision, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
        self.assertEqual(s_obj2.class_id, 12)
def test_value_is_idempotent(self):
s_obj = metrics.RecallAtPrecision(0.7)
y_pred = tf.random.uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
y_true = tf.random.uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(
initial_recall, self.evaluate(s_obj.result()), 1e-3
)
def test_unweighted_all_correct(self):
s_obj = metrics.RecallAtPrecision(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = tf.constant(inputs, dtype=tf.float32)
y_true = tf.constant(inputs)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_precision(self):
s_obj = metrics.RecallAtPrecision(0.75)
pred_values = [
0.05,
0.1,
0.2,
0.3,
0.3,
0.35,
0.4,
0.45,
0.5,
0.6,
0.9,
0.95,
]
label_values = [0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1]
# precisions: [1/2, 6/11, 1/2, 5/9, 5/8, 5/7, 2/3, 3/5, 3/5, 2/3, 1/2,
# 1].
# recalls: [1, 1, 5/6, 5/6, 5/6, 5/6, 2/3, 1/2, 1/2, 1/3, 1/6,
# 1/6].
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.constant(label_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
# The precision 0.75 can be reached at thresholds 0.4<=t<0.45.
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_low_precision(self):
s_obj = metrics.RecallAtPrecision(2.0 / 3)
pred_values = [
0.05,
0.1,
0.2,
0.3,
0.3,
0.35,
0.4,
0.45,
0.5,
0.6,
0.9,
0.95,
]
label_values = [0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1]
# precisions: [1/2, 6/11, 1/2, 5/9, 5/8, 5/7, 2/3, 3/5, 3/5, 2/3, 1/2,
# 1].
# recalls: [1, 1, 5/6, 5/6, 5/6, 5/6, 2/3, 1/2, 1/2, 1/3, 1/6,
# 1/6].
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.constant(label_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
        # The precision 5/7 can be reached at thresholds 0.3<=t<0.35.
self.assertAlmostEqual(5.0 / 6, self.evaluate(result))
def test_unweighted_class_id(self):
s_obj = metrics.RecallAtPrecision(2.0 / 3, class_id=2)
pred_values = [
0.05,
0.1,
0.2,
0.3,
0.3,
0.35,
0.4,
0.45,
0.5,
0.6,
0.9,
0.95,
]
label_values = [0, 2, 0, 0, 0, 2, 2, 0, 2, 2, 0, 2]
# precisions: [1/2, 6/11, 1/2, 5/9, 5/8, 5/7, 2/3, 3/5, 3/5, 2/3, 1/2,
# 1].
# recalls: [1, 1, 5/6, 5/6, 5/6, 5/6, 2/3, 1/2, 1/2, 1/3, 1/6,
# 1/6].
y_pred = tf.transpose([pred_values] * 3)
y_true = tf.one_hot(label_values, depth=3)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
        # The precision 5/7 can be reached at thresholds 0.3<=t<0.35.
self.assertAlmostEqual(5.0 / 6, self.evaluate(result))
@parameterized.parameters([tf.bool, tf.int32, tf.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.RecallAtPrecision(0.75)
pred_values = [0.1, 0.2, 0.3, 0.5, 0.6, 0.9, 0.9]
label_values = [0, 1, 0, 0, 0, 1, 1]
weight_values = [1, 2, 1, 2, 1, 2, 1]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.cast(label_values, dtype=label_dtype)
weights = tf.constant(weight_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.6, self.evaluate(result))
def test_unachievable_precision(self):
s_obj = metrics.RecallAtPrecision(2.0 / 3)
pred_values = [0.1, 0.2, 0.3, 0.9]
label_values = [1, 1, 0, 0]
y_pred = tf.constant(pred_values, dtype=tf.float32)
y_true = tf.constant(label_values)
self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
# The highest possible precision is 1/2 which is below the required
# value, expect 0 recall.
self.assertAlmostEqual(0, self.evaluate(result))
    def test_invalid_precision(self):
with self.assertRaisesRegex(
ValueError, r"`precision` must be in the range \[0, 1\]."
):
metrics.RecallAtPrecision(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.RecallAtPrecision(0.4, num_thresholds=-1)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class AUCTest(tf.test.TestCase, parameterized.TestCase):
def setup(self):
self.num_thresholds = 3
self.y_pred = tf.constant([0, 0.5, 0.3, 0.9], dtype=tf.float32)
self.y_pred_multi_label = tf.constant(
[[0.0, 0.4], [0.5, 0.7], [0.3, 0.2], [0.9, 0.3]], dtype=tf.float32
)
epsilon = 1e-12
self.y_pred_logits = -tf.math.log(1.0 / (self.y_pred + epsilon) - 1.0)
self.y_true = tf.constant([0, 0, 1, 1])
self.y_true_multi_label = tf.constant([[0, 0], [1, 1], [1, 1], [1, 0]])
self.sample_weight = [1, 2, 3, 4]
# threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
# y_pred when threshold = 0 - 1e-7 : [1, 1, 1, 1]
# y_pred when threshold = 0.5 : [0, 0, 0, 1]
# y_pred when threshold = 1 + 1e-7 : [0, 0, 0, 0]
# without sample_weight:
# tp = np.sum([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]], axis=1)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# with sample_weight:
# tp = np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 3, 0], [0, 0, 3, 4]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 2, 0, 0], [1, 2, 0, 0]], axis=1)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
def test_config(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=100,
curve="PR",
summation_method="majoring",
name="auc_1",
dtype=tf.float64,
multi_label=True,
num_labels=2,
from_logits=True,
)
auc_obj.update_state(self.y_true_multi_label, self.y_pred_multi_label)
self.assertEqual(auc_obj.name, "auc_1")
self.assertEqual(auc_obj._dtype, tf.float64)
self.assertLen(auc_obj.variables, 4)
self.assertEqual(auc_obj.num_thresholds, 100)
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(
auc_obj.summation_method, metrics_utils.AUCSummationMethod.MAJORING
)
self.assertTrue(auc_obj.multi_label)
self.assertEqual(auc_obj.num_labels, 2)
self.assertTrue(auc_obj._from_logits)
old_config = auc_obj.get_config()
self.assertNotIn("thresholds", old_config)
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
auc_obj2.update_state(self.y_true_multi_label, self.y_pred_multi_label)
self.assertEqual(auc_obj2.name, "auc_1")
self.assertLen(auc_obj2.variables, 4)
self.assertEqual(auc_obj2.num_thresholds, 100)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(
auc_obj2.summation_method, metrics_utils.AUCSummationMethod.MAJORING
)
self.assertTrue(auc_obj2.multi_label)
self.assertEqual(auc_obj2.num_labels, 2)
self.assertTrue(auc_obj2._from_logits)
new_config = auc_obj2.get_config()
self.assertNotIn("thresholds", new_config)
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_config_manual_thresholds(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=None,
curve="PR",
summation_method="majoring",
name="auc_1",
thresholds=[0.3, 0.5],
)
auc_obj.update_state(self.y_true, self.y_pred)
self.assertEqual(auc_obj.name, "auc_1")
self.assertLen(auc_obj.variables, 4)
self.assertEqual(auc_obj.num_thresholds, 4)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.3, 0.5, 1.0])
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(
auc_obj.summation_method, metrics_utils.AUCSummationMethod.MAJORING
)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
auc_obj2.update_state(self.y_true, self.y_pred)
self.assertEqual(auc_obj2.name, "auc_1")
self.assertLen(auc_obj2.variables, 4)
self.assertEqual(auc_obj2.num_thresholds, 4)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(
auc_obj2.summation_method, metrics_utils.AUCSummationMethod.MAJORING
)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_value_is_idempotent(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=3)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
# Run several updates.
update_op = auc_obj.update_state(self.y_true, self.y_pred)
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_auc = self.evaluate(auc_obj.result())
for _ in range(10):
self.assertAllClose(
initial_auc, self.evaluate(auc_obj.result()), 1e-3
)
def test_unweighted_all_correct(self):
self.setup()
auc_obj = metrics.AUC()
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_true)
self.assertEqual(self.evaluate(result), 1)
def test_unweighted(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.75 * 1 + 0.25 * 0
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_unweighted_from_logits(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, from_logits=True
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred_logits)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.75 * 1 + 0.25 * 0
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_manual_thresholds(self):
self.setup()
# Verify that when specified, thresholds are used instead of
# num_thresholds.
auc_obj = metrics.AUC(num_thresholds=2, thresholds=[0.5])
self.assertEqual(auc_obj.num_thresholds, 3)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.75 * 1 + 0.25 * 0
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.571)/2, (0.571 + 0)/2] = [0.7855, 0.2855]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.7855 * 1 + 0.2855 * 0
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method="majoring"
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [max(1, 0.571), max(0.571, 0)] = [1, 0.571]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 1 * 1 + 0.571 * 0
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method="minoring"
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [min(1, 0.571), min(0.571, 0)] = [0.571, 0]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.571 * 1 + 0 * 0
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve="PR",
summation_method="majoring",
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [max(0.7, 1), max(1, 0)] = [1, 1]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = 1 * 0.429 + 1 * 0.571
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve="PR",
summation_method="minoring",
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [min(0.7, 1), min(1, 0)] = [0.7, 0]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = 0.7 * 0.429 + 0 * 0.571
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve="PR")
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
        # auc = (slope / Total Pos) * [dTP + intercept * log(Pb/Pa)]
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# P = tp + fp = [10, 4, 0]
# dTP = [7-4, 4-0] = [3, 4]
# dP = [10-4, 4-0] = [6, 4]
# slope = dTP/dP = [0.5, 1]
        # intercept = TPa - (slope * Pa) = [(4 - 0.5*4), (0 - 1*0)] = [2, 0]
# (Pb/Pa) = (Pb/Pa) if Pb > 0 AND Pa > 0 else 1 = [10/4, 4/0] = [2.5, 1]
# auc * TotalPos = [(0.5 * (3 + 2 * log(2.5))), (1 * (4 + 0))]
# = [2.416, 4]
# auc = [2.416, 4]/(tp[1:]+fn[1:])
expected_result = 2.416 / 7 + 4 / 7
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 1"
):
metrics.AUC(num_thresholds=-1)
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 1."
):
metrics.AUC(num_thresholds=1)
def test_invalid_curve(self):
with self.assertRaisesRegex(
ValueError, 'Invalid AUC curve value: "Invalid".'
):
metrics.AUC(curve="Invalid")
def test_invalid_summation_method(self):
with self.assertRaisesRegex(
ValueError, 'Invalid AUC summation method value: "Invalid".'
):
metrics.AUC(summation_method="Invalid")
def test_extra_dims(self):
try:
from scipy import special
self.setup()
logits = special.expit(
-np.array(
[
[[-10.0, 10.0, -10.0], [10.0, -10.0, 10.0]],
[[-12.0, 12.0, -12.0], [12.0, -12.0, 12.0]],
],
dtype=np.float32,
)
)
labels = np.array(
[[[1, 0, 0], [1, 0, 0]], [[0, 1, 1], [0, 1, 1]]], dtype=np.int64
)
auc_obj = metrics.AUC()
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(labels, logits)
self.assertEqual(self.evaluate(result), 0.5)
except ImportError as e:
tf_logging.warning(f"Cannot test special functions: {str(e)}")
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MultiAUCTest(tf.test.TestCase, parameterized.TestCase):
def setup(self):
self.num_thresholds = 5
self.y_pred = tf.constant(
np.array([[0, 0.5, 0.3, 0.9], [0.1, 0.2, 0.3, 0.4]]).T,
dtype=tf.float32,
)
epsilon = 1e-12
self.y_pred_logits = -tf.math.log(1.0 / (self.y_pred + epsilon) - 1.0)
self.y_true_good = tf.constant(np.array([[0, 0, 1, 1], [0, 0, 1, 1]]).T)
self.y_true_bad = tf.constant(np.array([[0, 0, 1, 1], [1, 1, 0, 0]]).T)
self.sample_weight = [1, 2, 3, 4]
# threshold values are [0 - 1e-7, 0.25, 0.5, 0.75, 1 + 1e-7]
# y_pred when threshold = 0 - 1e-7 : [[1, 1, 1, 1], [1, 1, 1, 1]]
# y_pred when threshold = 0.25 : [[0, 1, 1, 1], [0, 0, 1, 1]]
# y_pred when threshold = 0.5 : [[0, 0, 0, 1], [0, 0, 0, 0]]
# y_pred when threshold = 0.75 : [[0, 0, 0, 1], [0, 0, 0, 0]]
# y_pred when threshold = 1 + 1e-7 : [[0, 0, 0, 0], [0, 0, 0, 0]]
# for y_true_good, over thresholds:
# tp = [[2, 2, 1, 1, 0], [2, 2, 0, 0, 0]]
        # fp = [[2, 1, 0, 0, 0], [2, 0, 0, 0, 0]]
# fn = [[0, 0, 1, 1, 2], [0, 0, 2, 2, 2]]
# tn = [[0, 1, 2, 2, 2], [0, 2, 2, 2, 2]]
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
# for y_true_bad:
# tp = [[2, 2, 1, 1, 0], [2, 0, 0, 0, 0]]
        # fp = [[2, 1, 0, 0, 0], [2, 2, 0, 0, 0]]
# fn = [[0, 0, 1, 1, 2], [0, 2, 2, 2, 2]]
# tn = [[0, 1, 2, 2, 2], [0, 0, 2, 2, 2]]
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 0, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 1, 0, 0, 0]]
# for y_true_good with sample_weights:
# tp = [[7, 7, 4, 4, 0], [7, 7, 0, 0, 0]]
# fp = [[3, 2, 0, 0, 0], [3, 0, 0, 0, 0]]
# fn = [[0, 0, 3, 3, 7], [0, 0, 7, 7, 7]]
# tn = [[0, 1, 3, 3, 3], [0, 3, 3, 3, 3]]
# tpr = [[1, 1, 0.57, 0.57, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.67, 0, 0, 0], [1, 0, 0, 0, 0]]
def test_value_is_idempotent(self):
with self.test_session():
self.setup()
auc_obj = metrics.AUC(num_thresholds=5, multi_label=True)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
# Run several updates.
update_op = auc_obj.update_state(self.y_true_good, self.y_pred)
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_auc = self.evaluate(auc_obj.result())
for _ in range(10):
self.assertAllClose(
initial_auc, self.evaluate(auc_obj.result()), 1e-3
)
def test_unweighted_all_correct(self):
with self.test_session():
self.setup()
auc_obj = metrics.AUC(multi_label=True)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true_good, self.y_true_good)
self.assertEqual(self.evaluate(result), 1)
def test_unweighted_all_correct_flat(self):
self.setup()
auc_obj = metrics.AUC(multi_label=False)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true_good, self.y_true_good)
self.assertEqual(self.evaluate(result), 1)
def test_unweighted(self):
with self.test_session():
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=True
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true_good, self.y_pred)
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
expected_result = (0.875 + 1.0) / 2.0
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_unweighted_from_logits(self):
with self.test_session():
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
multi_label=True,
from_logits=True,
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true_good, self.y_pred_logits)
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
expected_result = (0.875 + 1.0) / 2.0
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_sample_weight_flat(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=False
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(
self.y_true_good, self.y_pred, sample_weight=[1, 2, 3, 4]
)
# tpr = [1, 1, 0.2857, 0.2857, 0]
# fpr = [1, 0.3333, 0, 0, 0]
expected_result = 1.0 - (0.3333 * (1.0 - 0.2857) / 2.0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_full_sample_weight_flat(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=False
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
sw = np.arange(4 * 2)
sw = sw.reshape(4, 2)
result = auc_obj(self.y_true_good, self.y_pred, sample_weight=sw)
# tpr = [1, 1, 0.2727, 0.2727, 0]
# fpr = [1, 0.3333, 0, 0, 0]
expected_result = 1.0 - (0.3333 * (1.0 - 0.2727) / 2.0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_label_weights(self):
with self.test_session():
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
multi_label=True,
label_weights=[0.75, 0.25],
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true_good, self.y_pred)
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
expected_result = (0.875 * 0.75 + 1.0 * 0.25) / (0.75 + 0.25)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_label_weights_flat(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
multi_label=False,
label_weights=[0.75, 0.25],
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true_good, self.y_pred)
# tpr = [1, 1, 0.375, 0.375, 0]
# fpr = [1, 0.375, 0, 0, 0]
expected_result = 1.0 - ((1.0 - 0.375) * 0.375 / 2.0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-2)
def test_unweighted_flat(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=False
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true_good, self.y_pred)
# tp = [4, 4, 1, 1, 0]
# fp = [4, 1, 0, 0, 0]
# fn = [0, 0, 3, 3, 4]
# tn = [0, 3, 4, 4, 4]
# tpr = [1, 1, 0.25, 0.25, 0]
# fpr = [1, 0.25, 0, 0, 0]
expected_result = 1.0 - (3.0 / 32.0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_unweighted_flat_from_logits(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
multi_label=False,
from_logits=True,
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true_good, self.y_pred_logits)
# tp = [4, 4, 1, 1, 0]
# fp = [4, 1, 0, 0, 0]
# fn = [0, 0, 3, 3, 4]
# tn = [0, 3, 4, 4, 4]
# tpr = [1, 1, 0.25, 0.25, 0]
# fpr = [1, 0.25, 0, 0, 0]
expected_result = 1.0 - (3.0 / 32.0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_manual_thresholds(self):
with self.test_session():
self.setup()
# Verify that when specified, thresholds are used instead of
# num_thresholds.
auc_obj = metrics.AUC(
num_thresholds=2, thresholds=[0.5], multi_label=True
)
self.assertEqual(auc_obj.num_thresholds, 3)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true_good, self.y_pred)
# tp = [[2, 1, 0], [2, 0, 0]]
            # fp = [[2, 0, 0], [2, 0, 0]]
# fn = [[0, 1, 2], [0, 2, 2]]
# tn = [[0, 2, 2], [0, 2, 2]]
# tpr = [[1, 0.5, 0], [1, 0, 0]]
# fpr = [[1, 0, 0], [1, 0, 0]]
# auc by slice = [0.75, 0.5]
expected_result = (0.75 + 0.5) / 2.0
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_interpolation(self):
with self.test_session():
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=True
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
result = auc_obj(
self.y_true_good, self.y_pred, sample_weight=self.sample_weight
)
# tpr = [[1, 1, 0.57, 0.57, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.67, 0, 0, 0], [1, 0, 0, 0, 0]]
expected_result = 1.0 - 0.5 * 0.43 * 0.67
self.assertAllClose(self.evaluate(result), expected_result, 1e-1)
def test_pr_interpolation_unweighted(self):
with self.test_session():
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, curve="PR", multi_label=True
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
good_result = auc_obj(self.y_true_good, self.y_pred)
with self.subTest(name="good"):
# PR AUCs are 0.917 and 1.0 respectively
self.assertAllClose(
self.evaluate(good_result), (0.91667 + 1.0) / 2.0, 1e-1
)
bad_result = auc_obj(self.y_true_bad, self.y_pred)
with self.subTest(name="bad"):
# PR AUCs are 0.917 and 0.5 respectively
self.assertAllClose(
self.evaluate(bad_result), (0.91667 + 0.5) / 2.0, 1e-1
)
def test_pr_interpolation(self):
with self.test_session():
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, curve="PR", multi_label=True
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
good_result = auc_obj(
self.y_true_good, self.y_pred, sample_weight=self.sample_weight
)
# PR AUCs are 0.939 and 1.0 respectively
self.assertAllClose(
self.evaluate(good_result), (0.939 + 1.0) / 2.0, 1e-1
)
def test_keras_model_compiles(self):
inputs = layers.Input(shape=(10,))
output = layers.Dense(3, activation="sigmoid")(inputs)
model = models.Model(inputs=inputs, outputs=output)
model.compile(
loss="binary_crossentropy", metrics=[metrics.AUC(multi_label=True)]
)
def test_reset_state(self):
with self.test_session():
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=True
)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
auc_obj(self.y_true_good, self.y_pred)
auc_obj.reset_state()
self.assertAllEqual(auc_obj.true_positives, np.zeros((5, 2)))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
class ThresholdsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
metrics.TruePositives(),
metrics.TrueNegatives(),
metrics.FalsePositives(),
metrics.FalseNegatives(),
metrics.Precision(),
metrics.Recall(),
metrics.SensitivityAtSpecificity(0.5),
metrics.SpecificityAtSensitivity(0.5),
metrics.PrecisionAtRecall(0.5),
metrics.RecallAtPrecision(0.5),
metrics.AUC(),
]
)
def test_with_default_thresholds(self, metric_obj):
        # By default, the thresholds are evenly distributed when there is more
        # than one. When there is only one threshold, we expect
        # _thresholds_distributed_evenly to be False.
expected = len(metric_obj.thresholds) > 1
self.assertEqual(metric_obj._thresholds_distributed_evenly, expected)
@parameterized.parameters(
[
metrics.TruePositives,
metrics.TrueNegatives,
metrics.FalsePositives,
metrics.FalseNegatives,
metrics.Precision,
metrics.Recall,
]
)
def test_with_manual_thresholds(self, metric_cls):
even_thresholds = [0.0, 0.25, 0.5, 0.75, 1.0]
metric_obj = metric_cls(thresholds=even_thresholds)
self.assertTrue(metric_obj._thresholds_distributed_evenly)
uneven_thresholds = [0.0, 0.45, 1.0]
metric_obj = metric_cls(thresholds=uneven_thresholds)
self.assertFalse(metric_obj._thresholds_distributed_evenly)
def test_manual_thresholds_auc(self):
        # The AUC metric handles manual thresholds differently: it adds 0.0
        # and 1.0 on behalf of the user.
even_thresholds = [0.25, 0.5, 0.75]
auc = metrics.AUC(thresholds=even_thresholds)
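        # With 0.0 and 1.0 added, the full list [0.0, 0.25, 0.5, 0.75, 1.0] is
        # still evenly spaced.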
self.assertTrue(auc._thresholds_distributed_evenly)
        # Check that the setting survives a config round-trip.
cloned = metrics.AUC.from_config(auc.get_config())
self.assertTrue(cloned._thresholds_distributed_evenly)
uneven_thresholds = [
0.45,
]
auc = metrics.AUC(thresholds=uneven_thresholds)
self.assertFalse(auc._thresholds_distributed_evenly)
cloned = metrics.AUC.from_config(auc.get_config())
self.assertFalse(cloned._thresholds_distributed_evenly)
@parameterized.parameters(
[
metrics.TruePositives,
metrics.TrueNegatives,
metrics.FalsePositives,
metrics.FalseNegatives,
metrics.Precision,
metrics.Recall,
metrics.AUC,
]
)
def test_even_thresholds_correctness(self, metric_cls):
with tf.compat.forward_compatibility_horizon(2021, 6, 9):
            # Make sure the old and new approaches produce the same result for
            # evenly distributed thresholds.
y_true = np.random.randint(2, size=(10,))
y_pred = np.random.rand(10)
even_thresholds = [0.0, 0.25, 0.5, 0.75, 1.0]
if metric_cls == metrics.AUC:
even_thresholds = even_thresholds[1:-1]
metric_obj = metric_cls(thresholds=even_thresholds)
metric_obj.update_state(y_true, y_pred)
result1 = metric_obj.result()
metric_obj2 = metric_cls(thresholds=even_thresholds)
# Force to use the old approach
metric_obj2._thresholds_distributed_evenly = False
metric_obj2.update_state(y_true, y_pred)
result2 = metric_obj2.result()
self.assertAllClose(result1, result2)
            # Check that all the variables are the same, e.g. tp, tn, fp, fn.
for v1, v2 in zip(metric_obj.variables, metric_obj2.variables):
self.assertAllClose(v1, v2)
@parameterized.parameters(
[
metrics.SensitivityAtSpecificity,
metrics.SpecificityAtSensitivity,
metrics.PrecisionAtRecall,
metrics.RecallAtPrecision,
]
)
def test_even_thresholds_correctness_2(self, metric_cls):
with tf.compat.forward_compatibility_horizon(2021, 6, 9):
y_true = np.random.randint(2, size=(10,))
y_pred = np.random.rand(10)
metric_obj = metric_cls(0.5)
metric_obj.update_state(y_true, y_pred)
result1 = metric_obj.result()
metric_obj2 = metric_cls(0.5)
# Force to use the old approach
metric_obj2._thresholds_distributed_evenly = False
metric_obj2.update_state(y_true, y_pred)
result2 = metric_obj2.result()
self.assertAllClose(result1, result2)
            # Check that all the variables are the same, e.g. tp, tn, fp, fn.
for v1, v2 in zip(metric_obj.variables, metric_obj2.variables):
self.assertAllClose(v1, v2)
class BinaryTruePositives(metrics.Metric):
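    """A minimal Metric subclass that counts (weighted) true positives."""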
def __init__(self, name="binary_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name="tp", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, dtype=self.dtype)
sample_weight = tf.__internal__.ops.broadcast_weights(
sample_weight, values
)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
class BinaryTruePositivesViaControlFlow(metrics.Metric):
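    """Same counting as BinaryTruePositives, but via Python control flow."""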
def __init__(self, name="binary_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name="tp", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
for i in range(len(y_true)):
for j in range(len(y_true[i])):
if y_true[i][j] and y_pred[i][j]:
if sample_weight is None:
self.true_positives.assign_add(1)
else:
self.true_positives.assign_add(sample_weight[i][0])
def result(self):
if tf.constant(True):
return self.true_positives
return 0.0
def _get_model(compile_metrics):
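    """Builds a tiny sigmoid model compiled with the given metrics."""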
model_layers = [
layers.Dense(3, activation="relu", kernel_initializer="ones"),
layers.Dense(1, activation="sigmoid", kernel_initializer="ones"),
]
model = test_utils.get_model_from_layers(model_layers, input_shape=(4,))
model.compile(
loss="mae",
metrics=compile_metrics,
optimizer="rmsprop",
run_eagerly=test_utils.should_run_eagerly(),
)
return model
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
class ResetStatesTest(test_combinations.TestCase):
def test_reset_state_false_positives(self):
fp_obj = metrics.FalsePositives()
model = _get_model([fp_obj])
x = np.ones((100, 4))
y = np.zeros((100, 1))
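        # `evaluate()` resets metric state, so the accumulator should reflect
        # a single pass over the data each time.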
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.0)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.0)
def test_reset_state_false_negatives(self):
fn_obj = metrics.FalseNegatives()
model = _get_model([fn_obj])
x = np.zeros((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.0)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.0)
def test_reset_state_true_negatives(self):
tn_obj = metrics.TrueNegatives()
model = _get_model([tn_obj])
x = np.zeros((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.0)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.0)
def test_reset_state_true_positives(self):
tp_obj = metrics.TruePositives()
model = _get_model([tp_obj])
x = np.ones((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.0)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.0)
def test_reset_state_precision(self):
p_obj = metrics.Precision()
model = _get_model([p_obj])
x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 50.0)
self.assertEqual(self.evaluate(p_obj.false_positives), 50.0)
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 50.0)
self.assertEqual(self.evaluate(p_obj.false_positives), 50.0)
def test_precision_update_state_with_logits(self):
p_obj = metrics.Precision()
        # Updating state with logits (values not in the range (0, 1)) should
        # not raise an error.
p_obj.update_state([-0.5, 0.5], [-2.0, 2.0])
def test_reset_state_recall(self):
r_obj = metrics.Recall()
model = _get_model([r_obj])
x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.0)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.0)
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.0)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.0)
def test_reset_state_sensitivity_at_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate(
(
np.ones((25, 4)),
np.zeros((25, 4)),
np.zeros((25, 4)),
np.ones((25, 4)),
)
)
y = np.concatenate(
(
np.ones((25, 1)),
np.zeros((25, 1)),
np.ones((25, 1)),
np.zeros((25, 1)),
)
)
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.0)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.0)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.0)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.0)
def test_reset_state_specificity_at_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate(
(
np.ones((25, 4)),
np.zeros((25, 4)),
np.zeros((25, 4)),
np.ones((25, 4)),
)
)
y = np.concatenate(
(
np.ones((25, 1)),
np.zeros((25, 1)),
np.ones((25, 1)),
np.zeros((25, 1)),
)
)
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.0)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.0)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.0)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.0)
def test_reset_state_precision_at_recall(self):
s_obj = metrics.PrecisionAtRecall(recall=0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate(
(
np.ones((25, 4)),
np.zeros((25, 4)),
np.zeros((25, 4)),
np.ones((25, 4)),
)
)
y = np.concatenate(
(
np.ones((25, 1)),
np.zeros((25, 1)),
np.ones((25, 1)),
np.zeros((25, 1)),
)
)
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.0)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.0)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.0)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.0)
def test_reset_state_recall_at_precision(self):
s_obj = metrics.RecallAtPrecision(precision=0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate(
(
np.ones((25, 4)),
np.zeros((25, 4)),
np.zeros((25, 4)),
np.ones((25, 4)),
)
)
y = np.concatenate(
(
np.ones((25, 1)),
np.zeros((25, 1)),
np.ones((25, 1)),
np.zeros((25, 1)),
)
)
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.0)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.0)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.0)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.0)
def test_reset_state_auc(self):
auc_obj = metrics.AUC(num_thresholds=3)
model = _get_model([auc_obj])
x = np.concatenate(
(
np.ones((25, 4)),
np.zeros((25, 4)),
np.zeros((25, 4)),
np.ones((25, 4)),
)
)
y = np.concatenate(
(
np.ones((25, 1)),
np.zeros((25, 1)),
np.ones((25, 1)),
np.zeros((25, 1)),
)
)
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.0)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.0)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.0)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.0)
def test_reset_state_auc_from_logits(self):
auc_obj = metrics.AUC(num_thresholds=3, from_logits=True)
model_layers = [
layers.Dense(1, kernel_initializer="ones", use_bias=False)
]
model = test_utils.get_model_from_layers(model_layers, input_shape=(4,))
model.compile(
loss="mae",
metrics=[auc_obj],
optimizer="rmsprop",
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.concatenate(
(
np.ones((25, 4)),
-np.ones((25, 4)),
-np.ones((25, 4)),
np.ones((25, 4)),
)
)
y = np.concatenate(
(
np.ones((25, 1)),
np.zeros((25, 1)),
np.ones((25, 1)),
np.zeros((25, 1)),
)
)
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.0)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.0)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.0)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.0)
def test_reset_state_auc_manual_thresholds(self):
auc_obj = metrics.AUC(thresholds=[0.5])
model = _get_model([auc_obj])
x = np.concatenate(
(
np.ones((25, 4)),
np.zeros((25, 4)),
np.zeros((25, 4)),
np.ones((25, 4)),
)
)
y = np.concatenate(
(
np.ones((25, 1)),
np.zeros((25, 1)),
np.ones((25, 1)),
np.zeros((25, 1)),
)
)
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.0)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.0)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.0)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.0)
def test_reset_state_mean_iou(self):
m_obj = metrics.MeanIoU(num_classes=2)
model = _get_model([m_obj])
x = np.asarray(
[[0, 0, 0, 0], [1, 1, 1, 1], [1, 0, 1, 0], [0, 1, 0, 1]],
dtype=np.float32,
)
y = np.asarray([[0], [1], [1], [1]], dtype=np.float32)
model.evaluate(x, y)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)
model.evaluate(x, y)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)
def test_reset_state_recall_float64(self):
# Test case for GitHub issue 36790.
try:
backend.set_floatx("float64")
r_obj = metrics.Recall()
model = _get_model([r_obj])
x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.0)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.0)
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.0)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.0)
finally:
backend.set_floatx("float32")
def test_function_wrapped_reset_state(self):
m = metrics.Mean(name="my_mean")
# check reset_state in function.
@tf.function
def reset_in_fn():
m.reset_state()
m.update_state(100)
for _ in range(5):
reset_in_fn()
if not tf.executing_eagerly():
self.evaluate(
tf.compat.v1.get_default_graph().get_operations()[-1]
)
self.assertEqual(self.evaluate(m.count), 1)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MergeStateTest(test_combinations.TestCase):
def test_merge_state_incompatible_metrics(self):
with self.assertRaisesRegex(
ValueError, "Metric .* is not compatible with .*"
):
obj1 = metrics.FalsePositives()
self.evaluate(tf.compat.v1.variables_initializer(obj1.variables))
obj2 = metrics.Accuracy()
self.evaluate(tf.compat.v1.variables_initializer(obj2.variables))
self.evaluate(obj1.merge_state([obj2]))
def test_merge_state_accuracy(self):
a_objs = []
for y_true, y_pred in zip(
[[[1], [2]], [[3], [4]]], [[[0], [2]], [[3], [4]]]
):
a_obj = metrics.Accuracy()
a_objs.append(a_obj)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
self.evaluate(a_obj.update_state(y_true, y_pred))
self.evaluate(a_objs[0].merge_state(a_objs[1:]))
self.assertEqual(self.evaluate(a_objs[0].total), 3.0)
self.assertEqual(self.evaluate(a_objs[0].count), 4.0)
self.assertEqual(self.evaluate(a_objs[0].result()), 0.75)
def test_merge_state_false_positives(self):
fp_objs = []
for _ in range(4):
fp_obj = metrics.FalsePositives()
fp_objs.append(fp_obj)
self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))
y_true = np.zeros((25, 1))
y_pred = np.ones((25, 1))
self.evaluate(fp_obj.update_state(y_true, y_pred))
self.evaluate(fp_objs[0].merge_state(fp_objs[1:]))
self.assertEqual(self.evaluate(fp_objs[0].accumulator), 100.0)
def test_merge_state_false_negatives(self):
fn_objs = []
for _ in range(4):
fn_obj = metrics.FalseNegatives()
fn_objs.append(fn_obj)
self.evaluate(tf.compat.v1.variables_initializer(fn_obj.variables))
y_true = np.ones((25, 1))
y_pred = np.zeros((25, 1))
self.evaluate(fn_obj.update_state(y_true, y_pred))
self.evaluate(fn_objs[0].merge_state(fn_objs[1:]))
self.assertEqual(self.evaluate(fn_objs[0].accumulator), 100.0)
def test_merge_state_true_negatives(self):
tn_objs = []
for _ in range(4):
tn_obj = metrics.TrueNegatives()
tn_objs.append(tn_obj)
self.evaluate(tf.compat.v1.variables_initializer(tn_obj.variables))
y_true = np.zeros((25, 1))
y_pred = np.zeros((25, 1))
self.evaluate(tn_obj.update_state(y_true, y_pred))
self.evaluate(tn_objs[0].merge_state(tn_objs[1:]))
self.assertEqual(self.evaluate(tn_objs[0].accumulator), 100.0)
def test_merge_state_true_positives(self):
tp_objs = []
for _ in range(4):
tp_obj = metrics.TruePositives()
tp_objs.append(tp_obj)
self.evaluate(tf.compat.v1.variables_initializer(tp_obj.variables))
y_true = np.ones((25, 1))
y_pred = np.ones((25, 1))
self.evaluate(tp_obj.update_state(y_true, y_pred))
self.evaluate(tp_objs[0].merge_state(tp_objs[1:]))
self.assertEqual(self.evaluate(tp_objs[0].accumulator), 100.0)
def test_merge_state_precision(self):
p_objs = []
for _ in range(5):
p_obj = metrics.Precision()
p_objs.append(p_obj)
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
y_true = np.concatenate((np.ones((10, 1)), np.zeros((10, 1))))
y_pred = np.concatenate((np.ones((10, 1)), np.ones((10, 1))))
self.evaluate(p_obj.update_state(y_true, y_pred))
self.evaluate(p_objs[0].merge_state(p_objs[1:]))
self.assertEqual(self.evaluate(p_objs[0].true_positives), 50.0)
self.assertEqual(self.evaluate(p_objs[0].false_positives), 50.0)
def test_merge_state_recall(self):
r_objs = []
for _ in range(5):
r_obj = metrics.Recall()
r_objs.append(r_obj)
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
y_true = np.concatenate((np.ones((10, 1)), np.ones((10, 1))))
y_pred = np.concatenate((np.ones((10, 1)), np.zeros((10, 1))))
self.evaluate(r_obj.update_state(y_true, y_pred))
self.evaluate(r_objs[0].merge_state(r_objs[1:]))
self.assertEqual(self.evaluate(r_objs[0].true_positives), 50.0)
self.assertEqual(self.evaluate(r_objs[0].false_negatives), 50.0)
def test_merge_state_sensitivity_at_specificity(self):
sas_objs = []
for _ in range(5):
sas_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)
sas_objs.append(sas_obj)
self.evaluate(tf.compat.v1.variables_initializer(sas_obj.variables))
y_true = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
np.zeros((5, 1)),
)
)
y_pred = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
)
)
self.evaluate(sas_obj.update_state(y_true, y_pred))
self.evaluate(sas_objs[0].merge_state(sas_objs[1:]))
self.assertEqual(self.evaluate(sas_objs[0].true_positives), 25.0)
self.assertEqual(self.evaluate(sas_objs[0].false_positives), 25.0)
self.assertEqual(self.evaluate(sas_objs[0].false_negatives), 25.0)
self.assertEqual(self.evaluate(sas_objs[0].true_negatives), 25.0)
def test_merge_state_specificity_at_sensitivity(self):
sas_objs = []
for _ in range(5):
sas_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1)
sas_objs.append(sas_obj)
self.evaluate(tf.compat.v1.variables_initializer(sas_obj.variables))
y_true = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
np.zeros((5, 1)),
)
)
y_pred = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
)
)
self.evaluate(sas_obj.update_state(y_true, y_pred))
self.evaluate(sas_objs[0].merge_state(sas_objs[1:]))
self.assertEqual(self.evaluate(sas_objs[0].true_positives), 25.0)
self.assertEqual(self.evaluate(sas_objs[0].false_positives), 25.0)
self.assertEqual(self.evaluate(sas_objs[0].false_negatives), 25.0)
self.assertEqual(self.evaluate(sas_objs[0].true_negatives), 25.0)
def test_merge_state_precision_at_recall(self):
par_objs = []
for _ in range(5):
par_obj = metrics.PrecisionAtRecall(recall=0.5, num_thresholds=1)
par_objs.append(par_obj)
self.evaluate(tf.compat.v1.variables_initializer(par_obj.variables))
y_true = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
np.zeros((5, 1)),
)
)
y_pred = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
)
)
self.evaluate(par_obj.update_state(y_true, y_pred))
self.evaluate(par_objs[0].merge_state(par_objs[1:]))
self.assertEqual(self.evaluate(par_objs[0].true_positives), 25.0)
self.assertEqual(self.evaluate(par_objs[0].false_positives), 25.0)
self.assertEqual(self.evaluate(par_objs[0].false_negatives), 25.0)
self.assertEqual(self.evaluate(par_objs[0].true_negatives), 25.0)
def test_merge_state_recall_at_precision(self):
rap_objs = []
for _ in range(5):
            rap_obj = metrics.RecallAtPrecision(
                precision=0.5, num_thresholds=1
            )
rap_objs.append(rap_obj)
self.evaluate(tf.compat.v1.variables_initializer(rap_obj.variables))
y_true = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
np.zeros((5, 1)),
)
)
y_pred = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
)
)
self.evaluate(rap_obj.update_state(y_true, y_pred))
self.evaluate(rap_objs[0].merge_state(rap_objs[1:]))
self.assertEqual(self.evaluate(rap_objs[0].true_positives), 25.0)
self.assertEqual(self.evaluate(rap_objs[0].false_positives), 25.0)
self.assertEqual(self.evaluate(rap_objs[0].false_negatives), 25.0)
self.assertEqual(self.evaluate(rap_objs[0].true_negatives), 25.0)
def test_merge_state_auc(self):
auc_objs = []
for _ in range(5):
auc_obj = metrics.AUC(num_thresholds=3)
auc_objs.append(auc_obj)
self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
y_true = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
np.zeros((5, 1)),
)
)
y_pred = np.concatenate(
(
np.ones((5, 1)),
np.zeros((5, 1)),
np.zeros((5, 1)),
np.ones((5, 1)),
)
)
self.evaluate(auc_obj.update_state(y_true, y_pred))
self.evaluate(auc_objs[0].merge_state(auc_objs[1:]))
self.assertEqual(self.evaluate(auc_objs[0].true_positives[1]), 25.0)
self.assertEqual(self.evaluate(auc_objs[0].false_positives[1]), 25.0)
self.assertEqual(self.evaluate(auc_objs[0].false_negatives[1]), 25.0)
self.assertEqual(self.evaluate(auc_objs[0].true_negatives[1]), 25.0)
def test_merge_state_mean_iou(self):
m_objs = []
for y_true, y_pred in zip(
[[0], [1], [1], [1]], [[0.5], [1.0], [1.0], [1.0]]
):
m_obj = metrics.MeanIoU(num_classes=2)
m_objs.append(m_obj)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
self.evaluate(m_obj.update_state(y_true, y_pred))
self.evaluate(m_objs[0].merge_state(m_objs[1:]))
self.assertArrayNear(self.evaluate(m_objs[0].total_cm)[0], [1, 0], 1e-1)
self.assertArrayNear(self.evaluate(m_objs[0].total_cm)[1], [0, 3], 1e-1)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/metrics/confusion_metrics_test.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/confusion_metrics_test.py",
"repo_id": "tf-keras",
"token_count": 58907
} | 211 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adadelta optimizer implementation."""
import tensorflow.compat.v2 as tf
from tf_keras.optimizers import optimizer
from tf_keras.saving.object_registration import register_keras_serializable
# isort: off
from tensorflow.python.util.tf_export import keras_export
@register_keras_serializable()
@keras_export(
"keras.optimizers.Adadelta",
"keras.optimizers.experimental.Adadelta",
"keras.dtensor.experimental.optimizers.Adadelta",
v1=[],
)
class Adadelta(optimizer.Optimizer):
r"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based
on adaptive learning rate per dimension to address two drawbacks:
- The continual decay of learning rates throughout training.
- The need for a manually selected global learning rate.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, in the original version of Adadelta you
don't have to set an initial learning rate. In this version, the initial
learning rate can be set, as in most other TF-Keras optimizers.
Args:
learning_rate: Initial value for the learning rate: either a floating
point value, or a
`tf.keras.optimizers.schedules.LearningRateSchedule` instance.
Defaults to 0.001. Note that `Adadelta` tends to benefit from
higher initial learning rate values compared to other optimizers. To
match the exact form in the original paper, use 1.0.
rho: A `Tensor` or a floating point value. The decay rate. Defaults to
0.95.
epsilon: Small floating point value used to maintain numerical
stability. Defaults to 1e-7.
{{base_optimizer_keyword_args}}
Reference:
- [Zeiler, 2012](http://arxiv.org/abs/1212.5701)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
jit_compile=True,
name="Adadelta",
**kwargs
):
super().__init__(
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
jit_compile=jit_compile,
name=name,
**kwargs
)
self._learning_rate = self._build_learning_rate(learning_rate)
self.rho = rho
self.epsilon = epsilon
def build(self, var_list):
super().build(var_list)
if hasattr(self, "_built") and self._built:
return
self._built = True
self._accumulated_grads = []
self._accumulated_delta_vars = []
for var in var_list:
self._accumulated_grads.append(
self.add_variable_from_reference(var, "accumulated_grad")
)
self._accumulated_delta_vars.append(
self.add_variable_from_reference(var, "accumulated_delta_var")
)
def update_step(self, grad, variable):
"""Update step given gradient and the associated model variable."""
lr = tf.cast(self.learning_rate, variable.dtype)
var_key = self._var_key(variable)
rho = self.rho
accumulated_grad = self._accumulated_grads[self._index_dict[var_key]]
accumulated_delta_var = self._accumulated_delta_vars[
self._index_dict[var_key]
]
def rms(x):
return tf.sqrt(x + self.epsilon)
if isinstance(grad, tf.IndexedSlices):
# Sparse gradients.
accumulated_grad.assign_add((rho - 1) * accumulated_grad)
accumulated_grad.scatter_add(
tf.IndexedSlices(
(1 - rho) * tf.square(grad.values), grad.indices
)
)
delta_var = (
-rms(accumulated_delta_var) * grad / rms(accumulated_grad)
)
accumulated_delta_var.assign(
rho * accumulated_delta_var + (1 - rho) * delta_var * delta_var
)
else:
# Dense gradients.
accumulated_grad.assign(
rho * accumulated_grad + (1 - rho) * grad * grad
)
delta_var = (
-rms(accumulated_delta_var) * grad / rms(accumulated_grad)
)
accumulated_delta_var.assign(
rho * accumulated_delta_var + (1 - rho) * delta_var * delta_var
)
variable.assign_add(lr * delta_var)
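    # A minimal sketch (not part of the original source) of what the dense
    # branch of `update_step` above computes, with `rho` and `epsilon` taken
    # from the constructor and `lr` the current learning rate:
    #
    #   accumulated_grad      <- rho * accumulated_grad + (1 - rho) * grad**2
    #   delta_var             <- -sqrt(accumulated_delta_var + epsilon)
    #                             / sqrt(accumulated_grad + epsilon) * grad
    #   accumulated_delta_var <- rho * accumulated_delta_var
    #                             + (1 - rho) * delta_var**2
    #   variable              <- variable + lr * delta_var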
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
self._learning_rate
),
"rho": self.rho,
"epsilon": self.epsilon,
}
)
return config
Adadelta.__doc__ = Adadelta.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
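# Hedged usage sketch (not part of the original module; the helper name
# `_demo_adadelta_step` and the toy values are illustrative only): one manual
# Adadelta step applied to a two-element variable.
def _demo_adadelta_step():
    var = tf.Variable([1.0, 2.0])
    grad = tf.constant([0.1, 0.1])
    opt = Adadelta(learning_rate=1.0)
    # `apply_gradients` builds the accumulator slots on first use and then
    # applies the update rule sketched above.
    opt.apply_gradients([(grad, var)])
    return var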
| tf-keras/tf_keras/optimizers/adadelta.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/adadelta.py",
"repo_id": "tf-keras",
"token_count": 2707
} | 212 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adamax."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.optimizers.legacy import adamax
from tf_keras.testing_infra import test_combinations
def adamax_update_numpy(
param, g_t, t, m, v, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8
):
m_t = beta1 * m + (1 - beta1) * g_t
v_t = np.maximum(beta2 * v, np.abs(g_t))
param_t = param - (alpha / (1 - beta1 ** (t + 1))) * (m_t / (v_t + epsilon))
return param_t, m_t, v_t
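# The reference implementation in `adamax_update_numpy` above follows the
# Adamax (infinity-norm Adam) update rule:
#   m_t = beta1 * m + (1 - beta1) * g_t
#   v_t = max(beta2 * v, |g_t|)
#   theta_t = theta - alpha / (1 - beta1 ** (t + 1)) * m_t / (v_t + epsilon)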
def adamax_sparse_update_numpy(
param,
indices,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
):
m_t, v_t, param_t = np.copy(m), np.copy(v), np.copy(param)
m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
v_t_slice = np.maximum(beta2 * v[indices], np.abs(g_t))
param_t_slice = param[indices] - (
(alpha / (1 - beta1 ** (t + 1))) * (m_t_slice / (v_t_slice + epsilon))
)
m_t[indices] = m_t_slice
v_t[indices] = v_t_slice
param_t[indices] = param_t_slice
return param_t, m_t, v_t
def get_beta_accumulators(opt, dtype):
local_step = tf.cast(opt.iterations + 1, dtype)
beta_1_t = tf.cast(opt._get_hyper("beta_1"), dtype)
beta_1_power = tf.pow(beta_1_t, local_step)
return beta_1_power
class AdamaxOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def testResourceSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)
m0, v0, m1, v1 = (
zero_slots(),
zero_slots(),
zero_slots(),
zero_slots(),
)
var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np),
tf.constant(grads0_np_indices),
tf.constant([3]),
)
grads1_np_indices = np.array([2, 1], dtype=np.int32)
grads1 = tf.IndexedSlices(
tf.constant(grads1_np),
tf.constant(grads1_np_indices),
tf.constant([3]),
)
opt = adamax.Adamax()
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0, 3.0], var0)
self.assertAllClose([4.0, 5.0, 6.0], var1)
beta1_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adamax
for t in range(3):
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), beta1_power
)
update.run()
var0_np, m0, v0 = adamax_sparse_update_numpy(
var0_np, grads0_np_indices, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adamax_sparse_update_numpy(
var1_np, grads1_np_indices, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0)
self.assertAllCloseAccordingToType(var1_np, var1)
def testSparseDevicePlacement(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for index_dtype in [tf.int32, tf.int64]:
with tf.Graph().as_default(), self.cached_session(
force_gpu=tf.test.is_gpu_available()
):
# If a GPU is available, tests that all optimizer ops can be
# placed on it (i.e. they have GPU kernels).
var = tf.Variable([[1.0], [2.0]])
indices = tf.constant([0, 1], dtype=index_dtype)
g_sum = lambda: tf.reduce_sum(tf.gather(var, indices))
optimizer = adamax.Adamax(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
self.evaluate(tf.compat.v1.global_variables_initializer())
minimize_op.run()
def testSparseRepeatedIndices(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
repeated_index_update_var = tf.Variable(
[[1.0], [2.0]], dtype=dtype
)
aggregated_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)
grad_repeated_index = tf.IndexedSlices(
tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
tf.constant([1, 1]),
tf.constant([2, 1]),
)
grad_aggregated = tf.IndexedSlices(
tf.constant([0.2], shape=[1, 1], dtype=dtype),
tf.constant([1]),
tf.constant([2, 1]),
)
repeated_update = adamax.Adamax().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)]
)
aggregated_update = adamax.Adamax().apply_gradients(
[(grad_aggregated, aggregated_update_var)]
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(
aggregated_update_var, repeated_index_update_var.eval()
)
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(
aggregated_update_var, repeated_index_update_var.eval()
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testBasic(self):
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with self.session(graph=tf.Graph(), use_gpu=True):
# Initialize variables for numpy implementation.
m0 = np.array([0.0, 0.0])
v0 = np.array([0.0, 0.0])
m1 = np.array([0.0, 0.0])
v1 = np.array([0.0, 0.0])
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = adamax.Adamax()
if not tf.executing_eagerly():
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of Adamax
for t in range(3):
beta_1_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
if not tf.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adamax_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adamax_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-2
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-2
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testBasicWithLearningRateDecay(self):
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with self.session(graph=tf.Graph(), use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 0.001
decay = 0.002
opt = adamax.Adamax(learning_rate=learning_rate, decay=decay)
if not tf.executing_eagerly():
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of Adamax
for t in range(3):
beta_1_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
if not tf.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adamax_update_numpy(
var0_np, grads0_np, t, m0, v0, alpha=lr
)
var1_np, m1, v1 = adamax_update_numpy(
var1_np, grads1_np, t, m1, v1, alpha=lr
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-2
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-2
)
def testTensorLearningRate(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = adamax.Adamax(tf.constant(0.001))
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0)
self.assertAllClose([3.0, 4.0], var1)
beta1_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adamax
for t in range(3):
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), beta1_power
)
update.run()
var0_np, m0, v0 = adamax_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adamax_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0)
self.assertAllCloseAccordingToType(var1_np, var1)
def testSharing(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = adamax.Adamax()
update1 = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
update2 = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
beta1_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0)
self.assertAllClose([3.0, 4.0], var1)
# Run 3 steps of intertwined Adamax1 and Adamax2.
for t in range(3):
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), beta1_power
)
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adamax_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adamax_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0)
self.assertAllCloseAccordingToType(var1_np, var1)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testSlotsUniqueEager(self):
v1 = tf.Variable(1.0)
v2 = tf.Variable(1.0)
opt = adamax.Adamax(1.0)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and two unique slot variables for v1 and
# v2.
self.assertLen({id(v) for v in opt.variables()}, 5)
def testConstructAdamaxWithLR(self):
opt = adamax.Adamax(lr=1.0)
opt_2 = adamax.Adamax(learning_rate=0.1, lr=1.0)
opt_3 = adamax.Adamax(learning_rate=0.1)
self.assertIsInstance(opt.lr, tf.Variable)
self.assertIsInstance(opt_2.lr, tf.Variable)
self.assertIsInstance(opt_3.lr, tf.Variable)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/optimizers/legacy/adamax_test.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/adamax_test.py",
"repo_id": "tf-keras",
"token_count": 10260
} | 213 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class of optimizer."""
import abc
import platform
import re
import tensorflow.compat.v2 as tf
from absl import logging
from tf_keras import backend
from tf_keras import initializers
from tf_keras.dtensor import utils as dtensor_utils
from tf_keras.optimizers import utils as optimizer_utils
from tf_keras.optimizers.schedules import learning_rate_schedule
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
class _BaseOptimizer(tf.__internal__.tracking.AutoTrackable):
"""Optimizer base class, which only supports non-distribute use case."""
def __init__(
self,
name,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
jit_compile=True,
**kwargs,
):
self.name = name
self.weight_decay = weight_decay
self.clipnorm = clipnorm
self.global_clipnorm = global_clipnorm
self.clipvalue = clipvalue
self.use_ema = use_ema
# Optimizer only benefits from XLA when training on GPU. So if no
# GPU is found, we turn off XLA.
if (
jit_compile
and tf_utils.can_jit_compile()
and tf.config.list_physical_devices("GPU")
):
self.jit_compile = True
else:
self.jit_compile = False
if platform.system() == "Darwin" and platform.processor() == "arm":
logging.warning(
"At this time, the v2.11+ optimizer "
f"`tf.keras.optimizers.{self.__class__.__name__}` runs slowly "
"on M1/M2 Macs, please use the legacy TF-Keras optimizer "
"instead, located at "
f"`tf.keras.optimizers.legacy.{self.__class__.__name__}`."
)
if use_ema:
# Verify the arguments related to EMA.
if ema_momentum > 1 or ema_momentum < 0:
raise ValueError(
"`ema_momentum` must be in the range [0, 1]. "
f"Received: ema_momentum={ema_momentum}"
)
if ema_overwrite_frequency and (
not isinstance(ema_overwrite_frequency, int)
or ema_overwrite_frequency < 1
):
                raise ValueError(
                    "`ema_overwrite_frequency` must be an integer >= 1 or None. "
"Received: ema_overwrite_frequency="
f"{ema_overwrite_frequency}"
)
self.ema_momentum = ema_momentum
self.ema_overwrite_frequency = ema_overwrite_frequency
if self.clipnorm is not None and self.global_clipnorm is not None:
raise ValueError(
"At most one of `clipnorm` and `global_clipnorm` can "
f"be set. Received: clipnorm={self.clipnorm}, "
f"global_clipnorm={self.global_clipnorm}."
)
self._variables = []
# A dict mapping a model ShardedVariable id to an object that builds a
# ShardedVariable from the corresponding optimizer variables. See
# `add_variable_from_reference`.
self._sharded_variable_builders = self._no_dependency({})
self._create_iteration_variable()
self._process_kwargs(kwargs)
def _create_iteration_variable(self):
"""Create the iterations counter variable."""
with tf.init_scope():
# Lift the variable creation to init scope to avoid environment
# issue.
self._iterations = tf.Variable(
0, name="iteration", dtype=tf.int64, trainable=False
)
self._variables.append(self._iterations)
def _process_kwargs(self, kwargs):
# Remove the `is_legacy_optimizer` arg, which is for serialization only.
kwargs.pop("is_legacy_optimizer", None)
lr = kwargs.pop("lr", None)
if lr:
            logging.warning(
                "`lr` is deprecated in TF-Keras optimizer, please use "
                "`learning_rate` or use the legacy optimizer, e.g., "
f"tf.keras.optimizers.legacy.{self.__class__.__name__}."
)
legacy_kwargs = {
"decay",
"gradient_aggregator",
"gradient_transformers",
}
for k in kwargs:
if k in legacy_kwargs:
raise ValueError(
f"{k} is deprecated in the new TF-Keras optimizer, please "
"check the docstring for valid arguments, or use the "
"legacy optimizer, e.g., "
f"tf.keras.optimizers.legacy.{self.__class__.__name__}."
)
else:
                raise TypeError(
                    f"{k} is not a valid argument, kwargs should be empty "
                    "for `optimizer_experimental.Optimizer`."
)
def _create_or_restore_slot_variable(self, **kwargs):
raise ValueError(
"You are trying to restore a checkpoint from a legacy TF-Keras "
"optimizer into a v2.11+ Optimizer, which can cause "
"errors. Please update the optimizer referenced in your code "
"to be an instance of "
"`tf.keras.optimizers.legacy.Optimizer`, e.g.: "
f"`tf.keras.optimizers.legacy.{self.__class__.__name__}`."
)
def _var_key(self, variable):
"""Get a unique identifier of the given variable."""
# Get the distributed variable if it exists.
# TODO(b/199214315): replace _unique_id with ref() after fixing ref()
# issues on AggregatingVariable.
return variable._unique_id
def _deduplicate_sparse_grad(self, grads):
"""Deduplicate sparse gradient.
For sparse gradients, i.e., gradient is of type `tf.IndexedSlices`,
it is possible that `gradient.indices` has duplicated indices.
This function adds up values for the duplicated indices, and returns
a `tf.IndexedSlices` with indices of unique values.
"""
processed_grads = []
for grad in grads:
if isinstance(grad, tf.IndexedSlices):
values = grad.values
indices = grad.indices
unique_indices, new_index_positions = tf.unique(indices)
summed_values = tf.math.unsorted_segment_sum(
values, new_index_positions, tf.shape(unique_indices)[0]
)
processed_grads.append(
tf.IndexedSlices(
summed_values, unique_indices, grad.dense_shape
)
)
else:
processed_grads.append(grad)
return processed_grads
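    # Hedged worked example for `_deduplicate_sparse_grad` above (illustrative
    # values): a sparse gradient with indices [0, 0, 2] and values
    # [1., 2., 3.] is rewritten as indices [0, 2] with summed values [3., 3.]
    # before being applied.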
@abc.abstractmethod
def update_step(self, gradient, variable):
"""Function to update variable value based on given gradients.
This method must be implemented in customized optimizers.
Args:
gradient: backpropagated gradient of the given variable.
variable: variable whose value needs to be updated.
Returns:
An `Operation` that applies the specified gradients.
"""
raise NotImplementedError
@tf.function(jit_compile=True)
def _update_step_xla(self, gradient, variable, key):
"""A wrapper of `update_step` to enable XLA acceleration.
        Due to the `tf.function` tracing mechanism, for (gradient, variable)
        pairs of the same shape and dtype, the execution graph always invokes
        the first pair it has seen. Thus, we need a `key` argument to make each
        (gradient, variable) pair unique. In addition, XLA cannot understand
        string input, so the key is an integer.
Args:
gradient: backpropagated gradient of the given variable.
variable: variable whose value needs to be updated.
key (int): a unique key that identifies the variable.
Returns:
An `Operation` that applies the specified gradients.
"""
return self._update_step(gradient, variable)
def _update_step(self, gradient, variable):
if getattr(variable, "_unique_id", None) is None:
# Variable has no `_unique_id` if called during `model.save()`, in
# which case we do not want to update the variable.
return
if self._var_key(variable) not in self._index_dict:
            raise KeyError(
                f"The optimizer cannot recognize variable {variable.name}. "
                "This usually means you are trying to call the optimizer to "
                "update different parts of the model separately. Please call "
                "`optimizer.build(variables)` with the full list of trainable "
                "variables before the training loop, or use the legacy "
                "optimizer "
                f"`tf.keras.optimizers.legacy.{self.__class__.__name__}`."
)
self.update_step(gradient, variable)
def compute_gradients(self, loss, var_list, tape=None):
"""Compute gradients of loss on trainable variables.
Args:
loss: `Tensor` or callable. If a callable, `loss` should take no
arguments and return the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable`
            objects. Use a callable when the variable list would otherwise be
            incomplete before `minimize`, since the variables are created the
            first time `loss` is called.
tape: (Optional) `tf.GradientTape`. If `loss` is provided as a
`Tensor`, the tape that computed the `loss` must be provided.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
"""
if not callable(loss) and tape is None:
raise ValueError(
"`tape` is required when a `Tensor` loss is passed. "
f"Received: loss={loss}, tape={tape}."
)
if tape is None:
tape = tf.GradientTape()
if callable(loss):
with tape:
if not callable(var_list):
tape.watch(var_list)
loss = loss()
if callable(var_list):
var_list = var_list()
grads = tape.gradient(loss, var_list)
return list(zip(grads, var_list))
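    # Hedged usage sketch for `compute_gradients` above (illustrative only;
    # `v1`, `v2` and the concrete SGD subclass are assumptions, not part of
    # this module):
    #
    #   v1, v2 = tf.Variable(1.0), tf.Variable(2.0)
    #   opt = tf.keras.optimizers.SGD(learning_rate=0.1)
    #   grads_and_vars = opt.compute_gradients(lambda: v1 * v1 + 2.0 * v2,
    #                                          [v1, v2])
    #   opt.apply_gradients(grads_and_vars)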
def _clip_gradients(self, grads):
clipped_grads = []
if self.clipnorm and self.clipnorm > 0:
for g in grads:
if g is None:
clipped_grads.append(g)
else:
clipped_grads.append(tf.clip_by_norm(g, self.clipnorm))
return clipped_grads
if self.global_clipnorm and self.global_clipnorm > 0:
return tf.clip_by_global_norm(grads, self.global_clipnorm)[0]
if self.clipvalue and self.clipvalue > 0:
for g in grads:
if g is None:
clipped_grads.append(g)
else:
clipped_grads.append(
tf.clip_by_value(
g,
clip_value_min=-self.clipvalue,
clip_value_max=self.clipvalue,
)
)
return clipped_grads
return grads
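    # Hedged worked example for `_clip_gradients` above: with clipnorm=1.0, a
    # gradient [3.0, 4.0] (L2 norm 5.0) is rescaled to [0.6, 0.8]; with
    # clipvalue=0.5 instead, it is clipped element-wise to [0.5, 0.5].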
@property
def iterations(self):
"""The number of training steps this `optimizer` has run.
By default, iterations would be incremented by one every time
`apply_gradients()` is called.
"""
return self._iterations
@iterations.setter
def iterations(self, variable):
if getattr(self, "_built", False):
            raise RuntimeError(
                "Cannot set `iterations` to a new Variable after "
                "the Optimizer weights have been created. Here it is "
                f"attempting to set `iterations` to {variable}. "
                "Usually this means you are trying to set `iterations`"
" after calling `apply_gradients()`. Please set "
"`iterations` before calling `apply_gradients()`."
)
self._iterations = variable
@property
def learning_rate(self):
if not hasattr(self, "_learning_rate") or self._learning_rate is None:
raise ValueError(
"Missing learning rate, please set self.learning_rate at"
" optimizer creation time."
)
lr = self._learning_rate
if isinstance(lr, learning_rate_schedule.LearningRateSchedule):
# If the optimizer takes in LearningRateSchedule, then each call to
# learning_rate would return `self._current_learning_rate`, which is
# updated at each call to `apply_gradients`.
return self._current_learning_rate
return lr
@learning_rate.setter
def learning_rate(self, learning_rate):
if isinstance(
learning_rate, learning_rate_schedule.LearningRateSchedule
):
self._learning_rate = learning_rate
else:
if isinstance(
self._learning_rate, learning_rate_schedule.LearningRateSchedule
):
raise TypeError(
"This optimizer was created with a `LearningRateSchedule`"
" object as its `learning_rate` constructor argument, "
"hence its learning rate is not settable. If you need the"
" learning rate to be settable, you should instantiate "
"the optimizer with a float `learning_rate` argument."
)
self._learning_rate.assign(learning_rate)
@property
@doc_controls.do_not_generate_docs
    def lr(self):
        """Alias of `learning_rate`.
        `lr` is heavily used in workflows built on `optimizer_v2.OptimizerV2`,
        so we keep it for backward compatibility.
"""
return self.learning_rate
@lr.setter
def lr(self, learning_rate):
self.learning_rate = learning_rate
def _build_learning_rate(self, learning_rate):
with tf.init_scope():
if isinstance(
learning_rate, learning_rate_schedule.LearningRateSchedule
):
# Create a variable to hold the current learning rate.
current_learning_rate = tf.convert_to_tensor(
learning_rate(self.iterations)
)
self._current_learning_rate = tf.Variable(
current_learning_rate,
name="current_learning_rate",
dtype=current_learning_rate.dtype,
trainable=False,
)
return learning_rate
return tf.Variable(
learning_rate,
name="learning_rate",
dtype=backend.floatx(),
trainable=False,
)
@abc.abstractmethod
    def build(self, var_list):
        """Initialize the optimizer's variables, such as momentum variables.
This function has to be implemented by subclass optimizers, and subclass
optimizers need to call `super().build(var_list)`.
Args:
var_list: List of model variables to build optimizers on. For example,
SGD optimizer with momentum will store one momentum variable
corresponding to each model variable.
"""
if getattr(self, "_built", False):
return
self._build_index_dict(var_list)
if self.use_ema:
self._model_variables_moving_average = []
for var in var_list:
# Make a copy of the model variables, we will use the copy to
# store the moving average of model variables.
self._model_variables_moving_average.append(
self.add_variable_from_reference(
var, "average", initial_value=var
)
)
def _build_index_dict(self, var_list):
"""Build variable to index dictionary.
        Build a dictionary that maps each variable to its index in the given
        var_list.
Args:
var_list: List of variables to build index dict on.
Returns:
None
"""
self._index_dict = {}
for i, var in enumerate(var_list):
var_key = self._var_key(var)
self._index_dict[var_key] = i
def add_variable(self, shape, dtype=None, initializer="zeros", name=None):
"""Create an optimizer variable.
Args:
shape: A list of integers, a tuple of integers, or a 1-D Tensor of
type int32. Defaults to scalar if unspecified.
dtype: The DType of the optimizer variable to be created. Defaults to
`tf.keras.backend.floatx` if unspecified.
initializer: string or callable. Initializer instance.
name: The name of the optimizer variable to be created.
Returns:
An optimizer variable, in the format of tf.Variable.
"""
if isinstance(initializer, str):
initializer = initializers.get(initializer)
if dtype is None:
dtype = backend.floatx()
if shape is None:
shape = []
variable = tf.Variable(
initial_value=initializer(shape, dtype), name=name, trainable=False
)
self._variables.append(variable)
return variable
def add_variable_from_reference(
self, model_variable, variable_name, shape=None, initial_value=None
):
"""Create an optimizer variable from model variable.
        Create an optimizer variable based on information from a model
        variable. For example, in the SGD optimizer with momentum, for each
        model variable a corresponding momentum variable is created with the
        same shape and dtype.
Args:
model_variable: tf.Variable. The corresponding model variable to the
optimizer variable to be created.
variable_name: String. The name prefix of the optimizer variable to be
            created. The created variable's name will follow the pattern
            `{variable_name}/{model_variable.name}`, e.g., `momentum/dense_1`.
shape: List or Tuple, defaults to None. The shape of the optimizer
variable to be created. If None, the created variable will have the
same shape as `model_variable`.
initial_value: A Tensor, or Python object convertible to a Tensor,
            defaults to None. The initial value of the optimizer variable; if
            None, the initial value defaults to 0.
Returns:
An optimizer variable.
"""
if initial_value is None:
if shape is None:
if model_variable.shape.rank is None:
# When the rank is None, we cannot get a concrete
# `model_variable.shape`, we use dynamic shape.
initial_value = tf.zeros_like(
model_variable, dtype=model_variable.dtype
)
else:
# We cannot always use `zeros_like`, because some cases
# the shape exists while values don't.
initial_value = tf.zeros(
model_variable.shape, dtype=model_variable.dtype
)
else:
initial_value = tf.zeros(shape, dtype=model_variable.dtype)
variable = tf.Variable(
initial_value=initial_value,
name=f"{variable_name}/{model_variable._shared_name}",
dtype=model_variable.dtype,
trainable=False,
)
# If model_variable is a shard of a ShardedVariable, we should add a
# ShardedVariable for all related optimizer variables so that
# checkpointing is robust to different partitionings. Use unique_id to
# dedup ShardedVariables.
if hasattr(model_variable, "_sharded_container"):
sharded_variable = model_variable._sharded_container()
# Get or create builder object
sv_builder = self._sharded_variable_builders.setdefault(
(sharded_variable._unique_id, variable_name),
_ShardedVariableBuilder(len(sharded_variable.variables)),
)
sv_builder.add_shard(variable)
if sv_builder.has_all_shards():
self._variables.append(sv_builder.build())
else:
self._variables.append(variable)
return variable
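    # Hedged example for `add_variable_from_reference` above (illustrative
    # names): for a model variable "dense/kernel:0" of shape (3, 2), calling
    # `add_variable_from_reference(var, "momentum")` returns a zero-initialized,
    # non-trainable variable named along the lines of "momentum/dense/kernel"
    # with the same shape and dtype.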
def _trackable_children(self, save_type="checkpoint", **kwargs):
"""Override in order to coalesce and track `ShardedVariable`s.
If an optimizer variable's corresponding model variable is a shard of a
larger `ShardedVariable`, then we track the optimizer variable in
`self._variables` as a `ShardedVariable` via the logic in
`add_variable_from_reference`. However, most optimizer implementations
additionally keep their variables as attributes, which will be tracked
via `AutoTrackable` functionality and not accumulated into
`ShardedVariable`s.
So, to enable restoration of these attributes in possibly different
sharding configurations, we should save them as `ShardedVariable`s.
Here, any optimizer attributes that are variable shards of a larger
        `ShardedVariable` are replaced by the `ShardedVariable` itself,
which was created in `add_variable_from_reference`.
All non-sharded variables are kept as-is. If none of the model variables
are sharded, this reduces to `AutoTrackable._trackable_children()`.
"""
# Due to object-identity based matching logic in checkpointing, new
# python objects should not be created on each call to
# `_trackable_children`. So instead, only coalesce if not done before.
if not hasattr(self, "_coalesced_children"):
# This new attribute should not be tracked to avoid infinite
# recursion, so wrap in NoDependency
self._coalesced_children = self._no_dependency({})
children = super()._trackable_children(save_type, **kwargs)
for key, val in children.items():
if key not in [
"_variables",
"_index_dict",
"_learning_rate",
"_iterations",
]:
new_val = val
if isinstance(val, list):
# TODO(jmullenbach): handle arbitrary nesting
sv_vals = []
for var in val:
if hasattr(var, "_sharded_container"):
sv = var._sharded_container()
# Use unique id to check existence. `in` would
# attempt element-wise variable value
# comparison.
if not any(
sv._unique_id == other_sv._unique_id
for other_sv in sv_vals
):
sv_vals.append(sv)
else:
sv_vals.append(var)
new_val = tf.__internal__.tracking.wrap(sv_vals)
self._coalesced_children[key] = new_val
else:
self._coalesced_children[key] = val
return self._coalesced_children
def minimize(self, loss, var_list, tape=None):
"""Minimize `loss` by updating `var_list`.
        This method simply computes gradients using `tf.GradientTape` and
        calls `apply_gradients()`. If you want to process the gradients before
        applying them, use `tf.GradientTape` and `apply_gradients()` explicitly
        instead of this function.
Args:
loss: `Tensor` or callable. If a callable, `loss` should take no
arguments and return the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`, or a callable returning the list or tuple of `Variable`
            objects. Use a callable when the variable list would otherwise be
            incomplete before `minimize`, since the variables are created the
            first time `loss` is called.
tape: (Optional) `tf.GradientTape`.
Returns:
None
"""
grads_and_vars = self.compute_gradients(loss, var_list, tape)
self.apply_gradients(grads_and_vars)
def _compute_current_learning_rate(self):
if isinstance(
self._learning_rate, learning_rate_schedule.LearningRateSchedule
):
# Compute the current learning rate at the beginning of variable
# update.
if hasattr(self, "_current_learning_rate"):
self._current_learning_rate.assign(
self._learning_rate(self.iterations)
)
else:
current_learning_rate = tf.convert_to_tensor(
self._learning_rate(self.iterations)
)
self._current_learning_rate = tf.Variable(
current_learning_rate,
name="current_learning_rate",
dtype=current_learning_rate.dtype,
trainable=False,
)
def exclude_from_weight_decay(self, var_list=None, var_names=None):
"""Exclude variables from weight decay.
This method must be called before the optimizer's `build` method is
        called. You can pass specific variables to exclude, or pass a list of
        strings to use as anchor words: if any of these strings appears in a
        variable's name, that variable is excluded from weight decay.
Args:
var_list: A list of `tf.Variable`s to exclude from weight decay.
var_names: A list of strings. If any string in `var_names` appear
in the model variable's name, then this model variable is
excluded from weight decay. For example, `var_names=['bias']`
excludes all bias variables from weight decay.
"""
if hasattr(self, "_built") and self._built:
            raise ValueError(
                "`exclude_from_weight_decay()` can only be configured before "
                "the optimizer is built."
)
if var_list:
self._exclude_from_weight_decay = [
self._var_key(variable) for variable in var_list
]
else:
self._exclude_from_weight_decay = []
self._exclude_from_weight_decay_names = var_names or []
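    # Hedged usage sketch for `exclude_from_weight_decay` above (illustrative
    # only; AdamW is just one optimizer that supports weight decay):
    #
    #   opt = tf.keras.optimizers.AdamW(weight_decay=0.004)
    #   opt.exclude_from_weight_decay(var_names=["bias"])
    #   # ... then build/compile; every variable whose name contains "bias"
    #   # is skipped by `_apply_weight_decay`.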
def _use_weight_decay(self, variable):
exclude_from_weight_decay = getattr(
self, "_exclude_from_weight_decay", []
)
exclude_from_weight_decay_names = getattr(
self, "_exclude_from_weight_decay_names", []
)
variable_id = self._var_key(variable)
for exclude_id in exclude_from_weight_decay:
if variable_id == exclude_id:
return False
for name in exclude_from_weight_decay_names:
if re.search(name, variable.name) is not None:
return False
return True
def apply_gradients(self, grads_and_vars, name=None):
"""Apply gradients to variables.
Args:
grads_and_vars: List of `(gradient, variable)` pairs.
name: string, defaults to None. The name of the namescope to
use when creating variables. If None, `self.name` will be used.
Returns:
A `tf.Variable`, representing the current iteration.
Raises:
TypeError: If `grads_and_vars` is malformed.
"""
self._compute_current_learning_rate()
grads_and_vars = list(grads_and_vars)
if len(grads_and_vars) == 0:
            # It is possible that `grads_and_vars` is empty. In this case,
            # `apply_gradients` is a no-op.
return self._iterations
grads, trainable_variables = zip(*grads_and_vars)
scope_name = name or self.name or "optimizer"
with tf.name_scope(scope_name):
with tf.init_scope():
# Lift variable creation to init scope to avoid environment
# issues.
self.build(trainable_variables)
grads_and_vars = optimizer_utils.filter_empty_gradients(
grads_and_vars
)
if len(list(grads_and_vars)) == 0:
# Check again after filtering gradients.
return self._iterations
grads, trainable_variables = zip(*grads_and_vars)
grads = self._clip_gradients(grads)
grads = self._deduplicate_sparse_grad(grads)
self._apply_weight_decay(trainable_variables)
grads_and_vars = list(zip(grads, trainable_variables))
iteration = self._internal_apply_gradients(grads_and_vars)
# Apply variable constraints after applying gradients.
for variable in trainable_variables:
if variable.constraint is not None:
variable.assign(variable.constraint(variable))
return iteration
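    # Hedged usage sketch for `apply_gradients` above (illustrative only;
    # `model`, `loss_fn`, `x` and `y` are assumptions):
    #
    #   with tf.GradientTape() as tape:
    #       loss = loss_fn(y, model(x))
    #   grads = tape.gradient(loss, model.trainable_variables)
    #   optimizer.apply_gradients(zip(grads, model.trainable_variables))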
def _apply_weight_decay(self, variables):
if self.weight_decay is None:
return
for variable in variables:
if self._use_weight_decay(variable):
lr = tf.cast(self.learning_rate, variable.dtype)
wd = tf.cast(self.weight_decay, variable.dtype)
variable.assign_sub(variable * wd * lr)
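    # In other words, `_apply_weight_decay` above performs decoupled weight
    # decay, variable <- variable - learning_rate * weight_decay * variable,
    # on eligible variables before the gradient-based update is applied.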
    def _internal_apply_gradients(self, grads_and_vars):
        """Helper function for applying gradients.
        This is required to separate out the distributed-training logic.
Args:
grads_and_vars: List of (gradient, variable) pairs.
"""
if self.jit_compile:
for grad, var in grads_and_vars:
self._update_step_xla(grad, var, id(self._var_key(var)))
else:
for grad, var in grads_and_vars:
self._update_step(grad, var)
return self.iterations.assign_add(1)
def _update_model_variables_moving_average(self, var_list):
"""Update the stored moving average using the latest value."""
if self.use_ema:
for var in var_list:
average = self._model_variables_moving_average[
self._index_dict[self._var_key(var)]
]
average.assign(
self.ema_momentum * average + (1 - self.ema_momentum) * var
)
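    # Hedged numeric example for the EMA update above: with ema_momentum=0.99,
    # a stored average of 1.0 and a current variable value of 2.0 produce a new
    # average of 0.99 * 1.0 + 0.01 * 2.0 = 1.01.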
    def _overwrite_model_variables_with_average_value(self, var_list):
        """Overwrite model variables with their moving averages."""
for var in var_list:
average = self._model_variables_moving_average[
self._index_dict[self._var_key(var)]
]
var.assign(average)
def finalize_variable_values(self, var_list):
"""Set the final value of model's trainable variables.
        Sometimes there are extra steps before ending the variable updates,
        such as overwriting the model variables with their moving-average
        values.
Args:
var_list: list of model variables.
"""
if self.use_ema:
# If the optimizer uses EMA, then when finalizing, we replace the
            # model variable values with their moving averages stored inside
            # the optimizer.
self._overwrite_model_variables_with_average_value(var_list)
def _serialize_hyperparameter(self, hyperparameter):
"""Serialize a hyperparameter that can be a numeric or callable."""
if isinstance(
hyperparameter, learning_rate_schedule.LearningRateSchedule
):
return learning_rate_schedule.serialize(hyperparameter)
if isinstance(hyperparameter, tf.Variable):
return hyperparameter.numpy()
if callable(hyperparameter):
return hyperparameter()
return hyperparameter
def get_config(self):
"""Returns the config of the optimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Subclass optimizer should override this method to include other
hyperparameters.
Returns:
Python dictionary.
"""
config = {
"name": self.name,
"weight_decay": self.weight_decay,
"clipnorm": self.clipnorm,
"global_clipnorm": self.global_clipnorm,
"clipvalue": self.clipvalue,
"use_ema": self.use_ema,
"ema_momentum": self.ema_momentum,
"ema_overwrite_frequency": self.ema_overwrite_frequency,
"jit_compile": self.jit_compile,
"is_legacy_optimizer": False,
}
return config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`, capable of instantiating the
same optimizer from the config dictionary.
Args:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional
user-defined Python objects needed to recreate this optimizer.
Returns:
An optimizer instance.
"""
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"], custom_objects=custom_objects
)
return cls(**config)
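    # Hedged usage sketch for the config round trip above (illustrative only;
    # Adam is just one concrete subclass):
    #
    #   opt = tf.keras.optimizers.Adam(learning_rate=1e-3)
    #   opt_clone = tf.keras.optimizers.Adam.from_config(opt.get_config())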
@property
def variables(self):
"""Returns variables of this optimizer."""
return CallableList(self._variables)
def set_weights(self, weights):
"""Set the weights of the optimizer.
Args:
weights: a list of `tf.Variable`s or numpy arrays, the target values
of optimizer variables. It should have the same order as
`self._variables`.
"""
if not getattr(self, "_built", False):
raise ValueError(
"You are calling `set_weights()` on an optimizer that has not "
"yet been built. Please call "
"`optimizer.build(trainable_variables)` to create the "
"optimizer weights before calling `set_weights()`."
)
for variable, weight in zip(self._variables, weights):
if variable.shape != weight.shape:
raise ValueError(
f"Optimizer variable {self._var_key(variable)} has shape "
f"{str(variable.shape)} not compatible with provided "
f"weight shape {str(weight.shape)}."
)
variable.assign(weight)
def save_own_variables(self, store):
"""Get the state of this optimizer object."""
for i, variable in enumerate(self.variables):
store[str(i)] = variable.numpy()
def load_own_variables(self, store):
"""Set the state of this optimizer object."""
if len(store.keys()) != len(self.variables):
msg = (
f"Skipping variable loading for optimizer '{self.name}', "
f"because it has {len(self.variables)} variables whereas "
f"the saved optimizer has {len(store.keys())} variables. "
)
if len(self.variables) == 0:
msg += (
"This is likely because the optimizer has not been "
"called/built yet."
)
logging.warning(msg)
return
for i, variable in enumerate(self.variables):
variable.assign(store[str(i)])
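# --- Illustrative sketch (not part of the original module) -------------------
# Demonstrates the build-then-`set_weights` flow documented above: the
# optimizer has to be built against the trainable variables before its state
# can be restored. The tiny model and the choice of the Adam optimizer are
# assumptions made for this example only.
def _example_save_and_restore_optimizer_state():
    import tensorflow as tf
    model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])
    opt = tf.keras.optimizers.Adam()
    opt.build(model.trainable_variables)  # creates the slot variables
    saved_state = [v.numpy() for v in opt.variables]
    # Later, restore the state into a freshly built optimizer with the same
    # configuration and the same variable order.
    restored = tf.keras.optimizers.Adam()
    restored.build(model.trainable_variables)
    restored.set_weights(saved_state)
    return restored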
base_optimizer_keyword_args = """name: String. The name to use
for momentum accumulator weights created by
the optimizer.
weight_decay: Float, defaults to None. If set, weight decay is applied.
clipnorm: Float. If set, the gradient of each weight is individually
clipped so that its norm is no higher than this value.
clipvalue: Float. If set, the gradient of each weight is clipped to be no
higher than this value.
global_clipnorm: Float. If set, the gradient of all weights is clipped so
that their global norm is no higher than this value.
use_ema: Boolean, defaults to False. If True, exponential moving average
(EMA) is applied. EMA consists of computing an exponential moving
average of the weights of the model (as the weight values change after
each training batch), and periodically overwriting the weights with
their moving average.
ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`.
This is the momentum to use when computing
the EMA of the model's weights:
`new_average = ema_momentum * old_average + (1 - ema_momentum) *
current_variable_value`.
ema_overwrite_frequency: Int or None, defaults to None. Only used if
`use_ema=True`. Every `ema_overwrite_frequency` steps of iterations,
        we overwrite the model variables with their moving averages.
If None, the optimizer
does not overwrite model variables in the middle of training, and you
need to explicitly overwrite the variables at the end of training
by calling `optimizer.finalize_variable_values()`
(which updates the model
variables in-place). When using the built-in `fit()` training loop,
this happens automatically after the last epoch,
and you don't need to do anything.
jit_compile: Boolean, defaults to True.
If True, the optimizer will use XLA
compilation. If no GPU device is found, this flag will be ignored.
mesh: optional `tf.experimental.dtensor.Mesh` instance. When provided,
the optimizer will be run in DTensor mode, e.g. state
tracking variable will be a DVariable, and aggregation/reduction will
happen in the global DTensor context.
**kwargs: keyword arguments only used for backward compatibility."""
@keras_export(
"keras.optimizers.Optimizer",
"keras.optimizers.experimental.Optimizer",
v1=[],
)
class Optimizer(_BaseOptimizer):
"""Abstract optimizer base class.
This class supports distributed training. If you want to implement your own
optimizer, please subclass this class instead of _BaseOptimizer.
Args:
{{base_optimizer_keyword_args}}
### Usage
```python
# Create an optimizer with the desired parameters.
opt = keras.optimizers.SGD(learning_rate=0.1)
var1, var2 = tf.Variable(1.0), tf.Variable(2.0)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
# Call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
```
### Processing gradients before applying them
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `tf.GradientTape`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = tf.keras.optimizers.experimental.SGD(learning_rate=0.1)
var1, var2 = tf.Variable(1.0), tf.Variable(2.0)
# Compute the gradients for a list of variables.
with tf.GradientTape() as tape:
loss = 3 * var1 * var1 + 2 * var2 * var2
grads = tape.gradient(loss, [var1, var2])
# Process the gradients.
grads[0] = grads[0] + 1
# Ask the optimizer to apply the gradients on variables.
opt.apply_gradients(zip(grads, [var1, var2]))
```
### Dynamic learning rate
    Dynamic learning rate can be achieved by setting the learning rate to a
    built-in or custom `tf.keras.optimizers.schedules.LearningRateSchedule`.
Example:
>>> var = tf.Variable(np.random.random(size=(1,)))
>>> learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
... initial_learning_rate=.01, decay_steps=20, decay_rate=.1)
>>> opt = tf.keras.optimizers.experimental.SGD(learning_rate=learning_rate)
>>> loss = lambda: 3 * var
>>> opt.minimize(loss, var_list=[var])
    ### Gradient clipping
    Users can clip the gradients before applying them to variables by setting
    `clipnorm`, `clipvalue` and `global_clipnorm`. Note that only one of
    `clipnorm` and `global_clipnorm` can be set.
Example:
>>> opt = tf.keras.optimizers.experimental.SGD(learning_rate=1, clipvalue=1)
>>> var1, var2 = tf.Variable(2.0), tf.Variable(2.0)
>>> with tf.GradientTape() as tape:
... loss = 2 * var1 + 2 * var2
>>> grads = tape.gradient(loss, [var1, var2])
>>> print([grads[0].numpy(), grads[1].numpy()])
[2.0, 2.0]
>>> opt.apply_gradients(zip(grads, [var1, var2]))
>>> # Without clipping, we should get [0, 0], but as gradients are clipped
>>> # to have max value 1, we get [1.0, 1.0].
>>> print([var1.numpy(), var2.numpy()])
[1.0, 1.0]
### Using weight decay.
    Weight decay can, in certain scenarios, boost the model's performance.
    Keras has built-in support for weight decay in all optimizers. Users can
    apply weight decay by setting the `weight_decay` argument.
>>> opt = tf.keras.optimizers.experimental.SGD(1, weight_decay=0.004)
>>> grads, var1, var2 = tf.zeros(()), tf.Variable(2.0), tf.Variable(2.0)
>>> # You can exclude variables from weight decay, in this case we
>>> # exclude `var2`.
>>> opt.exclude_from_weight_decay(var_list=[var2])
>>> opt.apply_gradients(zip([grads, grads], [var1, var2]))
>>> print([var1.numpy(), var2.numpy()])
[1.992, 2.0]
### Using exponential moving average.
    Empirically it has been found that using the exponential moving average
    (EMA) of the trained parameters of a deep network achieves better
    performance than using the trained parameters directly. TF-Keras optimizers
    allow users to compute this moving average and overwrite the model
    variables at the desired time.
Example:
```python
# Create an SGD optimizer with EMA on. `ema_momentum` controls the decay
# rate of the moving average. `ema_momentum=1` means no decay and the stored
    # moving average is always the model variable's initial value before
    # training. Conversely, `ema_momentum=0` is equivalent to not using EMA.
# `ema_overwrite_frequency=3` means every 3 iterations, we overwrite the
# trainable variables with their moving average values.
opt = tf.keras.optimizers.experimental.SGD(
learning_rate=1,
use_ema=True,
ema_momentum=0.5,
ema_overwrite_frequency=3)
var1, var2 = tf.Variable(2.0), tf.Variable(2.0)
with tf.GradientTape() as tape:
loss = var1 + var2
grads = tape.gradient(loss, [var1, var2])
# First iteration: [var1, var2] = [1.0, 1.0]
opt.apply_gradients(zip(grads, [var1, var2]))
print([var1, var2])
# Second iteration: [var1, var2] = [0.0, 0.0]
opt.apply_gradients(zip(grads, [var1, var2]))
print([var1, var2])
# Third iteration, without EMA, we should see [var1, var2] = [-1.0, -1.0],
# but overwriting results in [var1, var2] = [-0.125, -0.125]. The full
# calculation for the moving average of var1 is:
# var1=2*0.5**3+1*(1-0.5)*0.5**2+0*(1-0.5)*0.5**1+(-1)*(1-0.5)=-0.125.
opt.apply_gradients(zip(grads, [var1, var2]))
print([var1, var2])
```
    When the optimizer is constructed with `use_ema=True`, in a custom training
    loop users can explicitly call `finalize_variable_values()` to overwrite
    the trainable variables with their EMA values. `finalize_variable_values()`
    is called automatically at the end of `model.fit()`.
### Use with `tf.distribute.Strategy`
This optimizer class is `tf.distribute.Strategy` aware, which means it
automatically sums gradients across all replicas. To aggregate gradients
yourself, call `apply_gradients` with `skip_gradients_aggregation` set to
True. This is useful if you need to process aggregated gradients.
```python
    # This example is not runnable; it consists of dummy code for a simple
    # tutorial.
strategy = tf.distribute.experimental.TPUStrategy()
with strategy.scope():
opt = tf.keras.optimizers.experimental.SGD()
model = magic_function_that_returns_model()
gradients = magic_function_that_returns_gradients()
# Custom logic to aggregate gradients.
gradients = strategy.reduce("SUM", gradients, axis=None)
opt.apply_gradients(zip(gradients, model.trainable_variables),
skip_gradients_aggregation=True)
```
### Creating a custom optimizer
    If you intend to create your own optimization algorithm, please inherit
    from this class and override the following methods:
    - `build`: Create your optimizer-related variables, such as `momentums` in
      the SGD optimizer.
    - `update_step`: Implement your optimizer's variable updating logic.
    - `get_config`: serialization of the optimizer; include all
      hyperparameters.
    Your optimizer will automatically be compatible with TensorFlow distributed
    training if you subclass `optimizer_experimental.Optimizer`.
"""
def __init__(
self,
name,
weight_decay=0,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
jit_compile=True,
**kwargs,
):
"""Create a new Optimizer."""
mesh = kwargs.pop("mesh", None)
self._mesh = mesh
super().__init__(
name,
weight_decay,
clipnorm,
clipvalue,
global_clipnorm,
use_ema,
ema_momentum,
ema_overwrite_frequency,
jit_compile,
**kwargs,
)
self._distribution_strategy = tf.distribute.get_strategy()
self._run_with_dtensor = dtensor_utils.running_with_dtensor_strategy()
def add_variable_from_reference(
self, model_variable, variable_name, shape=None, initial_value=None
):
if self._mesh:
if initial_value is None:
# Use tf.zeros_like which will propagate the layout information
# from the model weights if any.
initial_value = tf.zeros_like(model_variable)
elif isinstance(initial_value, tf.Tensor):
initial_value = tf.experimental.dtensor.copy_to_mesh(
initial_value,
tf.experimental.dtensor.Layout.replicated(
self._mesh, rank=initial_value.shape.rank
),
)
variable = tf.experimental.dtensor.DVariable(
initial_value=initial_value,
name=f"{variable_name}/{model_variable._shared_name}",
dtype=model_variable.dtype,
trainable=False,
)
self._variables.append(variable)
return variable
else:
strategy = tf.distribute.get_strategy()
with strategy.extended.colocate_vars_with(model_variable):
return super().add_variable_from_reference(
model_variable, variable_name, shape, initial_value
)
def _create_iteration_variable(self):
if self._mesh:
init_val = tf.constant(0, dtype=tf.int64)
init_val = tf.experimental.dtensor.copy_to_mesh(
init_val,
tf.experimental.dtensor.Layout.replicated(self._mesh, rank=0),
)
with tf.init_scope():
# Lift the variable creation to init scope to avoid environment
# issue.
self._iterations = tf.experimental.dtensor.DVariable(
init_val, name="iteration"
)
self._variables.append(self._iterations)
else:
super()._create_iteration_variable()
def _var_key(self, variable):
"""Get a unique identifier of the given variable."""
# Get the distributed variable if it exists.
# TODO(b/197554203): replace _distributed_container() with a public api.
if hasattr(variable, "_distributed_container"):
variable = variable._distributed_container()
elif (
tf_utils.is_extension_type(variable)
and hasattr(variable, "handle")
and hasattr(variable.handle, "_distributed_container")
):
# For ResourceVariables, the _distributed_container attribute
# is added to their handle tensors.
variable = variable.handle._distributed_container()
return super()._var_key(variable)
def aggregate_gradients(self, grads_and_vars):
"""Aggregate gradients on all devices.
By default, we will perform reduce_sum of gradients across devices.
Users can implement their own aggregation logic by overriding this
method.
Args:
grads_and_vars: List of (gradient, variable) pairs.
Returns:
List of (gradient, variable) pairs.
"""
if self._mesh or self._run_with_dtensor:
logging.warning(
"Calling aggregate_gradients is unnecessary when the model "
"is used with DTensor, which includes aggregation of "
"replicated gradients as part of backward pass."
)
return grads_and_vars
else:
return optimizer_utils.all_reduce_sum_gradients(grads_and_vars)
def apply_gradients(
self,
grads_and_vars,
name=None,
skip_gradients_aggregation=False,
**kwargs,
):
"""Apply gradients to variables.
Args:
grads_and_vars: List of `(gradient, variable)` pairs.
name: string, defaults to None. The name of the namescope to
use when creating variables. If None, `self.name` will be used.
skip_gradients_aggregation: If true, gradients aggregation will not be
performed inside optimizer. Usually this arg is set to True when you
write custom code aggregating gradients outside the optimizer.
**kwargs: keyword arguments only used for backward compatibility.
Returns:
A `tf.Variable`, representing the current iteration.
Raises:
TypeError: If `grads_and_vars` is malformed.
RuntimeError: If called in a cross-replica context.
"""
if self._mesh or self._run_with_dtensor:
# Skip any usage of strategy logic for DTensor
return super().apply_gradients(grads_and_vars, name=name)
# `experimental_aggregate_gradients` is an arg in `apply_gradients` of
# v2 optimizer -- the reverse of `skip_gradients_aggregation`.
# We read it from kwargs for backward compatibility.
experimental_aggregate_gradients = kwargs.pop(
"experimental_aggregate_gradients", True
)
if not skip_gradients_aggregation and experimental_aggregate_gradients:
grads_and_vars = self.aggregate_gradients(grads_and_vars)
return super().apply_gradients(grads_and_vars, name=name)
def _apply_weight_decay(self, variables):
# Apply weight decay in distributed setup.
if self.weight_decay is None:
return
def distributed_apply_weight_decay(distribution, variables, **kwargs):
def weight_decay_fn(variable):
if self._use_weight_decay(variable):
lr = tf.cast(self.learning_rate, variable.dtype)
wd = tf.cast(self.weight_decay, variable.dtype)
variable.assign_sub(variable * wd * lr)
for variable in variables:
distribution.extended.update(
variable, weight_decay_fn, group=False
)
tf.__internal__.distribute.interim.maybe_merge_call(
distributed_apply_weight_decay,
self._distribution_strategy,
variables,
)
def _internal_apply_gradients(self, grads_and_vars):
if self._mesh or self._run_with_dtensor:
# Skip any usage of strategy logic for DTensor
return super()._internal_apply_gradients(grads_and_vars)
return tf.__internal__.distribute.interim.maybe_merge_call(
self._distributed_apply_gradients_fn,
self._distribution_strategy,
grads_and_vars,
)
def _overwrite_model_variables_with_average_value(self, var_list):
"""Overwrite model variables with their moving average values.
This function overwrites variables on each device.
Args:
var_list: list of model variables.
"""
if self._mesh or self._run_with_dtensor:
# Skip any usage of strategy logic for DTensor
super()._overwrite_model_variables_with_average_value(var_list)
strategy = self._distribution_strategy
# Override model variable by the stored average value on all devices.
for var in var_list:
average = self._model_variables_moving_average[
self._index_dict[self._var_key(var)]
]
strategy.extended.update(
var, lambda a, b: a.assign(b), args=(average,)
)
def _build_learning_rate(self, learning_rate):
if not self._mesh:
return super()._build_learning_rate(learning_rate)
# For DTensor
variable_creation = tf.experimental.dtensor.DVariable
init_value_convert_fn = lambda x: tf.experimental.dtensor.copy_to_mesh(
x, tf.experimental.dtensor.Layout.replicated(self._mesh, rank=0)
)
if isinstance(
learning_rate, learning_rate_schedule.LearningRateSchedule
):
current_learning_rate = tf.convert_to_tensor(
learning_rate(self.iterations)
)
current_learning_rate = init_value_convert_fn(current_learning_rate)
# Create a variable to hold the current learning rate.
# Note that the init value `learning_rate(self.iterations)` should
# have the correct layout information from self.iterations.
self._current_learning_rate = variable_creation(
current_learning_rate,
name="learning_rate",
dtype=tf.float32,
)
return learning_rate
init_val = init_value_convert_fn(
tf.constant(learning_rate, dtype=tf.float32)
)
return variable_creation(
init_val,
name="learning_rate",
dtype=backend.floatx(),
trainable=False,
)
def _update_model_variables_moving_average(self, var_list):
"""Update the stored moving average using the latest value."""
if self.use_ema:
def update_average(average, var):
average.assign(
self.ema_momentum * average + (1 - self.ema_momentum) * var
)
for var in var_list:
average = self._model_variables_moving_average[
self._index_dict[self._var_key(var)]
]
self._distribution_strategy.extended.update(
average, update_average, args=(var,), group=False
)
def _distributed_apply_gradients_fn(
self, distribution, grads_and_vars, **kwargs
):
"""`apply_gradients` using a `DistributionStrategy`."""
def apply_grad_to_update_var(var, grad):
if self.jit_compile:
return self._update_step_xla(grad, var, id(self._var_key(var)))
else:
return self._update_step(grad, var)
for grad, var in grads_and_vars:
distribution.extended.update(
var, apply_grad_to_update_var, args=(grad,), group=False
)
if self.use_ema:
_, var_list = zip(*grads_and_vars)
self._update_model_variables_moving_average(var_list)
if self.ema_overwrite_frequency:
# Only when self.ema_overwrite_frequency is not None, we
# overwrite the model variables.
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
tf.cond(
tf.cast(should_overwrite_model_vars, tf.bool),
true_fn=lambda: self._overwrite_model_variables_with_average_value( # noqa: E501
var_list
),
false_fn=lambda: None,
)
return self.iterations.assign_add(1)
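# --- Illustrative sketch (not part of the original module) -------------------
# Demonstrates the EMA workflow from the class docstring in a custom training
# loop: outside of `model.fit()`, `finalize_variable_values()` has to be called
# explicitly so the trainable variables are overwritten with their moving
# averages. The model, data, and hyperparameters are assumptions made for this
# example only.
def _example_ema_custom_training_loop():
    import tensorflow as tf
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    opt = tf.keras.optimizers.SGD(
        learning_rate=0.1, use_ema=True, ema_momentum=0.9
    )
    x = tf.ones((8, 4))
    y = tf.zeros((8, 1))
    for _ in range(10):
        with tf.GradientTape() as tape:
            loss = tf.reduce_mean(tf.square(model(x, training=True) - y))
        grads = tape.gradient(loss, model.trainable_variables)
        opt.apply_gradients(zip(grads, model.trainable_variables))
    # Overwrite the model variables with their EMA values.
    opt.finalize_variable_values(model.trainable_variables)
    return model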
class RestoredOptimizer(Optimizer):
def __init__(self):
super().__init__("RestoredOptimizer")
def get_config(self):
raise NotImplementedError(
"Restoring functional Optimizers from SavedModels is not currently "
"supported. Please file a feature request if this limitation "
"bothers you."
)
class CallableList(list):
"""Temporary shim to support both `opt.variables()` and `opt.variables`."""
def __call__(self):
return self
class _ShardedVariableBuilder:
"""Accumulate variable shards into a `ShardedVariable`."""
def __init__(self, num_shards):
self.shards = [None] * num_shards
def add_shard(self, shard):
# Get shard index from name
shard_idx = int(shard.name.split("part_")[-1].split(":")[0])
if self.shards[shard_idx] is None:
self.shards[shard_idx] = shard
else:
raise ValueError(
"Cannot add duplicate optimizer variable from "
f"shard variable {shard.name}"
)
def has_all_shards(self):
return all([shard is not None for shard in self.shards])
def build(self):
return tf.__internal__.distribute.ShardedVariable(self.shards)
# Register the optimizer so it can be loaded from a SavedModel.
# When `keras_2` is installed in the same env, it raises an assertion error for
# duplicate registration with the same name. Rename the symbol in this case.
try:
tf.__internal__.saved_model.load.register_revived_type(
"experimentalOptimizer",
lambda obj: isinstance(obj, Optimizer),
versions=[
tf.__internal__.saved_model.load.VersionedTypeRegistration(
object_factory=lambda proto: RestoredOptimizer(),
version=2,
min_producer_version=1,
min_consumer_version=1,
)
],
)
except AssertionError:
tf.__internal__.saved_model.load.register_revived_type(
"tf_keras_experimentalOptimizer",
lambda obj: isinstance(obj, Optimizer),
versions=[
tf.__internal__.saved_model.load.VersionedTypeRegistration(
object_factory=lambda proto: RestoredOptimizer(),
version=2,
min_producer_version=1,
min_consumer_version=1,
)
],
)
Optimizer.__doc__ = Optimizer.__doc__.replace(
"{{base_optimizer_keyword_args}}", base_optimizer_keyword_args
)
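# --- Illustrative sketch (not part of the original module) -------------------
# A minimal custom optimizer following the `build` / `update_step` /
# `get_config` contract described in the `Optimizer` docstring above. The
# class name and the momentum-SGD update rule are assumptions made for this
# example only; sparse gradients are not handled.
class _ExampleMomentumSGD(Optimizer):
    def __init__(
        self,
        learning_rate=0.01,
        momentum=0.9,
        name="ExampleMomentumSGD",
        **kwargs,
    ):
        super().__init__(name=name, **kwargs)
        self._learning_rate = self._build_learning_rate(learning_rate)
        self.momentum = momentum
    def build(self, var_list):
        # Create one momentum slot variable per trainable variable.
        super().build(var_list)
        if hasattr(self, "_built") and self._built:
            return
        self.momentums = [
            self.add_variable_from_reference(var, "momentum")
            for var in var_list
        ]
        self._built = True
    def update_step(self, gradient, variable):
        lr = tf.cast(self.learning_rate, variable.dtype)
        m = self.momentums[self._index_dict[self._var_key(variable)]]
        m.assign(self.momentum * m - lr * gradient)
        variable.assign_add(m)
    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    self._learning_rate
                ),
                "momentum": self.momentum,
            }
        )
        return config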
| tf-keras/tf_keras/optimizers/optimizer.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/optimizer.py",
"repo_id": "tf-keras",
"token_count": 27298
} | 214 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in WideNDeep model classes."""
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import backend
from tf_keras import layers as layer_module
from tf_keras.engine import base_layer
from tf_keras.engine import data_adapter
from tf_keras.engine import training as keras_training
from tf_keras.saving import serialization_lib
# isort: off
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.experimental.WideDeepModel",
v1=["keras.experimental.WideDeepModel", "keras.models.WideDeepModel"],
)
@deprecation.deprecated_endpoints("keras.experimental.WideDeepModel")
class WideDeepModel(keras_training.Model):
r"""Wide & Deep Model for regression and classification problems.
    This model jointly trains a linear model and a DNN model.
Example:
```python
linear_model = LinearModel()
dnn_model = keras.Sequential([keras.layers.Dense(units=64),
keras.layers.Dense(units=1)])
combined_model = WideDeepModel(linear_model, dnn_model)
combined_model.compile(optimizer=['sgd', 'adam'],
loss='mse', metrics=['mse'])
# define dnn_inputs and linear_inputs as separate numpy arrays or
# a single numpy array if dnn_inputs is same as linear_inputs.
combined_model.fit([linear_inputs, dnn_inputs], y, epochs)
# or define a single `tf.data.Dataset` that contains a single tensor or
# separate tensors for dnn_inputs and linear_inputs.
dataset = tf.data.Dataset.from_tensors(([linear_inputs, dnn_inputs], y))
combined_model.fit(dataset, epochs)
```
    Both the linear and DNN models can be pre-compiled and trained separately
    before joint training:
Example:
```python
linear_model = LinearModel()
linear_model.compile('adagrad', 'mse')
linear_model.fit(linear_inputs, y, epochs)
dnn_model = keras.Sequential([keras.layers.Dense(units=1)])
dnn_model.compile('rmsprop', 'mse')
dnn_model.fit(dnn_inputs, y, epochs)
combined_model = WideDeepModel(linear_model, dnn_model)
combined_model.compile(optimizer=['sgd', 'adam'],
loss='mse', metrics=['mse'])
combined_model.fit([linear_inputs, dnn_inputs], y, epochs)
```
"""
def __init__(self, linear_model, dnn_model, activation=None, **kwargs):
"""Create a Wide & Deep Model.
Args:
linear_model: a premade LinearModel, its output must match the output
of the dnn model.
dnn_model: a `tf.keras.Model`, its output must match the output of the
linear model.
activation: Activation function. Set it to None to maintain a linear
activation.
**kwargs: The keyword arguments that are passed on to
BaseLayer.__init__. Allowed keyword arguments include `name`.
"""
super().__init__(**kwargs)
self.linear_model = linear_model
self.dnn_model = dnn_model
self.activation = activations.get(activation)
def call(self, inputs, training=None):
if not isinstance(inputs, (tuple, list)) or len(inputs) != 2:
linear_inputs = dnn_inputs = inputs
else:
linear_inputs, dnn_inputs = inputs
linear_output = self.linear_model(linear_inputs)
if self.dnn_model._expects_training_arg:
if training is None:
training = backend.learning_phase()
dnn_output = self.dnn_model(dnn_inputs, training=training)
else:
dnn_output = self.dnn_model(dnn_inputs)
output = tf.nest.map_structure(
lambda x, y: (x + y), linear_output, dnn_output
)
if self.activation:
return tf.nest.map_structure(self.activation, output)
return output
# This does not support gradient scaling and LossScaleOptimizer.
def train_step(self, data):
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses
)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
if isinstance(self.optimizer, (list, tuple)):
linear_vars = self.linear_model.trainable_variables
dnn_vars = self.dnn_model.trainable_variables
linear_grads, dnn_grads = tape.gradient(
loss, (linear_vars, dnn_vars)
)
linear_optimizer = self.optimizer[0]
dnn_optimizer = self.optimizer[1]
linear_optimizer.apply_gradients(zip(linear_grads, linear_vars))
dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))
else:
trainable_variables = self.trainable_variables
grads = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(grads, trainable_variables))
return {m.name: m.result() for m in self.metrics}
def _make_train_function(self):
# Only needed for graph mode and model_to_estimator.
has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
self._check_trainable_weights_consistency()
# If we have re-compiled the loss/weighted metric sub-graphs then create
# train function even if one exists already. This is because
# `_feed_sample_weights` list has been updated on re-compile.
if getattr(self, "train_function", None) is None or has_recompiled:
# Restore the compiled trainable state.
current_trainable_state = self._get_trainable_state()
self._set_trainable_state(self._compiled_trainable_state)
inputs = (
self._feed_inputs
+ self._feed_targets
+ self._feed_sample_weights
)
if not isinstance(backend.symbolic_learning_phase(), int):
inputs += [backend.symbolic_learning_phase()]
if isinstance(self.optimizer, (list, tuple)):
linear_optimizer = self.optimizer[0]
dnn_optimizer = self.optimizer[1]
else:
linear_optimizer = self.optimizer
dnn_optimizer = self.optimizer
with backend.get_graph().as_default():
with backend.name_scope("training"):
# Training updates
updates = []
linear_updates = linear_optimizer.get_updates(
params=self.linear_model.trainable_weights,
loss=self.total_loss,
)
updates += linear_updates
dnn_updates = dnn_optimizer.get_updates(
params=self.dnn_model.trainable_weights,
loss=self.total_loss,
)
updates += dnn_updates
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
metrics = self._get_training_eval_metrics()
metrics_tensors = [
m._call_result
for m in metrics
if hasattr(m, "_call_result")
]
with backend.name_scope("training"):
# Gets loss and metrics. Updates weights at each call.
fn = backend.function(
inputs,
[self.total_loss] + metrics_tensors,
updates=updates,
name="train_function",
**self._function_kwargs
)
setattr(self, "train_function", fn)
# Restore the current trainable state
self._set_trainable_state(current_trainable_state)
def get_config(self):
linear_config = serialization_lib.serialize_keras_object(
self.linear_model
)
dnn_config = serialization_lib.serialize_keras_object(self.dnn_model)
config = {
"linear_model": linear_config,
"dnn_model": dnn_config,
"activation": activations.serialize(self.activation),
}
base_config = base_layer.Layer.get_config(self)
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
linear_config = config.pop("linear_model")
linear_model = layer_module.deserialize(linear_config, custom_objects)
dnn_config = config.pop("dnn_model")
dnn_model = layer_module.deserialize(dnn_config, custom_objects)
activation = activations.deserialize(
config.pop("activation", None), custom_objects=custom_objects
)
return cls(
linear_model=linear_model,
dnn_model=dnn_model,
activation=activation,
**config
)
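# --- Illustrative sketch (not part of the original module) -------------------
# A minimal end-to-end run of the joint-training flow described in the
# `WideDeepModel` docstring: the first optimizer is applied to the linear part
# and the second to the DNN part (see `train_step` above). The import path of
# `LinearModel`, the synthetic data shapes, and the layer sizes are assumptions
# made for this example only.
def _example_wide_deep_training():
    import numpy as np
    import tensorflow as tf
    from tf_keras.premade_models.linear import LinearModel
    linear_model = LinearModel(units=1)
    dnn_model = tf.keras.Sequential(
        [
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dense(1),
        ]
    )
    combined = WideDeepModel(linear_model, dnn_model)
    combined.compile(optimizer=["sgd", "adam"], loss="mse", metrics=["mse"])
    linear_inputs = np.random.random((32, 5)).astype("float32")
    dnn_inputs = np.random.random((32, 8)).astype("float32")
    y = np.random.random((32, 1)).astype("float32")
    combined.fit([linear_inputs, dnn_inputs], y, epochs=2, verbose=0)
    return combined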
| tf-keras/tf_keras/premade_models/wide_deep.py/0 | {
"file_path": "tf-keras/tf_keras/premade_models/wide_deep.py",
"repo_id": "tf-keras",
"token_count": 4416
} | 215 |
# Description:
# Contains the TF-Keras save model API (internal TensorFlow version).
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
# TODO(scottzhu): Remove non-keras deps from TF.
default_visibility = [
"//tf_keras:friends",
"//third_party/tensorflow/python/distribute:__pkg__",
],
licenses = ["notice"],
)
py_library(
name = "saving",
srcs = [
"__init__.py",
"legacy/hdf5_format.py",
"legacy/model_config.py",
"legacy/save.py",
"legacy/saving_utils.py",
"pickle_utils.py",
"saving_api.py",
],
srcs_version = "PY3",
deps = [
":object_registration",
":serialization",
":serialization_lib",
"//:expect_h5py_installed",
"//:expect_tensorflow_installed",
"//:expect_yaml_installed",
"//tf_keras:backend",
"//tf_keras:losses",
"//tf_keras:regularizers",
"//tf_keras/engine:input_spec",
"//tf_keras/mixed_precision:autocast_variable",
"//tf_keras/optimizers",
"//tf_keras/protobuf:saved_metadata_proto_py_pb2",
"//tf_keras/saving/legacy/saved_model",
"//tf_keras/utils:engine_utils",
"//tf_keras/utils:metrics_utils",
"//tf_keras/utils:mode_keys",
],
)
py_library(
name = "saving_lib",
srcs = [
"saving_lib.py",
],
srcs_version = "PY3",
deps = [
":serialization_lib",
"//:expect_tensorflow_installed",
"//tf_keras/utils:generic_utils",
"//tf_keras/utils:io_utils",
],
)
tf_py_test(
name = "saving_lib_test",
size = "medium",
srcs = ["saving_lib_test.py"],
python_version = "PY3",
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/utils:generic_utils",
],
)
py_library(
name = "object_registration",
srcs = [
"object_registration.py",
],
srcs_version = "PY3",
)
py_library(
name = "serialization_lib",
srcs = [
"serialization_lib.py",
],
srcs_version = "PY3",
deps = [
":object_registration",
":serialization",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/saving/legacy/saved_model:utils",
],
)
py_library(
name = "serialization",
srcs = [
"legacy/serialization.py",
],
srcs_version = "PY3",
deps = [
":object_registration",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/utils:tf_contextlib",
"//tf_keras/utils:tf_inspect",
],
)
tf_py_test(
name = "object_registration_test",
size = "small",
srcs = ["object_registration_test.py"],
python_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
],
)
tf_py_test(
name = "metrics_serialization_test",
size = "medium",
srcs = ["legacy/metrics_serialization_test.py"],
python_version = "PY3",
shard_count = 8,
tags = [
"notsan", # TODO(b/170870790)
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "losses_serialization_test",
size = "medium",
srcs = ["legacy/losses_serialization_test.py"],
python_version = "PY3",
shard_count = 4,
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "pickle_utils_test",
size = "medium",
srcs = ["pickle_utils_test.py"],
python_version = "PY3",
shard_count = 4,
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "save_weights_test",
size = "medium",
srcs = ["legacy/save_weights_test.py"],
python_version = "PY3",
shard_count = 4,
tags = [
"no_oss_py35", # b/147011479
"no_pip", # TODO(b/202022379)
"no_windows",
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "save_test",
size = "medium",
srcs = ["legacy/save_test.py"],
python_version = "PY3",
shard_count = 4,
tags = [
"no_pip", # TODO(b/202022379)
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "saving_utils_test",
size = "medium",
srcs = ["legacy/saving_utils_test.py"],
python_version = "PY3",
tags = ["notsan"],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "serialization_lib_test",
size = "small",
srcs = ["serialization_lib_test.py"],
python_version = "PY3",
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/saving:serialization",
"//tf_keras/testing_infra:test_combinations",
],
)
| tf-keras/tf_keras/saving/BUILD/0 | {
"file_path": "tf-keras/tf_keras/saving/BUILD",
"repo_id": "tf-keras",
"token_count": 3072
} | 216 |
"""Saves the same model twice and ensures that they are serialized the same."""
import subprocess
import tensorflow.compat.v2 as tf
from absl import flags
from tensorflow.core.protobuf import saved_model_pb2
FLAGS = flags.FLAGS
class DeterminismTest(tf.test.TestCase):
def test_saving_is_deterministic(self):
create_saved_model = f"{FLAGS.test_srcdir}/create_test_saved_model.par"
saved_model_a_path = f"{FLAGS.test_tmpdir}/a"
saved_model_b_path = f"{FLAGS.test_tmpdir}/b"
save_a = subprocess.Popen(
[create_saved_model, "--output_path", saved_model_a_path]
)
save_b = subprocess.Popen(
[create_saved_model, "--output_path", saved_model_b_path]
)
save_a.wait()
save_b.wait()
saved_model_a = saved_model_pb2.SavedModel()
with tf.io.gfile.GFile(f"{saved_model_a_path}/saved_model.pb") as f:
saved_model_a.MergeFromString(f.read())
saved_model_b = saved_model_pb2.SavedModel()
with tf.io.gfile.GFile(f"{saved_model_b_path}/saved_model.pb") as f:
saved_model_b.MergeFromString(f.read())
self.assertProtoEquals(saved_model_a, saved_model_b)
| tf-keras/tf_keras/saving/legacy/saved_model/determinism_test.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/determinism_test.py",
"repo_id": "tf-keras",
"token_count": 568
} | 217 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for saving utility functions."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras import backend
from tf_keras.engine import sequential
from tf_keras.feature_column import dense_features
from tf_keras.optimizers.legacy import gradient_descent
from tf_keras.saving.legacy import saving_utils
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
class TraceModelCallTest(test_combinations.TestCase):
def _assert_all_close(self, expected, actual):
if not tf.executing_eagerly():
with self.cached_session() as sess:
backend._initialize_variables(sess)
self.assertAllClose(expected, actual)
else:
self.assertAllClose(expected, actual)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_trace_model_outputs(self):
input_dim = 5 if test_utils.get_model_type() == "functional" else None
model = test_utils.get_small_mlp(10, 3, input_dim)
inputs = tf.ones((8, 5))
if input_dim is None:
with self.assertRaisesRegex(
ValueError, ".*input shape is not availabl*"
):
saving_utils.trace_model_call(model)
model._set_inputs(inputs)
fn = saving_utils.trace_model_call(model)
signature_outputs = fn(inputs)
if model.output_names:
expected_outputs = {model.output_names[0]: model(inputs)}
else:
expected_outputs = {"output_1": model(inputs)}
self._assert_all_close(expected_outputs, signature_outputs)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_trace_model_outputs_after_fitting(self):
input_dim = 5 if test_utils.get_model_type() == "functional" else None
model = test_utils.get_small_mlp(10, 3, input_dim)
model.compile(
optimizer="sgd",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
x=np.random.random((8, 5)).astype(np.float32),
y=np.random.random((8, 3)).astype(np.float32),
epochs=2,
)
inputs = tf.ones((8, 5))
fn = saving_utils.trace_model_call(model)
signature_outputs = fn(inputs)
if model.output_names:
expected_outputs = {model.output_names[0]: model(inputs)}
else:
expected_outputs = {"output_1": model(inputs)}
self._assert_all_close(expected_outputs, signature_outputs)
@test_combinations.run_with_all_model_types(exclude_models="sequential")
@test_combinations.run_all_keras_modes
def test_trace_multi_io_model_outputs(self):
input_dim = 5
num_classes = 3
num_classes_b = 4
input_a = keras.layers.Input(shape=(input_dim,), name="input_a")
input_b = keras.layers.Input(shape=(input_dim,), name="input_b")
dense = keras.layers.Dense(num_classes, name="dense")
dense2 = keras.layers.Dense(num_classes_b, name="dense2")
dropout = keras.layers.Dropout(0.5, name="dropout")
branch_a = [input_a, dense]
branch_b = [input_b, dense, dense2, dropout]
model = test_utils.get_multi_io_model(branch_a, branch_b)
input_a_ts = tf.constant(
np.random.random((10, input_dim)).astype(np.float32)
)
input_b_ts = tf.constant(
np.random.random((10, input_dim)).astype(np.float32)
)
if test_utils.get_model_type() == "subclass":
with self.assertRaisesRegex(
ValueError, ".*input shape is not availabl*"
):
saving_utils.trace_model_call(model)
model.compile(
optimizer="sgd",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
x=[
np.random.random((8, input_dim)).astype(np.float32),
np.random.random((8, input_dim)).astype(np.float32),
],
y=[
np.random.random((8, num_classes)).astype(np.float32),
np.random.random((8, num_classes_b)).astype(np.float32),
],
epochs=2,
)
fn = saving_utils.trace_model_call(model)
# tf.function requires that the input structures match when calling a
        # ConcreteFunction. For some reason V1 models define the inputs as a
        # list, while V2 models set the inputs as a tuple.
if (
not tf.executing_eagerly()
and test_utils.get_model_type() != "functional"
):
signature_outputs = fn([input_a_ts, input_b_ts])
else:
signature_outputs = fn((input_a_ts, input_b_ts))
outputs = model([input_a_ts, input_b_ts])
if model.output_names:
expected_outputs = {
model.output_names[0]: outputs[0],
model.output_names[1]: outputs[1],
}
else:
expected_outputs = {"output_1": outputs[0], "output_2": outputs[1]}
self._assert_all_close(expected_outputs, signature_outputs)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_trace_features_layer(self):
columns = [tf.feature_column.numeric_column("x")]
model = sequential.Sequential([dense_features.DenseFeatures(columns)])
model_input = {"x": tf.constant([[1.0]])}
model.predict(model_input, steps=1)
fn = saving_utils.trace_model_call(model)
self.assertAllClose({"output_1": [[1.0]]}, fn(model_input))
columns = [
tf.feature_column.numeric_column("x"),
tf.feature_column.numeric_column("y"),
]
model = sequential.Sequential([dense_features.DenseFeatures(columns)])
model_input = {"x": tf.constant([[1.0]]), "y": tf.constant([[2.0]])}
model.predict(model_input, steps=1)
fn = saving_utils.trace_model_call(model)
self.assertAllClose({"output_1": [[1.0, 2.0]]}, fn(model_input))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_specify_input_signature(self):
model = test_utils.get_small_sequential_mlp(10, 3, None)
inputs = tf.ones((8, 5))
with self.assertRaisesRegex(
ValueError, ".*input shape is not availabl*"
):
saving_utils.trace_model_call(model)
fn = saving_utils.trace_model_call(
model, [tf.TensorSpec(shape=[None, 5], dtype=tf.float32)]
)
signature_outputs = fn(inputs)
if model.output_names:
expected_outputs = {model.output_names[0]: model(inputs)}
else:
expected_outputs = {"output_1": model(inputs)}
self._assert_all_close(expected_outputs, signature_outputs)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_subclassed_model_with_input_signature(self):
class Model(keras.Model):
def __init__(self):
super().__init__()
self.dense = keras.layers.Dense(3, name="dense")
@tf.function(
input_signature=[
[
tf.TensorSpec([None, 5], tf.float32),
tf.TensorSpec([None], tf.float32),
]
],
)
def call(self, inputs, *args):
x, y = inputs
return self.dense(x) + y
model = Model()
fn = saving_utils.trace_model_call(model)
x = tf.ones((8, 5), dtype=tf.float32)
y = tf.ones((3,), dtype=tf.float32)
expected_outputs = {"output_1": model([x, y])}
signature_outputs = fn([x, y])
self._assert_all_close(expected_outputs, signature_outputs)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_model_with_fixed_input_dim(self):
"""Ensure that the batch_dim is removed when saving.
When serving or retraining, it is important to reset the batch dim.
This can be an issue inside of tf.function. See b/132783590 for context.
"""
model = test_utils.get_small_mlp(10, 3, 5)
loss_object = keras.losses.MeanSquaredError()
optimizer = gradient_descent.SGD()
@tf.function
def train_step(data, labels):
with tf.GradientTape() as tape:
predictions = model(data)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
x = np.random.random((8, 5))
y = np.random.random((8, 3))
train_step(x, y)
fn = saving_utils.trace_model_call(model)
self.assertEqual(
fn.structured_input_signature[0][0].shape.as_list(),
tf.TensorShape([None, 5]).as_list(),
)
def _import_and_infer(save_dir, inputs):
"""Import a SavedModel into a TF 1.x-style graph and run `signature_key`."""
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session() as session:
model = tf.compat.v1.saved_model.load(
session, [tf.saved_model.SERVING], save_dir
)
signature = model.signature_def[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
]
assert set(inputs.keys()) == set(
signature.inputs.keys()
), f"expected {signature.inputs.keys()}, found {inputs.keys()}"
feed_dict = {}
for arg_name in inputs.keys():
feed_dict[
graph.get_tensor_by_name(signature.inputs[arg_name].name)
] = inputs[arg_name]
output_dict = {}
for output_name, output_tensor_info in signature.outputs.items():
output_dict[output_name] = graph.get_tensor_by_name(
output_tensor_info.name
)
return session.run(output_dict, feed_dict=feed_dict)
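# --- Illustrative sketch (not part of the original test module) --------------
# Shows the typical shape of a `trace_model_call` result exercised by the tests
# above: a `tf.function` whose outputs are keyed by output name. The model
# architecture and input spec are assumptions made for this example only.
def _example_trace_model_call():
    model = keras.Sequential([keras.layers.Dense(3, input_shape=(5,))])
    serving_fn = saving_utils.trace_model_call(
        model, [tf.TensorSpec(shape=[None, 5], dtype=tf.float32)]
    )
    # Returns a dict keyed by output name, e.g. {"output_1": <(2, 3) tensor>}.
    return serving_fn(tf.ones((2, 5)))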
class AutographedMetric(keras.metrics.Metric):
def build(self, input_shape):
pass
def update_state(self, values):
if tf.constant(False):
x = 1
else:
x = 2
return x
def reset_states(self):
pass
def result(self):
return tf.constant(0)
def GetMean(self):
return tf.constant(0)
def GetCount(self):
return tf.constant(0)
class BasicAutographedMetricLayer(keras.layers.Layer):
def build(self, input_shape):
self._metric = AutographedMetric()
def call(self, inp):
self._metric.update_state(inp)
# TODO(b/172853147): Test control flow here.
return inp
class BasicAutographedMetricModel(keras.models.Model):
def __init__(self):
super().__init__(name="test_model")
self._layer = BasicAutographedMetricLayer()
def call(self, inputs, **kwargs):
return self._layer(inputs)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class ModelSaveTest(test_combinations.TestCase):
def test_model_save_preserves_autograph(self):
model = BasicAutographedMetricModel()
inputs = tf.ones((8, 5))
model._set_inputs(inputs)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
tf.saved_model.save(model, save_dir)
if model.output_names:
output_name = model.output_names[0]
input_name = model.input_names[0]
else:
output_name = "output_1"
input_name = "input_1"
self.assertAllClose(
{output_name: model.predict_on_batch(inputs)},
_import_and_infer(save_dir, {input_name: np.ones((8, 5))}),
)
# Test v2 loading.
# TODO(mdan): tests using _import_and_infer should uniformly do this.
self.assertAllClose(
model.predict_on_batch(inputs),
tf.saved_model.load(save_dir)(inputs),
)
def test_model_save(self):
input_dim = 5
model = test_utils.get_small_mlp(10, 3, input_dim)
inputs = tf.ones((8, 5))
if test_utils.get_model_type() == "subclass":
model._set_inputs(inputs)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
tf.saved_model.save(model, save_dir)
if model.output_names:
output_name = model.output_names[0]
input_name = model.input_names[0]
else:
output_name = "output_1"
input_name = "input_1"
self.assertAllClose(
{output_name: model.predict_on_batch(inputs)},
_import_and_infer(save_dir, {input_name: np.ones((8, 5))}),
)
class ExtractModelMetricsTest(test_combinations.TestCase):
def test_extract_model_metrics(self):
# saving_utils.extract_model_metrics is used in V1 only API
# keras.experimental.export_saved_model.
with tf.Graph().as_default():
a = keras.layers.Input(shape=(3,), name="input_a")
b = keras.layers.Input(shape=(3,), name="input_b")
dense = keras.layers.Dense(4, name="dense")
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name="dropout")(c)
model = keras.models.Model([a, b], [d, e])
extract_metrics = saving_utils.extract_model_metrics(model)
self.assertEqual(None, extract_metrics)
extract_metric_names = [
"dense_binary_accuracy",
"dropout_binary_accuracy",
"dense_mean_squared_error",
"dropout_mean_squared_error",
]
if tf.__internal__.tf2.enabled():
extract_metric_names.extend(["dense_mae", "dropout_mae"])
else:
extract_metric_names.extend(
["dense_mean_absolute_error", "dropout_mean_absolute_error"]
)
model_metric_names = [
"loss",
"dense_loss",
"dropout_loss",
] + extract_metric_names
model.compile(
loss="mae",
metrics=[
keras.metrics.BinaryAccuracy(),
"mae",
keras.metrics.mean_squared_error,
],
optimizer=tf.compat.v1.train.RMSPropOptimizer(
learning_rate=0.01
),
)
extract_metrics = saving_utils.extract_model_metrics(model)
self.assertEqual(set(model_metric_names), set(model.metrics_names))
self.assertEqual(
set(extract_metric_names), set(extract_metrics.keys())
)
class UnbuiltModelSavingErrorMessageTest(test_combinations.TestCase):
def setUp(self):
super().setUp()
if not tf.__internal__.tf2.enabled():
self.skipTest("The test does not intend to cover TF1.")
def test_sequential(self):
model = sequential.Sequential([keras.layers.Dense(10)])
optimizer = gradient_descent.SGD()
model.compile(optimizer, loss="mse", steps_per_execution=10)
# Forward pass not called yet. Input shape not available and thus error.
with self.assertRaisesRegex(
ValueError,
"Model.*cannot be saved."
"*specify an input shape either by calling.*",
):
model.save(os.path.join(self.get_temp_dir(), "my_saved_model"))
def test_functional(self):
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=3)
# Functional model always has an input shape, so should save just fine.
model.save(os.path.join(self.get_temp_dir(), "my_saved_model"))
def test_subclass_forward_pass_by_layer_underscore_call(self):
class CustomModel(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dense1 = keras.layers.Dense(1)
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
y_pred = self.dense1(x, training=True)
loss = self.compiled_loss(y, y_pred)
gradients = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(
zip(gradients, self.trainable_variables)
)
return {}
subclassed_model = CustomModel()
subclassed_model.compile(optimizer="adam", loss="mse")
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
subclassed_model.fit(x, y, epochs=1)
# Saving of this subclassed model is supposed to raise an error, even if
# `fit` has been called. This is because the model does not have
# `call()` overridden. Forward pass using `layer.__call__` works for
# training, but saving requires that `call()` be used.
with self.assertRaisesRegex(
ValueError,
r"Model.*cannot be saved.*as opposed to `model.call\(\).*",
):
subclassed_model.save(
os.path.join(self.get_temp_dir(), "my_saved_model")
)
def test_subclass_forward_pass_by_model_call(self):
class CustomModel(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dense1 = keras.layers.Dense(1)
def call(self, inputs):
return self.dense1(inputs)
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
y_pred = self.call(x)
loss = self.compiled_loss(y, y_pred)
gradients = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(
zip(gradients, self.trainable_variables)
)
return {}
subclassed_model = CustomModel()
subclassed_model.compile(optimizer="adam", loss="mse")
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
subclassed_model.fit(x, y, epochs=1)
# Saving of this subclassed model is supposed to raise an error, even if
# `fit` has been called. This is because the model has `call()`
# overridden, but the forward pass uses `Model.call` as opposed to
# `Model.__call__`, and as a result the `Model` is not really built. The
# error message hints the user to use `Model.__call__`, i.e.,
# `Model(inputs)` instead.
with self.assertRaisesRegex(
ValueError,
r"Model.*cannot be saved.*as opposed to `model.call\(\).*",
):
subclassed_model.save(
os.path.join(self.get_temp_dir(), "my_saved_model")
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/saving/legacy/saving_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saving_utils_test.py",
"repo_id": "tf-keras",
"token_count": 9691
} | 218 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras test_utils."""
import unittest
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import models as keras_models
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
class CombinationsTest(tf.test.TestCase):
def test_run_all_keras_modes(self):
test_params = []
class ExampleTest(parameterized.TestCase):
def runTest(self):
pass
@test_combinations.generate(
test_combinations.keras_mode_combinations()
)
def testBody(self):
mode = "eager" if tf.executing_eagerly() else "graph"
should_run_eagerly = test_utils.should_run_eagerly()
test_params.append((mode, should_run_eagerly))
e = ExampleTest()
if not tf.__internal__.tf2.enabled():
e.testBody_test_mode_graph_runeagerly_False()
e.testBody_test_mode_eager_runeagerly_True()
e.testBody_test_mode_eager_runeagerly_False()
if not tf.__internal__.tf2.enabled():
self.assertLen(test_params, 3)
self.assertAllEqual(
test_params,
[
("graph", False),
("eager", True),
("eager", False),
],
)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(test_params, 6)
else:
self.assertLen(test_params, 2)
self.assertAllEqual(
test_params,
[
("eager", True),
("eager", False),
],
)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(test_params, 4)
def test_generate_keras_mode_eager_only(self):
result = test_combinations.keras_mode_combinations(mode=["eager"])
self.assertLen(result, 2)
self.assertEqual(result[0], {"mode": "eager", "run_eagerly": True})
self.assertEqual(result[1], {"mode": "eager", "run_eagerly": False})
def test_generate_keras_mode_skip_run_eagerly(self):
result = test_combinations.keras_mode_combinations(run_eagerly=[False])
if tf.__internal__.tf2.enabled():
self.assertLen(result, 1)
self.assertEqual(result[0], {"mode": "eager", "run_eagerly": False})
else:
self.assertLen(result, 2)
self.assertEqual(result[0], {"mode": "eager", "run_eagerly": False})
self.assertEqual(result[1], {"mode": "graph", "run_eagerly": False})
def test_run_all_keras_model_types(self):
model_types = []
models = []
class ExampleTest(parameterized.TestCase):
def runTest(self):
pass
@test_combinations.generate(
test_combinations.keras_model_type_combinations()
)
def testBody(self):
model_types.append(test_utils.get_model_type())
models.append(test_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
e.testBody_test_modeltype_functional()
e.testBody_test_modeltype_subclass()
e.testBody_test_modeltype_sequential()
self.assertLen(model_types, 3)
self.assertAllEqual(
model_types, ["functional", "subclass", "sequential"]
)
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras_models.Sequential)
self.assertNotIsInstance(models[1], keras_models.Sequential)
self.assertIsInstance(models[2], keras_models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 6)
def test_combine_combinations(self):
test_cases = []
@test_combinations.generate(
test_combinations.times(
test_combinations.keras_mode_combinations(),
test_combinations.keras_model_type_combinations(),
)
)
class ExampleTest(parameterized.TestCase):
def runTest(self):
pass
@parameterized.named_parameters(
dict(testcase_name="_arg", arg=True)
)
def testBody(self, arg):
del arg
mode = "eager" if tf.executing_eagerly() else "graph"
should_run_eagerly = test_utils.should_run_eagerly()
test_cases.append(
(mode, should_run_eagerly, test_utils.get_model_type())
)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
expected_combinations = [
("eager", False, "functional"),
("eager", False, "sequential"),
("eager", False, "subclass"),
("eager", True, "functional"),
("eager", True, "sequential"),
("eager", True, "subclass"),
]
if not tf.__internal__.tf2.enabled():
expected_combinations.extend(
[
("graph", False, "functional"),
("graph", False, "sequential"),
("graph", False, "subclass"),
]
)
self.assertAllEqual(sorted(test_cases), expected_combinations)
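# --- Illustrative sketch (not part of the original test module) --------------
# Shows the typical way the combination decorators exercised above are applied
# in a TF-Keras test: the body runs once per model type and once per
# eager/function mode. The layer sizes and data are assumptions made for this
# example only.
class ExampleUsageTest(test_combinations.TestCase):
    @test_combinations.run_with_all_model_types
    @test_combinations.run_all_keras_modes
    def test_fit_small_mlp(self):
        import numpy as np
        model = test_utils.get_small_mlp(8, 3, input_dim=5)
        model.compile(
            "sgd", "mse", run_eagerly=test_utils.should_run_eagerly()
        )
        x = np.ones((4, 5), dtype="float32")
        y = np.ones((4, 3), dtype="float32")
        model.fit(x, y, epochs=1, verbose=0)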
class KerasParameterizedTest(test_combinations.TestCase):
def test_run_with_all_model_types(self):
model_types = []
models = []
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_with_all_model_types
def testBody(self):
model_types.append(test_utils.get_model_type())
models.append(test_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
e.testBody_functional()
e.testBody_subclass()
e.testBody_sequential()
self.assertLen(model_types, 3)
self.assertAllEqual(
model_types, ["functional", "subclass", "sequential"]
)
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
self.assertNotIsInstance(models[1], keras.models.Sequential)
self.assertIsInstance(models[2], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 6)
def test_run_with_all_model_types_and_extra_params(self):
model_types = []
models = []
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_with_all_model_types
@parameterized.named_parameters(
[
dict(testcase_name="_0", with_brackets=True),
dict(testcase_name="_1", with_brackets=False),
]
)
def testBody(self, with_brackets):
with_brackets = (
"with_brackets" if with_brackets else "without_brackets"
)
model_types.append((with_brackets, test_utils.get_model_type()))
models.append(test_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
e.testBody_0_functional()
e.testBody_0_subclass()
e.testBody_0_sequential()
e.testBody_1_functional()
e.testBody_1_subclass()
e.testBody_1_sequential()
self.assertLen(model_types, 6)
self.assertAllEqual(
model_types,
[
("with_brackets", "functional"),
("with_brackets", "subclass"),
("with_brackets", "sequential"),
("without_brackets", "functional"),
("without_brackets", "subclass"),
("without_brackets", "sequential"),
],
)
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
self.assertNotIsInstance(models[1], keras.models.Sequential)
self.assertIsInstance(models[2], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 12)
def test_run_with_all_model_types_exclude_one(self):
model_types = []
models = []
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_with_all_model_types(
exclude_models="sequential"
)
def testBody(self):
model_types.append(test_utils.get_model_type())
models.append(test_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
if hasattr(e, "testBody_functional"):
e.testBody_functional()
if hasattr(e, "testBody_subclass"):
e.testBody_subclass()
if hasattr(e, "testBody_sequential"):
e.testBody_sequential()
self.assertLen(model_types, 2)
self.assertAllEqual(model_types, ["functional", "subclass"])
# Validate that the models are what they should be
self.assertTrue(models[0]._is_graph_network)
self.assertFalse(models[1]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
self.assertNotIsInstance(models[1], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 4)
def test_run_with_all_model_types_exclude_multiple(self):
model_types = []
models = []
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_with_all_model_types(
exclude_models=["sequential", "functional"]
)
def testBody(self):
model_types.append(test_utils.get_model_type())
models.append(test_utils.get_small_mlp(1, 4, input_dim=3))
e = ExampleTest()
if hasattr(e, "testBody_functional"):
e.testBody_functional()
if hasattr(e, "testBody_subclass"):
e.testBody_subclass()
if hasattr(e, "testBody_sequential"):
e.testBody_sequential()
self.assertLen(model_types, 1)
self.assertAllEqual(model_types, ["subclass"])
# Validate that the models are what they should be
self.assertFalse(models[0]._is_graph_network)
self.assertNotIsInstance(models[0], keras.models.Sequential)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(model_types, 2)
def test_run_all_keras_modes(self):
l = []
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_all_keras_modes()
def testBody(self):
mode = "eager" if tf.executing_eagerly() else "graph"
should_run_eagerly = test_utils.should_run_eagerly()
l.append((mode, should_run_eagerly))
e = ExampleTest()
if not tf.__internal__.tf2.enabled():
e.testBody_v1_session()
e.testBody_v2_eager()
e.testBody_v2_function()
if not tf.__internal__.tf2.enabled():
self.assertLen(l, 3)
self.assertAllEqual(
l,
[
("graph", False),
("eager", True),
("eager", False),
],
)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, 6)
else:
self.assertLen(l, 2)
self.assertAllEqual(
l,
[
("eager", True),
("eager", False),
],
)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, 4)
def test_run_all_keras_modes_extra_params(self):
l = []
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_all_keras_modes()
@parameterized.named_parameters(
[
dict(testcase_name="_0", with_brackets=True),
dict(testcase_name="_1", with_brackets=False),
]
)
def testBody(self, with_brackets):
mode = "eager" if tf.executing_eagerly() else "graph"
with_brackets = (
"with_brackets" if with_brackets else "without_brackets"
)
should_run_eagerly = test_utils.should_run_eagerly()
l.append((with_brackets, mode, should_run_eagerly))
e = ExampleTest()
if not tf.__internal__.tf2.enabled():
e.testBody_0_v1_session()
e.testBody_1_v1_session()
e.testBody_0_v2_eager()
e.testBody_0_v2_function()
e.testBody_1_v2_eager()
e.testBody_1_v2_function()
expected_combinations = {
("with_brackets", "eager", True),
("with_brackets", "eager", False),
("without_brackets", "eager", True),
("without_brackets", "eager", False),
}
if not tf.__internal__.tf2.enabled():
expected_combinations = expected_combinations.union(
{
("with_brackets", "graph", False),
("without_brackets", "graph", False),
}
)
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_keras_modes_always_skip_v1(self):
l = []
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def testBody(self):
mode = "eager" if tf.executing_eagerly() else "graph"
should_run_eagerly = test_utils.should_run_eagerly()
l.append((mode, should_run_eagerly))
e = ExampleTest()
if hasattr(e, "testBody_v1_session"):
e.testBody_v1_session()
if hasattr(e, "testBody_v2_eager"):
e.testBody_v2_eager()
if hasattr(e, "testBody_v2_function"):
e.testBody_v2_function()
self.assertLen(l, 2)
self.assertEqual(
set(l),
{
("eager", True),
("eager", False),
},
)
def test_run_all_keras_modes_with_all_model_types(self):
l = []
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def testBody(self):
mode = "eager" if tf.executing_eagerly() else "graph"
should_run_eagerly = test_utils.should_run_eagerly()
l.append(
(mode, should_run_eagerly, test_utils.get_model_type())
)
e = ExampleTest()
e.testBody_v2_eager_functional()
e.testBody_v2_function_functional()
e.testBody_v2_eager_sequential()
e.testBody_v2_function_sequential()
e.testBody_v2_eager_subclass()
e.testBody_v2_function_subclass()
if not tf.__internal__.tf2.enabled():
e.testBody_v1_session_functional()
e.testBody_v1_session_sequential()
e.testBody_v1_session_subclass()
expected_combinations = {
("eager", True, "functional"),
("eager", False, "functional"),
("eager", True, "sequential"),
("eager", False, "sequential"),
("eager", True, "subclass"),
("eager", False, "subclass"),
}
if not tf.__internal__.tf2.enabled():
expected_combinations = expected_combinations.union(
{
("graph", False, "functional"),
("graph", False, "sequential"),
("graph", False, "subclass"),
}
)
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_model_types_with_all_keras_modes(self):
l = []
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_all_keras_modes
@test_combinations.run_with_all_model_types
def testBody(self):
mode = "eager" if tf.executing_eagerly() else "graph"
should_run_eagerly = test_utils.should_run_eagerly()
l.append(
(mode, should_run_eagerly, test_utils.get_model_type())
)
e = ExampleTest()
e.testBody_functional_v2_eager()
e.testBody_functional_v2_function()
e.testBody_sequential_v2_eager()
e.testBody_sequential_v2_function()
e.testBody_subclass_v2_eager()
e.testBody_subclass_v2_function()
if not tf.__internal__.tf2.enabled():
e.testBody_functional_v1_session()
e.testBody_sequential_v1_session()
e.testBody_subclass_v1_session()
expected_combinations = {
("eager", True, "functional"),
("eager", False, "functional"),
("eager", True, "sequential"),
("eager", False, "sequential"),
("eager", True, "subclass"),
("eager", False, "subclass"),
}
if not tf.__internal__.tf2.enabled():
expected_combinations = expected_combinations.union(
{
("graph", False, "functional"),
("graph", False, "sequential"),
("graph", False, "subclass"),
}
)
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_keras_modes_with_all_model_types_annotate_class(self):
l = []
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@parameterized.named_parameters(
dict(testcase_name="_arg", arg=True)
)
def testBody(self, arg):
mode = "eager" if tf.executing_eagerly() else "graph"
should_run_eagerly = test_utils.should_run_eagerly()
l.append(
(mode, should_run_eagerly, test_utils.get_model_type())
)
e = ExampleTest()
e.testBody_arg_v2_eager_functional()
e.testBody_arg_v2_function_functional()
e.testBody_arg_v2_eager_sequential()
e.testBody_arg_v2_function_sequential()
e.testBody_arg_v2_eager_subclass()
e.testBody_arg_v2_function_subclass()
if not tf.__internal__.tf2.enabled():
e.testBody_arg_v1_session_functional()
e.testBody_arg_v1_session_sequential()
e.testBody_arg_v1_session_subclass()
expected_combinations = {
("eager", True, "functional"),
("eager", False, "functional"),
("eager", True, "sequential"),
("eager", False, "sequential"),
("eager", True, "subclass"),
("eager", False, "subclass"),
}
if not tf.__internal__.tf2.enabled():
expected_combinations = expected_combinations.union(
{
("graph", False, "functional"),
("graph", False, "sequential"),
("graph", False, "subclass"),
}
)
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
def test_run_all_keras_modes_with_all_model_types_annotate_class_2(self):
l = []
@test_combinations.run_with_all_model_types
class ExampleTest(test_combinations.TestCase):
def runTest(self):
pass
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
dict(testcase_name="_arg", arg=True)
)
def testBody(self, arg):
mode = "eager" if tf.executing_eagerly() else "graph"
should_run_eagerly = test_utils.should_run_eagerly()
l.append(
(mode, should_run_eagerly, test_utils.get_model_type())
)
e = ExampleTest()
e.testBody_arg_v2_eager_functional()
e.testBody_arg_v2_function_functional()
e.testBody_arg_v2_eager_sequential()
e.testBody_arg_v2_function_sequential()
e.testBody_arg_v2_eager_subclass()
e.testBody_arg_v2_function_subclass()
if not tf.__internal__.tf2.enabled():
e.testBody_arg_v1_session_functional()
e.testBody_arg_v1_session_sequential()
e.testBody_arg_v1_session_subclass()
expected_combinations = {
("eager", True, "functional"),
("eager", False, "functional"),
("eager", True, "sequential"),
("eager", False, "sequential"),
("eager", True, "subclass"),
("eager", False, "subclass"),
}
if not tf.__internal__.tf2.enabled():
expected_combinations = expected_combinations.union(
{
("graph", False, "functional"),
("graph", False, "sequential"),
("graph", False, "subclass"),
}
)
self.assertLen(l, len(expected_combinations))
self.assertEqual(set(l), expected_combinations)
ts = unittest.makeSuite(ExampleTest)
res = unittest.TestResult()
ts.run(res)
self.assertLen(l, len(expected_combinations) * 2)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(dict(testcase_name="argument", arg=True))
def test_run_all_keras_modes_extra_params_2(self, arg):
self.assertEqual(arg, True)
@test_combinations.run_with_all_model_types
@parameterized.named_parameters(dict(testcase_name="argument", arg=True))
def test_run_with_all_model_types_extra_params_2(self, arg):
self.assertEqual(arg, True)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/testing_infra/test_combinations_test.py/0 | {
"file_path": "tf-keras/tf_keras/testing_infra/test_combinations_test.py",
"repo_id": "tf-keras",
"token_count": 12702
} | 219 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for saving/loading function for keras Model."""
import os
import shutil
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.optimizers import optimizer_v1
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.tests import model_architectures
@test_combinations.run_with_all_saved_model_formats
class TestModelArchitectures(test_combinations.TestCase):
def _save_model_dir(self, dirname="saved_model"):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
return os.path.join(temp_dir, dirname)
def get_test_data(self, input_shape, target_shape):
"""Generate test dataset for testing."""
if isinstance(input_shape, list):
x = [
np.random.random((2,) + input_shape[i][1:])
for i in range(len(input_shape))
]
else:
x = np.random.random((2,) + input_shape[1:])
if isinstance(target_shape, list):
y = [
np.random.random((2,) + target_shape[i][1:])
for i in range(len(target_shape))
]
else:
y = np.random.random((2,) + target_shape[1:])
return x, y
def get_custom_objects(self):
"""Define custom_objects."""
class CustomOpt(optimizer_v1.SGD):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
return {"CustomOpt": CustomOpt, "custom_loss": custom_loss}
@parameterized.named_parameters(*model_architectures.ALL_MODELS)
def test_basic_saving_and_loading(self, model_fn):
save_format = test_utils.get_save_format()
custom_objects = self.get_custom_objects()
if "subclassed_in_functional" in model_fn.__name__:
subclass_custom_objects = {
"MySubclassModel": model_architectures.MySubclassModel,
}
custom_objects.update(subclass_custom_objects)
elif "subclassed" in model_fn.__name__ and save_format == "h5":
self.skipTest(
"Saving the model to HDF5 format requires the model to be "
"a Functional model or a Sequential model."
)
saved_model_dir = self._save_model_dir()
model_data = model_fn()
model = model_data.model
x_test, y_test = self.get_test_data(
model_data.input_shape, model_data.target_shape
)
model.compile("rmsprop", "mse")
model.train_on_batch(x_test, y_test)
# Save model.
out1 = model.predict(x_test)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
# Load model.
loaded_model = keras.models.load_model(
saved_model_dir, custom_objects=custom_objects
)
out2 = loaded_model.predict(x_test)
self.assertAllClose(out1, out2, atol=1e-05)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/tests/model_architectures_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/model_architectures_test.py",
"repo_id": "tf-keras",
"token_count": 1587
} | 220 |
package(default_visibility = ["//tf_keras:__subpackages__"])
# Description:
# Tools for building the TensorFlow pip package.
COMMON_PIP_DEPS = [
"//tf_keras/api:tf_keras_api",
# The following targets are not included by //tf_keras:keras,
# eg to avoid circular dependency with TF, but they should still be included
# in the PIP package.
"//tf_keras/legacy_tf_layers:convolutional",
"//tf_keras/legacy_tf_layers:core",
"//tf_keras/legacy_tf_layers:layers_base",
"//tf_keras/legacy_tf_layers:normalization",
"//tf_keras/legacy_tf_layers:pooling",
"//tf_keras/layers/rnn:legacy_cell_wrappers",
"//tf_keras/layers/rnn:legacy_cells",
"//tf_keras/optimizers:legacy_learning_rate_decay",
# Need to include testing libraries in pip package so our pip
# release tests can run. (see py_test rule in keras.bzl for more context).
# Essentially, everything needed to run the test (except the test file itself)
# must be contained in the pip package since we strip away all deps.
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
"//tf_keras/benchmarks:keras_benchmark_lib_pip",
"//tf_keras/dtensor:integration_test_utils",
"//tf_keras/dtensor:test_util",
"//tf_keras/distribute:distribute_test_lib_pip",
"//tf_keras/integration_test:preprocessing_test_utils",
"//tf_keras/integration_test/models:models",
"//tf_keras/layers/preprocessing:preprocessing_test_utils",
"//tf_keras/layers/preprocessing/benchmarks:feature_column_benchmark",
"//tf_keras/mixed_precision:test_util",
"//tf_keras/tests:model_architectures",
"//tf_keras/tests:model_subclassing_test_util",
"//tf_keras/utils:dataset_creator",
"//tf_keras/utils:kpl_test_utils",
]
sh_binary(
name = "build_pip_package",
srcs = ["build_pip_package.sh"],
data = COMMON_PIP_DEPS,
)
| tf-keras/tf_keras/tools/pip_package/BUILD/0 | {
"file_path": "tf-keras/tf_keras/tools/pip_package/BUILD",
"repo_id": "tf-keras",
"token_count": 768
} | 221 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset_creator."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.distribute import multi_worker_testing_utils
from tf_keras.engine import data_adapter
from tf_keras.engine import sequential
from tf_keras.layers import core as core_layers
from tf_keras.optimizers.legacy import gradient_descent
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import dataset_creator
# isort: off
from tensorflow.python.distribute.cluster_resolver import (
SimpleClusterResolver,
)
from tensorflow.python.training.server_lib import (
ClusterSpec,
)
@test_utils.run_v2_only
class DatasetCreatorTest(tf.test.TestCase, parameterized.TestCase):
def test_dataset_creator(self):
with self.assertRaisesRegex(
TypeError, "`dataset_fn` for `DatasetCreator` must be a `callable`."
):
dataset_creator.DatasetCreator(2)
dataset_fn = lambda: 3
with self.assertRaisesRegex(
TypeError,
"The `callable` provided to `DatasetCreator` must return "
"a Dataset.",
):
dataset_creator.DatasetCreator(dataset_fn)()
dataset_fn = lambda: tf.data.Dataset.from_tensor_slices([1, 1])
got = dataset_creator.DatasetCreator(dataset_fn)()
self.assertEqual(
next(iter(got)),
next(iter(tf.data.Dataset.from_tensor_slices([1, 1]))),
)
def _get_dataset_fn(self):
def dataset_fn(input_context):
global_batch_size = 64
batch_size = input_context.get_per_replica_batch_size(
global_batch_size
)
dataset = tf.data.Dataset.from_tensors(([1.0], [1.0])).repeat()
dataset = dataset.shard(
input_context.num_input_pipelines,
input_context.input_pipeline_id,
)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(2)
return dataset
return dataset_fn
@test_combinations.generate(
test_combinations.combine(use_input_options=[True, False])
)
def test_dataset_creator_model_fit_without_strategy(
self, use_input_options
):
model = sequential.Sequential([core_layers.Dense(10)])
model.compile(gradient_descent.SGD(), loss="mse")
input_options = (
tf.distribute.InputOptions() if use_input_options else None
)
history = model.fit(
dataset_creator.DatasetCreator(
self._get_dataset_fn(), input_options
),
epochs=10,
steps_per_epoch=10,
verbose=0,
)
self.assertLen(history.history["loss"], 10)
def _get_parameter_server_strategy(self):
cluster_def = multi_worker_testing_utils.create_in_process_cluster(
num_workers=2, num_ps=1, rpc_layer="grpc"
)
return tf.distribute.experimental.ParameterServerStrategy(
SimpleClusterResolver(ClusterSpec(cluster_def), rpc_layer="grpc")
)
@test_combinations.generate(
test_combinations.combine(use_input_options=[True, False])
)
def test_dataset_creator_usage_in_parameter_server_model_fit(
self, use_input_options
):
strategy = self._get_parameter_server_strategy()
with strategy.scope():
model = sequential.Sequential([core_layers.Dense(10)])
model.compile(gradient_descent.SGD(), loss="mse")
input_options = (
tf.distribute.InputOptions() if use_input_options else None
)
history = model.fit(
dataset_creator.DatasetCreator(
self._get_dataset_fn(), input_options
),
epochs=10,
steps_per_epoch=10,
verbose=0,
)
self.assertLen(history.history["loss"], 10)
def test_dataset_creator_input_options(self):
dataset_fn = lambda _: tf.data.Dataset.from_tensor_slices([1, 1])
input_options = tf.distribute.InputOptions(
experimental_fetch_to_device=True,
experimental_per_replica_buffer_size=2,
)
x = dataset_creator.DatasetCreator(
dataset_fn, input_options=input_options
)
with tf.distribute.MultiWorkerMirroredStrategy().scope():
data_handler = data_adapter.get_data_handler(
x,
steps_per_epoch=2,
model=sequential.Sequential([core_layers.Dense(10)]),
)
# Ensuring the resulting `DistributedDatasetsFromFunction` has the right
# options.
self.assertTrue(
data_handler._dataset._options.experimental_fetch_to_device
)
self.assertEqual(
data_handler._dataset._options.experimental_per_replica_buffer_size,
2,
)
def test_dataset_creator_input_options_with_cluster_coordinator(self):
dataset_fn = lambda _: tf.data.Dataset.from_tensor_slices([1, 1])
input_options = tf.distribute.InputOptions(
experimental_fetch_to_device=True,
experimental_per_replica_buffer_size=2,
)
x = dataset_creator.DatasetCreator(
dataset_fn, input_options=input_options
)
strategy = self._get_parameter_server_strategy()
with strategy.scope():
model = sequential.Sequential([core_layers.Dense(10)])
model._cluster_coordinator = (
tf.distribute.experimental.coordinator.ClusterCoordinator(
strategy
)
)
data_handler = data_adapter.get_data_handler(
x, steps_per_epoch=2, model=model
)
iter_rv = iter(data_handler._dataset)._values[0]
iter_rv._rebuild_on(model._cluster_coordinator._cluster.workers[0])
distributed_iterator = iter_rv._get_values()
# Ensuring the resulting `DistributedIterator` has the right options.
self.assertTrue(
distributed_iterator._options.experimental_fetch_to_device
)
self.assertEqual(
distributed_iterator._options.experimental_per_replica_buffer_size,
2,
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/dataset_creator_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/dataset_creator_test.py",
"repo_id": "tf-keras",
"token_count": 3162
} | 222 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test related utilities for KPL + tf.distribute."""
import random
import tempfile
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.preprocessing import string_lookup
class DistributeKplTestUtils(tf.test.TestCase):
"""Utils for test of tf.distribute + KPL."""
FEATURE_VOCAB = [
"avenger",
"ironman",
"batman",
"hulk",
"spiderman",
"kingkong",
"wonder_woman",
]
LABEL_VOCAB = ["yes", "no"]
def define_kpls_for_training(self, use_adapt):
"""Function that defines KPL used for unit tests of tf.distribute.
Args:
use_adapt: if adapt will be called. False means there will be
precomputed statistics.
Returns:
feature_mapper: a simple keras model with one keras StringLookup layer
which maps feature to index.
label_mapper: similar to feature_mapper, but maps label to index.
"""
if use_adapt:
feature_lookup_layer = string_lookup.StringLookup(num_oov_indices=1)
feature_lookup_layer.adapt(self.FEATURE_VOCAB)
label_lookup_layer = string_lookup.StringLookup(
num_oov_indices=0, mask_token=None
)
label_lookup_layer.adapt(self.LABEL_VOCAB)
else:
feature_lookup_layer = string_lookup.StringLookup(
vocabulary=self.FEATURE_VOCAB, num_oov_indices=1
)
label_lookup_layer = string_lookup.StringLookup(
vocabulary=self.LABEL_VOCAB, num_oov_indices=0, mask_token=None
)
raw_feature_input = keras.layers.Input(
shape=(3,), dtype=tf.string, name="feature", ragged=True
)
feature_id_input = feature_lookup_layer(raw_feature_input)
feature_mapper = keras.Model(
{"features": raw_feature_input}, feature_id_input
)
raw_label_input = keras.layers.Input(
shape=(1,), dtype=tf.string, name="label"
)
label_id_input = label_lookup_layer(raw_label_input)
label_mapper = keras.Model({"label": raw_label_input}, label_id_input)
return feature_mapper, label_mapper
def dataset_fn(self, feature_mapper, label_mapper):
"""Function that generates dataset for test of tf.distribute + KPL.
Args:
feature_mapper: a simple keras model with one keras StringLookup layer
which maps feature to index.
label_mapper: similar to feature_mapper, but maps label to index.
Returns:
Generated dataset for test of tf.distribute + KPL.
"""
def feature_and_label_gen():
# Generator of dataset.
while True:
features = random.sample(self.FEATURE_VOCAB, 3)
label = ["yes"] if self.FEATURE_VOCAB[0] in features else ["no"]
yield {"features": features, "label": label}
raw_dataset = (
tf.data.Dataset.from_generator(
feature_and_label_gen,
output_signature={
"features": tf.TensorSpec([3], tf.string),
"label": tf.TensorSpec([1], tf.string),
},
)
.shuffle(100)
.batch(32)
)
train_dataset = raw_dataset.map(
lambda x: (
{"features": feature_mapper(x["features"])},
label_mapper(x["label"]),
)
)
return train_dataset
def define_model(self):
"""A simple model for test of tf.distribute + KPL."""
# Create the model. The input needs to be compatible with KPLs.
model_input = keras.layers.Input(
shape=(3,), dtype=tf.int64, name="model_input"
)
# input_dim includes a mask token and an oov token.
emb_output = keras.layers.Embedding(
input_dim=len(self.FEATURE_VOCAB) + 2, output_dim=20
)(model_input)
emb_output = tf.reduce_mean(emb_output, axis=1)
dense_output = keras.layers.Dense(units=1, activation="sigmoid")(
emb_output
)
model = keras.Model({"features": model_input}, dense_output)
return model
def define_reverse_lookup_layer(self):
"""Create string reverse lookup layer for serving."""
label_inverse_lookup_layer = string_lookup.StringLookup(
num_oov_indices=0,
mask_token=None,
vocabulary=self.LABEL_VOCAB,
invert=True,
)
return label_inverse_lookup_layer
def create_serving_signature(
self, model, feature_mapper, label_inverse_lookup_layer
):
"""Create serving signature for the given model."""
@tf.function
def serve_fn(raw_features):
raw_features = tf.expand_dims(raw_features, axis=0)
transformed_features = model.feature_mapper(raw_features)
outputs = model(transformed_features)
outputs = tf.squeeze(outputs, axis=0)
outputs = tf.cast(tf.greater(outputs, 0.5), tf.int64)
decoded_outputs = model.label_inverse_lookup_layer(outputs)
return tf.squeeze(decoded_outputs, axis=0)
model.feature_mapper = feature_mapper
model.label_inverse_lookup_layer = label_inverse_lookup_layer
# serving does NOT have batch dimension
return serve_fn.get_concrete_function(
tf.TensorSpec(shape=(3), dtype=tf.string, name="example")
)
def test_save_load_serving_model(
self, model, feature_mapper, label_inverse_lookup_layer
):
"""Test save/load/serving model."""
serving_fn = self.create_serving_signature(
model, feature_mapper, label_inverse_lookup_layer
)
saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
model.save(
saved_model_dir,
save_format="tf",
signatures={"serving_default": serving_fn},
)
# Test the saved_model.
loaded_serving_fn = keras.saving.legacy.save.load_model(
saved_model_dir
).signatures["serving_default"]
# check the result w/ and w/o avenger.
prediction0 = loaded_serving_fn(
tf.constant(["avenger", "ironman", "avenger"])
)["output_0"]
self.assertIn(prediction0.numpy().decode("UTF-8"), ("yes", "no"))
prediction1 = loaded_serving_fn(
tf.constant(["ironman", "ironman", "unknown"])
)["output_0"]
self.assertIn(prediction1.numpy().decode("UTF-8"), ("yes", "no"))
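# Illustrative sketch only -- not part of the original utilities. It shows how
# a concrete test case might chain the helpers above end to end; the class and
# method names below are hypothetical, and the real distribute tests would
# additionally create the model under a tf.distribute strategy scope.
class _ExampleKplWorkflowTest(DistributeKplTestUtils):
    def test_end_to_end_kpl_workflow(self):
        # Build the lookup layers from precomputed vocabularies.
        feature_mapper, label_mapper = self.define_kpls_for_training(
            use_adapt=False
        )
        # Map raw strings to ids before the data reaches the model.
        train_dataset = self.dataset_fn(feature_mapper, label_mapper)
        model = self.define_model()
        model.compile(optimizer="adam", loss="binary_crossentropy")
        # The generator-backed dataset is infinite, so bound the epoch.
        model.fit(train_dataset, steps_per_epoch=2, epochs=1)
        # Attach a reverse lookup so serving can emit string labels.
        label_inverse_lookup_layer = self.define_reverse_lookup_layer()
        self.test_save_load_serving_model(
            model, feature_mapper, label_inverse_lookup_layer
        )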
| tf-keras/tf_keras/utils/kpl_test_utils.py/0 | {
"file_path": "tf-keras/tf_keras/utils/kpl_test_utils.py",
"repo_id": "tf-keras",
"token_count": 3322
} | 223 |
package(
licenses = ["notice"], # Apache 2.0
)
exports_files([
"LICENSE",
"six.BUILD",
])
package(default_visibility = ["//visibility:public"])
| tf-keras/third_party/BUILD/0 | {
"file_path": "tf-keras/third_party/BUILD",
"repo_id": "tf-keras",
"token_count": 65
} | 224 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import keras_tuner
import numpy as np
import pytest
import tensorflow as tf
import autokeras as ak
from autokeras import test_utils
def get_tuner_class(*args, **kwargs):
pipeline = mock.Mock()
pipeline.transform_x.side_effect = lambda x: x
tuner = mock.Mock()
tuner.get_best_pipeline.return_value = pipeline
tuner_class = mock.Mock()
tuner_class.return_value = tuner
return tuner_class
def test_auto_model_objective_is_kt_objective(tmp_path):
auto_model = ak.AutoModel(
ak.ImageInput(), ak.RegressionHead(), directory=tmp_path
)
assert isinstance(auto_model.objective, keras_tuner.Objective)
def test_auto_model_max_trial_field_as_specified(tmp_path):
auto_model = ak.AutoModel(
ak.ImageInput(), ak.RegressionHead(), directory=tmp_path, max_trials=10
)
assert auto_model.max_trials == 10
def test_auto_model_directory_field_as_specified(tmp_path):
auto_model = ak.AutoModel(
ak.ImageInput(), ak.RegressionHead(), directory=tmp_path
)
assert auto_model.directory == tmp_path
def test_auto_model_project_name_field_as_specified(tmp_path):
auto_model = ak.AutoModel(
ak.ImageInput(),
ak.RegressionHead(),
directory=tmp_path,
project_name="auto_model",
)
assert auto_model.project_name == "auto_model"
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_evaluate(tuner_fn, tmp_path):
x_train = np.random.rand(100, 32)
y_train = np.random.rand(100, 1)
input_node = ak.Input()
output_node = input_node
output_node = ak.DenseBlock()(output_node)
output_node = ak.RegressionHead()(output_node)
auto_model = ak.AutoModel(
input_node, output_node, directory=tmp_path, max_trials=1
)
auto_model.fit(
x_train, y_train, epochs=1, validation_data=(x_train, y_train)
)
auto_model.evaluate(tf.data.Dataset.from_tensor_slices((x_train, y_train)))
assert tuner_fn.called
def get_single_io_auto_model(tmp_path):
return ak.AutoModel(
ak.ImageInput(), ak.RegressionHead(), directory=tmp_path, max_trials=2
)
@mock.patch("autokeras.auto_model.get_tuner_class", side_effect=get_tuner_class)
def test_auto_model_predict(tuner_fn, tmp_path):
x_train = np.random.rand(100, 32, 32, 3)
y_train = np.random.rand(100, 1)
auto_model = get_single_io_auto_model(tmp_path)
auto_model.fit(x_train, y_train, epochs=2, validation_split=0.2)
auto_model.predict(x_train)
assert tuner_fn.called
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_final_fit_concat(tuner_fn, tmp_path):
tuner = tuner_fn.return_value.return_value
x_train = np.random.rand(100, 32, 32, 3)
y_train = np.random.rand(100, 1)
auto_model = get_single_io_auto_model(tmp_path)
auto_model.fit(x_train, y_train, epochs=2, validation_split=0.2)
assert tuner.search.call_args_list[0][1]["validation_split"]
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_final_fit_not_concat(tuner_fn, tmp_path):
tuner = tuner_fn.return_value.return_value
x_train = np.random.rand(100, 32, 32, 3)
y_train = np.random.rand(100, 1)
auto_model = get_single_io_auto_model(tmp_path)
auto_model.fit(
x_train, y_train, epochs=2, validation_data=(x_train, y_train)
)
assert not tuner.search.call_args_list[0][1]["validation_split"]
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_overwrite(tuner_fn, tmp_path):
tuner_class = tuner_fn.return_value
x_train = np.random.rand(100, 32, 32, 3)
y_train = np.random.rand(100, 1)
auto_model = get_single_io_auto_model(tmp_path)
auto_model.fit(
x_train, y_train, epochs=2, validation_data=(x_train, y_train)
)
assert not tuner_class.call_args_list[0][1]["overwrite"]
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_export_model(tuner_fn, tmp_path):
tuner_class = tuner_fn.return_value
tuner = tuner_class.return_value
x_train = np.random.rand(100, 32, 32, 3)
y_train = np.random.rand(100, 1)
auto_model = get_single_io_auto_model(tmp_path)
auto_model.fit(
x_train, y_train, epochs=2, validation_data=(x_train, y_train)
)
auto_model.export_model()
assert tuner.get_best_model.called
def get_multi_io_auto_model(tmp_path):
return ak.AutoModel(
[ak.ImageInput(), ak.ImageInput()],
[ak.RegressionHead(), ak.RegressionHead()],
directory=tmp_path,
max_trials=2,
overwrite=False,
)
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_multi_io_with_tf_dataset_doesnt_crash(tuner_fn, tmp_path):
auto_model = get_multi_io_auto_model(tmp_path)
x1 = test_utils.generate_data()
y1 = test_utils.generate_data(shape=(1,))
dataset = tf.data.Dataset.from_tensor_slices(((x1, x1), (y1, y1)))
auto_model.fit(dataset, epochs=2)
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_single_nested_dataset_doesnt_crash(tuner_fn, tmp_path):
auto_model = ak.AutoModel(
ak.ImageInput(),
ak.RegressionHead(),
directory=tmp_path,
max_trials=2,
overwrite=False,
)
x1 = test_utils.generate_data()
y1 = test_utils.generate_data(shape=(1,))
dataset = tf.data.Dataset.from_tensor_slices(((x1,), y1))
auto_model.fit(dataset, epochs=2)
def dataset_error(x, y, validation_data, message, tmp_path):
auto_model = get_multi_io_auto_model(tmp_path)
with pytest.raises(ValueError) as info:
auto_model.fit(x, y, epochs=2, validation_data=validation_data)
assert message in str(info.value)
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_data_io_consistency_input(tuner_fn, tmp_path):
x1 = test_utils.generate_data()
y1 = test_utils.generate_data(shape=(1,))
dataset = tf.data.Dataset.from_tensor_slices(((x1,), (y1, y1)))
dataset_error(dataset, None, dataset, "Expected x to have", tmp_path)
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_data_io_consistency_output(tuner_fn, tmp_path):
x1 = test_utils.generate_data()
y1 = test_utils.generate_data(shape=(1,))
dataset = tf.data.Dataset.from_tensor_slices(((x1, x1), (y1,)))
dataset_error(dataset, None, dataset, "Expected y to have", tmp_path)
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_data_io_consistency_validation(tuner_fn, tmp_path):
x1 = test_utils.generate_data()
y1 = test_utils.generate_data(shape=(1,))
dataset = tf.data.Dataset.from_tensor_slices(((x1, x1), (y1, y1)))
val_dataset = tf.data.Dataset.from_tensor_slices(((x1,), (y1, y1)))
dataset_error(
dataset,
None,
val_dataset,
"Expected x in validation_data to have",
tmp_path,
)
@mock.patch("autokeras.auto_model.get_tuner_class")
def test_dataset_and_y(tuner_fn, tmp_path):
x1 = test_utils.generate_data()
y1 = test_utils.generate_data(shape=(1,))
x = tf.data.Dataset.from_tensor_slices((x1, x1))
y = tf.data.Dataset.from_tensor_slices((y1, y1))
val_dataset = tf.data.Dataset.from_tensor_slices(((x1,), (y1, y1)))
dataset_error(x, y, val_dataset, "Expected y to be None", tmp_path)
@mock.patch("autokeras.auto_model.get_tuner_class", side_effect=get_tuner_class)
def test_multi_input_predict(tuner_fn, tmp_path):
auto_model = get_multi_io_auto_model(tmp_path)
x1 = test_utils.generate_data()
y1 = test_utils.generate_data(shape=(1,))
dataset = tf.data.Dataset.from_tensor_slices(((x1, x1), (y1, y1)))
auto_model.fit(dataset, None, epochs=2, validation_data=dataset)
dataset2 = tf.data.Dataset.from_tensor_slices(((x1, x1),))
auto_model.predict(dataset2)
@mock.patch("autokeras.auto_model.get_tuner_class", side_effect=get_tuner_class)
def test_multi_input_predict2(tuner_fn, tmp_path):
auto_model = get_multi_io_auto_model(tmp_path)
x1 = test_utils.generate_data()
y1 = test_utils.generate_data(shape=(1,))
dataset = tf.data.Dataset.from_tensor_slices(((x1, x1), (y1, y1)))
auto_model.fit(dataset, None, epochs=2, validation_data=dataset)
dataset2 = tf.data.Dataset.from_tensor_slices((x1, x1))
auto_model.predict(dataset2)
@mock.patch("autokeras.auto_model.get_tuner_class", side_effect=get_tuner_class)
def test_single_input_predict_doesnt_crash(tuner_fn, tmp_path):
auto_model = get_single_io_auto_model(tmp_path)
x1 = test_utils.generate_data()
y1 = test_utils.generate_data(shape=(1,))
dataset = tf.data.Dataset.from_tensor_slices((x1, y1))
auto_model.fit(dataset, None, epochs=2, validation_data=dataset)
dataset2 = tf.data.Dataset.from_tensor_slices((x1, y1))
auto_model.predict(dataset2)
def test_invalid_tuner_name_error(tmp_path):
with pytest.raises(ValueError) as info:
ak.AutoModel(
ak.ImageInput(),
ak.RegressionHead(),
directory=tmp_path,
tuner="unknown",
)
assert "Expected the tuner argument to be one of" in str(info.value)
def test_no_validation_data_nor_split_error(tmp_path):
auto_model = ak.AutoModel(
ak.ImageInput(), ak.RegressionHead(), directory=tmp_path
)
with pytest.raises(ValueError) as info:
auto_model.fit(
x=np.random.rand(100, 32, 32, 3),
y=np.random.rand(100, 1),
validation_split=0,
)
assert "Either validation_data or a non-zero" in str(info.value)
@mock.patch("autokeras.auto_model.get_tuner_class", side_effect=get_tuner_class)
def test_predict_tuple_x_and_tuple_y_predict_doesnt_crash(tuner_fn, tmp_path):
auto_model = ak.AutoModel(
ak.ImageInput(), ak.RegressionHead(), directory=tmp_path
)
dataset = tf.data.Dataset.from_tensor_slices(
((np.random.rand(100, 32, 32, 3),), (np.random.rand(100, 1),))
)
auto_model.fit(dataset)
auto_model.predict(dataset)
| autokeras/autokeras/auto_model_test.py/0 | {
"file_path": "autokeras/autokeras/auto_model_test.py",
"repo_id": "autokeras",
"token_count": 4560
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Analyser(object):
"""Analyze the dataset for useful information.
Analyser is used by the input nodes and the heads of the hypermodel. It
analyzes the dataset to get useful information, e.g., the shape of the
data, the data type of the dataset. The information will be used by the
input nodes and heads to construct the data pipeline and to build the Keras
Model.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.shape = None
self.dtype = None
self.num_samples = 0
self.batch_size = None
def update(self, data):
"""Update the statistics with a batch of data.
# Arguments
data: tf.Tensor. One batch of data from tf.data.Dataset.
"""
if self.dtype is None:
self.dtype = data.dtype
if self.shape is None:
self.shape = data.shape.as_list()
if self.batch_size is None:
self.batch_size = data.shape.as_list()[0]
self.num_samples += data.shape.as_list()[0]
def finalize(self):
"""Process recorded information after all updates."""
raise NotImplementedError
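# Illustrative sketch only -- not part of the original module. It shows the
# update/finalize contract described in the docstrings above with a minimal
# concrete Analyser; the class name and the check it performs are hypothetical.
class _ShapeAnalyser(Analyser):
    """Records batch statistics and validates them after all batches are seen."""

    def finalize(self):
        # `update` has been called on every batch by now, so `self.shape`,
        # `self.dtype`, `self.batch_size` and `self.num_samples` are populated.
        if self.shape is None:
            raise ValueError("No data was passed to the analyser.")


# Example usage (illustrative):
#     analyser = _ShapeAnalyser()
#     for batch in dataset:  # batches of tf.Tensor from a tf.data.Dataset
#         analyser.update(batch)
#     analyser.finalize()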
| autokeras/autokeras/engine/analyser.py/0 | {
"file_path": "autokeras/autokeras/engine/analyser.py",
"repo_id": "autokeras",
"token_count": 630
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autokeras import preprocessors
from autokeras.engine import hyper_preprocessor
from autokeras.utils import utils
def serialize(encoder):
return utils.serialize_keras_object(encoder)
def deserialize(config, custom_objects=None):
return utils.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="preprocessors",
)
class DefaultHyperPreprocessor(hyper_preprocessor.HyperPreprocessor):
"""HyperPreprocessor without Hyperparameters to tune.
    It always returns the same preprocessor; there are no hyperparameters to
    be tuned.
# Arguments
preprocessor: The Preprocessor to return when calling build.
"""
def __init__(self, preprocessor, *args, **kwargs):
super().__init__(*args, **kwargs)
self.preprocessor = preprocessor
def build(self, hp, dataset):
return self.preprocessor
def get_config(self):
config = super().get_config()
config.update(
{"preprocessor": preprocessors.serialize(self.preprocessor)}
)
return config
@classmethod
def from_config(cls, config):
config["preprocessor"] = preprocessors.deserialize(
config["preprocessor"]
)
return super().from_config(config)
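# Illustrative sketch only -- not part of the original module. It shows that
# DefaultHyperPreprocessor ignores the hyperparameters and always returns the
# preprocessor it wraps; the function name is hypothetical, and OneHotEncoder
# comes from autokeras.preprocessors.encoders.
def _example_default_hyper_preprocessor():
    from autokeras.preprocessors import encoders

    fixed = encoders.OneHotEncoder(["a", "b", "c"])
    hyper_preprocessor = DefaultHyperPreprocessor(fixed)
    # `build` does not read `hp` or `dataset`; it returns the wrapped instance.
    assert hyper_preprocessor.build(hp=None, dataset=None) is fixed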
| autokeras/autokeras/hyper_preprocessors.py/0 | {
"file_path": "autokeras/autokeras/hyper_preprocessors.py",
"repo_id": "autokeras",
"token_count": 654
} | 2 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from autokeras import preprocessors
from autokeras.preprocessors import encoders
from autokeras.utils import data_utils
def test_one_hot_encoder_deserialize_transforms_to_np():
encoder = encoders.OneHotEncoder(["a", "b", "c"])
encoder.fit(np.array(["a", "b", "a"]))
encoder = preprocessors.deserialize(preprocessors.serialize(encoder))
one_hot = encoder.transform(
tf.data.Dataset.from_tensor_slices([["a"], ["c"], ["b"]]).batch(2)
)
for data in one_hot:
assert data.shape[1:] == [3]
def test_one_hot_encoder_decode_to_same_string():
encoder = encoders.OneHotEncoder(["a", "b", "c"])
result = encoder.postprocess(np.eye(3))
assert np.array_equal(result, np.array([["a"], ["b"], ["c"]]))
def test_label_encoder_decode_to_same_string():
encoder = encoders.LabelEncoder(["a", "b"])
result = encoder.postprocess([[0], [1]])
assert np.array_equal(result, np.array([["a"], ["b"]]))
def test_label_encoder_encode_to_correct_shape():
encoder = encoders.LabelEncoder(["a", "b"])
dataset = tf.data.Dataset.from_tensor_slices([["a"], ["b"]]).batch(32)
result = encoder.transform(dataset)
assert data_utils.dataset_shape(result).as_list() == [None, 1]
| autokeras/autokeras/preprocessors/encoders_test.py/0 | {
"file_path": "autokeras/autokeras/preprocessors/encoders_test.py",
"repo_id": "autokeras",
"token_count": 680
} | 3 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import autokeras as ak
from autokeras import test_utils
@mock.patch("autokeras.AutoModel.fit")
def test_img_clf_fit_call_auto_model_fit(fit, tmp_path):
auto_model = ak.ImageClassifier(directory=tmp_path, seed=test_utils.SEED)
auto_model.fit(
x=test_utils.generate_data(num_instances=100, shape=(32, 32, 3)),
y=test_utils.generate_one_hot_labels(num_instances=100, num_classes=10),
)
    assert fit.called
@mock.patch("autokeras.AutoModel.fit")
def test_img_reg_fit_call_auto_model_fit(fit, tmp_path):
auto_model = ak.ImageRegressor(directory=tmp_path, seed=test_utils.SEED)
auto_model.fit(
x=test_utils.generate_data(num_instances=100, shape=(32, 32, 3)),
y=test_utils.generate_data(num_instances=100, shape=(1,)),
)
assert fit.is_called
@mock.patch("autokeras.AutoModel.fit")
def test_img_seg_fit_call_auto_model_fit(fit, tmp_path):
auto_model = ak.tasks.image.ImageSegmenter(
directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(
x=test_utils.generate_data(num_instances=100, shape=(32, 32, 3)),
y=test_utils.generate_data(num_instances=100, shape=(32, 32)),
)
    assert fit.called
@mock.patch("autokeras.AutoModel.fit")
def test_img_obj_det_fit_call_auto_model_fit(fit, tmp_path):
auto_model = ak.tasks.image.ImageObjectDetector(
directory=tmp_path, seed=test_utils.SEED
)
images, labels = test_utils.get_object_detection_data()
auto_model.fit(
x=images,
y=labels,
)
    assert fit.called
| autokeras/autokeras/tasks/image_test.py/0 | {
"file_path": "autokeras/autokeras/tasks/image_test.py",
"repo_id": "autokeras",
"token_count": 846
} | 4 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autokeras.tuners import greedy
IMAGE_CLASSIFIER = [
{
"image_block_1/block_type": "vanilla",
"image_block_1/normalize": True,
"image_block_1/augment": False,
"image_block_1/conv_block_1/kernel_size": 3,
"image_block_1/conv_block_1/num_blocks": 1,
"image_block_1/conv_block_1/num_layers": 2,
"image_block_1/conv_block_1/max_pooling": True,
"image_block_1/conv_block_1/separable": False,
"image_block_1/conv_block_1/dropout": 0.25,
"image_block_1/conv_block_1/filters_0_0": 32,
"image_block_1/conv_block_1/filters_0_1": 64,
"classification_head_1/spatial_reduction_1/reduction_type": "flatten",
"classification_head_1/dropout": 0.5,
"optimizer": "adam",
"learning_rate": 1e-3,
},
{
"image_block_1/block_type": "resnet",
"image_block_1/normalize": True,
"image_block_1/augment": True,
"image_block_1/image_augmentation_1/horizontal_flip": True,
"image_block_1/image_augmentation_1/vertical_flip": True,
"image_block_1/image_augmentation_1/contrast_factor": 0.0,
"image_block_1/image_augmentation_1/rotation_factor": 0.0,
"image_block_1/image_augmentation_1/translation_factor": 0.1,
"image_block_1/image_augmentation_1/zoom_factor": 0.0,
"image_block_1/res_net_block_1/pretrained": False,
"image_block_1/res_net_block_1/version": "resnet50",
"image_block_1/res_net_block_1/imagenet_size": True,
"classification_head_1/spatial_reduction_1/reduction_type": "global_avg", # noqa: E501
"classification_head_1/dropout": 0,
"optimizer": "adam",
"learning_rate": 1e-3,
},
{
"image_block_1/block_type": "efficient",
"image_block_1/normalize": True,
"image_block_1/augment": True,
"image_block_1/image_augmentation_1/horizontal_flip": True,
"image_block_1/image_augmentation_1/vertical_flip": False,
"image_block_1/image_augmentation_1/contrast_factor": 0.0,
"image_block_1/image_augmentation_1/rotation_factor": 0.0,
"image_block_1/image_augmentation_1/translation_factor": 0.1,
"image_block_1/image_augmentation_1/zoom_factor": 0.0,
"image_block_1/efficient_net_block_1/pretrained": True,
"image_block_1/efficient_net_block_1/version": "b7",
"image_block_1/efficient_net_block_1/trainable": True,
"image_block_1/efficient_net_block_1/imagenet_size": True,
"classification_head_1/spatial_reduction_1/reduction_type": "global_avg", # noqa: E501
"classification_head_1/dropout": 0,
"optimizer": "adam",
"learning_rate": 2e-5,
},
]
TEXT_CLASSIFIER = [
{
"text_block_1/block_type": "vanilla",
"classification_head_1/dropout": 0,
"text_block_1/max_tokens": 5000,
"text_block_1/conv_block_1/separable": False,
"text_block_1/text_to_int_sequence_1/output_sequence_length": 512,
"text_block_1/embedding_1/pretraining": "none",
"text_block_1/embedding_1/embedding_dim": 64,
"text_block_1/embedding_1/dropout": 0.25,
"text_block_1/conv_block_1/kernel_size": 5,
"text_block_1/conv_block_1/num_blocks": 1,
"text_block_1/conv_block_1/num_layers": 1,
"text_block_1/conv_block_1/max_pooling": False,
"text_block_1/conv_block_1/dropout": 0,
"text_block_1/conv_block_1/filters_0_0": 256,
"text_block_1/spatial_reduction_1/reduction_type": "global_max",
"text_block_1/dense_block_1/num_layers": 1,
"text_block_1/dense_block_1/use_batchnorm": False,
"text_block_1/dense_block_1/dropout": 0.5,
"text_block_1/dense_block_1/units_0": 256,
"optimizer": "adam",
"learning_rate": 1e-3,
},
{
"text_block_1/block_type": "transformer",
"classification_head_1/dropout": 0,
"optimizer": "adam",
"learning_rate": 1e-3,
"text_block_1/max_tokens": 20000,
"text_block_1/text_to_int_sequence_1/output_sequence_length": 200,
"text_block_1/transformer_1/pretraining": "none",
"text_block_1/transformer_1/embedding_dim": 32,
"text_block_1/transformer_1/num_heads": 2,
"text_block_1/transformer_1/dense_dim": 32,
"text_block_1/transformer_1/dropout": 0.25,
"text_block_1/spatial_reduction_1/reduction_type": "global_avg",
"text_block_1/dense_block_1/num_layers": 1,
"text_block_1/dense_block_1/use_batchnorm": False,
"text_block_1/dense_block_1/dropout": 0.5,
"text_block_1/dense_block_1/units_0": 20,
},
{
"text_block_1/block_type": "bert",
"classification_head_1/dropout": 0,
"optimizer": "adam_weight_decay",
"learning_rate": 2e-5,
"text_block_1/bert_block_1/max_sequence_length": 512,
"text_block_1/max_tokens": 20000,
},
]
STRUCTURED_DATA_CLASSIFIER = [
{
"structured_data_block_1/normalize": True,
"structured_data_block_1/dense_block_1/num_layers": 2,
"structured_data_block_1/dense_block_1/use_batchnorm": False,
"structured_data_block_1/dense_block_1/dropout": 0,
"structured_data_block_1/dense_block_1/units_0": 32,
"structured_data_block_1/dense_block_1/units_1": 32,
"classification_head_1/dropout": 0.0,
"optimizer": "adam",
"learning_rate": 0.001,
}
]
STRUCTURED_DATA_REGRESSOR = [
{
"structured_data_block_1/normalize": True,
"structured_data_block_1/dense_block_1/num_layers": 2,
"structured_data_block_1/dense_block_1/use_batchnorm": False,
"structured_data_block_1/dense_block_1/dropout": 0,
"structured_data_block_1/dense_block_1/units_0": 32,
"structured_data_block_1/dense_block_1/units_1": 32,
"regression_head_1/dropout": 0.0,
"optimizer": "adam",
"learning_rate": 0.001,
}
]
class ImageClassifierTuner(greedy.Greedy):
def __init__(self, **kwargs):
super().__init__(initial_hps=IMAGE_CLASSIFIER, **kwargs)
class TextClassifierTuner(greedy.Greedy):
def __init__(self, **kwargs):
super().__init__(initial_hps=TEXT_CLASSIFIER, **kwargs)
class StructuredDataClassifierTuner(greedy.Greedy):
def __init__(self, **kwargs):
super().__init__(initial_hps=STRUCTURED_DATA_CLASSIFIER, **kwargs)
class StructuredDataRegressorTuner(greedy.Greedy):
def __init__(self, **kwargs):
super().__init__(initial_hps=STRUCTURED_DATA_REGRESSOR, **kwargs)
| autokeras/autokeras/tuners/task_specific.py/0 | {
"file_path": "autokeras/autokeras/tuners/task_specific.py",
"repo_id": "autokeras",
"token_count": 3403
} | 5 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras import datasets
import autokeras as ak
from benchmark.experiments import experiment
class ImageClassifierExperiment(experiment.Experiment):
def get_auto_model(self):
return ak.ImageClassifier(
max_trials=10, directory=self.tmp_dir, overwrite=True
)
class MNIST(ImageClassifierExperiment):
def __init__(self):
super().__init__(name="MNIST")
@staticmethod
def load_data():
return datasets.mnist.load_data()
class CIFAR10(ImageClassifierExperiment):
def __init__(self):
super().__init__(name="CIFAR10")
@staticmethod
def load_data():
return datasets.cifar10.load_data()
| autokeras/benchmark/experiments/image.py/0 | {
"file_path": "autokeras/benchmark/experiments/image.py",
"repo_id": "autokeras",
"token_count": 420
} | 6 |
<jupyter_start><jupyter_code>!pip install autokeras
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
import autokeras as ak<jupyter_output><empty_output><jupyter_text>A Simple Example The first step is to prepare your data. Here we use the MNIST dataset as an example.<jupyter_code>(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape) # (60000, 28, 28)
print(y_train.shape) # (60000,)
print(y_train[:3]) # array([7, 2, 1], dtype=uint8)<jupyter_output><empty_output><jupyter_text>The second step is to run the ImageClassifier. It is recommended to have more trials for more complicated datasets. This is just a quick demo of MNIST, so we set max_trials to 1. For the same reason, we set epochs to 10. You can also leave the epochs unspecified for an adaptive number of epochs.<jupyter_code># Initialize the image classifier.
clf = ak.ImageClassifier(overwrite=True, max_trials=1)
# Feed the image classifier with training data.
clf.fit(x_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(x_test)
print(predicted_y)
# Evaluate the best model with testing data.
print(clf.evaluate(x_test, y_test))<jupyter_output><empty_output><jupyter_text>Validation Data By default, AutoKeras uses the last 20% of the training data as validation data. As shown in the example below, you can use validation_split to specify the percentage.<jupyter_code>clf.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=10,
)<jupyter_output><empty_output><jupyter_text>You can also use your own validation set instead of splitting it from the training data with validation_data.<jupyter_code>split = 50000
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
clf.fit(
x_train,
y_train,
# Use your own validation set.
validation_data=(x_val, y_val),
epochs=10,
)<jupyter_output><empty_output><jupyter_text>Customized Search Space For advanced users, you may customize your search space by using AutoModel instead of ImageClassifier. You can configure the ImageBlock for some high-level configurations, e.g., block_type for the type of neural network to search, normalize for whether to do data normalization, augment for whether to do data augmentation. You can also leave these arguments unspecified, which would leave the different choices to be tuned automatically. See the following example for details.<jupyter_code>input_node = ak.ImageInput()
output_node = ak.ImageBlock(
# Only search ResNet architectures.
block_type="resnet",
# Normalize the dataset.
normalize=True,
# Do not do data augmentation.
augment=False,
)(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
clf.fit(x_train, y_train, epochs=10)<jupyter_output><empty_output><jupyter_text>The usage of AutoModel is similar to the functional API of Keras. Basically, you are building a graph, whose edges are blocks and whose nodes are intermediate outputs of blocks. You can add an edge from input_node to output_node with output_node = ak.[some_block]([block_args])(input_node). You can also use more fine-grained blocks to customize the search space even further. See the following example.<jupyter_code>input_node = ak.ImageInput()
output_node = ak.Normalization()(input_node)
output_node = ak.ImageAugmentation(horizontal_flip=False)(output_node)
output_node = ak.ResNetBlock(version="v2")(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
clf.fit(x_train, y_train, epochs=10)<jupyter_output><empty_output><jupyter_text>Data Format. The AutoKeras ImageClassifier is quite flexible about the data format. For the images, it accepts data formats both with and without the channel dimension. The images in the MNIST dataset do not have the channel dimension; each image is a matrix with shape (28, 28). AutoKeras also accepts images of three dimensions with the channel dimension at the end, e.g., (32, 32, 3), (28, 28, 1). For the classification labels, AutoKeras accepts both plain labels, i.e. strings or integers, and one-hot encoded labels, i.e. vectors of 0s and 1s. So if you prepare your data in the following way, the ImageClassifier should still work.<jupyter_code>(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Reshape the images to have the channel dimension.
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
# One-hot encode the labels.
eye = np.eye(10)
y_train = eye[y_train]
y_test = eye[y_test]
print(x_train.shape) # (60000, 28, 28, 1)
print(y_train.shape) # (60000, 10)
print(y_train[:3])
# array([[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
# [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.]])<jupyter_output><empty_output><jupyter_text>We also support using tf.data.Dataset format for the training data.<jupyter_code>train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,)))
test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,)))
clf = ak.ImageClassifier(overwrite=True, max_trials=1)
# Feed the tensorflow Dataset to the classifier.
clf.fit(train_set, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(test_set)
# Evaluate the best model with testing data.
print(clf.evaluate(test_set))<jupyter_output><empty_output> | autokeras/docs/ipynb/image_classification.ipynb/0 | {
"file_path": "autokeras/docs/ipynb/image_classification.ipynb",
"repo_id": "autokeras",
"token_count": 1910
} | 7 |
site_name: AutoKeras
theme:
favicon: '/img/favicon.png'
logo: '/img/logo_white.svg'
name: 'material'
docs_dir: sources
repo_url: https://github.com/keras-team/autokeras
site_url: http://autokeras.com
edit_uri: ""
site_description: 'Documentation for AutoKeras.'
markdown_extensions:
- codehilite
- pymdownx.superfences:
custom_fences:
- name: mermaid
class: mermaid
format: !!python/name:pymdownx.superfences.fence_div_format
- pymdownx.emoji:
emoji_index: !!python/name:materialx.emoji.twemoji
emoji_generator: !!python/name:materialx.emoji.to_svg
- admonition
extra:
analytics:
provider: google
property: G-GTF9QP8DFD
extra_css:
- stylesheets/extra.css
extra_javascript:
- https://unpkg.com/[email protected]/dist/mermaid.min.js
nav:
- Home: index.md
- Installation: install.md
- Tutorials:
- Overview: tutorial/overview.md
- Image Classification: tutorial/image_classification.md
- Image Regression: tutorial/image_regression.md
- Text Classification: tutorial/text_classification.md
- Text Regression: tutorial/text_regression.md
- Structured Data Classification: tutorial/structured_data_classification.md
- Structured Data Regression: tutorial/structured_data_regression.md
- TimeSeriesForecaster: tutorial/timeseries_forecaster.md
- Multi-Modal and Multi-Task: tutorial/multi.md
- Customized Model: tutorial/customized.md
- Export Model: tutorial/export.md
- Load Data from Disk: tutorial/load.md
- FAQ: tutorial/faq.md
- Extensions:
- TensorFlow Cloud: extensions/tf_cloud.md
- TRAINS: extensions/trains.md
- Docker: docker.md
- Contributing Guide: contributing.md
- Documentation:
- ImageClassifier: image_classifier.md
- ImageRegressor: image_regressor.md
- TextClassifier: text_classifier.md
- TextRegressor: text_regressor.md
- StructuredDataClassifier: structured_data_classifier.md
- StructuredDataRegressor: structured_data_regressor.md
- AutoModel: auto_model.md
- Base Class: base.md
- Node: node.md
- Block: block.md
- Utils: utils.md
- Benchmarks: benchmarks.md
- About: about.md
| autokeras/docs/mkdocs.yml/0 | {
"file_path": "autokeras/docs/mkdocs.yml",
"repo_id": "autokeras",
"token_count": 821
} | 8 |
# Auto-Keras Docker
## Download Auto-Keras Docker image
The following command downloads the Auto-Keras Docker image to your machine.
```
docker pull haifengjin/autokeras:latest
```
Image releases are tagged using the following format:
| Tag | Description|
| ------------- |:-------------:|
|latest|Auto-Keras image|
|devel| Auto-Keras image that tracks Github repository|
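For example, to pull the development image that tracks the GitHub repository instead of the latest release:
```
docker pull haifengjin/autokeras:devel
```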
## Start Auto-Keras Docker container
```
docker run -it --shm-size 2G haifengjin/autokeras /bin/bash
```
In case you need more memory to run the container, change the value of `shm-size`. ([Docker run reference](https://docs.docker.com/engine/reference/run/#general-form))
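For example, to give the container 8 GB of shared memory (pick a value that suits your machine):
```
docker run -it --shm-size 8G haifengjin/autokeras /bin/bash
```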
## Run application:
To run a local script `file.py` using Auto-Keras within the container, mount the host directory into the container with `-v hostDir:/app`.
```
docker run -it -v hostDir:/app --shm-size 2G haifengjin/autokeras python /app/file.py
```
## Example:
Let's download the MNIST example and run it within the container.
Download the example:
```
curl https://raw.githubusercontent.com/keras-team/autokeras/master/examples/mnist.py --output mnist.py
```
Run the MNIST example:
```
docker run -it -v "$(pwd)":/app --shm-size 2G haifengjin/autokeras python /app/mnist.py
```
| autokeras/docs/templates/docker.md/0 | {
"file_path": "autokeras/docs/templates/docker.md",
"repo_id": "autokeras",
"token_count": 406
} | 9 |
from tensorflow.keras.datasets import cifar10
import autokeras as ak
# Prepare the dataset.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Initialize the ImageClassifier.
clf = ak.ImageClassifier(max_trials=3)
# Search for the best model.
clf.fit(x_train, y_train, epochs=5)
# Evaluate on the testing data.
print("Accuracy: {accuracy}".format(accuracy=clf.evaluate(x_test, y_test)[1]))
| autokeras/examples/cifar10.py/0 | {
"file_path": "autokeras/examples/cifar10.py",
"repo_id": "autokeras",
"token_count": 156
} | 10 |
#!/usr/bin/env bash
cd docs
python autogen.py
mkdocs build
cd ..
sh shell/format.sh
echo "autokeras.com" > docs/site/CNAME
git checkout -b gh-pages-temp
git add -f docs/site
git commit -m "gh-pages update"
git subtree split --prefix docs/site -b gh-pages
git push -f origin gh-pages:gh-pages
git branch -D gh-pages
git checkout master
git branch -D gh-pages-temp
| autokeras/shell/docs.sh/0 | {
"file_path": "autokeras/shell/docs.sh",
"repo_id": "autokeras",
"token_count": 127
} | 11 |
"""Enables dynamic setting of underlying Keras module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
_KERAS_BACKEND = None
_KERAS_LAYERS = None
_KERAS_MODELS = None
_KERAS_UTILS = None
def get_submodules_from_kwargs(kwargs):
backend = kwargs.get('backend', _KERAS_BACKEND)
layers = kwargs.get('layers', _KERAS_LAYERS)
models = kwargs.get('models', _KERAS_MODELS)
utils = kwargs.get('utils', _KERAS_UTILS)
for key in kwargs.keys():
if key not in ['backend', 'layers', 'models', 'utils']:
            raise TypeError('Invalid keyword argument: %s' % key)
return backend, layers, models, utils
def correct_pad(backend, inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
# Arguments
        backend: The Keras backend module in use.
        inputs: Input tensor whose spatial dimensions are read to size the padding.
        kernel_size: An integer or tuple/list of 2 integers.
# Returns
A tuple.
"""
img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return ((correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]))
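# Usage sketch (illustrative only; here `layers` stands for the Keras layers
# module resolved via `get_submodules_from_kwargs`): pad asymmetrically before
# a stride-2 "valid" convolution so it behaves like "same" padding:
#   x = layers.ZeroPadding2D(padding=correct_pad(backend, inputs, 3))(inputs)
#   x = layers.Conv2D(32, 3, strides=2, padding='valid')(x)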
__version__ = '1.0.8'
from . import vgg16
from . import vgg19
from . import inception_v3
from . import inception_resnet_v2
from . import xception
from . import mobilenet
from . import mobilenet_v2
from . import mobilenet_v3
from . import densenet
from . import nasnet
from . import resnet
from . import resnet_v2
from . import resnext
from . import efficientnet
| keras-applications/keras_applications/__init__.py/0 | {
"file_path": "keras-applications/keras_applications/__init__.py",
"repo_id": "keras-applications",
"token_count": 718
} | 12 |
"""
Adapted from keras example cifar10_cnn.py
Train NASNet-CIFAR on the CIFAR10 small images dataset.
"""
from __future__ import print_function
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import CSVLogger
from keras.optimizers import Adam
from keras_contrib.applications.nasnet import NASNetCIFAR, preprocess_input
import numpy as np
weights_file = 'NASNet-CIFAR-10.h5'
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.5),
cooldown=0,
patience=5,
min_lr=0.5e-5)
csv_logger = CSVLogger('NASNet-CIFAR-10.csv')
model_checkpoint = ModelCheckpoint(weights_file,
monitor='val_predictions_acc',
save_best_only=True,
save_weights_only=True, mode='max')
batch_size = 128
nb_classes = 10
nb_epoch = 600
data_augmentation = True
# input image dimensions
img_rows, img_cols = 32, 32
# The CIFAR10 images are RGB.
img_channels = 3
# The data, shuffled and split between train and test sets:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# Convert class vectors to binary class matrices.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# preprocess input
X_train = preprocess_input(X_train)
X_test = preprocess_input(X_test)
# For training, the auxiliary branch must be used to correctly train NASNet
model = NASNetCIFAR((img_rows, img_cols, img_channels), use_auxilary_branch=True)
model.summary()
optimizer = Adam(lr=1e-3, clipnorm=5)
model.compile(loss=['categorical_crossentropy', 'categorical_crossentropy'],
optimizer=optimizer, metrics=['accuracy'], loss_weights=[1.0, 0.4])
# model.load_weights('NASNet-CIFAR-10.h5', by_name=True)
if not data_augmentation:
print('Not using data augmentation.')
model.fit(X_train, [Y_train, Y_train],
batch_size=batch_size,
epochs=nb_epoch,
validation_data=(X_test, [Y_test, Y_test]),
shuffle=True,
verbose=2,
callbacks=[lr_reducer, csv_logger, model_checkpoint])
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(X_train)
# wrap the ImageDataGenerator to yield
# two label batches [y, y] for each input batch X
    # When training a NASNet model, we have to use its auxiliary training head
    # Therefore the model is technically a 1 input - 2 output model, and requires
    # the label to be duplicated for the auxiliary head
def image_data_generator_wrapper(image_datagenerator, batch_size):
        iterator = image_datagenerator.flow(X_train, Y_train, batch_size=batch_size)
while True:
X, y = next(iterator) # get the next batch
yield X, [y, y] # duplicate the labels for each batch
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(image_data_generator_wrapper(datagen, batch_size),
steps_per_epoch=X_train.shape[0] // batch_size,
validation_data=(X_test, [Y_test, Y_test]),
epochs=nb_epoch, verbose=2,
callbacks=[lr_reducer, csv_logger, model_checkpoint])
scores = model.evaluate(X_test, [Y_test, Y_test], batch_size=batch_size)
for score, metric_name in zip(scores, model.metrics_names):
print("%s : %0.4f" % (metric_name, score))
| keras-contrib/examples/cifar10_nasnet.py/0 | {
"file_path": "keras-contrib/examples/cifar10_nasnet.py",
"repo_id": "keras-contrib",
"token_count": 1900
} | 13 |
from keras.backend import cntk_backend as KCN
def moments(x, axes, shift=None, keep_dims=False):
''' Calculates and returns the mean and variance of the input '''
    mean, variance = KCN._moments(x, axes=axes, shift=shift, keep_dims=keep_dims)
    return mean, variance
| keras-contrib/keras_contrib/backend/cntk_backend.py/0 | {
"file_path": "keras-contrib/keras_contrib/backend/cntk_backend.py",
"repo_id": "keras-contrib",
"token_count": 99
} | 14 |
from __future__ import absolute_import
import numpy as np
from keras import backend as K
from keras.initializers import Initializer, Orthogonal
class ConvolutionAware(Initializer):
"""
Initializer that generates orthogonal convolution filters in the fourier
space. If this initializer is passed a shape that is not 3D or 4D,
orthogonal initialization will be used.
# Arguments
eps_std: Standard deviation for the random normal noise used to break
symmetry in the inverse fourier transform.
seed: A Python integer. Used to seed the random generator.
# References
Armen Aghajanyan, https://arxiv.org/abs/1702.06295
"""
def __init__(self, eps_std=0.05, seed=None):
self.eps_std = eps_std
self.seed = seed
self.orthogonal = Orthogonal()
def __call__(self, shape):
rank = len(shape)
if self.seed is not None:
np.random.seed(self.seed)
fan_in, fan_out = _compute_fans(shape, K.image_data_format())
variance = 2 / fan_in
if rank == 3:
row, stack_size, filters_size = shape
transpose_dimensions = (2, 1, 0)
kernel_shape = (row,)
correct_ifft = lambda shape, s=[None]: np.fft.irfft(shape, s[0])
correct_fft = np.fft.rfft
elif rank == 4:
row, column, stack_size, filters_size = shape
transpose_dimensions = (2, 3, 0, 1)
kernel_shape = (row, column)
correct_ifft = np.fft.irfft2
correct_fft = np.fft.rfft2
elif rank == 5:
x, y, z, stack_size, filters_size = shape
transpose_dimensions = (3, 4, 0, 1, 2)
kernel_shape = (x, y, z)
correct_fft = np.fft.rfftn
correct_ifft = np.fft.irfftn
else:
return K.variable(self.orthogonal(shape), dtype=K.floatx())
kernel_fourier_shape = correct_fft(np.zeros(kernel_shape)).shape
init = []
for i in range(filters_size):
basis = self._create_basis(
stack_size, np.prod(kernel_fourier_shape))
basis = basis.reshape((stack_size,) + kernel_fourier_shape)
filters = [correct_ifft(x, kernel_shape) +
np.random.normal(0, self.eps_std, kernel_shape) for
x in basis]
init.append(filters)
# Format of array is now: filters, stack, row, column
init = np.array(init)
init = self._scale_filters(init, variance)
return init.transpose(transpose_dimensions)
def _create_basis(self, filters, size):
if size == 1:
return np.random.normal(0.0, self.eps_std, (filters, size))
nbb = filters // size + 1
li = []
for i in range(nbb):
a = np.random.normal(0.0, 1.0, (size, size))
a = self._symmetrize(a)
u, _, v = np.linalg.svd(a)
li.extend(u.T.tolist())
p = np.array(li[:filters], dtype=K.floatx())
return p
def _symmetrize(self, a):
return a + a.T - np.diag(a.diagonal())
def _scale_filters(self, filters, variance):
c_var = np.var(filters)
p = np.sqrt(variance / c_var)
return filters * p
def get_config(self):
return {
'eps_std': self.eps_std,
'seed': self.seed
}
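# Usage sketch (illustrative; any standard Keras convolution layer works):
#   from keras.layers import Conv2D
#   conv = Conv2D(64, (3, 3), kernel_initializer=ConvolutionAware(seed=1337))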
def _compute_fans(shape, data_format='channels_last'):
"""Computes the number of input and output units for a weight shape.
# Arguments
shape: Integer shape tuple.
data_format: Image data format to use for convolution kernels.
Note that all kernels in Keras are standardized on the
`channels_last` ordering (even when inputs are set
to `channels_first`).
# Returns
A tuple of scalars, `(fan_in, fan_out)`.
# Raises
ValueError: in case of invalid `data_format` argument.
"""
if len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
elif len(shape) in {3, 4, 5}:
# Assuming convolution kernels (1D, 2D or 3D).
# TH kernel shape: (depth, input_depth, ...)
# TF kernel shape: (..., input_depth, depth)
if data_format == 'channels_first':
receptive_field_size = np.prod(shape[2:])
fan_in = shape[1] * receptive_field_size
fan_out = shape[0] * receptive_field_size
elif data_format == 'channels_last':
receptive_field_size = np.prod(shape[:-2])
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
else:
raise ValueError('Invalid data_format: ' + data_format)
else:
# No specific assumptions.
fan_in = np.sqrt(np.prod(shape))
fan_out = np.sqrt(np.prod(shape))
return fan_in, fan_out
| keras-contrib/keras_contrib/initializers/convaware.py/0 | {
"file_path": "keras-contrib/keras_contrib/initializers/convaware.py",
"repo_id": "keras-contrib",
"token_count": 2311
} | 15 |
from .dssim import DSSIMObjective
from .jaccard import jaccard_distance
from .crf_losses import crf_loss, crf_nll
| keras-contrib/keras_contrib/losses/__init__.py/0 | {
"file_path": "keras-contrib/keras_contrib/losses/__init__.py",
"repo_id": "keras-contrib",
"token_count": 43
} | 16 |
from __future__ import print_function
import numpy as np
from keras_contrib.utils import test_utils
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.utils import to_categorical
def get_test_data():
np.random.seed(1337)
(x_train, y_train), _ = test_utils.get_test_data(num_train=1000,
num_test=200,
input_shape=(10,),
classification=True,
num_classes=2)
y_train = to_categorical(y_train)
return x_train, y_train
def get_model(input_dim, num_hidden, output_dim):
model = Sequential()
model.add(Dense(num_hidden, input_shape=(input_dim,)))
model.add(Activation('relu'))
model.add(Dense(output_dim))
model.add(Activation('softmax'))
return model
def _test_optimizer(optimizer, target=0.75):
x_train, y_train = get_test_data()
model = get_model(x_train.shape[1], 10, y_train.shape[1])
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
assert history.history['acc'][-1] >= target
config = optimizers.serialize(optimizer)
custom_objects = {optimizer.__class__.__name__: optimizer.__class__}
optim = optimizers.deserialize(config, custom_objects)
new_config = optimizers.serialize(optim)
assert config == new_config
| keras-contrib/keras_contrib/tests/optimizers.py/0 | {
"file_path": "keras-contrib/keras_contrib/tests/optimizers.py",
"repo_id": "keras-contrib",
"token_count": 761
} | 17 |
import pytest
import numpy as np
from keras import backend as K
from keras_contrib import constraints
test_values = [0.1, 0.5, 3, 8, 1e-7]
np.random.seed(3537)
example_array = np.random.random((100, 100)) * 100. - 50.
example_array[0, 0] = 0. # 0 could possibly cause trouble
def test_clip():
clip_instance = constraints.clip()
clipped = clip_instance(K.variable(example_array))
assert(np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.01))
clip_instance = constraints.clip(0.1)
clipped = clip_instance(K.variable(example_array))
assert(np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.1))
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/constraints_test.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/constraints_test.py",
"repo_id": "keras-contrib",
"token_count": 268
} | 18 |
import time
import numpy as np
import tensorflow as tf
from absl import flags
import keras_core
FLAGS = flags.FLAGS
flags.DEFINE_string(
"benchmark_name",
None,
"The name of benchmark to run. If None, all benchmarks in the file will be "
"run.",
)
flags.DEFINE_integer(
"num_samples",
1000,
"Number of input data samples.",
)
flags.DEFINE_integer(
"batch_size",
20,
"Batch size of data.",
)
flags.DEFINE_bool(
"jit_compile",
True,
"If True, the benchmark will run with XLA compilation.",
)
class BenchmarkMetricsCallback:
def __init__(self, start_batch=1, stop_batch=None):
self.start_batch = start_batch
self.stop_batch = stop_batch
self.state = {}
def on_train_batch_begin(self, batch, logs=None):
if batch == self.start_batch:
self.state["benchmark_begin"] = time.time()
def on_train_batch_end(self, batch, logs=None):
if batch == self.stop_batch:
self.state["benchmark_end"] = time.time()
throughput = (self.stop_batch - self.start_batch + 1) / (
self.state["benchmark_end"] - self.state["benchmark_begin"]
)
self.state["throughput"] = throughput
def on_predict_batch_begin(self, batch, logs=None):
if batch == self.start_batch:
self.state["benchmark_begin"] = time.time()
def on_predict_batch_end(self, batch, logs=None):
if batch == self.stop_batch:
self.state["benchmark_end"] = time.time()
throughput = (self.stop_batch - self.start_batch + 1) / (
self.state["benchmark_end"] - self.state["benchmark_begin"]
)
self.state["throughput"] = throughput
class KerasCoreBenchmarkMetricsCallback(keras_core.callbacks.Callback):
def __init__(self, start_batch=1, stop_batch=None):
self._callback = BenchmarkMetricsCallback(start_batch, stop_batch)
def on_train_batch_begin(self, batch, logs=None):
self._callback.on_train_batch_begin(batch, logs)
def on_train_batch_end(self, batch, logs=None):
self._callback.on_train_batch_end(batch, logs)
def on_predict_batch_begin(self, batch, logs=None):
self._callback.on_predict_batch_begin(batch, logs)
def on_predict_batch_end(self, batch, logs=None):
self._callback.on_predict_batch_end(batch, logs)
class TFKerasBenchmarkMetricsCallback(tf.keras.callbacks.Callback):
def __init__(self, start_batch=1, stop_batch=None):
self._callback = BenchmarkMetricsCallback(start_batch, stop_batch)
def on_train_batch_begin(self, batch, logs=None):
self._callback.on_train_batch_begin(batch, logs)
def on_train_batch_end(self, batch, logs=None):
self._callback.on_train_batch_end(batch, logs)
def on_predict_batch_begin(self, batch, logs=None):
self._callback.on_predict_batch_begin(batch, logs)
def on_predict_batch_end(self, batch, logs=None):
self._callback.on_predict_batch_end(batch, logs)
class LayerBenchmark:
def __init__(
self,
layer_name,
init_args,
input_shape,
flat_call_inputs=True,
jit_compile=True,
keras_core_layer=None,
tf_keras_layer=None,
):
self.layer_name = layer_name
_keras_core_layer_class = getattr(keras_core.layers, layer_name)
_tf_keras_layer_class = getattr(tf.keras.layers, layer_name)
if keras_core_layer is None:
# Sometimes you want to initialize the keras_core layer and tf_keras
# layer in a different way. For example, `Bidirectional` layer,
# which takes in `keras_core.layers.Layer` and
            # `tf.keras.layers.Layer` separately.
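            # For example (illustrative only, not exercised in this file), a
            # caller could pass:
            #   keras_core_layer=keras_core.layers.Bidirectional(
            #       keras_core.layers.LSTM(32)),
            #   tf_keras_layer=tf.keras.layers.Bidirectional(
            #       tf.keras.layers.LSTM(32)),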
self._keras_core_layer = _keras_core_layer_class(**init_args)
else:
self._keras_core_layer = keras_core_layer
if tf_keras_layer is None:
self._tf_keras_layer = _tf_keras_layer_class(**init_args)
else:
self._tf_keras_layer = tf_keras_layer
self.input_shape = input_shape
self._keras_core_model = self._build_keras_core_model(
input_shape, flat_call_inputs
)
self._tf_keras_model = self._build_tf_keras_model(
input_shape, flat_call_inputs
)
self._keras_core_model.compile(
loss="mse", optimizer="sgd", jit_compile=jit_compile
)
self._tf_keras_model.compile(
loss="mse", optimizer="sgd", jit_compile=jit_compile
)
self.flat_call_inputs = flat_call_inputs
self.jit_compile = jit_compile
self.input_shape = input_shape
def _build_keras_core_model(self, input_shape, flat_call_inputs=True):
inputs = []
if not isinstance(input_shape[0], (tuple, list)):
input_shape = [input_shape]
for shape in input_shape:
inputs.append(keras_core.Input(shape=shape))
if flat_call_inputs:
outputs = self._keras_core_layer(*inputs)
else:
outputs = self._keras_core_layer(inputs)
return keras_core.Model(inputs=inputs, outputs=outputs)
def _build_tf_keras_model(self, input_shape, flat_call_inputs=True):
inputs = []
if not isinstance(input_shape[0], (tuple, list)):
input_shape = [input_shape]
for shape in input_shape:
inputs.append(tf.keras.Input(shape=shape))
if flat_call_inputs:
outputs = self._tf_keras_layer(*inputs)
else:
outputs = self._tf_keras_layer(inputs)
return tf.keras.Model(inputs=inputs, outputs=outputs)
def benchmark_predict(self, num_samples, batch_size, data=None):
if data is None:
# Generate default data if not provided.
if isinstance(self.input_shape[0], (tuple, list)):
# The layer has multiple inputs.
data = []
for data_shape in self.input_shape:
data_shape = [num_samples] + list(data_shape)
data.append(np.random.normal(size=data_shape))
else:
data_shape = [num_samples] + list(self.input_shape)
data = np.random.normal(size=data_shape)
num_iterations = num_samples // batch_size - 1
callback = KerasCoreBenchmarkMetricsCallback(stop_batch=num_iterations)
tf_keras_callback = TFKerasBenchmarkMetricsCallback(
stop_batch=num_iterations
)
self._keras_core_model.predict(
data,
batch_size=batch_size,
callbacks=[callback],
)
self._tf_keras_model.predict(
data,
batch_size=batch_size,
callbacks=[tf_keras_callback],
)
keras_core_throughput = (
callback._callback.state["throughput"] * batch_size
)
tf_keras_throughput = (
tf_keras_callback._callback.state["throughput"] * batch_size
)
print(
f"Keras Core throughput of forward pass of {self.layer_name}: "
f"{keras_core_throughput:.2f} samples/sec."
)
print(
f"TF Keras throughput of forward pass of {self.layer_name}: "
f"{tf_keras_throughput:.2f} samples/sec."
)
def benchmark_train(self, num_samples, batch_size, data=None, label=None):
if data is None:
# Generate default data if not provided.
if isinstance(self.input_shape[0], (tuple, list)):
# The layer has multiple inputs.
data = []
for data_shape in self.input_shape:
data_shape = [num_samples] + list(data_shape)
data.append(np.random.normal(size=data_shape))
else:
data_shape = [num_samples] + list(self.input_shape)
data = [np.random.normal(size=data_shape)]
if label is None:
# Generate default label if not provided.
if self.flat_call_inputs:
# Scale by a small factor to avoid zero gradients.
label = (
keras_core.backend.convert_to_numpy(
self._keras_core_layer(*data)
)
* 1.001
)
else:
label = (
keras_core.backend.convert_to_numpy(
self._keras_core_layer(data)
)
* 1.001
)
num_iterations = num_samples // batch_size - 1
callback = KerasCoreBenchmarkMetricsCallback(stop_batch=num_iterations)
tf_keras_callback = TFKerasBenchmarkMetricsCallback(
stop_batch=num_iterations
)
self._keras_core_model.fit(
data,
label,
batch_size=batch_size,
callbacks=[callback],
)
self._tf_keras_model.fit(
data,
label,
batch_size=batch_size,
callbacks=[tf_keras_callback],
)
keras_core_throughput = (
callback._callback.state["throughput"] * batch_size
)
tf_keras_throughput = (
tf_keras_callback._callback.state["throughput"] * batch_size
)
print(
f"Keras Core throughput of forward & backward pass of "
f"{self.layer_name}: {keras_core_throughput:.2f} samples/sec."
)
print(
f"TF Keras throughput of forward & backward pass of "
f"{self.layer_name}: {tf_keras_throughput:.2f} samples/sec."
)
| keras-core/benchmarks/layer_benchmark/base_benchmark.py/0 | {
"file_path": "keras-core/benchmarks/layer_benchmark/base_benchmark.py",
"repo_id": "keras-core",
"token_count": 4698
} | 19 |
"""Benchmark Keras performance with torch custom training loop.
In this file we use a convolution model. Training loop is written in the
vanilla torch way, and we compare the performance between building model with
Keras and torch.
"""
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import keras_core
from benchmarks.torch_ctl_benchmark.benchmark_utils import train_loop
from keras_core import layers
num_classes = 2
input_shape = (3, 256, 256)
batch_size = 128
num_batches = 20
num_epochs = 1
x_train = np.random.normal(
size=(num_batches * batch_size, *input_shape)
).astype(np.float32)
y_train = np.random.randint(0, num_classes, size=(num_batches * batch_size,))
# Create a TensorDataset
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
# Create a DataLoader
train_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
class TorchModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 32, kernel_size=(3, 3))
self.activation = torch.nn.ReLU()
self.max_pool = torch.nn.MaxPool2d((2, 2))
self.flatten = torch.nn.Flatten()
self.dense = torch.nn.LazyLinear(num_classes)
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
x = self.conv(x)
x = self.activation(x)
x = self.max_pool(x)
x = self.flatten(x)
x = self.dense(x)
x = self.softmax(x)
return x
def run_keras_core_custom_training_loop():
keras_model = keras_core.Sequential(
[
layers.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dense(num_classes),
layers.Softmax(),
]
)
optimizer = optim.Adam(keras_model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
train_loop(
keras_model,
train_loader,
num_epochs=num_epochs,
optimizer=optimizer,
loss_fn=loss_fn,
framework="keras_core",
)
def run_torch_custom_training_loop():
torch_model = TorchModel()
optimizer = optim.Adam(torch_model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
train_loop(
torch_model,
train_loader,
num_epochs=num_epochs,
optimizer=optimizer,
loss_fn=loss_fn,
framework="torch",
)
if __name__ == "__main__":
run_keras_core_custom_training_loop()
run_torch_custom_training_loop()
| keras-core/benchmarks/torch_ctl_benchmark/conv_model_benchmark.py/0 | {
"file_path": "keras-core/benchmarks/torch_ctl_benchmark/conv_model_benchmark.py",
"repo_id": "keras-core",
"token_count": 1182
} | 20 |
"""
Title: Bidirectional LSTM on IMDB
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/05/03
Last modified: 2020/05/03
Description: Train a 2-layer bidirectional LSTM on the IMDB movie review sentiment classification dataset.
Accelerator: GPU
"""
"""
## Setup
"""
import numpy as np
import keras_core as keras
from keras_core import layers
max_features = 20000 # Only consider the top 20k words
maxlen = 200 # Only consider the first 200 words of each movie review
"""
## Build the model
"""
# Input for variable-length sequences of integers
inputs = keras.Input(shape=(None,), dtype="int32")
# Embed each integer in a 128-dimensional vector
x = layers.Embedding(max_features, 128)(inputs)
# Add 2 bidirectional LSTMs
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)
x = layers.Bidirectional(layers.LSTM(64))(x)
# Add a classifier
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.summary()
"""
## Load the IMDB movie review sentiment data
"""
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(
num_words=max_features
)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
# Use pad_sequence to standardize sequence length:
# this will truncate sequences longer than 200 words and zero-pad sequences shorter than 200 words.
x_train = keras.utils.pad_sequences(x_train, maxlen=maxlen)
x_val = keras.utils.pad_sequences(x_val, maxlen=maxlen)
"""
## Train and evaluate the model
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/bidirectional-lstm-imdb)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/bidirectional_lstm_imdb).
"""
model.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
model.fit(
x_train, y_train, batch_size=32, epochs=2, validation_data=(x_val, y_val)
)
| keras-core/examples/keras_io/nlp/bidirectional_lstm_imdb.py/0 | {
"file_path": "keras-core/examples/keras_io/nlp/bidirectional_lstm_imdb.py",
"repo_id": "keras-core",
"token_count": 666
} | 21 |
"""
Title: Imbalanced classification: credit card fraud detection
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/05/28
Last modified: 2020/04/17
Description: Demonstration of how to handle highly imbalanced classification problems.
Accelerator: GPU
"""
"""
## Introduction
This example looks at the
[Kaggle Credit Card Fraud Detection](https://www.kaggle.com/mlg-ulb/creditcardfraud/)
dataset to demonstrate how
to train a classification model on data with highly imbalanced classes.
"""
"""
## First, vectorize the CSV data
"""
import numpy as np
import keras_core as keras
# Get the real data from https://www.kaggle.com/mlg-ulb/creditcardfraud/
fname = "/Users/fchollet/Downloads/creditcard.csv"
all_features = []
all_targets = []
with open(fname) as f:
for i, line in enumerate(f):
if i == 0:
print("HEADER:", line.strip())
continue # Skip header
fields = line.strip().split(",")
all_features.append([float(v.replace('"', "")) for v in fields[:-1]])
all_targets.append([int(fields[-1].replace('"', ""))])
if i == 1:
print("EXAMPLE FEATURES:", all_features[-1])
features = np.array(all_features, dtype="float32")
targets = np.array(all_targets, dtype="uint8")
print("features.shape:", features.shape)
print("targets.shape:", targets.shape)
"""
## Prepare a validation set
"""
num_val_samples = int(len(features) * 0.2)
train_features = features[:-num_val_samples]
train_targets = targets[:-num_val_samples]
val_features = features[-num_val_samples:]
val_targets = targets[-num_val_samples:]
print("Number of training samples:", len(train_features))
print("Number of validation samples:", len(val_features))
"""
## Analyze class imbalance in the targets
"""
counts = np.bincount(train_targets[:, 0])
print(
"Number of positive samples in training data: {} ({:.2f}% of total)".format(
counts[1], 100 * float(counts[1]) / len(train_targets)
)
)
weight_for_0 = 1.0 / counts[0]
weight_for_1 = 1.0 / counts[1]
"""
## Normalize the data using training set statistics
"""
mean = np.mean(train_features, axis=0)
train_features -= mean
val_features -= mean
std = np.std(train_features, axis=0)
train_features /= std
val_features /= std
"""
## Build a binary classification model
"""
model = keras.Sequential(
[
keras.layers.Dense(
256, activation="relu", input_shape=(train_features.shape[-1],)
),
keras.layers.Dense(256, activation="relu"),
keras.layers.Dropout(0.3),
keras.layers.Dense(256, activation="relu"),
keras.layers.Dropout(0.3),
keras.layers.Dense(1, activation="sigmoid"),
]
)
model.summary()
"""
## Train the model with `class_weight` argument
"""
metrics = [
keras.metrics.FalseNegatives(name="fn"),
keras.metrics.FalsePositives(name="fp"),
keras.metrics.TrueNegatives(name="tn"),
keras.metrics.TruePositives(name="tp"),
keras.metrics.Precision(name="precision"),
keras.metrics.Recall(name="recall"),
]
model.compile(
optimizer=keras.optimizers.Adam(1e-2),
loss="binary_crossentropy",
metrics=metrics,
)
callbacks = [
keras.callbacks.ModelCheckpoint("fraud_model_at_epoch_{epoch}.keras")
]
class_weight = {0: weight_for_0, 1: weight_for_1}
model.fit(
train_features,
train_targets,
batch_size=2048,
epochs=30,
verbose=2,
callbacks=callbacks,
validation_data=(val_features, val_targets),
class_weight=class_weight,
)
"""
## Conclusions
At the end of training, out of 56,961 validation transactions, we are:
- Correctly identifying 66 of them as fraudulent
- Missing 9 fraudulent transactions
- At the cost of incorrectly flagging 441 legitimate transactions
In the real world, one would put an even higher weight on class 1,
so as to reflect that False Negatives are more costly than False Positives.
Next time your credit card gets declined in an online purchase -- this is why.
Example available on HuggingFace.
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/imbalanced_classification) | [](https://huggingface.co/spaces/keras-io/Credit_Card_Fraud_Detection) |
"""
| keras-core/examples/keras_io/tensorflow/structured_data/imbalanced_classification.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/structured_data/imbalanced_classification.py",
"repo_id": "keras-core",
"token_count": 1655
} | 22 |
"""
Title: Low-light image enhancement using MIRNet
Author: [Soumik Rakshit](http://github.com/soumik12345)
Converted to Keras Core by: [Soumik Rakshit](http://github.com/soumik12345)
Date created: 2021/09/11
Last modified: 2023/07/15
Description: Implementing the MIRNet architecture for low-light image enhancement.
Accelerator: GPU
"""
"""
## Introduction
With the goal of recovering high-quality image content from its degraded version, image
restoration enjoys numerous applications, such as in
photography, security, medical imaging, and remote sensing. In this example, we implement the
**MIRNet** model for low-light image enhancement, a fully-convolutional architecture that
learns an enriched set of
features that combines contextual information from multiple scales, while
simultaneously preserving the high-resolution spatial details.
### References:
- [Learning Enriched Features for Real Image Restoration and Enhancement](https://arxiv.org/abs/2003.06792)
- [The Retinex Theory of Color Vision](http://www.cnbc.cmu.edu/~tai/cp_papers/E.Land_Retinex_Theory_ScientifcAmerican.pdf)
- [Two deterministic half-quadratic regularization algorithms for computed imaging](https://ieeexplore.ieee.org/document/413553)
"""
"""
## Downloading LOLDataset
The **LoL Dataset** has been created for low-light image enhancement.
It provides 485 images for training and 15 for testing. Each image pair in the dataset
consists of a low-light input image and its corresponding well-exposed reference image.
"""
"""shell
pip install -q git+https://github.com/keras-team/keras-core
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import random
import numpy as np
from glob import glob
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import keras_core as keras
from keras_core import layers
import tensorflow as tf
"""shell
wget https://huggingface.co/datasets/geekyrakshit/LoL-Dataset/resolve/main/lol_dataset.zip
unzip -q lol_dataset.zip && rm lol_dataset.zip
"""
"""
## Creating a TensorFlow Dataset
We use 300 image pairs from the LoL Dataset's training set for training,
and we use the remaining 185 image pairs for validation.
We generate random crops of size `128 x 128` from the image pairs to be
used for both training and validation.
"""
random.seed(10)
IMAGE_SIZE = 128
BATCH_SIZE = 4
MAX_TRAIN_IMAGES = 300
def read_image(image_path):
image = tf.io.read_file(image_path)
image = tf.image.decode_png(image, channels=3)
image.set_shape([None, None, 3])
image = tf.cast(image, dtype=tf.float32) / 255.0
return image
def random_crop(low_image, enhanced_image):
low_image_shape = tf.shape(low_image)[:2]
low_w = tf.random.uniform(
shape=(), maxval=low_image_shape[1] - IMAGE_SIZE + 1, dtype=tf.int32
)
low_h = tf.random.uniform(
shape=(), maxval=low_image_shape[0] - IMAGE_SIZE + 1, dtype=tf.int32
)
low_image_cropped = low_image[
low_h : low_h + IMAGE_SIZE, low_w : low_w + IMAGE_SIZE
]
enhanced_image_cropped = enhanced_image[
low_h : low_h + IMAGE_SIZE, low_w : low_w + IMAGE_SIZE
]
# in order to avoid `NONE` during shape inference
low_image_cropped.set_shape([IMAGE_SIZE, IMAGE_SIZE, 3])
enhanced_image_cropped.set_shape([IMAGE_SIZE, IMAGE_SIZE, 3])
return low_image_cropped, enhanced_image_cropped
def load_data(low_light_image_path, enhanced_image_path):
low_light_image = read_image(low_light_image_path)
enhanced_image = read_image(enhanced_image_path)
low_light_image, enhanced_image = random_crop(
low_light_image, enhanced_image
)
return low_light_image, enhanced_image
def get_dataset(low_light_images, enhanced_images):
dataset = tf.data.Dataset.from_tensor_slices(
(low_light_images, enhanced_images)
)
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
return dataset
train_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[
:MAX_TRAIN_IMAGES
]
train_enhanced_images = sorted(glob("./lol_dataset/our485/high/*"))[
:MAX_TRAIN_IMAGES
]
val_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[
MAX_TRAIN_IMAGES:
]
val_enhanced_images = sorted(glob("./lol_dataset/our485/high/*"))[
MAX_TRAIN_IMAGES:
]
test_low_light_images = sorted(glob("./lol_dataset/eval15/low/*"))
test_enhanced_images = sorted(glob("./lol_dataset/eval15/high/*"))
train_dataset = get_dataset(train_low_light_images, train_enhanced_images)
val_dataset = get_dataset(val_low_light_images, val_enhanced_images)
print("Train Dataset:", train_dataset.element_spec)
print("Val Dataset:", val_dataset.element_spec)
"""
## MIRNet Model
Here are the main features of the MIRNet model:
- A feature extraction model that computes a complementary set of features across multiple
spatial scales, while maintaining the original high-resolution features to preserve
precise spatial details.
- A regularly repeated mechanism for information exchange, where the features across
multi-resolution branches are progressively fused together for improved representation
learning.
- A new approach to fuse multi-scale features using a selective kernel network
that dynamically combines variable receptive fields and faithfully preserves
the original feature information at each spatial resolution.
- A recursive residual design that progressively breaks down the input signal
in order to simplify the overall learning process, and allows the construction
of very deep networks.

"""
"""
### Selective Kernel Feature Fusion
The Selective Kernel Feature Fusion or SKFF module performs dynamic adjustment of
receptive fields via two operations: **Fuse** and **Select**. The Fuse operator generates
global feature descriptors by combining the information from multi-resolution streams.
The Select operator uses these descriptors to recalibrate the feature maps (of different
streams) followed by their aggregation.
**Fuse**: The SKFF receives inputs from three parallel convolution streams carrying
different scales of information. We first combine these multi-scale features using an
element-wise sum, on which we apply Global Average Pooling (GAP) across the spatial
dimension. Next, we apply a channel- downscaling convolution layer to generate a compact
feature representation which passes through three parallel channel-upscaling convolution
layers (one for each resolution stream) and provides us with three feature descriptors.
**Select**: This operator applies the softmax function to the feature descriptors to
obtain the corresponding activations that are used to adaptively recalibrate multi-scale
feature maps. The aggregated features are defined as the sum of product of the corresponding
multi-scale feature and the feature descriptor.

"""
def selective_kernel_feature_fusion(
multi_scale_feature_1, multi_scale_feature_2, multi_scale_feature_3
):
channels = list(multi_scale_feature_1.shape)[-1]
combined_feature = layers.Add()(
[multi_scale_feature_1, multi_scale_feature_2, multi_scale_feature_3]
)
gap = layers.GlobalAveragePooling2D()(combined_feature)
channel_wise_statistics = layers.Reshape((1, 1, channels))(gap)
compact_feature_representation = layers.Conv2D(
filters=channels // 8, kernel_size=(1, 1), activation="relu"
)(channel_wise_statistics)
feature_descriptor_1 = layers.Conv2D(
channels, kernel_size=(1, 1), activation="softmax"
)(compact_feature_representation)
feature_descriptor_2 = layers.Conv2D(
channels, kernel_size=(1, 1), activation="softmax"
)(compact_feature_representation)
feature_descriptor_3 = layers.Conv2D(
channels, kernel_size=(1, 1), activation="softmax"
)(compact_feature_representation)
feature_1 = multi_scale_feature_1 * feature_descriptor_1
feature_2 = multi_scale_feature_2 * feature_descriptor_2
feature_3 = multi_scale_feature_3 * feature_descriptor_3
aggregated_feature = layers.Add()([feature_1, feature_2, feature_3])
return aggregated_feature
"""
### Dual Attention Unit
The Dual Attention Unit or DAU is used to extract features in the convolutional streams.
While the SKFF block fuses information across multi-resolution branches, we also need a
mechanism to share information within a feature tensor, both along the spatial and the
channel dimensions which is done by the DAU block. The DAU suppresses less useful
features and only allows more informative ones to pass further. This feature
recalibration is achieved by using **Channel Attention** and **Spatial Attention**
mechanisms.
The **Channel Attention** branch exploits the inter-channel relationships of the
convolutional feature maps by applying squeeze and excitation operations. Given a feature
map, the squeeze operation applies Global Average Pooling across spatial dimensions to
encode global context, thus yielding a feature descriptor. The excitation operator passes
this feature descriptor through two convolutional layers followed by the sigmoid gating
and generates activations. Finally, the output of Channel Attention branch is obtained by
rescaling the input feature map with the output activations.
The **Spatial Attention** branch is designed to exploit the inter-spatial dependencies of
convolutional features. The goal of Spatial Attention is to generate a spatial attention
map and use it to recalibrate the incoming features. To generate the spatial attention
map, the Spatial Attention branch first independently applies Global Average Pooling and
Max Pooling operations on input features along the channel dimensions and concatenates
the outputs to form a resultant feature map which is then passed through a convolution
and sigmoid activation to obtain the spatial attention map. This spatial attention map is
then used to rescale the input feature map.

"""
class ChannelPooling(layers.Layer):
def __init__(self, axis=-1, *args, **kwargs):
super().__init__(*args, **kwargs)
self.axis = axis
self.concat = layers.Concatenate(axis=self.axis)
def call(self, inputs):
average_pooling = tf.expand_dims(
tf.reduce_mean(inputs, axis=-1), axis=-1
)
max_pooling = tf.expand_dims(tf.reduce_max(inputs, axis=-1), axis=-1)
return self.concat([average_pooling, max_pooling])
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
def spatial_attention_block(input_tensor):
compressed_feature_map = ChannelPooling(axis=-1)(input_tensor)
feature_map = layers.Conv2D(1, kernel_size=(1, 1))(compressed_feature_map)
feature_map = keras.activations.sigmoid(feature_map)
return input_tensor * feature_map
def channel_attention_block(input_tensor):
channels = list(input_tensor.shape)[-1]
average_pooling = layers.GlobalAveragePooling2D()(input_tensor)
feature_descriptor = layers.Reshape((1, 1, channels))(average_pooling)
feature_activations = layers.Conv2D(
filters=channels // 8, kernel_size=(1, 1), activation="relu"
)(feature_descriptor)
feature_activations = layers.Conv2D(
filters=channels, kernel_size=(1, 1), activation="sigmoid"
)(feature_activations)
return input_tensor * feature_activations
def dual_attention_unit_block(input_tensor):
channels = list(input_tensor.shape)[-1]
feature_map = layers.Conv2D(
channels, kernel_size=(3, 3), padding="same", activation="relu"
)(input_tensor)
feature_map = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(
feature_map
)
channel_attention = channel_attention_block(feature_map)
spatial_attention = spatial_attention_block(feature_map)
concatenation = layers.Concatenate(axis=-1)(
[channel_attention, spatial_attention]
)
concatenation = layers.Conv2D(channels, kernel_size=(1, 1))(concatenation)
return layers.Add()([input_tensor, concatenation])
"""
### Multi-Scale Residual Block
The Multi-Scale Residual Block is capable of generating a spatially-precise output by
maintaining high-resolution representations, while receiving rich contextual information
from low-resolutions. The MRB consists of multiple (three in this paper)
fully-convolutional streams connected in parallel. It allows information exchange across
parallel streams in order to consolidate the high-resolution features with the help of
low-resolution features, and vice versa. The MIRNet employs a recursive residual design
(with skip connections) to ease the flow of information during the learning process. In
order to maintain the residual nature of our architecture, residual resizing modules are
used to perform downsampling and upsampling operations that are used in the Multi-scale
Residual Block.

"""
# Recursive Residual Modules
def down_sampling_module(input_tensor):
channels = list(input_tensor.shape)[-1]
main_branch = layers.Conv2D(
channels, kernel_size=(1, 1), activation="relu"
)(input_tensor)
main_branch = layers.Conv2D(
channels, kernel_size=(3, 3), padding="same", activation="relu"
)(main_branch)
main_branch = layers.MaxPooling2D()(main_branch)
main_branch = layers.Conv2D(channels * 2, kernel_size=(1, 1))(main_branch)
skip_branch = layers.MaxPooling2D()(input_tensor)
skip_branch = layers.Conv2D(channels * 2, kernel_size=(1, 1))(skip_branch)
return layers.Add()([skip_branch, main_branch])
def up_sampling_module(input_tensor):
channels = list(input_tensor.shape)[-1]
main_branch = layers.Conv2D(
channels, kernel_size=(1, 1), activation="relu"
)(input_tensor)
main_branch = layers.Conv2D(
channels, kernel_size=(3, 3), padding="same", activation="relu"
)(main_branch)
main_branch = layers.UpSampling2D()(main_branch)
main_branch = layers.Conv2D(channels // 2, kernel_size=(1, 1))(main_branch)
skip_branch = layers.UpSampling2D()(input_tensor)
skip_branch = layers.Conv2D(channels // 2, kernel_size=(1, 1))(skip_branch)
return layers.Add()([skip_branch, main_branch])
# MRB Block
def multi_scale_residual_block(input_tensor, channels):
# features
level1 = input_tensor
level2 = down_sampling_module(input_tensor)
level3 = down_sampling_module(level2)
# DAU
level1_dau = dual_attention_unit_block(level1)
level2_dau = dual_attention_unit_block(level2)
level3_dau = dual_attention_unit_block(level3)
# SKFF
level1_skff = selective_kernel_feature_fusion(
level1_dau,
up_sampling_module(level2_dau),
up_sampling_module(up_sampling_module(level3_dau)),
)
level2_skff = selective_kernel_feature_fusion(
down_sampling_module(level1_dau),
level2_dau,
up_sampling_module(level3_dau),
)
level3_skff = selective_kernel_feature_fusion(
down_sampling_module(down_sampling_module(level1_dau)),
down_sampling_module(level2_dau),
level3_dau,
)
# DAU 2
level1_dau_2 = dual_attention_unit_block(level1_skff)
level2_dau_2 = up_sampling_module((dual_attention_unit_block(level2_skff)))
level3_dau_2 = up_sampling_module(
up_sampling_module(dual_attention_unit_block(level3_skff))
)
# SKFF 2
skff_ = selective_kernel_feature_fusion(
level1_dau_2, level2_dau_2, level3_dau_2
)
conv = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(skff_)
return layers.Add()([input_tensor, conv])
"""
### MIRNet Model
"""
def recursive_residual_group(input_tensor, num_mrb, channels):
conv1 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(
input_tensor
)
for _ in range(num_mrb):
conv1 = multi_scale_residual_block(conv1, channels)
conv2 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(conv1)
return layers.Add()([conv2, input_tensor])
def mirnet_model(num_rrg, num_mrb, channels):
input_tensor = keras.Input(shape=[None, None, 3])
x1 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(
input_tensor
)
for _ in range(num_rrg):
x1 = recursive_residual_group(x1, num_mrb, channels)
conv = layers.Conv2D(3, kernel_size=(3, 3), padding="same")(x1)
output_tensor = layers.Add()([input_tensor, conv])
return keras.Model(input_tensor, output_tensor)
model = mirnet_model(num_rrg=3, num_mrb=2, channels=64)
"""
## Training
- We train MIRNet using **Charbonnier Loss** as the loss function and **Adam
Optimizer** with a learning rate of `1e-4`.
- We use **Peak Signal Noise Ratio** or PSNR as a metric which is an expression for the
ratio between the maximum possible value (power) of a signal and the power of distorting
noise that affects the quality of its representation.
"""
def charbonnier_loss(y_true, y_pred):
return tf.reduce_mean(tf.sqrt(tf.square(y_true - y_pred) + tf.square(1e-3)))
def peak_signal_noise_ratio(y_true, y_pred):
return tf.image.psnr(y_pred, y_true, max_val=255.0)
optimizer = keras.optimizers.Adam(learning_rate=1e-4)
model.compile(
optimizer=optimizer,
loss=charbonnier_loss,
metrics=[peak_signal_noise_ratio],
)
history = model.fit(
train_dataset,
validation_data=val_dataset,
epochs=50,
callbacks=[
keras.callbacks.ReduceLROnPlateau(
monitor="val_peak_signal_noise_ratio",
factor=0.5,
patience=5,
verbose=1,
min_delta=1e-7,
mode="max",
)
],
)
def plot_history(value):
plt.plot(history.history[value], label=f"train_{value}")
plt.plot(history.history[f"val_{value}"], label=f"val_{value}")
plt.xlabel("Epochs")
plt.ylabel(value)
plt.title(f"Train and Validation {value} Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_history("loss")
plot_history("peak_signal_noise_ratio")
"""
## Inference
"""
def plot_results(images, titles, figure_size=(12, 12)):
fig = plt.figure(figsize=figure_size)
for i in range(len(images)):
fig.add_subplot(1, len(images), i + 1).set_title(titles[i])
_ = plt.imshow(images[i])
plt.axis("off")
plt.show()
def infer(original_image):
image = keras.utils.img_to_array(original_image)
image = image.astype("float32") / 255.0
image = np.expand_dims(image, axis=0)
output = model.predict(image, verbose=0)
output_image = output[0] * 255.0
output_image = output_image.clip(0, 255)
output_image = output_image.reshape(
(np.shape(output_image)[0], np.shape(output_image)[1], 3)
)
output_image = Image.fromarray(np.uint8(output_image))
original_image = Image.fromarray(np.uint8(original_image))
return output_image
"""
### Inference on Test Images
We compare the test images from LOLDataset enhanced by MIRNet with images
enhanced via the `PIL.ImageOps.autocontrast()` function.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/lowlight-enhance-mirnet)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/Enhance_Low_Light_Image).
"""
for low_light_image in random.sample(test_low_light_images, 6):
original_image = Image.open(low_light_image)
enhanced_image = infer(original_image)
plot_results(
[original_image, ImageOps.autocontrast(original_image), enhanced_image],
["Original", "PIL Autocontrast", "MIRNet Enhanced"],
(20, 12),
)
| keras-core/examples/keras_io/tensorflow/vision/mirnet.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/vision/mirnet.py",
"repo_id": "keras-core",
"token_count": 7028
} | 23 |
"""
Title: Compact Convolutional Transformers
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Converted to Keras Core by: [Muhammad Anas Raza](https://anasrz.com), [Guillaume Baquiast](https://www.linkedin.com/in/guillaume-baquiast-478965ba/)
Date created: 2021/06/30
Last modified: 2023/08/07
Description: Compact Convolutional Transformers for efficient image classification.
Accelerator: GPU
"""
"""
As discussed in the [Vision Transformers (ViT)](https://arxiv.org/abs/2010.11929) paper,
a Transformer-based architecture for vision typically requires a larger dataset than
usual, as well as a longer pre-training schedule. [ImageNet-1k](http://imagenet.org/)
(which has about a million images) is considered to fall under the medium-sized data regime with
respect to ViTs. This is primarily because, unlike CNNs, ViTs (or a typical
Transformer-based architecture) do not have well-informed inductive biases (such as
convolutions for processing images). This begs the question: can't we combine the
benefits of convolution and the benefits of Transformers
in a single network architecture? These benefits include parameter-efficiency, and
self-attention to process long-range and global dependencies (interactions between
different regions in an image).
In [Escaping the Big Data Paradigm with Compact Transformers](https://arxiv.org/abs/2104.05704),
Hassani et al. present an approach for doing exactly this. They proposed the
**Compact Convolutional Transformer** (CCT) architecture. In this example, we will work on an
implementation of CCT and we will see how well it performs on the CIFAR-10 dataset.
If you are unfamiliar with the concept of self-attention or Transformers, you can read
[this chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-11/r-3/312)
from François Chollet's book *Deep Learning with Python*. This example uses
code snippets from another example,
[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/).
"""
"""
## Imports
"""
from keras_core import layers
import keras_core as keras
import matplotlib.pyplot as plt
import numpy as np
"""
## Hyperparameters and constants
"""
positional_emb = True
conv_layers = 2
projection_dim = 128
num_heads = 2
transformer_units = [
projection_dim,
projection_dim,
]
transformer_layers = 2
stochastic_depth_rate = 0.1
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 128
num_epochs = 30
image_size = 32
"""
## Load CIFAR-10 dataset
"""
num_classes = 10
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
"""
## The CCT tokenizer
The first recipe introduced by the CCT authors is the tokenizer for processing the
images. In a standard ViT, images are organized into uniform *non-overlapping* patches.
This eliminates the boundary-level information present between adjacent patches, information
that a neural network needs in order to effectively exploit locality. The
figure below presents an illustration of how images are organized into patches.

We already know that convolutions are quite good at exploiting locality information. So,
based on this, the authors introduce an all-convolution mini-network to produce image
patches.
"""
class CCTTokenizer(layers.Layer):
def __init__(
self,
kernel_size=3,
stride=1,
padding=1,
pooling_kernel_size=3,
pooling_stride=2,
num_conv_layers=conv_layers,
num_output_channels=[64, 128],
positional_emb=positional_emb,
**kwargs,
):
super().__init__(**kwargs)
# This is our tokenizer.
self.conv_model = keras.Sequential()
for i in range(num_conv_layers):
self.conv_model.add(
layers.Conv2D(
num_output_channels[i],
kernel_size,
stride,
padding="valid",
use_bias=False,
activation="relu",
kernel_initializer="he_normal",
)
)
self.conv_model.add(layers.ZeroPadding2D(padding))
self.conv_model.add(
layers.MaxPooling2D(pooling_kernel_size, pooling_stride, "same")
)
self.positional_emb = positional_emb
def call(self, images):
outputs = self.conv_model(images)
# After passing the images through our mini-network the spatial dimensions
# are flattened to form sequences.
reshaped = keras.ops.reshape(
outputs,
(
-1,
keras.ops.shape(outputs)[1] * keras.ops.shape(outputs)[2],
keras.ops.shape(outputs)[-1],
),
)
return reshaped
"""
Positional embeddings are optional in CCT. If we want to use them, we can use
the Layer defined below.
"""
class PositionEmbedding(keras.layers.Layer):
def __init__(
self,
sequence_length,
initializer="glorot_uniform",
**kwargs,
):
super().__init__(**kwargs)
if sequence_length is None:
raise ValueError(
"`sequence_length` must be an Integer, received `None`."
)
self.sequence_length = int(sequence_length)
self.initializer = keras.initializers.get(initializer)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"initializer": keras.initializers.serialize(self.initializer),
}
)
return config
def build(self, input_shape):
feature_size = input_shape[-1]
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.sequence_length, feature_size],
initializer=self.initializer,
trainable=True,
)
super().build(input_shape)
def call(self, inputs, start_index=0):
shape = keras.ops.shape(inputs)
feature_length = shape[-1]
sequence_length = shape[-2]
# trim to match the length of the input sequence, which might be less
# than the sequence_length of the layer.
position_embeddings = keras.ops.convert_to_tensor(
self.position_embeddings
)
position_embeddings = keras.ops.slice(
position_embeddings,
(start_index, 0),
(sequence_length, feature_length),
)
return keras.ops.broadcast_to(position_embeddings, shape)
def compute_output_shape(self, input_shape):
return input_shape
"""
## Sequence Pooling
Another recipe introduced in CCT is attention pooling or sequence pooling. In ViT, only
the feature map corresponding to the class token is pooled and is then used for the
subsequent classification task (or any other downstream task).
"""
class SequencePooling(layers.Layer):
def __init__(self):
super().__init__()
self.attention = layers.Dense(1)
def call(self, x):
attention_weights = keras.ops.softmax(self.attention(x), axis=1)
attention_weights = keras.ops.transpose(
attention_weights, axes=(0, 2, 1)
)
weighted_representation = keras.ops.matmul(attention_weights, x)
return keras.ops.squeeze(weighted_representation, -2)
"""
## Stochastic depth for regularization
[Stochastic depth](https://arxiv.org/abs/1603.09382) is a regularization technique that
randomly drops a set of layers. During inference, the layers are kept as they are. It is
similar to [Dropout](https://jmlr.org/papers/v15/srivastava14a.html), except that
it operates on a block of layers rather than on individual nodes within a
layer. In CCT, stochastic depth is used just before the residual blocks of a Transformers
encoder.
"""
# Adapted from: github.com/rwightman/pytorch-image-models.
class StochasticDepth(layers.Layer):
def __init__(self, drop_prop, **kwargs):
super().__init__(**kwargs)
self.drop_prob = drop_prop
def call(self, x, training=None):
if training:
keep_prob = 1 - self.drop_prob
shape = (keras.ops.shape(x)[0],) + (1,) * (len(x.shape) - 1)
random_tensor = keep_prob + keras.random.uniform(shape, 0, 1)
random_tensor = keras.ops.floor(random_tensor)
return (x / keep_prob) * random_tensor
return x
"""
## MLP for the Transformers encoder
"""
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=keras.ops.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
"""
## Data augmentation
In the [original paper](https://arxiv.org/abs/2104.05704), the authors use
[AutoAugment](https://arxiv.org/abs/1805.09501) to induce stronger regularization. For
this example, we will be using the standard geometric augmentations like random cropping
and flipping.
"""
# Note the rescaling layer. These layers have pre-defined inference behavior.
data_augmentation = keras.Sequential(
[
layers.Rescaling(scale=1.0 / 255),
layers.RandomCrop(image_size, image_size),
layers.RandomFlip("horizontal"),
],
name="data_augmentation",
)
"""
## The final CCT model
In CCT, outputs from the Transformers encoder are weighted and then passed on to the final task-specific layer (in
this example, we do classification).
"""
def create_cct_model(
image_size=image_size,
input_shape=input_shape,
num_heads=num_heads,
projection_dim=projection_dim,
transformer_units=transformer_units,
):
inputs = layers.Input(input_shape)
# Augment data.
augmented = data_augmentation(inputs)
# Encode patches.
cct_tokenizer = CCTTokenizer()
encoded_patches = cct_tokenizer(augmented)
# Apply positional embedding.
if positional_emb:
sequence_length = encoded_patches.shape[1]
encoded_patches += PositionEmbedding(sequence_length=sequence_length)(
encoded_patches
)
# Calculate Stochastic Depth probabilities.
dpr = [x for x in np.linspace(0, stochastic_depth_rate, transformer_layers)]
# Create multiple layers of the Transformer block.
for i in range(transformer_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-5)(encoded_patches)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=projection_dim, dropout=0.1
)(x1, x1)
# Skip connection 1.
attention_output = StochasticDepth(dpr[i])(attention_output)
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-5)(x2)
# MLP.
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
# Skip connection 2.
x3 = StochasticDepth(dpr[i])(x3)
encoded_patches = layers.Add()([x3, x2])
# Apply sequence pooling.
representation = layers.LayerNormalization(epsilon=1e-5)(encoded_patches)
weighted_representation = SequencePooling()(representation)
# Classify outputs.
logits = layers.Dense(num_classes)(weighted_representation)
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=logits)
return model
"""
## Model training and evaluation
"""
def run_experiment(model):
optimizer = keras.optimizers.AdamW(learning_rate=0.001, weight_decay=0.0001)
model.compile(
optimizer=optimizer,
loss=keras.losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=0.1
),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
checkpoint_filepath = "/tmp/checkpoint.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_accuracy",
save_best_only=True,
save_weights_only=True,
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[checkpoint_callback],
)
model.load_weights(checkpoint_filepath)
_, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
return history
cct_model = create_cct_model()
history = run_experiment(cct_model)
"""
Let's now visualize the training progress of the model.
"""
plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
"""
The CCT model we just trained has just **0.4 million** parameters, and it gets us to
~79% top-1 accuracy within 30 epochs. The plot above also shows no signs of
overfitting. This means we can train this network for longer (perhaps with a bit more
regularization) and obtain even better performance. Performance can be further
improved by additional recipes like a cosine decay learning rate schedule, other data augmentation
techniques like [AutoAugment](https://arxiv.org/abs/1805.09501),
[MixUp](https://arxiv.org/abs/1710.09412) or
[Cutmix](https://arxiv.org/abs/1905.04899). With these modifications, the authors present
95.1% top-1 accuracy on the CIFAR-10 dataset. The authors also present a number of
experiments to study how the number of convolution blocks, Transformers layers, etc.
affect the final performance of CCTs.
For a comparison, a ViT model takes about **4.7 million** parameters and **100
epochs** of training to reach a top-1 accuracy of 78.22% on the CIFAR-10 dataset. You can
refer to
[this notebook](https://colab.research.google.com/gist/sayakpaul/1a80d9f582b044354a1a26c5cb3d69e5/image_classification_with_vision_transformer.ipynb)
to know about the experimental setup.
The authors also demonstrate the performance of Compact Convolutional Transformers on
NLP tasks and they report competitive results there.
"""
| keras-core/examples/keras_io/vision/cct.py/0 | {
"file_path": "keras-core/examples/keras_io/vision/cct.py",
"repo_id": "keras-core",
"token_count": 5689
} | 24 |
"""
Title: Video Classification with Transformers
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Converted to Keras Core by: [Soumik Rakshit](http://github.com/soumik12345)
Date created: 2021/06/08
Last modified: 2023/07/22
Description: Training a video classifier with hybrid transformers.
Accelerator: GPU
"""
"""
This example is a follow-up to the
[Video Classification with a CNN-RNN Architecture](https://keras.io/examples/vision/video_classification/)
example. This time, we will be using a Transformer-based model
([Vaswani et al.](https://arxiv.org/abs/1706.03762)) to classify videos. You can follow
[this book chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-11)
in case you need an introduction to Transformers (with code). After reading this
example, you will know how to develop hybrid Transformer-based models for video
classification that operate on CNN feature maps.
"""
"""shell
pip install -q git+https://github.com/keras-team/keras-core
pip install -q git+https://github.com/tensorflow/docs
"""
"""
## Data collection
As done in the [predecessor](https://keras.io/examples/vision/video_classification/) to
this example, we will be using a subsampled version of the
[UCF101 dataset](https://www.crcv.ucf.edu/data/UCF101.php),
a well-known benchmark dataset. In case you want to operate on a larger subsample or
even the entire dataset, please refer to
[this notebook](https://colab.research.google.com/github/sayakpaul/Action-Recognition-in-TensorFlow/blob/main/Data_Preparation_UCF101.ipynb).
"""
"""shell
wget -q https://github.com/sayakpaul/Action-Recognition-in-TensorFlow/releases/download/v1.0.0/ucf101_top5.tar.gz
tar -xf ucf101_top5.tar.gz
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"]
import keras_core as keras
from keras_core import layers
from keras_core.applications.densenet import DenseNet121
from tensorflow_docs.vis import embed
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import imageio
import cv2
"""
## Define hyperparameters
"""
MAX_SEQ_LENGTH = 20
NUM_FEATURES = 1024
IMG_SIZE = 128
EPOCHS = 5
"""
## Data preparation
We will mostly be following the same data preparation steps as in that example, except for
the following changes:
* We reduce the image size to 128x128 instead of 224x224 to speed up computation.
* Instead of using a pre-trained [InceptionV3](https://arxiv.org/abs/1512.00567) network,
we use a pre-trained
[DenseNet121](http://openaccess.thecvf.com/content_cvpr_2017/papers/Huang_Densely_Connected_Convolutional_CVPR_2017_paper.pdf)
for feature extraction.
* We directly pad shorter videos to length `MAX_SEQ_LENGTH`.
First, let's load up the
[DataFrames](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html).
"""
train_df = pd.read_csv("train.csv")
test_df = pd.read_csv("test.csv")
print(f"Total videos for training: {len(train_df)}")
print(f"Total videos for testing: {len(test_df)}")
center_crop_layer = layers.CenterCrop(IMG_SIZE, IMG_SIZE)
def crop_center(frame):
cropped = center_crop_layer(frame[None, ...])
cropped = keras.ops.convert_to_numpy(cropped)
cropped = keras.ops.squeeze(cropped)
return cropped
# Following method is modified from this tutorial:
# https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def load_video(path, max_frames=0, offload_to_cpu=False):
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
ret, frame = cap.read()
if not ret:
break
frame = frame[:, :, [2, 1, 0]]
frame = crop_center(frame)
if offload_to_cpu and keras.backend.backend() == "torch":
frame = frame.to("cpu")
frames.append(frame)
if len(frames) == max_frames:
break
finally:
cap.release()
if offload_to_cpu and keras.backend.backend() == "torch":
return np.array([frame.to("cpu").numpy() for frame in frames])
return np.array(frames)
def build_feature_extractor():
feature_extractor = DenseNet121(
weights="imagenet",
include_top=False,
pooling="avg",
input_shape=(IMG_SIZE, IMG_SIZE, 3),
)
preprocess_input = keras.applications.densenet.preprocess_input
inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3))
preprocessed = preprocess_input(inputs)
outputs = feature_extractor(preprocessed)
return keras.Model(inputs, outputs, name="feature_extractor")
feature_extractor = build_feature_extractor()
# Label preprocessing with StringLookup.
label_processor = keras.layers.StringLookup(
num_oov_indices=0, vocabulary=np.unique(train_df["tag"]), mask_token=None
)
print(label_processor.get_vocabulary())
def prepare_all_videos(df, root_dir):
num_samples = len(df)
video_paths = df["video_name"].values.tolist()
labels = df["tag"].values
labels = label_processor(labels[..., None]).numpy()
# `frame_features` are what we will feed to our sequence model.
frame_features = np.zeros(
shape=(num_samples, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32"
)
# For each video.
for idx, path in enumerate(video_paths):
# Gather all its frames and add a batch dimension.
frames = load_video(os.path.join(root_dir, path))
# Pad shorter videos.
if len(frames) < MAX_SEQ_LENGTH:
diff = MAX_SEQ_LENGTH - len(frames)
padding = np.zeros((diff, IMG_SIZE, IMG_SIZE, 3))
            frames = np.concatenate([frames, padding])
frames = frames[None, ...]
# Initialize placeholder to store the features of the current video.
temp_frame_features = np.zeros(
shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32"
)
# Extract features from the frames of the current video.
for i, batch in enumerate(frames):
video_length = batch.shape[0]
length = min(MAX_SEQ_LENGTH, video_length)
for j in range(length):
if np.mean(batch[j, :]) > 0.0:
temp_frame_features[i, j, :] = feature_extractor.predict(
batch[None, j, :]
)
else:
temp_frame_features[i, j, :] = 0.0
frame_features[idx,] = temp_frame_features.squeeze()
return frame_features, labels
"""
Calling `prepare_all_videos()` on `train_df` and `test_df` takes ~20 minutes to
complete. For this reason, to save time, here we download already preprocessed NumPy arrays:
"""
"""shell
wget -q https://git.io/JZmf4 -O top5_data_prepared.tar.gz
tar -xf top5_data_prepared.tar.gz
"""
train_data, train_labels = np.load("train_data.npy"), np.load(
"train_labels.npy"
)
test_data, test_labels = np.load("test_data.npy"), np.load("test_labels.npy")
print(f"Frame features in train set: {train_data.shape}")
"""
## Building the Transformer-based model
We will be building on top of the code shared in
[this book chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-11) of
[Deep Learning with Python (Second ed.)](https://www.manning.com/books/deep-learning-with-python)
by François Chollet.
First, self-attention layers that form the basic blocks of a Transformer are
order-agnostic. Since videos are ordered sequences of frames, we need our
Transformer model to take into account order information.
We do this via **positional encoding**.
We simply embed the positions of the frames present inside videos with an
[`Embedding` layer](https://keras.io/api/layers/core_layers/embedding). We then
add these positional embeddings to the precomputed CNN feature maps.
"""
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, output_dim, **kwargs):
super().__init__(**kwargs)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim
)
self.sequence_length = sequence_length
self.output_dim = output_dim
def call(self, inputs):
# The inputs are of shape: `(batch_size, frames, num_features)`
inputs = keras.backend.cast(inputs, self.compute_dtype)
length = keras.backend.shape(inputs)[1]
positions = keras.ops.numpy.arange(start=0, stop=length, step=1)
embedded_positions = self.position_embeddings(positions)
return inputs + embedded_positions
"""
Now, we can create a subclassed layer for the Transformer.
"""
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim, dropout=0.3
)
self.dense_proj = keras.Sequential(
[
layers.Dense(dense_dim, activation=keras.activations.gelu),
layers.Dense(embed_dim),
]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
attention_output = self.attention(inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
"""
## Utility functions for training
"""
def get_compiled_model(shape):
sequence_length = MAX_SEQ_LENGTH
embed_dim = NUM_FEATURES
dense_dim = 4
num_heads = 1
classes = len(label_processor.get_vocabulary())
inputs = keras.Input(shape=shape)
x = PositionalEmbedding(
sequence_length, embed_dim, name="frame_position_embedding"
)(inputs)
x = TransformerEncoder(
embed_dim, dense_dim, num_heads, name="transformer_layer"
)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(classes, activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
return model
def run_experiment():
filepath = "/tmp/video_classifier.weights.h5"
checkpoint = keras.callbacks.ModelCheckpoint(
filepath, save_weights_only=True, save_best_only=True, verbose=1
)
model = get_compiled_model(train_data.shape[1:])
history = model.fit(
train_data,
train_labels,
validation_split=0.15,
epochs=EPOCHS,
callbacks=[checkpoint],
)
model.load_weights(filepath)
_, accuracy = model.evaluate(test_data, test_labels)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
return model
"""
## Model training and inference
"""
trained_model = run_experiment()
"""
**Note**: This model has ~4.23 Million parameters, which is way more than the sequence
model (99918 parameters) we used in the prequel of this example. This kind of
Transformer model works best with a larger dataset and a longer pre-training schedule.
"""
def prepare_single_video(frames):
frame_features = np.zeros(
shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32"
)
# Pad shorter videos.
if len(frames) < MAX_SEQ_LENGTH:
diff = MAX_SEQ_LENGTH - len(frames)
padding = np.zeros((diff, IMG_SIZE, IMG_SIZE, 3))
        frames = np.concatenate([frames, padding])
frames = frames[None, ...]
# Extract features from the frames of the current video.
for i, batch in enumerate(frames):
video_length = batch.shape[0]
length = min(MAX_SEQ_LENGTH, video_length)
for j in range(length):
if np.mean(batch[j, :]) > 0.0:
frame_features[i, j, :] = feature_extractor.predict(
batch[None, j, :]
)
else:
frame_features[i, j, :] = 0.0
return frame_features
def predict_action(path):
class_vocab = label_processor.get_vocabulary()
frames = load_video(os.path.join("test", path), offload_to_cpu=True)
frame_features = prepare_single_video(frames)
probabilities = trained_model.predict(frame_features)[0]
plot_x_axis, plot_y_axis = [], []
for i in np.argsort(probabilities)[::-1]:
plot_x_axis.append(class_vocab[i])
plot_y_axis.append(probabilities[i])
print(f" {class_vocab[i]}: {probabilities[i] * 100:5.2f}%")
plt.bar(plot_x_axis, plot_y_axis, label=plot_x_axis)
plt.xlabel("class_label")
plt.xlabel("Probability")
plt.show()
return frames
# This utility is for visualization.
# Referenced from:
# https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def to_gif(images):
converted_images = images.astype(np.uint8)
imageio.mimsave("animation.gif", converted_images, fps=10)
return embed.embed_file("animation.gif")
test_video = np.random.choice(test_df["video_name"].values.tolist())
print(f"Test video path: {test_video}")
test_frames = predict_action(test_video)
to_gif(test_frames[:MAX_SEQ_LENGTH])
"""
The performance of our model is far from optimal, because it was trained on a
small dataset.
"""
| keras-core/examples/keras_io/vision/video_transformers.py/0 | {
"file_path": "keras-core/examples/keras_io/vision/video_transformers.py",
"repo_id": "keras-core",
"token_count": 5347
} | 25 |
import warnings
def _convert_conv_tranpose_padding_args_from_keras_to_jax(
kernel_size, stride, dilation_rate, padding, output_padding
):
"""Convert the padding arguments from Keras to the ones used by JAX.
    JAX starts with a shape of size `(input-1) * stride - kernel_size + 2`,
then adds `left_pad` on the left, and `right_pad` on the right.
In Keras, the `padding` argument determines a base shape, to which
`output_padding` is added on the right. If `output_padding` is None, it will
be given a default value.
"""
assert padding.lower() in {"valid", "same"}
kernel_size = (kernel_size - 1) * dilation_rate + 1
if padding.lower() == "valid":
        # If output_padding is None, we fill it so that the shape of the output
# is `(input-1)*s + max(kernel_size, stride)`
output_padding = (
max(kernel_size, stride) - kernel_size
if output_padding is None
else output_padding
)
left_pad = kernel_size - 1
right_pad = kernel_size - 1 + output_padding
else:
if output_padding is None:
            # When output_padding is None, we want the shape of the output to
# be `input * s`, therefore a total padding of
# `stride + kernel_size - 2`
pad_len = stride + kernel_size - 2
else:
            # When output_padding is filled, we want the shape of the output to
# be `(input-1)*stride + kernel_size%2 + output_padding`
pad_len = kernel_size + kernel_size % 2 - 2 + output_padding
left_pad = min(pad_len // 2 + pad_len % 2, kernel_size - 1)
right_pad = pad_len - left_pad
return left_pad, right_pad
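# Illustrative check (added for clarity, not part of the original module): for
# kernel_size=3, stride=2, dilation_rate=1, padding="same" and
# output_padding=None, the "same" branch above gives
# pad_len = stride + kernel_size - 2 = 3, so
# left_pad = min(3 // 2 + 3 % 2, kernel_size - 1) = 2 and right_pad = 1.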
def _convert_conv_tranpose_padding_args_from_keras_to_torch(
kernel_size, stride, dilation_rate, padding, output_padding
):
"""Convert the padding arguments from Keras to the ones used by Torch.
Torch starts with an output shape of `(input-1) * stride + kernel_size`,
then removes `torch_padding` from both sides, and adds
`torch_output_padding` on the right.
Because in Torch the output_padding can only be added to the right,
consistency with Tensorflow is not always possible. In particular this is
the case when both the Torch padding and output_padding values are stricly
positive.
"""
assert padding.lower() in {"valid", "same"}
original_kernel_size = kernel_size
kernel_size = (kernel_size - 1) * dilation_rate + 1
if padding.lower() == "valid":
        # If output_padding is None, we fill it so that the shape of the output
# is `(i-1)*s + max(k, s)`
output_padding = (
max(kernel_size, stride) - kernel_size
if output_padding is None
else output_padding
)
torch_padding = 0
torch_output_padding = output_padding
else:
        # When output_padding is None, we want the shape of the output to be
# `input * s`, otherwise we use the value provided.
output_padding = (
stride - kernel_size % 2
if output_padding is None
else output_padding
)
torch_padding = max(
-((kernel_size % 2 - kernel_size + output_padding) // 2), 0
)
torch_output_padding = (
2 * torch_padding + kernel_size % 2 - kernel_size + output_padding
)
if torch_padding > 0 and torch_output_padding > 0:
warnings.warn(
f"You might experience inconsistencies accross backends when "
f"calling conv transpose with kernel_size={original_kernel_size}, "
f"stride={stride}, dilation_rate={dilation_rate}, "
f"padding={padding}, output_padding={output_padding}."
)
if torch_output_padding >= stride:
raise ValueError(
f"The padding arguments (padding={padding}) and "
f"output_padding={output_padding}) lead to a Torch "
f"output_padding ({torch_output_padding}) that is greater than "
f"strides ({stride}). This is not supported. You can change the "
f"padding arguments, kernel or stride, or run on another backend. "
)
return torch_padding, torch_output_padding
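# Illustrative check (added for clarity, not part of the original module): for
# kernel_size=4, stride=2, dilation_rate=1, padding="same" and
# output_padding=None, the "same" branch above fills
# output_padding = stride - kernel_size % 2 = 2 and yields
# torch_padding = 1 and torch_output_padding = 0.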
def compute_conv_transpose_padding_args_for_jax(
input_shape,
kernel_shape,
strides,
padding,
output_padding,
dilation_rate,
):
num_spatial_dims = len(input_shape) - 2
kernel_spatial_shape = kernel_shape[:-2]
jax_padding = []
for i in range(num_spatial_dims):
output_padding_i = (
output_padding
if output_padding is None or isinstance(output_padding, int)
else output_padding[i]
)
strides_i = strides if isinstance(strides, int) else strides[i]
dilation_rate_i = (
dilation_rate
if isinstance(dilation_rate, int)
else dilation_rate[i]
)
(
pad_left,
pad_right,
) = _convert_conv_tranpose_padding_args_from_keras_to_jax(
kernel_size=kernel_spatial_shape[i],
stride=strides_i,
dilation_rate=dilation_rate_i,
padding=padding,
output_padding=output_padding_i,
)
jax_padding.append((pad_left, pad_right))
return jax_padding
def compute_conv_transpose_padding_args_for_torch(
input_shape,
kernel_shape,
strides,
padding,
output_padding,
dilation_rate,
):
num_spatial_dims = len(input_shape) - 2
kernel_spatial_shape = kernel_shape[:-2]
torch_paddings = []
torch_output_paddings = []
for i in range(num_spatial_dims):
output_padding_i = (
output_padding
if output_padding is None or isinstance(output_padding, int)
else output_padding[i]
)
strides_i = strides if isinstance(strides, int) else strides[i]
dilation_rate_i = (
dilation_rate
if isinstance(dilation_rate, int)
else dilation_rate[i]
)
(
torch_padding,
torch_output_padding,
) = _convert_conv_tranpose_padding_args_from_keras_to_torch(
kernel_size=kernel_spatial_shape[i],
stride=strides_i,
dilation_rate=dilation_rate_i,
padding=padding,
output_padding=output_padding_i,
)
torch_paddings.append(torch_padding)
torch_output_paddings.append(torch_output_padding)
return torch_paddings, torch_output_paddings
def _get_output_shape_given_tf_padding(
input_size, kernel_size, strides, padding, output_padding, dilation_rate
):
if input_size is None:
return None
assert padding.lower() in {"valid", "same"}
kernel_size = (kernel_size - 1) * dilation_rate + 1
if padding.lower() == "valid":
output_padding = (
max(kernel_size, strides) - kernel_size
if output_padding is None
else output_padding
)
return (input_size - 1) * strides + kernel_size + output_padding
else:
if output_padding is None:
return input_size * strides
else:
return (input_size - 1) * strides + kernel_size % 2 + output_padding
def compute_conv_transpose_output_shape(
input_shape,
kernel_size,
filters,
strides,
padding,
output_padding=None,
data_format="channels_last",
dilation_rate=1,
):
num_spatial_dims = len(input_shape) - 2
kernel_spatial_shape = kernel_size
if isinstance(output_padding, int):
output_padding = (output_padding,) * len(kernel_spatial_shape)
if isinstance(strides, int):
strides = (strides,) * num_spatial_dims
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,) * num_spatial_dims
if data_format == "channels_last":
input_spatial_shape = input_shape[1:-1]
else:
input_spatial_shape = input_shape[2:]
output_shape = []
for i in range(num_spatial_dims):
current_output_padding = (
None if output_padding is None else output_padding[i]
)
shape_i = _get_output_shape_given_tf_padding(
input_size=input_spatial_shape[i],
kernel_size=kernel_spatial_shape[i],
strides=strides[i],
padding=padding,
output_padding=current_output_padding,
dilation_rate=dilation_rate[i],
)
output_shape.append(shape_i)
if data_format == "channels_last":
output_shape = [input_shape[0]] + output_shape + [filters]
else:
output_shape = [input_shape[0], filters] + output_shape
return output_shape
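# Example (illustrative only, not part of the original module):
# compute_conv_transpose_output_shape(
#     input_shape=(None, 5, 3),
#     kernel_size=(3,),
#     filters=4,
#     strides=2,
#     padding="same",
# )
# returns [None, 10, 4]: with "same" padding and no output_padding, each
# spatial dimension is simply input_size * strides.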
| keras-core/keras_core/backend/common/backend_utils.py/0 | {
"file_path": "keras-core/keras_core/backend/common/backend_utils.py",
"repo_id": "keras-core",
"token_count": 3790
} | 26 |
import types
import jax
import jax.numpy as jnp
import numpy as np
import tree
from jax.tree_util import Partial
from keras_core.backend.common import KerasVariable
from keras_core.backend.common import global_state
from keras_core.backend.common import standardize_dtype
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.backend.common.stateless_scope import StatelessScope
from keras_core.backend.jax import distribution_lib
from keras_core.utils.nest import pack_sequence_as
SUPPORTS_SPARSE_TENSORS = False
class Variable(KerasVariable):
def _initialize(self, value):
value = jnp.array(value, dtype=self._dtype)
# Note that variable.shape is needed by distribution_lib
self._shape = tuple(value.shape)
# We can't import the keras_core/distribution/distribution_lib
# due to circular dependency.
distribution = global_state.get_global_attribute("distribution")
if distribution is not None:
self._layout = distribution_lib.to_jax_layout(
distribution.get_variable_layout(self)
)
else:
self._layout = None
self._direct_assign(value)
def _direct_assign(self, value):
if getattr(self, "_layout", None) is not None:
value = distribution_lib.distribute_value(value, self._layout)
self._value = value
def _convert_to_tensor(self, value, dtype=None):
return convert_to_tensor(value, dtype=dtype)
# Overload native accessor.
def __jax_array__(self):
return self.value
def convert_to_tensor(x, dtype=None, sparse=False):
if sparse:
raise ValueError("`sparse=True` is not supported with jax backend")
if dtype is not None:
dtype = standardize_dtype(dtype)
if isinstance(x, Variable):
if dtype and dtype != x.dtype:
return x.value.astype(dtype)
return x.value
return jnp.array(x, dtype=dtype)
def convert_to_numpy(x):
return np.array(x)
def is_tensor(x):
if isinstance(x, jnp.ndarray):
return True
return False
def shape(x):
# This will work as long as we disallow
# dynamic shapes in JAX.
return x.shape
def cast(x, dtype):
return convert_to_tensor(x, dtype=dtype)
# Shape / dtype inference util
def compute_output_spec(fn, *args, **kwargs):
with StatelessScope():
all_input_ktensors = []
built_in_types = (type(None), int, float, str, bool, complex, bytes)
# First, separate symbolic args from other args
static_args_idx = []
static_args = []
maybe_symbolic_args = []
static_kwargs = {}
maybe_symbolic_kwargs = {}
for idx, arg in enumerate(args):
if isinstance(arg, built_in_types):
static_args_idx.append(idx)
static_args.append(arg)
else:
maybe_symbolic_args.append(arg)
for k, v in kwargs.items():
if isinstance(v, built_in_types):
static_kwargs[k] = v
else:
maybe_symbolic_kwargs[k] = v
# Second, identify all ktensors
def index_all_ktensors(x):
if isinstance(x, KerasTensor):
all_input_ktensors.append(x)
return x
# Third, find out if there are dynamic shapes
maybe_symbolic_args, maybe_symbolic_kwargs = tree.map_structure(
index_all_ktensors, (maybe_symbolic_args, maybe_symbolic_kwargs)
)
none_count = 0
for x in all_input_ktensors:
for d in x.shape:
if d is None:
none_count += 1
def convert_keras_tensor_to_jax(x, fill_value=None):
if isinstance(x, KerasTensor):
shape = list(x.shape)
if fill_value:
for i, e in enumerate(shape):
if e is None:
shape[i] = fill_value
jax_tensor = jax.ShapeDtypeStruct(shape, dtype=x.dtype)
return jax_tensor
if isinstance(x, types.FunctionType):
def _fn(*args, **kwargs):
out = x(*args, **kwargs)
out = convert_keras_tensor_to_jax(
out, fill_value=fill_value
)
return out
return Partial(_fn)
if isinstance(x, dict):
return {
k: convert_keras_tensor_to_jax(v, fill_value=fill_value)
for k, v in x.items()
}
if isinstance(x, list):
return [
convert_keras_tensor_to_jax(xi, fill_value=fill_value)
for xi in x
]
return x
def wrapped_fn(*args, **kwargs):
rec_args = []
idx_static = 0
idx_sym = 0
i = 0
while idx_static < len(static_args) or idx_sym < len(args):
if i in static_args_idx:
rec_args.append(static_args[idx_static])
idx_static += 1
else:
rec_args.append(args[idx_sym])
idx_sym += 1
i += 1
return fn(*rec_args, **kwargs, **static_kwargs)
jax_out = None
if none_count:
try:
ms_args_1, ms_kwargs_1 = tree.map_structure(
lambda x: convert_keras_tensor_to_jax(x, fill_value=83),
(maybe_symbolic_args, maybe_symbolic_kwargs),
)
_, jax_out_1 = jax.make_jaxpr(wrapped_fn, return_shape=True)(
*ms_args_1, **ms_kwargs_1
)
ms_args_2, ms_kwargs_2 = tree.map_structure(
lambda x: convert_keras_tensor_to_jax(x, fill_value=89),
(maybe_symbolic_args, maybe_symbolic_kwargs),
)
_, jax_out_2 = jax.make_jaxpr(wrapped_fn, return_shape=True)(
*ms_args_2, **ms_kwargs_2
)
flat_out_1 = tree.flatten(jax_out_1)
flat_out_2 = tree.flatten(jax_out_2)
flat_out = []
for x1, x2 in zip(flat_out_1, flat_out_2):
if isinstance(x1, jax.ShapeDtypeStruct):
if not isinstance(x2, jax.ShapeDtypeStruct):
raise ValueError("Indeterministic output ordering.")
shape = list(x1.shape)
for i, e in enumerate(x2.shape):
if e != shape[i]:
shape[i] = None
flat_out.append(
jax.ShapeDtypeStruct(shape, dtype=x1.dtype)
)
else:
flat_out.append(x1)
jax_out = pack_sequence_as(jax_out_1, flat_out)
except:
# Errors can happen when the filled dimensions
# are not compatible with the function
# (or when the function contains a bug).
# In such cases we don't want to confuse users
# with random filled dimensions and the like,
# so we rerun a pass on the dynamic shapes,
# which will likely error out when JAX tries to
# validate shapes as fully static.
# The error message will be much easier to understand.
pass
if jax_out is None:
maybe_symbolic_args, maybe_symbolic_kwargs = tree.map_structure(
convert_keras_tensor_to_jax,
(maybe_symbolic_args, maybe_symbolic_kwargs),
)
_, jax_out = jax.make_jaxpr(wrapped_fn, return_shape=True)(
*maybe_symbolic_args, **maybe_symbolic_kwargs
)
def convert_jax_spec_to_keras_tensor(x):
if isinstance(x, jax.ShapeDtypeStruct):
return KerasTensor(x.shape, x.dtype)
return x
output_shape = tree.map_structure(
convert_jax_spec_to_keras_tensor, jax_out
)
return output_shape
def cond(pred, true_fn, false_fn):
return jax.lax.cond(pred, true_fun=true_fn, false_fun=false_fn)
def vectorized_map(function, elements):
return jax.vmap(function)(elements)
def scatter(indices, values, shape):
zeros = jnp.zeros(shape, values.dtype)
key = tuple(jnp.moveaxis(indices, -1, 0))
return zeros.at[key].add(values)
def scatter_update(inputs, indices, updates):
indices = jnp.array(indices)
indices = jnp.transpose(indices)
inputs[tuple(indices)] = updates
return inputs
def slice(inputs, start_indices, shape):
return jax.lax.dynamic_slice(inputs, start_indices, shape)
def slice_update(inputs, start_indices, updates):
return jax.lax.dynamic_update_slice(inputs, updates, start_indices)
def while_loop(
cond,
body,
loop_vars,
maximum_iterations=None,
):
loop_vars = tuple(loop_vars)
if maximum_iterations is not None:
current_iter = 0
loop_vars = loop_vars + (current_iter,)
# Unpack list/tuple args. The last argument is `current_iter`.
def _cond(args):
return cond(*args[:-1]) & (args[-1] < maximum_iterations)
def _body(args):
return tuple(body(*args[:-1])) + (args[-1] + 1,)
else:
def _cond(args):
return cond(*args)
def _body(args):
return tuple(body(*args))
outputs = jax.lax.while_loop(_cond, _body, loop_vars)
if maximum_iterations is not None:
outputs = outputs[:-1]
return outputs
def fori_loop(lower, upper, body_fun, init_val):
return jax.lax.fori_loop(lower, upper, body_fun, init_val)
def stop_gradient(variable):
return jax.lax.stop_gradient(variable)
def unstack(x, num=None, axis=0):
return [
jax.lax.index_in_dim(x, i, axis, keepdims=False)
for i in range(x.shape[axis])
]
| keras-core/keras_core/backend/jax/core.py/0 | {
"file_path": "keras-core/keras_core/backend/jax/core.py",
"repo_id": "keras-core",
"token_count": 5223
} | 27 |
import numpy as np
from keras_core.backend import config
from keras_core.backend import standardize_dtype
def add(x1, x2):
return np.add(x1, x2)
def einsum(subscripts, *operands, **kwargs):
return np.einsum(subscripts, *operands, **kwargs)
def subtract(x1, x2):
return np.subtract(x1, x2)
def matmul(x1, x2):
return np.matmul(x1, x2)
def multiply(x1, x2):
return np.multiply(x1, x2)
def mean(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.mean(x, axis=axis, keepdims=keepdims)
def max(x, axis=None, keepdims=False, initial=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.max(x, axis=axis, keepdims=keepdims, initial=initial)
def ones(shape, dtype="float32"):
return np.ones(shape, dtype=dtype)
def zeros(shape, dtype="float32"):
return np.zeros(shape, dtype=dtype)
def absolute(x):
return np.absolute(x)
def abs(x):
return absolute(x)
def all(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.all(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.any(x, axis=axis, keepdims=keepdims)
def amax(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.amax(x, axis=axis, keepdims=keepdims)
def amin(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.amin(x, axis=axis, keepdims=keepdims)
def append(
x1,
x2,
axis=None,
):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.append(x1, x2, axis=axis)
def arange(start, stop=None, step=None, dtype=None):
if dtype is None:
if hasattr(start, "dtype"):
dtype = start.dtype
elif isinstance(start, int):
dtype = "int32"
else:
dtype = config.floatx()
return np.arange(start, stop, step=step, dtype=dtype)
def arccos(x):
return np.arccos(x)
def arccosh(x):
return np.arccosh(x)
def arcsin(x):
return np.arcsin(x)
def arcsinh(x):
return np.arcsinh(x)
def arctan(x):
return np.arctan(x)
def arctan2(x1, x2):
return np.arctan2(x1, x2)
def arctanh(x):
return np.arctanh(x)
def argmax(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.argmax(x, axis=axis)
def argmin(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.argmin(x, axis=axis)
def argsort(x, axis=-1):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.argsort(x, axis=axis)
def array(x, dtype=None):
dtype = dtype or config.floatx()
return np.array(x, dtype=dtype)
def average(x, axis=None, weights=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.average(x, weights=weights, axis=axis)
def bincount(x, weights=None, minlength=0):
if len(x.shape) == 2:
if weights is None:
def bincount_fn(arr):
return np.bincount(arr, minlength=minlength)
bincounts = list(map(bincount_fn, x))
else:
def bincount_fn(arr_w):
return np.bincount(
arr_w[0], weights=arr_w[1], minlength=minlength
)
bincounts = list(map(bincount_fn, zip(x, weights)))
return np.stack(bincounts)
return np.bincount(x, weights, minlength)
def broadcast_to(x, shape):
return np.broadcast_to(x, shape)
def ceil(x):
return np.ceil(x)
def clip(x, x_min, x_max):
return np.clip(x, x_min, x_max)
def concatenate(xs, axis=0):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.concatenate(xs, axis=axis)
def conjugate(x):
return np.conjugate(x)
def conj(x):
return conjugate(x)
def copy(x):
return np.copy(x)
def cos(x):
return np.cos(x)
def cosh(x):
return np.cosh(x)
def count_nonzero(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.count_nonzero(x, axis=axis)
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.cross(
x1,
x2,
axisa=axisa,
axisb=axisb,
axisc=axisc,
axis=axis,
)
def cumprod(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.cumprod(x, axis=axis)
def cumsum(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.cumsum(x, axis=axis)
def diag(x, k=0):
return np.diag(x, k=k)
def diagonal(x, offset=0, axis1=0, axis2=1):
axis1 = tuple(axis1) if isinstance(axis1, list) else axis1
axis2 = tuple(axis2) if isinstance(axis2, list) else axis2
return np.diagonal(
x,
offset=offset,
axis1=axis1,
axis2=axis2,
)
def digitize(x, bins):
return np.digitize(x, bins).astype(np.int32)
def dot(x, y):
return np.dot(x, y)
def empty(shape, dtype="float32"):
return np.empty(shape, dtype=dtype)
def equal(x1, x2):
return np.equal(x1, x2)
def exp(x):
return np.exp(x)
def expand_dims(x, axis):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.expand_dims(x, axis)
def expm1(x):
return np.expm1(x)
def flip(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.flip(x, axis=axis)
def floor(x):
return np.floor(x)
def full(shape, fill_value, dtype=None):
dtype = dtype or config.floatx()
return np.full(shape, fill_value, dtype=dtype)
def full_like(x, fill_value, dtype=None):
return np.full_like(x, fill_value, dtype=dtype)
def greater(x1, x2):
return np.greater(x1, x2)
def greater_equal(x1, x2):
return np.greater_equal(x1, x2)
def hstack(xs):
return np.hstack(xs)
def identity(n, dtype="float32"):
return np.identity(n, dtype=dtype)
def imag(x):
return np.imag(x)
def isclose(x1, x2):
return np.isclose(x1, x2)
def isfinite(x):
return np.isfinite(x)
def isinf(x):
return np.isinf(x)
def isnan(x):
return np.isnan(x)
def less(x1, x2):
return np.less(x1, x2)
def less_equal(x1, x2):
return np.less_equal(x1, x2)
def linspace(
start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.linspace(
start,
stop,
num=num,
endpoint=endpoint,
retstep=retstep,
dtype=dtype,
axis=axis,
)
def log(x):
return np.log(x)
def log10(x):
return np.log10(x)
def log1p(x):
return np.log1p(x)
def log2(x):
return np.log2(x)
def logaddexp(x1, x2):
return np.logaddexp(x1, x2)
def logical_and(x1, x2):
return np.logical_and(x1, x2)
def logical_not(x):
return np.logical_not(x)
def logical_or(x1, x2):
return np.logical_or(x1, x2)
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
return np.logspace(
start,
stop,
num=num,
endpoint=endpoint,
base=base,
dtype=dtype,
axis=axis,
)
def maximum(x1, x2):
return np.maximum(x1, x2)
def meshgrid(*x, indexing="xy"):
return np.meshgrid(*x, indexing=indexing)
def min(x, axis=None, keepdims=False, initial=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.min(x, axis=axis, keepdims=keepdims, initial=initial)
def minimum(x1, x2):
return np.minimum(x1, x2)
def mod(x1, x2):
return np.mod(x1, x2)
def moveaxis(x, source, destination):
return np.moveaxis(x, source=source, destination=destination)
def nan_to_num(x):
return np.nan_to_num(x)
def ndim(x):
return np.ndim(x)
def nonzero(x):
return np.nonzero(x)
def not_equal(x1, x2):
return np.not_equal(x1, x2)
def zeros_like(x, dtype=None):
return np.zeros_like(x, dtype=dtype)
def ones_like(x, dtype=None):
return np.ones_like(x, dtype=dtype)
def outer(x1, x2):
return np.outer(x1, x2)
def pad(x, pad_width, mode="constant"):
return np.pad(x, pad_width, mode=mode)
def prod(x, axis=None, keepdims=False, dtype=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
def ravel(x):
return np.ravel(x)
def real(x):
return np.real(x)
def reciprocal(x):
return np.reciprocal(x)
def repeat(x, repeats, axis=None):
return np.repeat(x, repeats, axis=axis)
def reshape(x, new_shape):
return np.reshape(x, new_shape)
def roll(x, shift, axis=None):
return np.roll(x, shift, axis=axis)
def sign(x):
return np.sign(x)
def sin(x):
return np.sin(x)
def sinh(x):
return np.sinh(x)
def size(x):
return np.size(x)
def sort(x, axis=-1):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.sort(x, axis=axis)
def split(x, indices_or_sections, axis=0):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.split(x, indices_or_sections, axis=axis)
def stack(x, axis=0):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.stack(x, axis=axis)
def std(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.std(x, axis=axis, keepdims=keepdims)
def swapaxes(x, axis1, axis2):
return np.swapaxes(x, axis1=axis1, axis2=axis2)
def take(x, indices, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.take(x, indices, axis=axis)
def take_along_axis(x, indices, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.take_along_axis(x, indices, axis=axis)
def tan(x):
return np.tan(x)
def tanh(x):
return np.tanh(x)
def tensordot(x1, x2, axes=2):
axes = tuple(axes) if isinstance(axes, list) else axes
return np.tensordot(x1, x2, axes=axes)
def round(x, decimals=0):
return np.round(x, decimals=decimals)
def tile(x, repeats):
return np.tile(x, repeats)
def trace(x, offset=0, axis1=0, axis2=1):
axis1 = tuple(axis1) if isinstance(axis1, list) else axis1
axis2 = tuple(axis2) if isinstance(axis2, list) else axis2
return np.trace(x, offset=offset, axis1=axis1, axis2=axis2)
def tri(N, M=None, k=0, dtype="float32"):
return np.tri(N, M=M, k=k, dtype=dtype)
def tril(x, k=0):
return np.tril(x, k=k)
def triu(x, k=0):
return np.triu(x, k=k)
def vdot(x1, x2):
return np.vdot(x1, x2)
def vstack(xs):
return np.vstack(xs)
def where(condition, x1, x2):
if x1 is not None and x2 is not None:
return np.where(condition, x1, x2)
else:
return np.where(condition)
def divide(x1, x2):
return np.divide(x1, x2)
def true_divide(x1, x2):
return np.true_divide(x1, x2)
def power(x1, x2):
return np.power(x1, x2)
def negative(x):
return np.negative(x)
def square(x):
return np.square(x)
def sqrt(x):
dtype = None
if hasattr(x, "dtype"):
if standardize_dtype(x.dtype).startswith("int"):
dtype = config.floatx()
return np.sqrt(x, dtype=dtype)
def squeeze(x, axis=None):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.squeeze(x, axis=axis)
def transpose(x, axes=None):
axes = tuple(axes) if isinstance(axes, list) else axes
return np.transpose(x, axes=axes)
def var(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.var(x, axis=axis, keepdims=keepdims)
def sum(x, axis=None, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
return np.sum(x, axis=axis, keepdims=keepdims)
def eye(N, M=None, k=0, dtype="float32"):
return np.eye(N, M=M, k=k, dtype=dtype)
def floor_divide(x1, x2):
return np.floor_divide(x1, x2)
def logical_xor(x1, x2):
return np.logical_xor(x1, x2)
| keras-core/keras_core/backend/numpy/numpy.py/0 | {
"file_path": "keras-core/keras_core/backend/numpy/numpy.py",
"repo_id": "keras-core",
"token_count": 5498
} | 28 |
import numpy as np
import pytest
from keras_core import models
from keras_core import testing
from keras_core.callbacks.callback import Callback
class CallbackTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_model_state_is_current_on_epoch_end(self):
class TestModel(models.Model):
def __init__(self):
super().__init__()
self.iterations = self.add_variable(
shape=(), initializer="zeros", trainable=False
)
def call(self, inputs):
self.iterations.assign(self.iterations + 1)
return inputs
class CBK(Callback):
def on_batch_end(self, batch, logs):
assert np.int32(self.model.iterations) == batch + 1
model = TestModel()
model.compile(optimizer="sgd", loss="mse")
x = np.random.random((8, 1))
y = np.random.random((8, 1))
model.fit(x, y, callbacks=[CBK()], batch_size=2)
| keras-core/keras_core/callbacks/callback_test.py/0 | {
"file_path": "keras-core/keras_core/callbacks/callback_test.py",
"repo_id": "keras-core",
"token_count": 474
} | 29 |
"""MNIST handwritten digits dataset."""
import numpy as np
from keras_core.api_export import keras_core_export
from keras_core.utils.file_utils import get_file
@keras_core_export("keras_core.datasets.mnist.load_data")
def load_data(path="mnist.npz"):
"""Loads the MNIST dataset.
This is a dataset of 60,000 28x28 grayscale images of the 10 digits,
along with a test set of 10,000 images.
More info can be found at the
[MNIST homepage](http://yann.lecun.com/exdb/mnist/).
Args:
path: path where to cache the dataset locally
(relative to `~/.keras/datasets`).
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
**`x_train`**: `uint8` NumPy array of grayscale image data with shapes
`(60000, 28, 28)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of digit labels (integers in range 0-9)
with shape `(60000,)` for the training data.
**`x_test`**: `uint8` NumPy array of grayscale image data with shapes
`(10000, 28, 28)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of digit labels (integers in range 0-9)
with shape `(10000,)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
assert x_train.shape == (60000, 28, 28)
assert x_test.shape == (10000, 28, 28)
assert y_train.shape == (60000,)
assert y_test.shape == (10000,)
```
License:
Yann LeCun and Corinna Cortes hold the copyright of MNIST dataset,
which is a derivative work from original NIST datasets.
MNIST dataset is made available under the terms of the
[Creative Commons Attribution-Share Alike 3.0 license.](
https://creativecommons.org/licenses/by-sa/3.0/)
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
fname=path,
origin=origin_folder + "mnist.npz",
file_hash=( # noqa: E501
"731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1"
),
)
with np.load(path, allow_pickle=True) as f:
x_train, y_train = f["x_train"], f["y_train"]
x_test, y_test = f["x_test"], f["y_test"]
return (x_train, y_train), (x_test, y_test)
| keras-core/keras_core/datasets/mnist.py/0 | {
"file_path": "keras-core/keras_core/datasets/mnist.py",
"repo_id": "keras-core",
"token_count": 1006
} | 30 |
from keras_core import activations
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.Activation")
class Activation(Layer):
"""Applies an activation function to an output.
Args:
activation: Activation function. It could be a callable, or the name of
an activation from the `keras_core.activations` namespace.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Example:
>>> layer = keras_core.layers.Activation('relu')
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
>>> layer = keras_core.layers.Activation(keras_core.activations.relu)
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
"""
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {"activation": activations.serialize(self.activation)}
base_config = super().get_config()
return {**base_config, **config}
| keras-core/keras_core/layers/activations/activation.py/0 | {
"file_path": "keras-core/keras_core/layers/activations/activation.py",
"repo_id": "keras-core",
"token_count": 517
} | 31 |
import numpy as np
from keras_core import layers
from keras_core import testing
class AttentionTest(testing.TestCase):
def test_attention_basics(self):
# No scale, no concat.
self.run_layer_test(
layers.Attention,
init_kwargs={
"score_mode": "dot",
"dropout": 0.5,
},
input_shape=[(2, 3, 4), (2, 4, 4)],
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
        # Scale and concat.
self.run_layer_test(
layers.Attention,
init_kwargs={
"use_scale": True,
"score_mode": "concat",
"dropout": 0.5,
},
input_shape=[(2, 3, 4), (2, 4, 4)],
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
def test_attention_correctness(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
# Dot.
layer = layers.Attention(score_mode="dot")
output, scores = layer(
[query, value, key],
return_attention_scores=True,
)
self.assertAllClose(
output, [[[2.462, 3.462], [1.538, 2.538]]], atol=1e-3
)
self.assertAllClose(
scores, [[[0.269, 0.731], [0.731, 0.269]]], atol=1e-3
)
# Concat.
layer = layers.Attention(score_mode="concat")
output, scores = layer(
[query, value, key],
return_attention_scores=True,
)
self.assertAllClose(
output, [[[1.727, 2.727], [2.272, 3.272]]], atol=1e-3
)
self.assertAllClose(
scores, [[[0.636, 0.363], [0.363, 0.636]]], atol=1e-3
)
def test_attention_with_mask(self):
layer = layers.Attention()
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
value = np.array([[[1.0, 1.0], [1.0, 1.0]]])
query_mask = np.array([[True, False]])
value_mask = np.array([[True, False]])
output, scores = layer(
[query, value],
mask=[query_mask, value_mask],
return_attention_scores=True,
)
self.assertAllClose(output, [[[1.0, 1.0], [0.0, 0.0]]])
self.assertAllClose(scores, [[[1.0, 0.0], [1.0, 0.0]]])
def test_attention_errors(self):
layer = layers.Attention()
tensor = np.array([[[1.0, 1.0], [1.0, 1.0]]])
with self.assertRaisesRegex(ValueError, "must be called on a list"):
layer(tensor)
with self.assertRaisesRegex(ValueError, "length 2 or 3"):
layer([tensor, tensor, tensor, tensor])
with self.assertRaisesRegex(ValueError, "layer mask must be a list"):
layer([tensor, tensor], mask=tensor)
with self.assertRaisesRegex(ValueError, "length 2 or 3"):
layer([tensor, tensor], mask=[tensor])
| keras-core/keras_core/layers/attention/attention_test.py/0 | {
"file_path": "keras-core/keras_core/layers/attention/attention_test.py",
"repo_id": "keras-core",
"token_count": 1849
} | 32 |
from keras_core.api_export import keras_core_export
from keras_core.layers.convolutional.base_depthwise_conv import (
BaseDepthwiseConv,
)
@keras_core_export("keras_core.layers.DepthwiseConv1D")
class DepthwiseConv1D(BaseDepthwiseConv):
"""1D depthwise convolution layer.
Depthwise convolution is a type of convolution in which each input channel
is convolved with a different kernel (called a depthwise kernel). You can
understand depthwise convolution as the first step in a depthwise separable
convolution.
It is implemented via the following steps:
- Split the input into individual channels.
- Convolve each channel with an individual depthwise kernel with
`depth_multiplier` output channels.
- Concatenate the convolved outputs along the channels axis.
Unlike a regular 1D convolution, depthwise convolution does not mix
information across different input channels.
The `depth_multiplier` argument determines how many filters are applied to
    one input channel. As such, it controls the number of output channels
    generated per input channel in the depthwise step.
Args:
kernel_size: int or tuple/list of 1 integer, specifying the size of the
depthwise convolution window.
strides: int or tuple/list of 1 integer, specifying the stride length
of the convolution. `strides > 1` is incompatible with
`dilation_rate > 1`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `input_channel * depth_multiplier`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
dilation_rate: int or tuple/list of 1 integers, specifying the dilation
rate to use for dilated convolution.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
depthwise_initializer: Initializer for the convolution kernel.
If `None`, the default initializer (`"glorot_uniform"`)
will be used.
bias_initializer: Initializer for the bias vector. If `None`, the
default initializer (`"zeros"`) will be used.
depthwise_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape). Constraints
are not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
Input shape:
- If `data_format="channels_last"`:
A 3D tensor with shape: `(batch_shape, steps, channels)`
- If `data_format="channels_first"`:
A 3D tensor with shape: `(batch_shape, channels, steps)`
Output shape:
- If `data_format="channels_last"`:
A 3D tensor with shape:
`(batch_shape, new_steps, channels * depth_multiplier)`
- If `data_format="channels_first"`:
A 3D tensor with shape:
`(batch_shape, channels * depth_multiplier, new_steps)`
Returns:
A 3D tensor representing
`activation(depthwise_conv1d(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides > 1` and `dilation_rate > 1`.
Examples:
>>> x = np.random.rand(4, 10, 12)
    >>> y = keras_core.layers.DepthwiseConv1D(
    ...     3, strides=2, depth_multiplier=3, activation='relu')(x)
>>> print(y.shape)
(4, 4, 36)
"""
def __init__(
self,
kernel_size,
strides=1,
padding="valid",
depth_multiplier=1,
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
depthwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs
):
super().__init__(
rank=1,
depth_multiplier=depth_multiplier,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
bias_constraint=bias_constraint,
**kwargs
)
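# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes the public `keras_core` package is importable and simply
# exercises the shape arithmetic described in the docstring above:
# new_steps = floor((10 - 3) / 2) + 1 = 4 and channels = 12 * 3 = 36.
if __name__ == "__main__":
    import numpy as np
    import keras_core
    x = np.random.rand(4, 10, 12)
    layer = keras_core.layers.DepthwiseConv1D(
        kernel_size=3, strides=2, depth_multiplier=3, activation="relu"
    )
    print(layer(x).shape)  # Expected: (4, 4, 36)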
| keras-core/keras_core/layers/convolutional/depthwise_conv1d.py/0 | {
"file_path": "keras-core/keras_core/layers/convolutional/depthwise_conv1d.py",
"repo_id": "keras-core",
"token_count": 2343
} | 33 |
import numpy as np
from absl.testing import parameterized
from keras_core import backend
from keras_core import testing
from keras_core.backend import KerasTensor
from keras_core.layers import InputLayer
class InputLayerTest(testing.TestCase, parameterized.TestCase):
# Testing happy path for layer without input tensor
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
def test_input_basic(self, sparse):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
ndim = len(tuple((batch_size,) + input_shape))
init_kwargs = {
"shape": input_shape,
"batch_size": batch_size,
"dtype": dtype,
"sparse": sparse,
}
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
with self.assertRaisesRegex(
ValueError, "`sparse=True` is not supported"
):
InputLayer(**init_kwargs)
return
values = InputLayer(**init_kwargs)
self.assertEqual(values.dtype, dtype)
self.assertEqual(values.batch_shape[0], batch_size)
self.assertEqual(values.batch_shape[1:], input_shape)
self.assertEqual(values.sparse, sparse)
self.assertEqual(values.trainable, True)
self.assertIsInstance(values.output, KerasTensor)
self.assertEqual(values.output.ndim, ndim)
self.assertEqual(values.output.dtype, dtype)
self.assertEqual(values.output.sparse, sparse)
# Testing shape is not None and batch_shape is not None condition
def test_input_error1(self):
input_shape = (2, 3)
with self.assertRaisesRegex(
ValueError, "cannot pass both `shape` and `batch_shape`"
):
InputLayer(shape=input_shape, batch_shape=input_shape)
# Testing batch_size is not None and batch_shape is not None
def test_input_error2(self):
input_shape = (2, 3)
batch_size = 4
with self.assertRaisesRegex(
ValueError, "cannot pass both `batch_size` and `batch_shape`"
):
InputLayer(batch_size=batch_size, batch_shape=input_shape)
# Testing shape is None and batch_shape is None
def test_input_error3(self):
with self.assertRaisesRegex(ValueError, "pass a `shape` argument."):
InputLayer(shape=None, batch_shape=None)
# Testing Input tensor is not Keras tensor
def test_input_tensor_error(self):
input_shape = (2, 3)
batch_size = 4
input_tensor = np.zeros(input_shape)
with self.assertRaisesRegex(
ValueError, "Argument `input_tensor` must be a KerasTensor"
):
InputLayer(
shape=input_shape,
batch_size=batch_size,
input_tensor=input_tensor,
)
# Testing happy path for layer with input tensor
def testing_input_tensor(self):
input_shape = (2, 3)
batch_size = 4
dtype = "float32"
input_tensor = KerasTensor(shape=input_shape, dtype=dtype)
values = InputLayer(
shape=input_shape,
batch_size=batch_size,
input_tensor=input_tensor,
dtype=dtype,
)
self.assertEqual(values.dtype, dtype)
self.assertEqual(values.batch_shape[0], batch_size)
self.assertEqual(values.batch_shape[1:], input_shape)
self.assertEqual(values.trainable, True)
self.assertIsInstance(values.output, KerasTensor)
self.assertEqual(values.output, input_tensor)
self.assertEqual(values.output.ndim, input_tensor.ndim)
self.assertEqual(values.output.dtype, dtype)
| keras-core/keras_core/layers/core/input_layer_test.py/0 | {
"file_path": "keras-core/keras_core/layers/core/input_layer_test.py",
"repo_id": "keras-core",
"token_count": 1734
} | 34 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.merging.base_merge import Merge
@keras_core_export("keras_core.layers.Maximum")
class Maximum(Merge):
"""Computes element-wise maximum on a list of inputs.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras_core.layers.Maximum()([x1, x2])
Usage in a Keras model:
>>> input1 = keras_core.layers.Input(shape=(16,))
>>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras_core.layers.Input(shape=(32,))
>>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras_core.layers.maximum([x1, x2])`
>>> y = keras_core.layers.Maximum()([x1, x2])
>>> out = keras_core.layers.Dense(4)(y)
>>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.maximum(output, inputs[i])
return output
@keras_core_export("keras_core.layers.maximum")
def maximum(inputs, **kwargs):
"""Functional interface to the `keras_core.layers.Maximum` layer.
Args:
        inputs: A list of input tensors, all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
        A tensor with the element-wise maximum of the inputs, with the same
        shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras_core.layers.maximum([x1, x2])
Usage in a Keras model:
>>> input1 = keras_core.layers.Input(shape=(16,))
>>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras_core.layers.Input(shape=(32,))
>>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
>>> y = keras_core.layers.maximum([x1, x2])
>>> out = keras_core.layers.Dense(4)(y)
>>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)
"""
return Maximum(**kwargs)(inputs)
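# Illustrative check (added note, not part of the original module), assuming
# `keras_core` and NumPy are available: the layer reduces its inputs with an
# element-wise maximum.
if __name__ == "__main__":
    import numpy as np
    import keras_core
    x1 = np.array([[1.0, 5.0, 2.0]])
    x2 = np.array([[3.0, 4.0, 2.5]])
    print(keras_core.layers.maximum([x1, x2]))  # -> [[3.0, 5.0, 2.5]]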
| keras-core/keras_core/layers/merging/maximum.py/0 | {
"file_path": "keras-core/keras_core/layers/merging/maximum.py",
"repo_id": "keras-core",
"token_count": 950
} | 35 |
from keras_core.api_export import keras_core_export
from keras_core.layers.pooling.base_pooling import BasePooling
@keras_core_export(
["keras_core.layers.MaxPooling2D", "keras_core.layers.MaxPool2D"]
)
class MaxPooling2D(BasePooling):
"""Max pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the maximum value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = keras_core.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> max_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> max_pool_2d = keras_core.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> max_pool_2d(x)
    `strides=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = keras_core.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> max_pool_2d(x)
"""
def __init__(
self,
pool_size=(2, 2),
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
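# Illustrative shape check (added note, not part of the original module),
# assuming `keras_core` is importable. With a 3x3 input, pool_size=(2, 2) and
# strides=(1, 1), "valid" padding gives floor((3 - 2) / 1) + 1 = 2 per spatial
# dimension, while "same" padding gives floor((3 - 1) / 1) + 1 = 3.
if __name__ == "__main__":
    import numpy as np
    import keras_core
    x = np.arange(9, dtype="float32").reshape(1, 3, 3, 1)
    valid = keras_core.layers.MaxPooling2D((2, 2), (1, 1), padding="valid")
    same = keras_core.layers.MaxPooling2D((2, 2), (1, 1), padding="same")
    print(valid(x).shape)  # Expected: (1, 2, 2, 1)
    print(same(x).shape)  # Expected: (1, 3, 3, 1)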
| keras-core/keras_core/layers/pooling/max_pooling2d.py/0 | {
"file_path": "keras-core/keras_core/layers/pooling/max_pooling2d.py",
"repo_id": "keras-core",
"token_count": 1836
} | 36 |
import collections
import numpy as np
from keras_core import backend
from keras_core.layers.layer import Layer
from keras_core.utils import argument_validation
from keras_core.utils import tf_utils
from keras_core.utils.module_utils import tensorflow as tf
class IndexLookup(Layer):
"""Maps values from a vocabulary to integer indices.
This layer translates a set of arbitrary hashables into an integer output
via a table-based lookup, with optional out-of-vocabulary handling. This is
the basis layer for both IntegerLookup and StringLookup; it holds the common
logic but is not intended to be exported as part of the Keras API.
Args:
max_tokens: The maximum size of the vocabulary for this layer.
If `None`, there is no cap on the size of the vocabulary.
Note that this size includes the OOV and mask tokens.
num_oov_indices: The number of out-of-vocabulary tokens to use.
If this value is more than 1, OOV inputs are hashed to determine
their OOV value. If this value is 0,
OOV inputs will cause an error when calling the layer.
mask_token: A token that represents masked inputs.
When `output_mode` is `"int"`,
the token is included in vocabulary and mapped to index 0.
In other output modes, the token will not appear in the vocabulary
and instances of the mask token in the input will be dropped.
If set to `None`, no mask term will be added.
oov_token: Only used when `invert` is `True`.
The token to return for OOV indices.
vocabulary: Optional. Either an array or a string path to a text file.
If passing an array, can pass a tuple, list, 1D numpy array,
            or 1D tensor containing the vocabulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt` the layer.
vocabulary_dtype: The dtype of the vocabulary terms.
For example, `"int64"` or `"string"`.
idf_weights: Only valid when `output_mode` is `"tf_idf"`.
A tuple, list, 1D numpy array, or 1D tensor or the same length
as the vocabulary, containing the floating point
inverse document frequency weights, which will be multiplied
by per sample term counts for the final TF-IDF
weight. If the `vocabulary` argument is set, and `output_mode`
is `"tf_idf"`, this argument must be supplied.
invert: Only valid when `output_mode` is `"int"`.
If `True`, this layer will map indices to vocabulary items
instead of mapping vocabulary items to indices.
Defaults to `False`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or `"tf_idf"`
configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1
at the element index. If the last dimension is size 1,
will encode on that dimension.
If the last dimension is not size 1,
will append a new dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into
a single array the same size as the vocabulary,
containing a 1 for each vocabulary term present in the sample.
Treats the last dimension as the sample dimension,
if input shape is `(..., sample_length)`, output shape will
be `(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains a count
of the number of times the token at that index appeared
in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm
is applied to find the value in each token slot.
Defaults to `"int"`.
pad_to_max_tokens: Only valid when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If `True`, the output will have its
feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`,
`"count"` and `"tf-idf"` output modes.
If `True`, returns a `SparseTensor` instead of a dense `Tensor`.
Defaults to `False`.
"""
def __init__(
self,
max_tokens,
num_oov_indices,
mask_token,
oov_token,
vocabulary_dtype,
vocabulary=None,
idf_weights=None,
invert=False,
output_mode="int",
sparse=False,
pad_to_max_tokens=False,
name=None,
**kwargs,
):
# If max_tokens is set, the value must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens <= 1:
raise ValueError(
"If set, `max_tokens` must be greater than 1. "
f"Received: max_tokens={max_tokens}"
)
if pad_to_max_tokens and max_tokens is None:
raise ValueError(
"If pad_to_max_tokens is True, must set `max_tokens`. "
f"Received: max_tokens={max_tokens}"
)
if num_oov_indices < 0:
raise ValueError(
"`num_oov_indices` must be greater than or equal to 0. "
f"Received: num_oov_indices={num_oov_indices}"
)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = "multi_hot"
if output_mode == "tf-idf":
output_mode = "tf_idf"
argument_validation.validate_string_arg(
output_mode,
allowable_strings=(
"int",
"one_hot",
"multi_hot",
"count",
"tf_idf",
),
caller_name=self.__class__.__name__,
arg_name="output_mode",
)
if invert and output_mode != "int":
raise ValueError(
"`output_mode` must be `'int'` when `invert` is true. "
f"Received: output_mode={output_mode}"
)
if sparse and output_mode == "int":
raise ValueError(
"`sparse` may only be true if `output_mode` is "
"`'one_hot'`, `'multi_hot'`, `'count'` or `'tf_idf'`. "
f"Received: sparse={sparse} and "
f"output_mode={output_mode}"
)
if idf_weights is not None and output_mode != "tf_idf":
raise ValueError(
"`idf_weights` should only be set if `output_mode` is "
f"`'tf_idf'`. Received: idf_weights={idf_weights} and "
f"output_mode={output_mode}"
)
super().__init__(name=name)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.supports_jit = False
self.invert = invert
self.max_tokens = max_tokens
self.num_oov_indices = num_oov_indices
self.mask_token = mask_token
self.oov_token = oov_token
self.output_mode = output_mode
self.sparse = sparse
self.pad_to_max_tokens = pad_to_max_tokens
self.vocabulary_dtype = tf.as_dtype(vocabulary_dtype).name
self._frozen_vocab_size = kwargs.pop("vocabulary_size", None)
self.input_vocabulary = vocabulary
self.input_idf_weights = idf_weights
# We set this hidden attr to
        # persist the fact that we have a non-adaptable layer with a
# manually set vocab.
self._has_input_vocabulary = kwargs.pop(
"has_input_vocabulary", (vocabulary is not None)
)
kwargs.pop("trainable", None)
kwargs.pop("dtype", None)
if kwargs:
raise ValueError(f"Unrecognized keyword argument(s): {kwargs}")
if invert:
self._key_dtype = "int64"
self._value_dtype = self.vocabulary_dtype
mask_key = 0
mask_value = mask_token
self._default_value = self.oov_token
else:
self._key_dtype = self.vocabulary_dtype
self._value_dtype = "int64"
mask_key = mask_token
# Masks should map to 0 for int output and be dropped otherwise. Max
# ints will be dropped from the bincount op.
mask_value = (
0
if self.output_mode == "int"
else tf.as_dtype(self._value_dtype).max
)
if self.num_oov_indices == 0:
# If there are no OOV indices, we map OOV tokens to -1 and error
# out during call if we find a negative index.
self._default_value = -1
elif self.num_oov_indices == 1:
# If there is only one OOV index, we can set that index as the
# default value of the index_lookup table.
self._default_value = self._oov_start_index()
else:
# If we have multiple OOV values, we need to do a further
# hashing step; to make this easier, we set the OOV value to -1.
# (This lets us do a vectorized add and cast to boolean to
# determine locations where we need to do extra hashing.)
self._default_value = -1
if self.mask_token is not None:
self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype)
self._mask_value = tf.convert_to_tensor(
mask_value, self._value_dtype
)
if self.output_mode == "tf_idf":
if self._has_input_vocabulary and idf_weights is None:
raise ValueError(
"When specifying the `vocabulary` argument, "
"in TF-IDF output mode, the `idf_weights` argument "
"must also be provided."
)
if idf_weights is not None:
self.idf_weights = tf.Variable(
idf_weights,
dtype=backend.floatx(),
trainable=False,
)
self.idf_weights_const = self.idf_weights.value()
if vocabulary is not None:
self.set_vocabulary(vocabulary, idf_weights)
else:
# When restoring from a keras SavedModel, the loading code will
# expect to find and restore a lookup_table attribute on the layer.
# This table needs to be uninitialized as a StaticHashTable cannot
# be initialized twice.
self.lookup_table = self._uninitialized_lookup_table()
# Only set up adapt state if we did not receive a vocab on construction.
if not self._has_input_vocabulary:
# Set adapt state.
self.token_counts = tf.lookup.experimental.MutableHashTable(
key_dtype=vocabulary_dtype,
value_dtype="int64",
default_value=0,
)
if self.output_mode == "tf_idf":
self.token_document_counts = (
tf.lookup.experimental.MutableHashTable(
key_dtype=vocabulary_dtype,
value_dtype="int64",
default_value=0,
)
)
self.num_documents = tf.Variable(
0, dtype="int64", trainable=False
)
def get_vocabulary(self, include_special_tokens=True):
"""Returns the current vocabulary of the layer.
Args:
include_special_tokens: If `True`, the returned vocabulary
will include mask and OOV tokens,
and a term's index in the vocabulary
will equal the term's index when calling the layer.
If `False`, the returned vocabulary will not include
any mask or OOV tokens.
"""
        # The lookup table data will not be sorted, so we will create an inverted
# lookup here, and use that to lookup a range of indices
# [0, vocab_size).
if self.lookup_table.size() == 0:
vocab, indices = [], []
else:
keys, values = self.lookup_table.export()
vocab, indices = (values, keys) if self.invert else (keys, values)
vocab, indices = (
self._tensor_vocab_to_numpy(vocab),
indices.numpy(),
)
lookup = collections.defaultdict(
lambda: self.oov_token, zip(indices, vocab)
)
vocab = [lookup[x] for x in range(self.vocabulary_size())]
if self.mask_token is not None and self.output_mode == "int":
vocab[0] = self.mask_token
if not include_special_tokens:
vocab = vocab[self._token_start_index() :]
if self.vocabulary_dtype == "string":
return [
i.decode("utf-8") if isinstance(i, bytes) else i for i in vocab
]
else:
return vocab
def vocabulary_size(self):
"""Gets the current size of the layer's vocabulary.
Returns:
The integer size of the vocabulary, including optional mask and oov
indices.
"""
if tf.executing_eagerly():
return (
int(self.lookup_table.size().numpy())
+ self._token_start_index()
)
else:
return self.lookup_table.size() + self._token_start_index()
def get_config(self):
config = {
"invert": self.invert,
"max_tokens": self.max_tokens,
"num_oov_indices": self.num_oov_indices,
"oov_token": self.oov_token,
"mask_token": self.mask_token,
"output_mode": self.output_mode,
"sparse": self.sparse,
"pad_to_max_tokens": self.pad_to_max_tokens,
"vocabulary_dtype": self.vocabulary_dtype,
"idf_weights": listify_tensors(self.input_idf_weights),
"vocabulary": listify_tensors(self.input_vocabulary),
"vocabulary_size": self._frozen_vocab_size,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def _record_vocabulary_size(self):
self._ensure_vocab_size_unchanged()
with tf.init_scope():
self._frozen_vocab_size = self.vocabulary_size()
def set_vocabulary(self, vocabulary, idf_weights=None):
"""Sets vocabulary (and optionally document frequency) for this layer.
This method sets the vocabulary and idf weights for this layer directly,
instead of analyzing a dataset through `adapt`. It should be used
whenever the vocab (and optionally document frequency) information is
already known. If vocabulary data is already present in the layer, this
method will replace it.
Args:
vocabulary: Either an array or a string path to a text file.
If passing an array, can pass a tuple, list,
                1D numpy array, or 1D tensor containing the vocabulary terms.
If passing a file path, the file should contain one line
per term in the vocabulary.
idf_weights: A tuple, list, 1D numpy array, or 1D tensor
of inverse document frequency weights with equal
length to vocabulary. Must be set if `output_mode`
is `"tf_idf"`. Should not be set otherwise.
"""
if self.output_mode == "tf_idf":
if idf_weights is None:
raise ValueError(
"`idf_weights` must be set if output_mode is 'tf_idf'."
)
elif idf_weights is not None:
raise ValueError(
"`idf_weights` should only be set if output_mode is "
f"`'tf_idf'`. Received: output_mode={self.output_mode} "
f"and idf_weights={idf_weights}"
)
if isinstance(vocabulary, str):
if not tf.io.gfile.exists(vocabulary):
raise ValueError(
f"Vocabulary file {vocabulary} does not exist."
)
if self.output_mode == "tf_idf":
raise ValueError(
"output_mode `'tf_idf'` does not support loading a "
"vocabulary from file."
)
self.lookup_table = self._lookup_table_from_file(vocabulary)
self._record_vocabulary_size()
return
if not tf.executing_eagerly() and (
tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights)
):
raise RuntimeError(
f"Cannot set a tensor vocabulary on layer {self.name} "
"when not executing eagerly. "
"Create this layer or call `set_vocabulary()` "
"outside of any traced function."
)
# TODO(mattdangerw): for better performance we should rewrite this
# entire function to operate on tensors and convert vocabulary to a
# tensor here.
if tf.is_tensor(vocabulary):
vocabulary = self._tensor_vocab_to_numpy(vocabulary)
elif isinstance(vocabulary, (list, tuple)):
vocabulary = np.array(vocabulary)
if tf.is_tensor(idf_weights):
idf_weights = idf_weights.numpy()
elif isinstance(idf_weights, (list, tuple)):
idf_weights = np.array(idf_weights)
if vocabulary.size == 0:
raise ValueError(
"Cannot set an empty vocabulary. "
f"Received: vocabulary={vocabulary}"
)
oov_start = self._oov_start_index()
token_start = self._token_start_index()
special_tokens = [self.mask_token] * oov_start + [
self.oov_token
] * self.num_oov_indices
found_special_tokens = np.array_equal(
special_tokens, vocabulary[:token_start]
)
if found_special_tokens:
tokens = vocabulary[token_start:]
else:
tokens = vocabulary
repeated_tokens = self._find_repeated_tokens(tokens)
if repeated_tokens:
raise ValueError(
"The passed vocabulary has at least one repeated "
"term. Please uniquify your dataset. The repeated terms "
f"are: {repeated_tokens}"
)
if self.mask_token is not None and self.mask_token in tokens:
mask_index = np.argwhere(vocabulary == self.mask_token)[-1]
raise ValueError(
"Found reserved mask token at unexpected location in "
"`vocabulary`. Note that passed `vocabulary` does not need to "
"include the OOV and mask tokens. Either remove all mask and "
"OOV tokens, or include them only at the start of the "
f"vocabulary in precisely this order: {special_tokens}. "
f"Received: mask_token={self.mask_token} at "
f"vocabulary index {mask_index}"
)
# Only error out for oov_token when invert=True. When invert=False,
# oov_token is unused during lookup.
if (
self.oov_token is not None
and self.invert
and self.oov_token in tokens
):
oov_index = np.argwhere(vocabulary == self.oov_token)[-1]
raise ValueError(
"Found reserved OOV token at unexpected location in "
"`vocabulary`. Note that passed `vocabulary` does not need to "
"include the OOV and mask tokens. Either remove all mask and "
"OOV tokens, or include them only at the start of the "
f"vocabulary in precisely this order: {special_tokens}. "
f"Received: oov_token={self.oov_token} at "
f"vocabulary index {oov_index}"
)
new_vocab_size = token_start + len(tokens)
if self.max_tokens is not None and (new_vocab_size > self.max_tokens):
raise ValueError(
"Attempted to set a vocabulary larger than the maximum vocab "
f"size. Received vocabulary size is {new_vocab_size}; "
f"`max_tokens` is {self.max_tokens}."
)
self.lookup_table = self._lookup_table_from_tokens(tokens)
self._record_vocabulary_size()
if self.output_mode == "tf_idf" and idf_weights is not None:
if len(vocabulary) != len(idf_weights):
raise ValueError(
"`idf_weights` must be the same length as vocabulary. "
f"len(idf_weights) is {len(idf_weights)}; "
f"len(vocabulary) is {len(vocabulary)}"
)
idf_weights = self._convert_to_ndarray(idf_weights)
if idf_weights.ndim != 1:
raise ValueError(
"TF-IDF data must be a 1-index array. "
f"Received: type(idf_weights)={type(idf_weights)}"
)
# If the passed vocabulary has no special tokens, we need to pad the
# front of idf_weights. We don't have real document frequencies for
# these tokens so we will use an average of all idf_weights passed
# in as a reasonable default.
if found_special_tokens:
front_padding = 0
front_padding_value = 0
else:
front_padding = token_start
front_padding_value = np.average(idf_weights)
# If pad_to_max_tokens is true, and max_tokens is greater than our
# total vocab size, we need to pad the back of idf_weights with
# zeros as well.
back_padding_value = 0
if self.pad_to_max_tokens and self.max_tokens is not None:
back_padding = (
self.max_tokens - front_padding - len(idf_weights)
)
else:
back_padding = 0
weights = np.pad(
idf_weights,
(front_padding, back_padding),
"constant",
constant_values=(front_padding_value, back_padding_value),
)
weights = tf.convert_to_tensor(weights, dtype=backend.floatx())
self.idf_weights = tf.Variable(
weights,
trainable=False,
)
self.idf_weights_const = self.idf_weights.value()
def build(self):
self.built = True
def get_build_config(self):
return {}
def build_from_config(self, config):
self.build()
@property
def compute_dtype(self):
return self.vocabulary_dtype
@property
def variable_dtype(self):
return self.vocabulary_dtype
def compute_output_shape(self, input_shape):
if self.output_mode == "int":
return input_shape
depth = (
self.max_tokens
if self.pad_to_max_tokens
else self._frozen_vocab_size
)
return (input_shape[0], depth)
def compute_output_spec(self, inputs):
if self.output_mode == "int":
output_dtype = "int64"
else:
output_dtype = backend.floatx()
output_shape = self.compute_output_shape(inputs.shape)
return backend.KerasTensor(output_shape, dtype=output_dtype)
def adapt(self, data, steps=None):
self.reset_state()
if isinstance(data, tf.data.Dataset):
if steps is not None:
data = data.take(steps)
for batch in data:
self.update_state(batch)
else:
data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
if data.shape.rank == 1:
                # A plain list of strings is treated as
                # one document per element.
data = tf.expand_dims(data, -1)
self.update_state(data)
self.finalize_state()
def update_state(self, data):
if self._has_input_vocabulary:
raise ValueError(
f"Cannot adapt layer '{self.name}' after setting a static "
"vocabulary via `vocabulary` argument or "
"`set_vocabulary()` method."
)
data = tf_utils.ensure_tensor(data, dtype=self.vocabulary_dtype)
if data.shape.rank == 0:
data = tf.expand_dims(data, 0)
if data.shape.rank == 1:
# Expand dims on axis 0 for tf-idf. A 1-d tensor
# is a single document.
data = tf.expand_dims(data, 0)
tokens, counts = self._num_tokens(data)
self.token_counts.insert(
tokens, counts + self.token_counts.lookup(tokens)
)
if self.output_mode == "tf_idf":
# Dedupe each row of our dataset.
if isinstance(data, tf.RaggedTensor):
deduped_doc_data = tf.map_fn(lambda x: tf.unique(x)[0], data)
else:
deduped_doc_data = [tf.unique(x)[0] for x in data]
deduped_doc_data = tf.concat(deduped_doc_data, axis=0)
# Flatten and count tokens.
tokens, counts = self._num_tokens(deduped_doc_data)
self.token_document_counts.insert(
tokens, counts + self.token_document_counts.lookup(tokens)
)
if isinstance(data, tf.RaggedTensor):
self.num_documents.assign_add(data.nrows())
else:
self.num_documents.assign_add(
tf.shape(data, out_type="int64")[0]
)
def finalize_state(self):
if self._has_input_vocabulary or tf.equal(self.token_counts.size(), 0):
# Finalize idf_weights to a const for call even if we don't need to
# compute a new vocabulary.
if self.output_mode == "tf_idf":
self.idf_weights_const = self.idf_weights.value()
self._record_vocabulary_size()
return
# Remove special tokens from our counts.
if self.mask_token is not None:
self.token_counts.remove(
tf.convert_to_tensor([self.mask_token], self.vocabulary_dtype)
)
if self.oov_token is not None:
self.token_counts.remove(
tf.convert_to_tensor([self.oov_token], self.vocabulary_dtype)
)
tokens, counts = self.token_counts.export()
# To keep vocabs deterministic, we sort our tokens by count and break
# ties by sorting the tokens themselves. Tensorflow has no ops for
# sorting strings, so we need to use numpy for the sort.
sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1]
token_start = self._token_start_index()
if self.max_tokens:
max_learned_tokens = self.max_tokens - token_start
sorted_indices = sorted_indices[:max_learned_tokens]
tokens = tf.gather(tokens, sorted_indices)
self.lookup_table = self._lookup_table_from_tokens(tokens)
if self.output_mode == "tf_idf":
token_document_counts = self.token_document_counts.lookup(tokens)
idf_weights = self._inverse_document_frequency(
token_document_counts, self.num_documents
)
idf_weights = tf.cast(idf_weights, backend.floatx())
# Pad the front of idf_weights with the average idf weight for OOV
# tokens. We cannot compute the real idf weight of OOV in a single
# pass.
idf_weights = tf.pad(
idf_weights,
[[self._token_start_index(), 0]],
constant_values=tf.reduce_mean(idf_weights),
)
if self.pad_to_max_tokens and self.max_tokens is not None:
# Pad the back of idf_weights with zeros.
idf_weights = tf.pad(
idf_weights,
[[0, self.max_tokens - tf.size(idf_weights)]],
constant_values=0,
)
self.idf_weights = tf.Variable(
idf_weights,
dtype=backend.floatx(),
trainable=False,
)
self.idf_weights_const = self.idf_weights.value()
# We call this here to save memory, now that we've built our vocabulary,
# we don't want to keep every token we've seen in separate lookup
# tables.
self.reset_state()
self._record_vocabulary_size()
def reset_state(self):
if self._has_input_vocabulary:
return
self.token_counts.remove(self.token_counts.export()[0])
if self.output_mode == "tf_idf":
self.token_document_counts.remove(
self.token_document_counts.export()[0]
)
self.num_documents.assign(0)
def call(self, inputs):
self._ensure_known_vocab_size()
inputs = tf_utils.ensure_tensor(inputs, dtype=self._key_dtype)
original_shape = inputs.shape
# Some ops will not handle scalar input, so uprank to rank 1.
if inputs.shape.rank == 0:
inputs = self._expand_dims(inputs, -1)
if isinstance(inputs, tf.SparseTensor):
lookups = tf.SparseTensor(
inputs.indices,
self._lookup_dense(inputs.values),
inputs.dense_shape,
)
elif isinstance(inputs, tf.RaggedTensor):
lookups = tf.ragged.map_flat_values(self._lookup_dense, inputs)
else:
lookups = self._lookup_dense(inputs)
if self.output_mode == "int":
# If we received a scalar input, downrank back to a scalar.
if original_shape.rank == 0:
lookups = tf.squeeze(lookups, -1)
return lookups
depth = (
self.max_tokens
if self.pad_to_max_tokens
else self._frozen_vocab_size
)
idf_weights = (
self.idf_weights_const if self.output_mode == "tf_idf" else None
)
return tf_utils.encode_categorical_inputs(
lookups,
output_mode=self.output_mode,
depth=depth,
dtype=self._value_dtype,
sparse=self.sparse,
idf_weights=idf_weights,
)
def _lookup_dense(self, inputs):
"""Lookup table values for a dense Tensor, handling masking and OOV."""
# When executing eagerly and tracing keras.Input objects,
# do not call lookup.
# This is critical for restoring SavedModel, which will first trace
# layer.call and then attempt to restore the table. We need the table to
# be uninitialized for the restore to work, but calling the table
# uninitialized would error.
if tf.executing_eagerly() and backend.is_keras_tensor(inputs):
lookups = tf.zeros_like(inputs, dtype=self._value_dtype)
else:
lookups = self.lookup_table.lookup(inputs)
if self.mask_token is not None:
mask_locations = tf.equal(inputs, self._mask_key)
lookups = tf.where(mask_locations, self._mask_value, lookups)
if self.invert:
return lookups
lookup_checks = []
if self.num_oov_indices == 0:
# If we have zero oov indices, we need to check for oov inputs.
oov_indices = tf.where(tf.equal(lookups, -1))
oov_inputs = tf.gather_nd(inputs, oov_indices)
msg = tf.strings.format(
"When `num_oov_indices=0` all inputs should be in vocabulary, "
"found OOV values {}, consider setting `num_oov_indices=1`.",
(oov_inputs,),
)
assertion = tf.Assert(tf.equal(tf.size(oov_indices), 0), [msg])
lookup_checks.append(assertion)
elif self.num_oov_indices > 1:
# If we have multiple oov indices, we need a further hashing step.
if tf.as_dtype(self._key_dtype).is_integer:
oov_indices = tf.math.floormod(inputs, self.num_oov_indices)
else:
oov_indices = tf.strings.to_hash_bucket_fast(
inputs, num_buckets=self.num_oov_indices
)
oov_indices = oov_indices + self._oov_start_index()
oov_locations = tf.equal(lookups, self._default_value)
lookups = tf.where(oov_locations, oov_indices, lookups)
with tf.control_dependencies(lookup_checks):
return tf.identity(lookups)
def save_own_variables(self, store):
if self.output_mode == "tf_idf":
store["idf_weights"] = self.idf_weights_const.numpy()
def load_own_variables(self, store):
if self.output_mode == "tf_idf":
self.idf_weights.assign(store["idf_weights"])
self.idf_weights_const = self.idf_weights.value()
def save_assets(self, dir_path):
if self.input_vocabulary:
# Vocab saved in config.
# TODO: consider unifying both paths.
return
vocabulary = self.get_vocabulary(include_special_tokens=True)
vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt")
with open(vocabulary_filepath, "w") as f:
f.write("\n".join([str(w) for w in vocabulary]))
def load_assets(self, dir_path):
if self.input_vocabulary:
# Vocab saved in config.
# TODO: consider unifying both paths.
return
vocabulary_filepath = tf.io.gfile.join(dir_path, "vocabulary.txt")
# TODO: fix bug with include_special_tokens and set reload from file.
with open(vocabulary_filepath, "r") as f:
lines = f.read().split("\n")
if tf.as_dtype(self.vocabulary_dtype) == tf.string:
values = [str(line) for line in lines]
else:
values = [int(line) for line in lines]
if self.output_mode == "tf_idf":
self.set_vocabulary(values, idf_weights=False)
else:
self.set_vocabulary(values)
def _uninitialized_lookup_table(self):
with tf.init_scope():
initializer = get_null_initializer(
self._key_dtype, self._value_dtype
)
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _lookup_table_from_tokens(self, tokens):
with tf.init_scope():
token_start = self._token_start_index()
token_end = token_start + tf.size(tokens)
indices_dtype = (
self._key_dtype if self.invert else self._value_dtype
)
indices = tf.range(token_start, token_end, dtype=indices_dtype)
keys, values = (
(indices, tokens) if self.invert else (tokens, indices)
)
initializer = tf.lookup.KeyValueTensorInitializer(
keys, values, self._key_dtype, self._value_dtype
)
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _lookup_table_from_file(self, filename):
if self.invert:
key_index = tf.lookup.TextFileIndex.LINE_NUMBER
value_index = tf.lookup.TextFileIndex.WHOLE_LINE
else:
key_index = tf.lookup.TextFileIndex.WHOLE_LINE
value_index = tf.lookup.TextFileIndex.LINE_NUMBER
with tf.init_scope():
initializer = tf.lookup.TextFileInitializer(
filename=filename,
key_dtype=self._key_dtype,
key_index=key_index,
value_dtype=self._value_dtype,
value_index=value_index,
value_index_offset=self._token_start_index(),
)
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def _expand_dims(self, inputs, axis):
if isinstance(inputs, tf.SparseTensor):
return tf.sparse.expand_dims(inputs, axis)
else:
return tf.expand_dims(inputs, axis)
def _oov_start_index(self):
return (
1
if self.mask_token is not None and self.output_mode == "int"
else 0
)
def _token_start_index(self):
return self._oov_start_index() + self.num_oov_indices
def _ensure_known_vocab_size(self):
if self.output_mode == "int" or self.pad_to_max_tokens:
return
if self._frozen_vocab_size is None:
raise RuntimeError(
f"When using `output_mode={self.output_mode}` "
"and `pad_to_max_tokens=False`, "
"you must set the layer's vocabulary before calling it. Either "
"pass a `vocabulary` argument to the layer, or call `adapt` "
"with some sample data."
)
def _ensure_vocab_size_unchanged(self):
if self.output_mode == "int" or self.pad_to_max_tokens:
return
with tf.init_scope():
new_vocab_size = self.vocabulary_size()
if (
self._frozen_vocab_size is not None
and new_vocab_size != self._frozen_vocab_size
):
raise RuntimeError(
f"When using `output_mode={self.output_mode}` "
"and `pad_to_max_tokens=False`, "
"the vocabulary size cannot be changed after the layer is "
f"called. Old vocab size is {self._frozen_vocab_size}, "
f"new vocab size is {new_vocab_size}"
)
def _find_repeated_tokens(self, vocabulary):
"""Return all repeated tokens in a vocabulary."""
vocabulary_set = set(vocabulary)
if len(vocabulary) != len(vocabulary_set):
return [
item
for item, count in collections.Counter(vocabulary).items()
if count > 1
]
else:
return []
def _num_tokens(self, data):
"""Count the number of tokens in a ragged, sparse or dense tensor."""
if isinstance(data, tf.SparseTensor):
flat_values = data.values
elif isinstance(data, tf.RaggedTensor):
flat_values = data.flat_values
else:
flat_values = tf.reshape(data, [-1])
tokens, _, counts = tf.unique_with_counts(flat_values, out_idx="int64")
return tokens, counts
def _inverse_document_frequency(self, token_document_counts, num_documents):
"""Computes the inverse-document-frequency (IDF) component of "tf_idf".
Args:
token_document_counts: An array of the # of documents each token
appears in.
num_documents: An int representing the total number of documents
Returns:
An array of "inverse document frequency" weights.
"""
return tf.math.log(1 + num_documents / (1 + token_document_counts))
# Override points for IntegerLookup and StringLookup.
def _tensor_vocab_to_numpy(self, vocabulary):
"""Converts a tensor vocabulary to a numpy vocabulary."""
return vocabulary.numpy()
def get_null_initializer(key_dtype, value_dtype):
class NullInitializer(tf.lookup.KeyValueTensorInitializer):
"""A placeholder initializer for restoring from a SavedModel."""
def __init__(self, key_dtype, value_dtype):
"""Construct a table initializer object.
Args:
key_dtype: Type of the table keys.
value_dtype: Type of the table values.
"""
self._key_dtype = key_dtype
self._value_dtype = value_dtype
@property
def key_dtype(self):
"""The expected table key dtype."""
return self._key_dtype
@property
def value_dtype(self):
"""The expected table value dtype."""
return self._value_dtype
def initialize(self, table):
"""Returns the table initialization op."""
pass
return NullInitializer(key_dtype, value_dtype)
def listify_tensors(x):
"""Convert any tensors or numpy arrays to lists for config serialization."""
if tf.is_tensor(x):
x = x.numpy()
if isinstance(x, np.ndarray):
x = x.tolist()
return x
| keras-core/keras_core/layers/preprocessing/index_lookup.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/index_lookup.py",
"repo_id": "keras-core",
"token_count": 20024
} | 37 |
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer
from keras_core.random.seed_generator import SeedGenerator
@keras_core_export("keras_core.layers.RandomTranslation")
class RandomTranslation(TFDataLayer):
"""A preprocessing layer which randomly translates images during training.
This layer will apply random translations to each image during training,
filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`,
or `(..., channels, target_height, target_width)`,
in `"channels_first"` format.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for shifting vertically. A
negative value means shifting image up, while a positive value means
shifting image down. When represented as a single positive float,
this value is used for both the upper and lower bound. For instance,
`height_factor=(-0.2, 0.3)` results in an output shifted by a random
amount in the range `[-20%, +30%]`. `height_factor=0.2` results in
an output height shifted by a random amount in the range
`[-20%, +20%]`.
width_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for shifting horizontally.
A negative value means shifting image left, while a positive value
means shifting image right. When represented as a single positive
float, this value is used for both the upper and lower bound. For
            instance, `width_factor=(-0.2, 0.3)` results in an output shifted
            left by up to 20%, or shifted right by up to 30%.
            `width_factor=0.2` results in an output shifted left or right by
            up to 20%.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode. Available methods are `"constant"`,
`"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last
pixel.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
Note that when using torch backend, `"reflect"` is redirected to
`"mirror"` `(c d c b | a b c d | c b a b)` because torch does not
support `"reflect"`.
Note that torch backend does not support `"wrap"`.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
        fill_value: a float representing the value to be filled outside the
            boundaries when `fill_mode="constant"`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
_FACTOR_VALIDATION_ERROR = (
"The `factor` argument should be a number (or a list of two numbers) "
"in the range [-1.0, 1.0]. "
)
_SUPPORTED_FILL_MODE = ("reflect", "wrap", "constant", "nearest")
_SUPPORTED_INTERPOLATION = ("nearest", "bilinear")
def __init__(
self,
height_factor,
width_factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
data_format=None,
**kwargs,
):
super().__init__(**kwargs)
self.height_factor = height_factor
self.height_lower, self.height_upper = self._set_factor(
height_factor, "height_factor"
)
self.width_factor = width_factor
self.width_lower, self.width_upper = self._set_factor(
width_factor, "width_factor"
)
if fill_mode not in self._SUPPORTED_FILL_MODE:
raise NotImplementedError(
f"Unknown `fill_mode` {fill_mode}. Expected of one "
f"{self._SUPPORTED_FILL_MODE}."
)
if interpolation not in self._SUPPORTED_INTERPOLATION:
raise NotImplementedError(
f"Unknown `interpolation` {interpolation}. Expected of one "
f"{self._SUPPORTED_INTERPOLATION}."
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self.generator = SeedGenerator(seed)
self.data_format = backend.standardize_data_format(data_format)
self.supports_jit = False
def _set_factor(self, factor, factor_name):
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: {factor_name}={factor}"
)
self._check_factor_range(factor[0])
self._check_factor_range(factor[1])
lower, upper = sorted(factor)
elif isinstance(factor, (int, float)):
self._check_factor_range(factor)
factor = abs(factor)
lower, upper = [-factor, factor]
else:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: {factor_name}={factor}"
)
return lower, upper
def _check_factor_range(self, input_number):
if input_number > 1.0 or input_number < -1.0:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: input_number={input_number}"
)
def call(self, inputs, training=True):
inputs = self.backend.cast(inputs, self.compute_dtype)
if training:
return self._randomly_translate_inputs(inputs)
else:
return inputs
def _randomly_translate_inputs(self, inputs):
inputs_shape = self.backend.shape(inputs)
unbatched = len(inputs_shape) == 3
if unbatched:
inputs = self.backend.numpy.expand_dims(inputs, axis=0)
inputs_shape = self.backend.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == "channels_first":
height = inputs_shape[-2]
width = inputs_shape[-1]
else:
height = inputs_shape[-3]
width = inputs_shape[-2]
seed_generator = self._get_seed_generator(self.backend._backend)
height_translate = self.backend.random.uniform(
minval=self.height_lower,
maxval=self.height_upper,
shape=[batch_size, 1],
seed=seed_generator,
)
height_translate = height_translate * height
width_translate = self.backend.random.uniform(
minval=self.width_lower,
maxval=self.width_upper,
shape=[batch_size, 1],
seed=seed_generator,
)
width_translate = width_translate * width
translations = self.backend.cast(
self.backend.numpy.concatenate(
[width_translate, height_translate], axis=1
),
dtype="float32",
)
outputs = self.backend.image.affine_transform(
inputs,
transform=self._get_translation_matrix(translations),
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
data_format=self.data_format,
)
if unbatched:
outputs = self.backend.numpy.squeeze(outputs, axis=0)
return outputs
def _get_translation_matrix(self, translations):
num_translations = self.backend.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# translation matrices are always float32.
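        # Illustrative note (added; not from the original source): for a
        # single translation of 2 pixels right and 3 pixels down, i.e.
        # translations = [[2., 3.]], the concatenation below produces the
        # flattened transform [1, 0, -2, 0, 1, -3, 0, 0], which
        # `affine_transform` reads as the 3x3 matrix above with dx=2, dy=3.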
return self.backend.numpy.concatenate(
[
self.backend.numpy.ones((num_translations, 1)),
self.backend.numpy.zeros((num_translations, 1)),
-translations[:, 0:1],
self.backend.numpy.zeros((num_translations, 1)),
self.backend.numpy.ones((num_translations, 1)),
-translations[:, 1:],
self.backend.numpy.zeros((num_translations, 2)),
],
axis=1,
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"interpolation": self.interpolation,
"seed": self.seed,
"fill_value": self.fill_value,
"data_format": self.data_format,
}
return {**base_config, **config}
| keras-core/keras_core/layers/preprocessing/random_translation.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/random_translation.py",
"repo_id": "keras-core",
"token_count": 4780
} | 38 |
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.Dropout")
class Dropout(Layer):
"""Applies dropout to the input.
The `Dropout` layer randomly sets input units to 0 with a frequency of
`rate` at each step during training time, which helps prevent overfitting.
Inputs not set to 0 are scaled up by `1 / (1 - rate)` such that the sum over
all inputs is unchanged.
Note that the `Dropout` layer only applies when `training` is set to `True`
in `call()`, such that no values are dropped during inference.
When using `model.fit`, `training` will be appropriately set to `True`
automatically. In other contexts, you can set the argument explicitly
to `True` when calling the layer.
(This is in contrast to setting `trainable=False` for a `Dropout` layer.
`trainable` does not affect the layer's behavior, as `Dropout` does
not have any variables/weights that can be frozen during training.)
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
return backend.random.dropout(
inputs,
self.rate,
noise_shape=self.noise_shape,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
| keras-core/keras_core/layers/regularization/dropout.py/0 | {
"file_path": "keras-core/keras_core/layers/regularization/dropout.py",
"repo_id": "keras-core",
"token_count": 1184
} | 39 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import backend
from keras_core import layers
from keras_core import ops
from keras_core import testing
class FlattenTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
@pytest.mark.requires_trainable_backend
def test_flatten(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors.")
inputs = np.random.random((10, 3, 5, 5)).astype("float32")
# Make the ndarray relatively sparse
inputs = np.multiply(inputs, inputs >= 0.8)
expected_output_channels_last = ops.convert_to_tensor(
np.reshape(inputs, (-1, 5 * 5 * 3))
)
expected_output_channels_first = ops.convert_to_tensor(
np.reshape(np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
)
if sparse:
import tensorflow as tf
inputs = tf.sparse.from_dense(inputs)
expected_output_channels_last = tf.sparse.from_dense(
expected_output_channels_last
)
expected_output_channels_first = tf.sparse.from_dense(
expected_output_channels_first
)
# Test default data_format and channels_last
self.run_layer_test(
layers.Flatten,
init_kwargs={},
input_data=inputs,
input_sparse=True,
expected_output=expected_output_channels_last,
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Flatten,
init_kwargs={"data_format": "channels_last"},
input_data=inputs,
input_sparse=True,
expected_output=expected_output_channels_last,
expected_output_sparse=sparse,
run_training_check=not sparse,
)
# Test channels_first
self.run_layer_test(
layers.Flatten,
init_kwargs={"data_format": "channels_first"},
input_data=inputs,
input_sparse=True,
expected_output=expected_output_channels_first,
expected_output_sparse=sparse,
run_training_check=not sparse,
)
@pytest.mark.requires_trainable_backend
def test_flatten_with_scalar_channels(self):
inputs = np.random.random((10,)).astype("float32")
expected_output = ops.convert_to_tensor(np.expand_dims(inputs, -1))
# Test default data_format and channels_last
self.run_layer_test(
layers.Flatten,
init_kwargs={},
input_data=inputs,
expected_output=expected_output,
)
self.run_layer_test(
layers.Flatten,
init_kwargs={"data_format": "channels_last"},
input_data=inputs,
expected_output=expected_output,
)
# Test channels_first
self.run_layer_test(
layers.Flatten,
init_kwargs={"data_format": "channels_first"},
input_data=inputs,
expected_output=expected_output,
)
def test_flatten_with_dynamic_batch_size(self):
input_layer = layers.Input(batch_shape=(None, 2, 3))
flattened = layers.Flatten()(input_layer)
self.assertEqual(flattened.shape, (None, 2 * 3))
def test_flatten_with_dynamic_dimension(self):
input_layer = layers.Input(batch_shape=(5, 2, None))
flattened = layers.Flatten()(input_layer)
self.assertEqual(flattened.shape, (5, None))
| keras-core/keras_core/layers/reshaping/flatten_test.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/flatten_test.py",
"repo_id": "keras-core",
"token_count": 1833
} | 40 |
import numpy as np
from absl.testing import parameterized
from keras_core import layers
from keras_core import testing
class ZeroPadding2DTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("channels_first", "channels_first"), ("channels_last", "channels_last")
)
def test_zero_padding_2d(self, data_format):
inputs = np.random.rand(1, 2, 3, 4)
outputs = layers.ZeroPadding2D(
padding=((1, 2), (3, 4)), data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2, 3:-4], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :, :], 0.0)
for index in [0, 1, 2, -1, -2, -3, -4]:
self.assertAllClose(outputs[:, :, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, 3:-4, :], inputs)
@parameterized.product(
(
{"padding": ((2, 2), (2, 2))}, # 2 tuples
{"padding": (2, 2)}, # 1 tuple
{"padding": 2}, # 1 int
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_zero_padding_2d_with_same_padding(self, padding, data_format):
inputs = np.random.rand(1, 2, 3, 4)
outputs = layers.ZeroPadding2D(
padding=padding, data_format=data_format
)(inputs)
if data_format == "channels_first":
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, :, index, :], 0.0)
self.assertAllClose(outputs[:, :, :, index], 0.0)
self.assertAllClose(outputs[:, :, 2:-2, 2:-2], inputs)
else:
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :, :], 0.0)
self.assertAllClose(outputs[:, :, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, 2:-2, :], inputs)
def test_zero_padding_2d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, 2, None, 4))
padded = layers.ZeroPadding2D(((1, 2), (3, 4)))(input_layer)
self.assertEqual(padded.shape, (1, 5, None, 4))
def test_zero_padding_2d_errors_if_padding_argument_invalid(self):
with self.assertRaises(ValueError):
layers.ZeroPadding2D(padding=(1,))
with self.assertRaises(ValueError):
layers.ZeroPadding2D(padding=(1, 2, 3))
with self.assertRaises(ValueError):
layers.ZeroPadding2D(padding="1")
with self.assertRaises(ValueError):
layers.ZeroPadding2D(padding=((1, 2), (3, 4, 5)))
with self.assertRaises(ValueError):
layers.ZeroPadding2D(padding=((1, 2), (3, -4)))
with self.assertRaises(ValueError):
layers.ZeroPadding2D(padding=((1, 2), "3"))
| keras-core/keras_core/layers/reshaping/zero_padding2d_test.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/zero_padding2d_test.py",
"repo_id": "keras-core",
"token_count": 1565
} | 41 |
import tree
from keras_core import activations
from keras_core import backend
from keras_core import constraints
from keras_core import initializers
from keras_core import ops
from keras_core import regularizers
from keras_core.api_export import keras_core_export
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
from keras_core.layers.rnn.dropout_rnn_cell import DropoutRNNCell
from keras_core.layers.rnn.rnn import RNN
@keras_core_export("keras_core.layers.GRUCell")
class GRUCell(Layer, DropoutRNNCell):
"""Cell class for the GRU layer.
This class processes one step within the whole time sequence input, whereas
    `keras_core.layers.GRU` processes the whole sequence.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer
should use a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`"glorot_uniform"`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation
of the recurrent state. Default: `"orthogonal"`.
bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector.
Default: `None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and cuDNN compatible).
seed: Random seed for dropout.
Call arguments:
inputs: A 2D tensor, with shape `(batch, features)`.
states: A 2D tensor with shape `(batch, units)`, which is the state
from the previous time step.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
Example:
>>> inputs = np.random.random((32, 10, 8))
>>> rnn = keras_core.layers.RNN(keras_core.layers.GRUCell(4))
>>> output = rnn(inputs)
>>> output.shape
(32, 4)
>>> rnn = keras_core.layers.RNN(
... keras_core.layers.GRUCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_sequence_output, final_state = rnn(inputs)
>>> whole_sequence_output.shape
(32, 10, 4)
>>> final_state.shape
(32, 4)
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
reset_after=True,
seed=None,
**kwargs,
):
if units <= 0:
raise ValueError(
"Received an invalid value for argument `units`, "
f"expected a positive integer, got {units}."
)
implementation = kwargs.pop("implementation", 2)
super().__init__(**kwargs)
self.implementation = implementation
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1.0, max(0.0, dropout))
self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
self.seed = seed
self.seed_generator = backend.random.SeedGenerator(seed=seed)
self.reset_after = reset_after
self.state_size = self.units
self.output_size = self.units
def build(self, input_shape):
super().build(input_shape)
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name="kernel",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name="recurrent_kernel",
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
)
if self.use_bias:
if not self.reset_after:
bias_shape = (3 * self.units,)
else:
# separate biases for input and recurrent kernels
# Note: the shape is intentionally different from CuDNNGRU
# biases `(2 * 3 * self.units,)`, so that we can distinguish the
# classes when loading and converting saved weights.
bias_shape = (2, 3 * self.units)
self.bias = self.add_weight(
shape=bias_shape,
name="bias",
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=False):
h_tm1 = (
states[0] if tree.is_nested(states) else states
) # previous state
dp_mask = self.get_dropout_mask(inputs)
rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1)
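        # Gate equations (Cho et al., 2014), with `*` denoting the elementwise
        # product:
        #   z  = recurrent_activation(x @ W_z + h_tm1 @ U_z + b_z)  # update gate
        #   r  = recurrent_activation(x @ W_r + h_tm1 @ U_r + b_r)  # reset gate
        #   hh = activation(x @ W_h + (r * h_tm1) @ U_h + b_h)      # candidate
        #   h  = z * h_tm1 + (1 - z) * hh
        # With `reset_after=True`, the reset gate is applied after the
        # recurrent matmul instead:
        #   hh = activation(x @ W_h + r * (h_tm1 @ U_h + b_hh))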
if self.use_bias:
if not self.reset_after:
input_bias, recurrent_bias = self.bias, None
else:
input_bias, recurrent_bias = (
ops.squeeze(e, axis=0)
for e in ops.split(self.bias, self.bias.shape[0], axis=0)
)
if training and 0.0 < self.dropout < 1.0:
inputs *= dp_mask
if training and 0.0 < self.recurrent_dropout < 1.0:
h_tm1 *= rec_dp_mask
if self.implementation == 1:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = ops.matmul(inputs_z, self.kernel[:, : self.units])
x_r = ops.matmul(
inputs_r, self.kernel[:, self.units : self.units * 2]
)
x_h = ops.matmul(inputs_h, self.kernel[:, self.units * 2 :])
if self.use_bias:
x_z += input_bias[: self.units]
x_r += input_bias[self.units : self.units * 2]
x_h += input_bias[self.units * 2 :]
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
recurrent_z = ops.matmul(
h_tm1_z, self.recurrent_kernel[:, : self.units]
)
recurrent_r = ops.matmul(
h_tm1_r, self.recurrent_kernel[:, self.units : self.units * 2]
)
if self.reset_after and self.use_bias:
recurrent_z += recurrent_bias[: self.units]
recurrent_r += recurrent_bias[self.units : self.units * 2]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
# reset gate applied after/before matrix multiplication
if self.reset_after:
recurrent_h = ops.matmul(
h_tm1_h, self.recurrent_kernel[:, self.units * 2 :]
)
if self.use_bias:
recurrent_h += recurrent_bias[self.units * 2 :]
recurrent_h = r * recurrent_h
else:
recurrent_h = ops.matmul(
r * h_tm1_h, self.recurrent_kernel[:, self.units * 2 :]
)
hh = self.activation(x_h + recurrent_h)
else:
# inputs projected by all gate matrices at once
matrix_x = ops.matmul(inputs, self.kernel)
if self.use_bias:
# biases: bias_z_i, bias_r_i, bias_h_i
matrix_x += input_bias
x_z, x_r, x_h = ops.split(matrix_x, 3, axis=-1)
if self.reset_after:
# hidden state projected by all gate matrices at once
matrix_inner = ops.matmul(h_tm1, self.recurrent_kernel)
if self.use_bias:
matrix_inner += recurrent_bias
else:
# hidden state projected separately for update/reset and new
matrix_inner = ops.matmul(
h_tm1, self.recurrent_kernel[:, : 2 * self.units]
)
recurrent_z = matrix_inner[:, : self.units]
recurrent_r = matrix_inner[:, self.units : self.units * 2]
recurrent_h = matrix_inner[:, self.units * 2 :]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
if self.reset_after:
recurrent_h = r * recurrent_h
else:
recurrent_h = ops.matmul(
r * h_tm1, self.recurrent_kernel[:, 2 * self.units :]
)
hh = self.activation(x_h + recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
new_state = [h] if tree.is_nested(states) else h
return h, new_state
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
"reset_after": self.reset_after,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
def get_initial_state(self, batch_size=None):
return [
ops.zeros((batch_size, self.state_size), dtype=self.compute_dtype)
]
@keras_core_export("keras_core.layers.GRU")
class GRU(RNN):
"""Gated Recurrent Unit - Cho et al. 2014.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or backend-native)
to maximize the performance. If a GPU is available and all
the arguments to the layer meet the requirement of the cuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation
when using the TensorFlow backend.
The requirements to use the cuDNN implementation are:
1. `activation` == `tanh`
2. `recurrent_activation` == `sigmoid`
3. `dropout` == 0 and `recurrent_dropout` == 0
4. `unroll` is `False`
5. `use_bias` is `True`
6. `reset_after` is `True`
    7. Inputs, if masking is used, are strictly right-padded.
8. Eager execution is enabled in the outermost context.
There are two variants of the GRU implementation. The default one is based
on [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to
hidden state before matrix multiplication. The other one is based on
[original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.
The second variant is compatible with CuDNNGRU (GPU-only) and allows
inference on CPU. Thus it has separate biases for `kernel` and
`recurrent_kernel`. To use this variant, set `reset_after=True` and
`recurrent_activation='sigmoid'`.
For example:
>>> inputs = np.random.random((32, 10, 8))
>>> gru = keras_core.layers.GRU(4)
>>> output = gru(inputs)
>>> output.shape
(32, 4)
>>> gru = keras_core.layers.GRU(4, return_sequences=True, return_state=True)
>>> whole_sequence_output, final_state = gru(inputs)
>>> whole_sequence_output.shape
(32, 10, 4)
>>> final_state.shape
(32, 4)
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: sigmoid (`sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer
should use a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`"glorot_uniform"`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `"orthogonal"`.
bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector.
Default: `None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
seed: Random seed for dropout.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition
to the output. Default: `False`.
go_backwards: Boolean (default `False`).
If `True`, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default: `False`). If `True`, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default: `False`).
If `True`, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). `False` is `"before"`,
`True` is `"after"` (default and cuDNN compatible).
Call arguments:
inputs: A 3D tensor, with shape `(batch, timesteps, feature)`.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked (optional).
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the
corresponding timestep should be ignored. Defaults to `None`.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the
cell when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used (optional). Defaults to `None`.
initial_state: List of initial state tensors to be passed to the first
call of the cell (optional, `None` causes creation
of zero-filled initial state tensors). Defaults to `None`.
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
seed=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
reset_after=True,
**kwargs,
):
cell = GRUCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
reset_after=reset_after,
dtype=kwargs.get("dtype", None),
trainable=kwargs.get("trainable", True),
name="gru_cell",
seed=seed,
)
super().__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
activity_regularizer=activity_regularizer,
**kwargs,
)
self.input_spec = InputSpec(ndim=3)
if backend.backend() == "tensorflow" and backend.cudnn_ok(
cell.activation,
cell.recurrent_activation,
self.unroll,
cell.use_bias,
reset_after=reset_after,
):
self.supports_jit = False
def inner_loop(self, sequences, initial_state, mask, training=False):
if tree.is_nested(initial_state):
initial_state = initial_state[0]
if tree.is_nested(mask):
mask = mask[0]
if not self.dropout and not self.recurrent_dropout:
try:
# Backends are allowed to specify (optionally) optimized
# implementation of the inner GRU loop. In the case of
# TF for instance, it will leverage cuDNN when feasible, and
# it will raise NotImplementedError otherwise.
out = backend.gru(
sequences,
initial_state,
mask,
kernel=self.cell.kernel,
recurrent_kernel=self.cell.recurrent_kernel,
bias=self.cell.bias,
activation=self.cell.activation,
recurrent_activation=self.cell.recurrent_activation,
return_sequences=self.return_sequences,
go_backwards=self.go_backwards,
unroll=self.unroll,
reset_after=self.cell.reset_after,
)
# We disable jit_compile for the model in this case,
# since cuDNN ops aren't XLA compatible.
if backend.backend() == "tensorflow":
self.supports_jit = False
return out
except NotImplementedError:
pass
return super().inner_loop(
sequences, initial_state, mask=mask, training=training
)
def call(self, sequences, initial_state=None, mask=None, training=False):
return super().call(
sequences, mask=mask, training=training, initial_state=initial_state
)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def reset_after(self):
return self.cell.reset_after
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
"reset_after": self.reset_after,
"seed": self.cell.seed,
}
base_config = super().get_config()
del base_config["cell"]
return {**base_config, **config}
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-core/keras_core/layers/rnn/gru.py/0 | {
"file_path": "keras-core/keras_core/layers/rnn/gru.py",
"repo_id": "keras-core",
"token_count": 11954
} | 42 |
import numpy as np
from keras_core import testing
from keras_core.losses import losses
class MeanSquaredErrorTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(losses.MeanSquaredError(name="mymse"))
def test_all_correct_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[4, 8, 12], [8, 1, 3]])
loss = mse_obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 49.5)
def test_scalar_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 113.85)
def test_sample_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 767.8 / 6)
def test_timestep_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = mse_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 97.833336)
def test_zero_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0)
def test_no_reduction(self):
mse_obj = losses.MeanSquaredError(reduction=None)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, [84.3333, 143.3666])
def test_sum_reduction(self):
mse_obj = losses.MeanSquaredError(reduction="sum")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 227.69998)
class MeanAbsoluteErrorTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.MeanAbsoluteError(name="mymae")
)
def test_all_correct_unweighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[4, 8, 12], [8, 1, 3]])
loss = mae_obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 5.5)
def test_scalar_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 12.65)
def test_sample_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 81.4 / 6)
def test_timestep_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = mae_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 13.833333)
def test_zero_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0)
def test_no_reduction(self):
mae_obj = losses.MeanAbsoluteError(reduction=None)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, [10.7333, 14.5666])
def test_sum_reduction(self):
mae_obj = losses.MeanAbsoluteError(reduction="sum")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 25.29999)
class MeanAbsolutePercentageErrorTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.MeanAbsolutePercentageError(name="mymape")
)
def test_all_correct_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[4, 8, 12], [8, 1, 3]])
loss = mape_obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 211.8518, 3)
def test_scalar_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 487.259, 3)
def test_sample_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 422.8888, 3)
def test_timestep_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = mape_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 694.4444)
def test_zero_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0, 3)
def test_no_reduction(self):
mape_obj = losses.MeanAbsolutePercentageError(reduction=None)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, [621.8518, 352.6666])
class MeanSquaredLogarithmicErrorTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.MeanSquaredLogarithmicError(name="mysloge")
)
def test_unweighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = msle_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.4370, 3)
def test_scalar_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = msle_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 3.3051, 3)
def test_sample_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 3.7856, 3)
def test_timestep_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = msle_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 2.647374)
def test_zero_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = msle_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0, 3)
class HingeTest(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
# Reduction = "sum_over_batch_size"
hinge_obj = losses.Hinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.3, 3)
# Reduction = "sum"
hinge_obj = losses.Hinge(reduction="sum")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 2.6, 3)
# Reduction = None
hinge_obj = losses.Hinge(reduction=None)
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(loss, [1.1, 1.5])
# Bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
losses.Hinge(reduction="abc")
def test_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = [1, 0]
# Reduction = "sum_over_batch_size"
hinge_obj = losses.Hinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.55, 3)
# Reduction = "sum"
hinge_obj = losses.Hinge(reduction="sum")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.1, 3)
# Reduction = None
hinge_obj = losses.Hinge(reduction=None)
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, [1.1, 0.0])
def test_zero_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = 0.0
hinge_obj = losses.Hinge()
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(loss, 0.0)
class SquaredHingeTest(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
# Reduction = "sum_over_batch_size"
hinge_obj = losses.SquaredHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.86, 3)
# Reduction = "sum"
hinge_obj = losses.SquaredHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 3.72, 3)
# Reduction = None
hinge_obj = losses.SquaredHinge(reduction=None)
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(loss, [1.46, 2.26])
# Bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
losses.SquaredHinge(reduction="abc")
def test_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = [1, 0]
# Reduction = "sum_over_batch_size"
hinge_obj = losses.SquaredHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.73, 3)
# Reduction = "sum"
hinge_obj = losses.SquaredHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.46, 3)
# Reduction = None
hinge_obj = losses.SquaredHinge(reduction=None)
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, [1.46, 0.0])
def test_zero_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = 0.0
hinge_obj = losses.SquaredHinge()
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(loss, 0.0)
class CategoricalHingeTest(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
# Reduction = "sum_over_batch_size"
hinge_obj = losses.CategoricalHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.4, 3)
# Reduction = "sum"
hinge_obj = losses.CategoricalHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 2.8, 3)
# Reduction = None
hinge_obj = losses.CategoricalHinge(reduction=None)
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(loss, [1.2, 1.6])
# Bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
losses.CategoricalHinge(reduction="abc")
def test_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = [1, 0]
# Reduction = "sum_over_batch_size"
hinge_obj = losses.CategoricalHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.6, 3)
# Reduction = "sum"
hinge_obj = losses.CategoricalHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.2, 3)
# Reduction = None
hinge_obj = losses.CategoricalHinge(reduction=None)
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, [1.2, 0.0])
def test_zero_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = 0.0
hinge_obj = losses.CategoricalHinge()
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(loss, 0.0)
class CosineSimilarityTest(testing.TestCase):
def l2_norm(self, x, axis):
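        # L2-normalize `x` along `axis`; the squared sum is clamped at a small
        # epsilon so the division never hits zero.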
epsilon = 1e-12
square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
return np.multiply(x, x_inv_norm)
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
self.y_true = self.np_y_true
self.y_pred = self.np_y_pred
def test_config(self):
cosine_obj = losses.CosineSimilarity(
axis=2, reduction="sum", name="cosine_loss"
)
self.assertEqual(cosine_obj.name, "cosine_loss")
self.assertEqual(cosine_obj.reduction, "sum")
def test_unweighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = -np.mean(self.expected_loss)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
sample_weight = 2.3
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = -np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_sample_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = -np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
np_y_true = self.np_y_true.reshape((2, 3, 1))
np_y_pred = self.np_y_pred.reshape((2, 3, 1))
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape((2, 3))
y_true = self.l2_norm(np_y_true, 2)
y_pred = self.l2_norm(np_y_pred, 2)
expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(2,))
y_true = np_y_true
y_pred = np_y_pred
loss = cosine_obj(y_true, y_pred, sample_weight=sample_weight)
expected_loss = -np.mean(expected_loss * sample_weight)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_zero_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0, 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = losses.CosineSimilarity(axis=1)
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = -np.mean(self.expected_loss)
self.assertAlmostEqual(loss, expected_loss, 3)
class HuberLossTest(testing.TestCase):
def huber_loss(self, y_true, y_pred, delta=1.0):
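        # Piecewise Huber loss: 0.5 * error**2 when |error| <= delta,
        # delta * (|error| - 0.5 * delta) otherwise.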
error = y_pred - y_true
abs_error = np.abs(error)
quadratic = np.minimum(abs_error, delta)
linear = np.subtract(abs_error, quadratic)
return np.add(
np.multiply(0.5, np.multiply(quadratic, quadratic)),
np.multiply(delta, linear),
)
def setup(self, delta=1.0):
self.np_y_pred = np.array([[0.9, 0.2, 0.2], [0.8, 0.4, 0.6]])
self.np_y_true = np.array([[1.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
self.batch_size = 6
self.expected_losses = self.huber_loss(
self.np_y_true, self.np_y_pred, delta
)
self.y_pred = self.np_y_pred
self.y_true = self.np_y_true
def test_config(self):
h_obj = losses.Huber(reduction="sum", name="huber")
self.assertEqual(h_obj.name, "huber")
self.assertEqual(h_obj.reduction, "sum")
def test_all_correct(self):
self.setup()
h_obj = losses.Huber()
loss = h_obj(self.y_true, self.y_true)
self.assertAlmostEqual(loss, 0.0, 3)
def test_unweighted(self):
self.setup()
h_obj = losses.Huber()
loss = h_obj(self.y_true, self.y_pred)
actual_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(loss, actual_loss, 3)
def test_scalar_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, actual_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, loss_2, 3)
def test_sample_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = np.array([[1.2], [3.4]])
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
)
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(loss, actual_loss, 3)
def test_timestep_weighted(self):
self.setup()
h_obj = losses.Huber()
y_pred = self.np_y_pred.reshape((2, 3, 1))
y_true = self.np_y_true.reshape((2, 3, 1))
expected_losses = self.huber_loss(y_true, y_pred)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
loss = h_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
actual_loss = np.multiply(expected_losses, sample_weight)
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(loss, actual_loss, 3)
def test_zero_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = 0
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.0, 3)
def test_non_default_delta(self):
self.setup(delta=0.8)
h_obj = losses.Huber(delta=0.8)
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, actual_loss, 3)
def test_loss_with_non_default_dtype(self):
# Test case for GitHub issue:
# https://github.com/tensorflow/tensorflow/issues/39004
# TODO
pass
class LogCoshTest(testing.TestCase):
def setup(self):
y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
self.batch_size = 6
error = y_pred - y_true
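        # log(cosh(error)) expanded as log((e**x + e**-x) / 2); adequate for
        # the small error values used in these tests.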
self.expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_true = y_true
self.y_pred = y_pred
def test_config(self):
logcosh_obj = losses.LogCosh(reduction="sum", name="logcosh_loss")
self.assertEqual(logcosh_obj.name, "logcosh_loss")
self.assertEqual(logcosh_obj.reduction, "sum")
def test_unweighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
loss = logcosh_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 2.3
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(loss, loss_2, 3)
def test_sample_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = np.asarray([1.2, 3.4])
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
)
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
error = y_pred - y_true
expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
loss = logcosh_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
expected_loss = (
np.sum(expected_losses * sample_weight) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_zero_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 0
loss = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(loss, 0.0, 3)
class KLDivergenceTest(testing.TestCase):
def setup(self):
self.y_pred = np.asarray(
[0.4, 0.9, 0.12, 0.36, 0.3, 0.4], dtype=np.float32
).reshape((2, 3))
self.y_true = np.asarray(
[0.5, 0.8, 0.12, 0.7, 0.43, 0.8], dtype=np.float32
).reshape((2, 3))
self.batch_size = 2
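        # Elementwise KL term: y_true * log(y_true / y_pred).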
self.expected_losses = np.multiply(
self.y_true, np.log(self.y_true / self.y_pred)
)
def test_config(self):
k_obj = losses.KLDivergence(reduction="sum", name="kld")
self.assertEqual(k_obj.name, "kld")
self.assertEqual(k_obj.reduction, "sum")
def test_unweighted(self):
self.setup()
k_obj = losses.KLDivergence()
loss = k_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
sample_weight = 2.3
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, loss_2, 3)
def test_sample_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
sample_weight = np.asarray([1.2, 3.4], dtype=np.float32).reshape((2, 1))
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.multiply(
self.expected_losses,
np.asarray(
[1.2, 1.2, 1.2, 3.4, 3.4, 3.4], dtype=np.float32
).reshape(2, 3),
)
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
y_true = self.y_true.reshape(2, 3, 1)
y_pred = self.y_pred.reshape(2, 3, 1)
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3)
expected_losses = np.sum(
np.multiply(y_true, np.log(y_true / y_pred)), axis=-1
)
loss = k_obj(y_true, y_pred, sample_weight=sample_weight)
num_timesteps = 3
expected_loss = np.sum(expected_losses * sample_weight) / (
self.batch_size * num_timesteps
)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_zero_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
loss = k_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0, 3)
class PoissonTest(testing.TestCase):
def setup(self):
self.y_pred = np.asarray([1, 9, 2, 5, 2, 6], dtype=np.float32).reshape(
(2, 3)
)
self.y_true = np.asarray([4, 8, 12, 8, 1, 3], dtype=np.float32).reshape(
(2, 3)
)
self.batch_size = 6
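        # Poisson loss per element: y_pred - y_true * log(y_pred); the
        # log(y_true!) term is independent of y_pred and is omitted.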
self.expected_losses = self.y_pred - np.multiply(
self.y_true, np.log(self.y_pred)
)
def test_config(self):
poisson_obj = losses.Poisson(reduction="sum", name="poisson")
self.assertEqual(poisson_obj.name, "poisson")
self.assertEqual(poisson_obj.reduction, "sum")
def test_unweighted(self):
self.setup()
poisson_obj = losses.Poisson()
loss = poisson_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
sample_weight = 2.3
loss = poisson_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = (
sample_weight * np.sum(self.expected_losses) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = poisson_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(loss, loss_2, 3)
def test_sample_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
sample_weight = np.asarray([1.2, 3.4]).reshape((2, 1))
loss = poisson_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
)
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(loss, expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
y_true = self.y_true.reshape(2, 3, 1)
y_pred = self.y_pred.reshape(2, 3, 1)
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3, 1)
expected_losses = y_pred - np.multiply(y_true, np.log(y_pred))
loss = poisson_obj(
y_true,
y_pred,
sample_weight=np.asarray(sample_weight).reshape((2, 3)),
)
expected_loss = (
np.sum(expected_losses * sample_weight) / self.batch_size
)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_zero_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
loss = poisson_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0, 3)
class BinaryCrossentropyTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.BinaryCrossentropy(name="bce", axis=-1)
)
def test_all_correct_unweighted(self):
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype="float32")
bce_obj = losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0)
# Test with logits.
logits = np.array(
[
[10.0, -10.0, -10.0],
[-10.0, 10.0, -10.0],
[-10.0, -10.0, 10.0],
]
)
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype="float32")
y_pred = np.array(
[[0.9, 0.1, 0.2], [0.3, 0.8, 0.1], [0.1, 0.2, 0.7]], dtype="float32"
)
bce_obj = losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_pred)
self.assertAllClose(loss, 0.20046903)
y_true = np.array([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.array([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
bce_obj = losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 3.98559)
# Test with logits.
y_true = np.array([[1, 0, 1], [0, 1, 1]])
logits = np.array([[10.0, -10.0, 10.0], [10.0, 10.0, -10.0]])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
self.assertAlmostEqual(loss, 3.3333)
def test_scalar_weighted(self):
bce_obj = losses.BinaryCrossentropy()
y_true = np.array([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.array([1, 1, 1, 0], dtype="float32").reshape([2, 2])
loss = bce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 9.1668)
# Test with logits.
y_true = np.array([[1, 0, 1], [0, 1, 1]])
logits = np.array([[10.0, -10.0, 10.0], [10.0, 10.0, -10.0]])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(loss, 7.666)
def test_sample_weighted(self):
bce_obj = losses.BinaryCrossentropy()
y_true = np.array([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.array([1, 1, 1, 0], dtype="float32").reshape([2, 2])
sample_weight = np.array([1.2, 3.4]).reshape((2, 1))
loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 4.7827)
# Test with logits.
y_true = np.array([[1, 0, 1], [0, 1, 1]])
logits = np.array([[10.0, -10.0, 10.0], [10.0, 10.0, -10.0]])
weights = np.array([4, 3])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=weights)
self.assertAlmostEqual(loss, 10.0)
def test_no_reduction(self):
y_true = np.array([[1, 0, 1], [0, 1, 1]])
logits = np.array([[10.0, -10.0, 10.0], [10.0, 10.0, -10.0]])
bce_obj = losses.BinaryCrossentropy(from_logits=True, reduction=None)
loss = bce_obj(y_true, logits)
self.assertAllClose(loss, [0.0, 6.666], atol=1e-3)
def test_label_smoothing(self):
logits = np.array([[10.0, -10.0, -10.0]])
y_true = np.array([[1, 0, 1]])
label_smoothing = 0.1
bce_obj = losses.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = bce_obj(y_true, logits)
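        # Smoothed targets are y * (1 - ls) + 0.5 * ls; with these saturated
        # logits the per-element losses are ~[5*ls, 5*ls, 10 - 5*ls], giving a
        # mean of (10 + 5 * ls) / 3.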
expected_value = (10.0 + 5.0 * label_smoothing) / 3.0
self.assertAlmostEqual(loss, expected_value)
def test_shape_mismatch(self):
y_true = np.array([[0], [1], [2]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]]
)
cce_obj = losses.BinaryCrossentropy()
with self.assertRaisesRegex(ValueError, "must have the same shape"):
cce_obj(y_true, y_pred)
class CategoricalCrossentropyTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.CategoricalCrossentropy(name="cce", axis=-1)
)
def test_all_correct_unweighted(self):
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype="int64")
y_pred = np.array(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
dtype="float32",
)
cce_obj = losses.CategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.0)
# Test with logits.
logits = np.array(
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]
)
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.3239)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0573)
def test_scalar_weighted(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.7449)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.1317)
def test_sample_weighted(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
sample_weight = np.array([[1.2], [3.4], [5.6]]).reshape((3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.0696)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.31829)
def test_no_reduction(self):
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalCrossentropy(
from_logits=True, reduction=None
)
loss = cce_obj(y_true, logits)
        self.assertAllClose((0.001822, 0.000459, 0.169846), loss, atol=1e-3)
def test_label_smoothing(self):
logits = np.array([[100.0, -100.0, -100.0]])
y_true = np.array([[1, 0, 0]])
label_smoothing = 0.1
cce_obj = losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
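        # Each wrong class gets label mass 0.1 / 3 after smoothing, and its
        # log-probability saturates near -200, so the loss is approximately
        # 2 * 200 * (0.1 / 3) = 400 * 0.1 / 3.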
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(loss, expected_value)
def test_label_smoothing_ndarray(self):
logits = np.asarray([[100.0, -100.0, -100.0]])
y_true = np.asarray([[1, 0, 0]])
label_smoothing = 0.1
cce_obj = losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(loss, expected_value)
def test_shape_mismatch(self):
y_true = np.array([[0], [1], [2]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]]
)
cce_obj = losses.CategoricalCrossentropy()
with self.assertRaisesRegex(ValueError, "must have the same shape"):
cce_obj(y_true, y_pred)
class SparseCategoricalCrossentropyTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.SparseCategoricalCrossentropy(name="scce")
)
def test_all_correct_unweighted(self):
y_true = np.array([[0], [1], [2]], dtype="int64")
y_pred = np.array(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
dtype="float32",
)
cce_obj = losses.SparseCategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.0, 3)
# Test with logits.
logits = np.array(
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]
)
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0, 3)
def test_unweighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = np.array([0, 1, 2])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.3239, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0573, 3)
def test_scalar_weighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = np.array([[0], [1], [2]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.7449, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.1317, 3)
def test_sample_weighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = np.array([[0], [1], [2]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
sample_weight = np.array([[1.2], [3.4], [5.6]]).reshape((3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.0696, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.31829, 3)
def test_no_reduction(self):
y_true = np.array([[0], [1], [2]])
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=None
)
loss = cce_obj(y_true, logits)
        self.assertAllClose((0.001822, 0.000459, 0.169846), loss, atol=1e-3)
def test_ignore_class(self):
y_true = np.array([[-1, 2]])
logits = np.array([[[0.854, 0.698, 0.598], [0.088, 0.86, 0.018]]])
cce_obj = losses.SparseCategoricalCrossentropy(
from_logits=True, ignore_class=-1, reduction=None
)
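        # The first position's label is -1 (== ignore_class), so it is masked
        # out and contributes a loss of exactly 0.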
loss = cce_obj(y_true, logits)
        self.assertAllClose([[0.0, 1.48012]], loss, atol=1e-3)
class BinaryFocalCrossentropyTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.BinaryFocalCrossentropy(name="bfce")
)
def test_all_correct_unweighted(self):
y_true = np.array(
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
],
dtype="float32",
)
obj = losses.BinaryFocalCrossentropy(gamma=1.5)
loss = obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0, 3)
# Test with logits.
logits = np.array(
[
[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0],
]
)
obj = losses.BinaryFocalCrossentropy(gamma=2.0, from_logits=True)
loss = obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0, 3)
def test_unweighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
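        # Focal BCE scales each element's cross-entropy by (1 - p_t) ** gamma,
        # so confident correct predictions contribute very little: the two
        # per-sample means are ~0.5155 and ~0.0205, averaging to ~0.268.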
obj = losses.BinaryFocalCrossentropy(gamma=2.0)
loss = obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.268, 3)
# Test with logits.
y_true = np.array([[1, 1, 0], [0, 1, 0]], dtype="float32")
logits = np.array([[1.5, -2.7, 2.9], [-3.8, 1.2, -4.5]])
obj = losses.BinaryFocalCrossentropy(gamma=3.0, from_logits=True)
loss = obj(y_true, logits)
self.assertAlmostEqual(loss, 0.799, 3)
def test_scalar_weighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
obj = losses.BinaryFocalCrossentropy(gamma=2.0)
loss = obj(y_true, y_pred, sample_weight=1.23)
self.assertAlmostEqual(loss, 0.3296, 3)
# Test with logits.
y_true = np.array([[1, 1, 0], [0, 1, 0]], dtype="float32")
logits = np.array([[1.5, -2.7, 2.9], [-3.8, 1.2, -4.5]])
obj = losses.BinaryFocalCrossentropy(gamma=3.0, from_logits=True)
loss = obj(y_true, logits, sample_weight=3.21)
self.assertAlmostEqual(loss, 2.565, 3)
def test_sample_weighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
sample_weight = np.array([1.2, 3.4]).reshape((2, 1))
obj = losses.BinaryFocalCrossentropy(gamma=2.0)
loss = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.34415, 3)
# Test with logits.
y_true = np.array([[1, 1, 0], [0, 1, 0]], dtype="float32")
logits = np.array([[1.5, -2.7, 2.9], [-3.8, 1.2, -4.5]])
obj = losses.BinaryFocalCrossentropy(gamma=3.0, from_logits=True)
loss = obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.95977, 3)
def test_no_reduction(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([0.9, 0.8, 0.7, 0.2], dtype=np.float32).reshape(
[2, 2]
)
obj = losses.BinaryFocalCrossentropy(
gamma=2.0,
reduction=None,
)
loss = obj(y_true, y_pred)
        self.assertAllClose(loss, (0.5155, 0.0205), atol=1e-3)
class CategoricalFocalCrossentropyTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.CategoricalFocalCrossentropy(name="cfce")
)
def test_all_correct_unweighted(self):
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype="int64")
y_pred = np.array(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
dtype="float32",
)
cce_obj = losses.CategoricalFocalCrossentropy(alpha=0.25, gamma=2.0)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.0, 3)
# Test with logits.
logits = np.array(
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]
)
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0, 3)
def test_unweighted(self):
cce_obj = losses.CategoricalFocalCrossentropy()
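        # With the default alpha=0.25 and gamma=2.0, each sample's
        # cross-entropy is scaled by alpha * (1 - p_t) ** gamma, which is why
        # these values are much smaller than the plain CategoricalCrossentropy
        # results above.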
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.02059, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.000345, 3)
def test_scalar_weighted(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.047368, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.000794, 4)
def test_sample_weighted(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
sample_weight = np.array([[1.2], [3.4], [5.6]]).reshape((3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.06987, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.001933, 3)
def test_no_reduction(self):
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalFocalCrossentropy(
from_logits=True, reduction=None
)
loss = cce_obj(y_true, logits)
self.assertAllClose(
(1.5096224e-09, 2.4136547e-11, 1.0360638e-03),
loss,
            atol=1e-3,
)
def test_label_smoothing(self):
logits = np.array([[4.9, -0.5, 2.05]])
y_true = np.array([[1, 0, 0]])
label_smoothing = 0.1
cce_obj = losses.CategoricalFocalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
expected_value = 0.06685
self.assertAlmostEqual(loss, expected_value, 3)
| keras-core/keras_core/losses/losses_test.py/0 | {
"file_path": "keras-core/keras_core/losses/losses_test.py",
"repo_id": "keras-core",
"token_count": 26577
} | 43 |
import numpy as np
from keras_core import metrics
from keras_core import testing
class KLDivergenceTest(testing.TestCase):
def setup(self):
self.y_pred = np.asarray(
[0.4, 0.9, 0.12, 0.36, 0.3, 0.4], dtype=np.float32
).reshape((2, 3))
self.y_true = np.asarray(
[0.5, 0.8, 0.12, 0.7, 0.43, 0.8], dtype=np.float32
).reshape((2, 3))
self.batch_size = 2
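        # Elementwise KL term: y_true * log(y_true / y_pred). The metric
        # averages the per-sample sums of these terms over the batch.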
self.expected_results = np.multiply(
self.y_true, np.log(self.y_true / self.y_pred)
)
def test_config(self):
k_obj = metrics.KLDivergence(name="kld", dtype="int32")
self.assertEqual(k_obj.name, "kld")
self.assertEqual(k_obj._dtype, "int32")
k_obj2 = metrics.KLDivergence.from_config(k_obj.get_config())
self.assertEqual(k_obj2.name, "kld")
self.assertEqual(k_obj2._dtype, "int32")
def test_unweighted(self):
self.setup()
k_obj = metrics.KLDivergence()
k_obj.update_state(self.y_true, self.y_pred)
result = k_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
k_obj = metrics.KLDivergence()
sample_weight = np.asarray([1.2, 3.4], dtype=np.float32).reshape((2, 1))
result = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray(
[1.2, 1.2, 1.2, 3.4, 3.4, 3.4], dtype=np.float32
).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / (1.2 + 3.4)
self.assertAllClose(result, expected_result, atol=1e-3)
class PoissonTest(testing.TestCase):
def setup(self):
self.y_pred = np.asarray([1, 9, 2, 5, 2, 6], dtype=np.float32).reshape(
(2, 3)
)
self.y_true = np.asarray([4, 8, 12, 8, 1, 3], dtype=np.float32).reshape(
(2, 3)
)
self.batch_size = 6
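        # Per-element Poisson loss: y_pred - y_true * log(y_pred). The metric
        # works out to the mean of these terms over all elements.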
self.expected_results = self.y_pred - np.multiply(
self.y_true, np.log(self.y_pred)
)
def test_config(self):
self.run_class_serialization_test(metrics.Poisson(name="poisson"))
def test_unweighted(self):
self.setup()
poisson_obj = metrics.Poisson()
poisson_obj.update_state(self.y_true, self.y_pred)
result = poisson_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
poisson_obj = metrics.Poisson()
sample_weight = np.asarray([1.2, 3.4], dtype=np.float32).reshape((2, 1))
result = poisson_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
sample_weight = np.asarray(
[1.2, 1.2, 1.2, 3.4, 3.4, 3.4], dtype=np.float32
).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
class BinaryCrossentropyTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
metrics.BinaryCrossentropy(
name="bce", dtype="int32", label_smoothing=0.2
)
)
def test_unweighted(self):
bce_obj = metrics.BinaryCrossentropy()
y_true = np.array([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.array([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
result = bce_obj(y_true, y_pred)
self.assertAllClose(result, 3.9855, atol=1e-3)
def test_unweighted_with_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
y_true = np.array([[1, 0, 1], [0, 1, 1]])
y_pred = np.array([[10.0, -10.0, 10.0], [10.0, 10.0, -10.0]])
result = bce_obj(y_true, y_pred)
self.assertAllClose(result, 3.333, atol=1e-3)
def test_weighted(self):
bce_obj = metrics.BinaryCrossentropy()
y_true = np.array([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.array([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
sample_weight = np.array([1.5, 2.0])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, 3.4162, atol=1e-3)
def test_weighted_from_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
y_true = np.array([[1, 0, 1], [0, 1, 1]])
y_pred = np.array([[10.0, -10.0, 10.0], [10.0, 10.0, -10.0]])
sample_weight = np.array([2.0, 2.5])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, 3.7037, atol=1e-3)
def test_label_smoothing(self):
        logits = np.array([10.0, -10.0, -10.0])
        y_true = np.array([1, 0, 1])
label_smoothing = 0.1
bce_obj = metrics.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
result = bce_obj(y_true, logits)
expected_value = (10.0 + 5.0 * label_smoothing) / 3.0
self.assertAllClose(expected_value, result, atol=1e-3)
class CategoricalCrossentropyTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
metrics.CategoricalCrossentropy(
name="cce", dtype="int32", label_smoothing=0.2
)
)
def test_unweighted(self):
cce_obj = metrics.CategoricalCrossentropy()
y_true = np.array([[0, 1, 0], [0, 0, 1]])
y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
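        # Per-sample CE: -log(0.95) ~= 0.051 and -log(0.1) ~= 2.303, whose
        # mean is ~1.177.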
result = cce_obj(y_true, y_pred)
self.assertAllClose(result, 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
y_true = np.array([[0, 1, 0], [0, 0, 1]])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = cce_obj(y_true, logits)
self.assertAllClose(result, 3.5011, atol=1e-3)
def test_weighted(self):
cce_obj = metrics.CategoricalCrossentropy()
y_true = np.array([[0, 1, 0], [0, 0, 1]])
y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = np.array([1.5, 2.0])
result = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, 1.338, atol=1e-3)
def test_weighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
y_true = np.array([[0, 1, 0], [0, 0, 1]])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = np.array([1.5, 2.0])
result = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAllClose(result, 4.0012, atol=1e-3)
def test_label_smoothing(self):
y_true = np.array([[0, 1, 0], [0, 0, 1]])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
label_smoothing = 0.1
cce_obj = metrics.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
self.assertAllClose(loss, 3.667, atol=1e-3)
class SparseCategoricalCrossentropyTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
metrics.SparseCategoricalCrossentropy(name="scce", dtype="int32")
)
def test_unweighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
y_true = np.array([1, 2])
y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = scce_obj(y_true, y_pred)
self.assertAllClose(result, 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
y_true = np.array([1, 2])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = scce_obj(y_true, logits)
self.assertAllClose(result, 3.5011, atol=1e-3)
def test_weighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
y_true = np.array([1, 2])
y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = np.array([1.5, 2.0])
result = scce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, 1.338, atol=1e-3)
def test_weighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
y_true = np.array([1, 2])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = np.array([1.5, 2.0])
result = scce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAllClose(result, 4.0012, atol=1e-3)
| keras-core/keras_core/metrics/probabilistic_metrics_test.py/0 | {
"file_path": "keras-core/keras_core/metrics/probabilistic_metrics_test.py",
"repo_id": "keras-core",
"token_count": 4484
} | 44 |
from keras_core.layers.layer import Layer
from keras_core.metrics.metric import Metric
from keras_core.optimizers.optimizer import Optimizer
from keras_core.saving import saving_lib
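# Builds a flat {variable.path: variable} map by collecting a trackable's own
# variables (layers, optimizers, metrics) and, recursively, those of any
# nested trackables or containers, skipping objects already visited.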
def map_trackable_variables(trackable, store, visited_trackables):
    # If the trackable has already been visited, skip it.
if id(trackable) in visited_trackables:
return
visited_trackables.add(id(trackable))
variables = []
if isinstance(trackable, Layer):
variables = (
trackable._trainable_variables + trackable._non_trainable_variables
)
elif isinstance(trackable, Optimizer):
variables = trackable._variables
elif isinstance(trackable, Metric):
variables = trackable._variables
for v in variables:
if v.path in store:
raise ValueError(
"The model contains two variables with a duplicate path: "
f"path='{v.path}' appears at least twice. "
f"This path is used for {v} and for {store[v.path]}. "
"In order to get a variable map, make sure to use "
"unique paths/names for each variable."
)
store[v.path] = v
    # Recursively map variables of child trackables (layers, optimizers, etc.)
for child_attr, child_obj in saving_lib._walk_trackable(trackable):
if saving_lib._is_keras_trackable(child_obj):
map_trackable_variables(
child_obj,
store,
visited_trackables=visited_trackables,
)
elif isinstance(child_obj, (list, dict, tuple, set)):
map_container_variables(
child_obj,
store,
visited_trackables=visited_trackables,
)
def map_container_variables(container, store, visited_trackables):
if isinstance(container, dict):
container = list(container.values())
for trackable in container:
if saving_lib._is_keras_trackable(trackable):
map_trackable_variables(
trackable,
store,
visited_trackables=visited_trackables,
)
| keras-core/keras_core/models/variable_mapping.py/0 | {
"file_path": "keras-core/keras_core/models/variable_mapping.py",
"repo_id": "keras-core",
"token_count": 959
} | 45 |
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow.python.ops.numpy_ops import np_config
from keras_core import backend
from keras_core import testing
from keras_core.backend.common import standardize_dtype
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.ops import numpy as knp
# TODO: remove reliance on this (or alternatively, turn it on by default).
np_config.enable_numpy_behavior()
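# enable_numpy_behavior() makes tf.Tensor support NumPy-style methods,
# indexing and type promotion, which several assertions below rely on.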
class NumpyTwoInputOpsDynamicShapeTest(testing.TestCase):
def test_add(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.add(x, y).shape, (2, 3))
def test_subtract(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.subtract(x, y).shape, (2, 3))
def test_multiply(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.multiply(x, y).shape, (2, 3))
def test_matmul(self):
x = KerasTensor([None, 3, 4])
y = KerasTensor([3, None, 4, 5])
self.assertEqual(knp.matmul(x, y).shape, (3, None, 3, 5))
def test_power(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.power(x, y).shape, (2, 3))
def test_divide(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.divide(x, y).shape, (2, 3))
def test_true_divide(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.true_divide(x, y).shape, (2, 3))
def test_append(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.append(x, y).shape, (None,))
def test_arctan2(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.arctan2(x, y).shape, (2, 3))
def test_cross(self):
x1 = KerasTensor([2, 3, 3])
x2 = KerasTensor([1, 3, 2])
y = KerasTensor([None, 1, 2])
self.assertEqual(knp.cross(x1, y).shape, (2, 3, 3))
self.assertEqual(knp.cross(x2, y).shape, (None, 3))
def test_einsum(self):
x = KerasTensor([None, 3])
y = KerasTensor([3, 4])
self.assertEqual(knp.einsum("ij,jk->ik", x, y).shape, (None, 4))
self.assertEqual(knp.einsum("ij,jk->ikj", x, y).shape, (None, 4, 3))
self.assertEqual(knp.einsum("ii", x).shape, ())
self.assertEqual(knp.einsum(",ij", 5, x).shape, (None, 3))
x = KerasTensor([None, 3, 4])
y = KerasTensor([None, 4, 5])
z = KerasTensor([1, 1, 1, 9])
self.assertEqual(knp.einsum("ijk,jkl->li", x, y).shape, (5, None))
self.assertEqual(knp.einsum("ijk,jkl->lij", x, y).shape, (5, None, 3))
self.assertEqual(
knp.einsum("...,...j->...j", x, y).shape, (None, 3, 4, 5)
)
self.assertEqual(
knp.einsum("i...,...j->i...j", x, y).shape, (None, 3, 4, 5)
)
self.assertEqual(knp.einsum("i...,...j", x, y).shape, (3, 4, None, 5))
self.assertEqual(
knp.einsum("i...,...j,...k", x, y, z).shape, (1, 3, 4, None, 5, 9)
)
self.assertEqual(
knp.einsum("mij,ijk,...", x, y, z).shape, (1, 1, 1, 9, 5, None)
)
with self.assertRaises(ValueError):
x = KerasTensor([None, 3])
y = KerasTensor([3, 4])
knp.einsum("ijk,jk->ik", x, y)
def test_full_like(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.full_like(x, KerasTensor([1, 3])).shape, (None, 3))
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.full_like(x, 2).shape, (None, 3, 3))
def test_greater(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.greater(x, y).shape, (2, 3))
def test_greater_equal(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.greater_equal(x, y).shape, (2, 3))
def test_isclose(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.isclose(x, y).shape, (2, 3))
def test_less(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.less(x, y).shape, (2, 3))
def test_less_equal(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.less_equal(x, y).shape, (2, 3))
def test_linspace(self):
start = KerasTensor([None, 3, 4])
stop = KerasTensor([2, 3, 4])
self.assertEqual(
knp.linspace(start, stop, 10, axis=1).shape, (2, 10, 3, 4)
)
start = KerasTensor([None, 3])
stop = 2
self.assertEqual(
knp.linspace(start, stop, 10, axis=1).shape, (None, 10, 3)
)
def test_logical_and(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.logical_and(x, y).shape, (2, 3))
def test_logical_or(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.logical_or(x, y).shape, (2, 3))
def test_logspace(self):
start = KerasTensor([None, 3, 4])
stop = KerasTensor([2, 3, 4])
self.assertEqual(
knp.logspace(start, stop, 10, axis=1).shape, (2, 10, 3, 4)
)
start = KerasTensor([None, 3])
stop = 2
self.assertEqual(
knp.logspace(start, stop, 10, axis=1).shape, (None, 10, 3)
)
def test_maximum(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.maximum(x, y).shape, (2, 3))
def test_minimum(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.minimum(x, y).shape, (2, 3))
def test_mod(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.mod(x, y).shape, (2, 3))
def test_not_equal(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.not_equal(x, y).shape, (2, 3))
def test_outer(self):
x = KerasTensor([None, 3])
y = KerasTensor([2, None])
self.assertEqual(knp.outer(x, y).shape, (None, None))
def test_take(self):
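        # `take` flattens the input when axis=None; otherwise the indexed axis
        # is replaced by the shape of `indices` (and dropped for a scalar).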
x = KerasTensor([None, 3])
self.assertEqual(knp.take(x, 1).shape, ())
self.assertEqual(knp.take(x, [1, 2]).shape, (2,))
self.assertEqual(
knp.take(x, [[1, 2], [1, 2]], axis=1).shape, (None, 2, 2)
)
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.take(x, 1, axis=1).shape, (None, 3))
self.assertEqual(knp.take(x, [1, 2]).shape, (2,))
self.assertEqual(
knp.take(x, [[1, 2], [1, 2]], axis=1).shape, (None, 2, 2, 3)
)
# test with negative axis
self.assertEqual(knp.take(x, 1, axis=-2).shape, (None, 3))
# test with multi-dimensional indices
x = KerasTensor([None, 3, None, 5])
indices = KerasTensor([6, 7])
self.assertEqual(knp.take(x, indices, axis=2).shape, (None, 3, 6, 7, 5))
def test_take_along_axis(self):
x = KerasTensor([None, 3])
indices = KerasTensor([1, 3])
self.assertEqual(knp.take_along_axis(x, indices, axis=0).shape, (1, 3))
self.assertEqual(
knp.take_along_axis(x, indices, axis=1).shape, (None, 3)
)
x = KerasTensor([None, 3, 3])
indices = KerasTensor([1, 3, None])
self.assertEqual(
knp.take_along_axis(x, indices, axis=1).shape, (None, 3, 3)
)
def test_tensordot(self):
x = KerasTensor([None, 3, 4])
y = KerasTensor([3, 4])
self.assertEqual(knp.tensordot(x, y, axes=1).shape, (None, 3, 4))
self.assertEqual(knp.tensordot(x, y, axes=[[0, 1], [1, 0]]).shape, (4,))
def test_vdot(self):
x = KerasTensor([None, 3])
y = KerasTensor([None, 3])
self.assertEqual(knp.vdot(x, y).shape, ())
x = KerasTensor([None, 3, 3])
y = KerasTensor([None, 3, 3])
self.assertEqual(knp.vdot(x, y).shape, ())
def test_where(self):
condition = KerasTensor([2, None, 1])
x = KerasTensor([None, 1])
y = KerasTensor([None, 3])
self.assertEqual(knp.where(condition, x, y).shape, (2, None, 3))
self.assertEqual(knp.where(condition).shape, (2, None, 1))
def test_floordiv(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.floor_divide(x, y).shape, (2, 3))
def test_xor(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.logical_xor(x, y).shape, (2, 3))
def test_shape_equal_basic_equality(self):
x = KerasTensor([3, 4]).shape
y = KerasTensor([3, 4]).shape
self.assertTrue(knp.shape_equal(x, y))
y = KerasTensor([3, 5]).shape
self.assertFalse(knp.shape_equal(x, y))
def test_shape_equal_allow_none(self):
x = KerasTensor([3, 4, None]).shape
y = KerasTensor([3, 4, 5]).shape
self.assertTrue(knp.shape_equal(x, y, allow_none=True))
self.assertFalse(knp.shape_equal(x, y, allow_none=False))
def test_shape_equal_different_shape_lengths(self):
x = KerasTensor([3, 4]).shape
y = KerasTensor([3, 4, 5]).shape
self.assertFalse(knp.shape_equal(x, y))
def test_shape_equal_ignore_axes(self):
x = KerasTensor([3, 4, 5]).shape
y = KerasTensor([3, 6, 5]).shape
self.assertTrue(knp.shape_equal(x, y, axis=1))
y = KerasTensor([3, 6, 7]).shape
self.assertTrue(knp.shape_equal(x, y, axis=(1, 2)))
self.assertFalse(knp.shape_equal(x, y, axis=1))
def test_shape_equal_only_none(self):
x = KerasTensor([None, None]).shape
y = KerasTensor([5, 6]).shape
self.assertTrue(knp.shape_equal(x, y, allow_none=True))
def test_shape_equal_axis_as_list(self):
x = KerasTensor([3, 4, 5]).shape
y = KerasTensor([3, 6, 5]).shape
self.assertTrue(knp.shape_equal(x, y, axis=[1]))
def test_shape_non_equal_with_negative_axis(self):
x = KerasTensor([3, 4, 5]).shape
y = KerasTensor([3, 4, 6]).shape
self.assertFalse(knp.shape_equal(x, y, axis=-2))
def test_shape_equal_with_negative_axis(self):
x = KerasTensor([3, 4, 5]).shape
y = KerasTensor([3, 4, 5]).shape
self.assertTrue(knp.shape_equal(x, y, axis=-1))
def test_shape_equal_zeros(self):
x = KerasTensor([0, 4]).shape
y = KerasTensor([0, 4]).shape
self.assertTrue(knp.shape_equal(x, y))
y = KerasTensor([0, 5]).shape
self.assertFalse(knp.shape_equal(x, y))
def test_broadcast_shapes_conversion_to_list(self):
shape1 = KerasTensor([1, 2]).shape
shape2 = KerasTensor([3, 1]).shape
expected_output = [3, 2]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
def test_broadcast_shapes_shape1_longer_than_shape2(self):
shape1 = KerasTensor([5, 3, 2]).shape
shape2 = KerasTensor([1, 3]).shape
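        # Aligned from the right, (3, 2) must be compatible with (1, 3);
        # 2 vs. 3 cannot broadcast, hence the error.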
with self.assertRaisesRegex(ValueError, "Cannot broadcast shape"):
knp.broadcast_shapes(shape1, shape2)
def test_broadcast_shapes_shape2_longer_than_shape1(self):
shape1 = KerasTensor([5, 3]).shape
shape2 = KerasTensor([2, 5, 3]).shape
expected_output = [2, 5, 3]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
def test_broadcast_shapes_broadcasting_shape1_is_1(self):
shape1 = KerasTensor([1, 3]).shape
shape2 = KerasTensor([5, 1]).shape
expected_output = [5, 3]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
def test_broadcast_shapes_broadcasting_shape1_is_none(self):
shape1 = KerasTensor([None, 3]).shape
shape2 = KerasTensor([5, 1]).shape
expected_output = [5, 3]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
shape1 = KerasTensor([None, 3]).shape
shape2 = KerasTensor([5, 3]).shape
expected_output = [5, 3]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
def test_broadcast_shapes_broadcasting_shape2_conditions(self):
shape1 = KerasTensor([5, 3, 2]).shape
shape2 = KerasTensor([1, 3, 2]).shape
expected_output = [5, 3, 2]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
shape1 = KerasTensor([5, 3, 2]).shape
shape2 = KerasTensor([1, None, 2]).shape
expected_output = [5, 3, 2]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
class NumpyTwoInputOpsStaticShapeTest(testing.TestCase):
def test_add(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.add(x, y).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.add(x, y)
def test_add_sparse(self):
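        # Addition only stays sparse when both operands are sparse; a dense
        # operand densifies the result.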
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3))
result = knp.add(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3))
y = KerasTensor((2, 3), sparse=True)
result = knp.add(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3), sparse=True)
result = knp.add(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertTrue(result.sparse)
def test_subtract(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.subtract(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.subtract(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.subtract(x, y)
def test_subtract_sparse(self):
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3))
result = knp.subtract(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3))
y = KerasTensor((2, 3), sparse=True)
result = knp.subtract(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3), sparse=True)
result = knp.subtract(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertTrue(result.sparse)
def test_multiply(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.multiply(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.multiply(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.multiply(x, y)
def test_multiply_sparse(self):
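        # Multiplying by zero preserves zeros, so the product is sparse as
        # soon as either operand is sparse.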
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3))
result = knp.multiply(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertTrue(result.sparse)
x = KerasTensor((2, 3))
y = KerasTensor((2, 3), sparse=True)
result = knp.multiply(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertTrue(result.sparse)
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3), sparse=True)
result = knp.multiply(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertTrue(result.sparse)
def test_matmul(self):
x = KerasTensor([2, 3])
y = KerasTensor([3, 2])
self.assertEqual(knp.matmul(x, y).shape, (2, 2))
with self.assertRaises(ValueError):
x = KerasTensor([3, 4])
y = KerasTensor([2, 3, 4])
knp.matmul(x, y)
def test_matmul_sparse(self):
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((3, 2))
result = knp.matmul(x, y)
self.assertEqual(result.shape, (2, 2))
x = KerasTensor((2, 3))
y = KerasTensor((3, 2), sparse=True)
result = knp.matmul(x, y)
self.assertEqual(result.shape, (2, 2))
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((3, 2), sparse=True)
result = knp.matmul(x, y)
self.assertEqual(result.shape, (2, 2))
self.assertTrue(result.sparse)
def test_power(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.power(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.power(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.power(x, y)
def test_divide(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.divide(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.divide(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.divide(x, y)
def test_true_divide(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.true_divide(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.true_divide(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.true_divide(x, y)
def test_append(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.append(x, y).shape, (12,))
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.append(x, y, axis=0).shape, (4, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.append(x, y, axis=2)
def test_arctan2(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.arctan2(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.arctan2(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.arctan2(x, y)
def test_cross(self):
x1 = KerasTensor([2, 3, 3])
x2 = KerasTensor([1, 3, 2])
y1 = KerasTensor([2, 3, 3])
y2 = KerasTensor([2, 3, 2])
self.assertEqual(knp.cross(x1, y1).shape, (2, 3, 3))
self.assertEqual(knp.cross(x2, y2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.cross(x, y)
with self.assertRaises(ValueError):
x = KerasTensor([4, 3, 3])
y = KerasTensor([2, 3, 3])
knp.cross(x, y)
def test_einsum(self):
x = KerasTensor([2, 3])
y = KerasTensor([3, 4])
self.assertEqual(knp.einsum("ij,jk->ik", x, y).shape, (2, 4))
self.assertEqual(knp.einsum("ij,jk->ikj", x, y).shape, (2, 4, 3))
self.assertEqual(knp.einsum("ii", x).shape, ())
self.assertEqual(knp.einsum(",ij", 5, x).shape, (2, 3))
x = KerasTensor([2, 3, 4])
y = KerasTensor([3, 4, 5])
z = KerasTensor([1, 1, 1, 9])
self.assertEqual(knp.einsum("ijk,jkl->li", x, y).shape, (5, 2))
self.assertEqual(knp.einsum("ijk,jkl->lij", x, y).shape, (5, 2, 3))
self.assertEqual(knp.einsum("...,...j->...j", x, y).shape, (2, 3, 4, 5))
self.assertEqual(
knp.einsum("i...,...j->i...j", x, y).shape, (2, 3, 4, 5)
)
self.assertEqual(knp.einsum("i...,...j", x, y).shape, (3, 4, 2, 5))
self.assertEqual(knp.einsum("i...,...j", x, y).shape, (3, 4, 2, 5))
self.assertEqual(
knp.einsum("i...,...j,...k", x, y, z).shape, (1, 3, 4, 2, 5, 9)
)
self.assertEqual(
knp.einsum("mij,ijk,...", x, y, z).shape, (1, 1, 1, 9, 5, 2)
)
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([3, 4])
knp.einsum("ijk,jk->ik", x, y)
def test_full_like(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.full_like(x, 2).shape, (2, 3))
def test_greater(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.greater(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.greater(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.greater(x, y)
def test_greater_equal(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.greater_equal(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.greater_equal(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.greater_equal(x, y)
def test_isclose(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.isclose(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.isclose(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.isclose(x, y)
def test_less(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.less(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.less(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.less(x, y)
def test_less_equal(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.less_equal(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.less_equal(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.less_equal(x, y)
def test_linspace(self):
start = KerasTensor([2, 3, 4])
stop = KerasTensor([2, 3, 4])
self.assertEqual(knp.linspace(start, stop, 10).shape, (10, 2, 3, 4))
with self.assertRaises(ValueError):
start = KerasTensor([2, 3])
stop = KerasTensor([2, 3, 4])
knp.linspace(start, stop)
def test_logical_and(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.logical_and(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.logical_and(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.logical_and(x, y)
def test_logical_or(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.logical_or(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.logical_or(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.logical_or(x, y)
def test_logspace(self):
start = KerasTensor([2, 3, 4])
stop = KerasTensor([2, 3, 4])
self.assertEqual(knp.logspace(start, stop, 10).shape, (10, 2, 3, 4))
with self.assertRaises(ValueError):
start = KerasTensor([2, 3])
stop = KerasTensor([2, 3, 4])
knp.logspace(start, stop)
def test_maximum(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.maximum(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.maximum(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.maximum(x, y)
def test_maximum_sparse(self):
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3))
result = knp.maximum(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3))
y = KerasTensor((2, 3), sparse=True)
result = knp.maximum(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3), sparse=True)
result = knp.maximum(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertTrue(result.sparse)
def test_minimum(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.minimum(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.minimum(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.minimum(x, y)
def test_minimum_sparse(self):
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3))
result = knp.minimum(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3))
y = KerasTensor((2, 3), sparse=True)
result = knp.minimum(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3), sparse=True)
result = knp.minimum(x, y)
self.assertEqual(result.shape, (2, 3))
self.assertTrue(result.sparse)
def test_mod(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.mod(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.mod(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.mod(x, y)
def test_not_equal(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.not_equal(x, y).shape, (2, 3))
x = KerasTensor([2, 3])
self.assertEqual(knp.not_equal(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.not_equal(x, y)
def test_outer(self):
x = KerasTensor([3])
y = KerasTensor([4])
self.assertEqual(knp.outer(x, y).shape, (3, 4))
x = KerasTensor([2, 3])
y = KerasTensor([4, 5])
self.assertEqual(knp.outer(x, y).shape, (6, 20))
x = KerasTensor([2, 3])
self.assertEqual(knp.outer(x, 2).shape, (6, 1))
def test_take(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.take(x, 1).shape, ())
self.assertEqual(knp.take(x, [1, 2]).shape, (2,))
self.assertEqual(knp.take(x, [[1, 2], [1, 2]], axis=1).shape, (2, 2, 2))
# test with multi-dimensional indices
x = KerasTensor([2, 3, 4, 5])
indices = KerasTensor([6, 7])
self.assertEqual(knp.take(x, indices, axis=2).shape, (2, 3, 6, 7, 5))
def test_take_along_axis(self):
x = KerasTensor([2, 3])
indices = KerasTensor([1, 3])
self.assertEqual(knp.take_along_axis(x, indices, axis=0).shape, (1, 3))
self.assertEqual(knp.take_along_axis(x, indices, axis=1).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
indices = KerasTensor([1, 4])
knp.take_along_axis(x, indices, axis=0)
def test_tensordot(self):
x = KerasTensor([2, 3, 3])
y = KerasTensor([3, 3, 4])
self.assertEqual(knp.tensordot(x, y, axes=1).shape, (2, 3, 3, 4))
self.assertEqual(knp.tensordot(x, y, axes=2).shape, (2, 4))
self.assertEqual(
knp.tensordot(x, y, axes=[[1, 2], [0, 1]]).shape, (2, 4)
)
def test_vdot(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.vdot(x, y).shape, ())
def test_where(self):
condition = KerasTensor([2, 3])
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.where(condition, x, y).shape, (2, 3))
self.assertAllEqual(knp.where(condition).shape, (2, 3))
def test_floordiv(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.floor_divide(x, y).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.floor_divide(x, y)
def test_xor(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.logical_xor(x, y).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
knp.logical_xor(x, y)
def test_digitize(self):
x = KerasTensor((2, 3))
bins = KerasTensor((3,))
self.assertEqual(knp.digitize(x, bins).shape, (2, 3))
self.assertTrue(knp.digitize(x, bins).dtype == "int32")
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
bins = KerasTensor([2, 3, 4])
knp.digitize(x, bins)
class NumpyOneInputOpsDynamicShapeTest(testing.TestCase):
def test_mean(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.mean(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.mean(x, axis=1).shape, (None, 3))
self.assertEqual(knp.mean(x, axis=1, keepdims=True).shape, (None, 1, 3))
def test_all(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.all(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.all(x, axis=1).shape, (None, 3))
self.assertEqual(knp.all(x, axis=1, keepdims=True).shape, (None, 1, 3))
def test_any(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.any(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.any(x, axis=1).shape, (None, 3))
self.assertEqual(knp.any(x, axis=1, keepdims=True).shape, (None, 1, 3))
def test_var(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.var(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.var(x, axis=1).shape, (None, 3))
self.assertEqual(knp.var(x, axis=1, keepdims=True).shape, (None, 1, 3))
def test_sum(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.sum(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.sum(x, axis=1).shape, (None, 3))
self.assertEqual(knp.sum(x, axis=1, keepdims=True).shape, (None, 1, 3))
def test_amax(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.amax(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.amax(x, axis=1).shape, (None, 3))
self.assertEqual(knp.amax(x, axis=1, keepdims=True).shape, (None, 1, 3))
def test_amin(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.amin(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.amin(x, axis=1).shape, (None, 3))
self.assertEqual(knp.amin(x, axis=1, keepdims=True).shape, (None, 1, 3))
def test_square(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.square(x).shape, (None, 3))
def test_negative(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.negative(x).shape, (None, 3))
def test_abs(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.abs(x).shape, (None, 3))
def test_absolute(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.absolute(x).shape, (None, 3))
def test_squeeze(self):
x = KerasTensor([None, 1])
self.assertEqual(knp.squeeze(x).shape, (None,))
self.assertEqual(knp.squeeze(x, axis=1).shape, (None,))
with self.assertRaises(ValueError):
x = KerasTensor([None, 1])
knp.squeeze(x, axis=0)
def test_transpose(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.transpose(x).shape, (3, None))
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.transpose(x, (2, 0, 1)).shape, (3, None, 3))
def test_arccos(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.arccos(x).shape, (None, 3))
def test_arccosh(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.arccosh(x).shape, (None, 3))
def test_arcsin(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.arcsin(x).shape, (None, 3))
def test_arcsinh(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.arcsinh(x).shape, (None, 3))
def test_arctan(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.arctan(x).shape, (None, 3))
def test_arctanh(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.arctanh(x).shape, (None, 3))
def test_argmax(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.argmax(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.argmax(x, axis=1).shape, (None, 3))
def test_argmin(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.argmin(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.argmin(x, axis=1).shape, (None, 3))
def test_argsort(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.argsort(x).shape, (None, 3))
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.argsort(x, axis=1).shape, (None, 3, 3))
def test_array(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.array(x).shape, (None, 3))
def test_average(self):
x = KerasTensor([None, 3])
weights = KerasTensor([None, 3])
self.assertEqual(knp.average(x, weights=weights).shape, ())
x = KerasTensor([None, 3])
weights = KerasTensor([3])
self.assertEqual(knp.average(x, axis=1, weights=weights).shape, (None,))
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.average(x, axis=1).shape, (None, 3))
with self.assertRaises(ValueError):
x = KerasTensor([None, 3, 3])
weights = KerasTensor([None, 4])
knp.average(x, weights=weights)
def test_broadcast_to(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.broadcast_to(x, (2, 3, 3)).shape, (2, 3, 3))
with self.assertRaises(ValueError):
x = KerasTensor([3, 3])
knp.broadcast_to(x, (2, 2, 3))
def test_ceil(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.ceil(x).shape, (None, 3))
def test_clip(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.clip(x, 1, 2).shape, (None, 3))
def test_concatenate(self):
x = KerasTensor([None, 3])
y = KerasTensor([None, 3])
self.assertEqual(
knp.concatenate(
[x, y],
).shape,
(None, 3),
)
self.assertEqual(knp.concatenate([x, y], axis=1).shape, (None, 6))
with self.assertRaises(ValueError):
self.assertEqual(knp.concatenate([x, y], axis=None).shape, (None,))
with self.assertRaises(ValueError):
x = KerasTensor([None, 3, 5])
y = KerasTensor([None, 4, 6])
knp.concatenate([x, y], axis=1)
def test_concatenate_sparse(self):
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3))
result = knp.concatenate([x, y], axis=1)
self.assertEqual(result.shape, (2, 6))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3))
y = KerasTensor((2, 3), sparse=True)
result = knp.concatenate([x, y], axis=1)
self.assertEqual(result.shape, (2, 6))
self.assertFalse(result.sparse)
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((2, 3), sparse=True)
result = knp.concatenate([x, y], axis=1)
self.assertEqual(result.shape, (2, 6))
self.assertTrue(result.sparse)
def test_conjugate(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.conjugate(x).shape, (None, 3))
def test_conj(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.conj(x).shape, (None, 3))
def test_copy(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.copy(x).shape, (None, 3))
def test_cos(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.cos(x).shape, (None, 3))
def test_cosh(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.cosh(x).shape, (None, 3))
def test_count_nonzero(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.count_nonzero(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.count_nonzero(x, axis=1).shape, (None, 3))
def test_cumprod(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.cumprod(x).shape, (None,))
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.cumprod(x, axis=1).shape, (None, 3, 3))
def test_cumsum(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.cumsum(x).shape, (None,))
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.cumsum(x, axis=1).shape, (None, 3, 3))
def test_diag(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.diag(x).shape, (None,))
self.assertEqual(knp.diag(x, k=3).shape, (None,))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3, 4])
knp.diag(x)
def test_diagonal(self):
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.diagonal(x).shape, (3, None))
def test_dot(self):
x = KerasTensor([None, 3])
y = KerasTensor([3, 2])
z = KerasTensor([None, None, 2])
self.assertEqual(knp.dot(x, y).shape, (None, 2))
self.assertEqual(knp.dot(x, 2).shape, (None, 3))
self.assertEqual(knp.dot(x, z).shape, (None, None, 2))
x = KerasTensor([None])
y = KerasTensor([5])
self.assertEqual(knp.dot(x, y).shape, ())
def test_exp(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.exp(x).shape, (None, 3))
def test_expand_dims(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.expand_dims(x, -1).shape, (None, 3, 1))
self.assertEqual(knp.expand_dims(x, 0).shape, (1, None, 3))
self.assertEqual(knp.expand_dims(x, 1).shape, (None, 1, 3))
self.assertEqual(knp.expand_dims(x, -2).shape, (None, 1, 3))
def test_expm1(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.expm1(x).shape, (None, 3))
def test_flip(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.flip(x).shape, (None, 3))
def test_floor(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.floor(x).shape, (None, 3))
def test_get_item(self):
x = KerasTensor([None, 5, 16])
# Simple slice.
sliced = knp.get_item(x, 5)
self.assertEqual(sliced.shape, (5, 16))
# Ellipsis slice.
sliced = knp.get_item(x, np.s_[..., -1])
self.assertEqual(sliced.shape, (None, 5))
# `newaxis` slice.
sliced = knp.get_item(x, np.s_[:, np.newaxis, ...])
self.assertEqual(sliced.shape, (None, 1, 5, 16))
# Strided slice.
sliced = knp.get_item(x, np.s_[:5, 3:, 3:12:2])
self.assertEqual(sliced.shape, (None, 2, 5))
# Error states.
with self.assertRaises(ValueError):
sliced = knp.get_item(x, np.s_[:, 17, :])
with self.assertRaises(ValueError):
sliced = knp.get_item(x, np.s_[..., 5, ...])
with self.assertRaises(ValueError):
sliced = knp.get_item(x, np.s_[:, :, :, :])
def test_hstack(self):
x = KerasTensor([None, 3])
y = KerasTensor([None, 3])
self.assertEqual(knp.hstack([x, y]).shape, (None, 6))
x = KerasTensor([None, 3])
y = KerasTensor([None, None])
self.assertEqual(knp.hstack([x, y]).shape, (None, None))
def test_imag(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.imag(x).shape, (None, 3))
def test_isfinite(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.isfinite(x).shape, (None, 3))
def test_isinf(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.isinf(x).shape, (None, 3))
def test_isnan(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.isnan(x).shape, (None, 3))
def test_log(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.log(x).shape, (None, 3))
def test_log10(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.log10(x).shape, (None, 3))
def test_log1p(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.log1p(x).shape, (None, 3))
def test_log2(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.log2(x).shape, (None, 3))
def test_logaddexp(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.logaddexp(x, x).shape, (None, 3))
def test_logical_not(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.logical_not(x).shape, (None, 3))
def test_max(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.max(x).shape, ())
def test_meshgrid(self):
x = KerasTensor([None, 3])
y = KerasTensor([None, 3])
self.assertEqual(knp.meshgrid(x, y)[0].shape, (None, None))
self.assertEqual(knp.meshgrid(x, y)[1].shape, (None, None))
with self.assertRaises(ValueError):
knp.meshgrid(x, y, indexing="kk")
def test_moveaxis(self):
x = KerasTensor([None, 3, 4, 5])
self.assertEqual(knp.moveaxis(x, 0, -1).shape, (3, 4, 5, None))
self.assertEqual(knp.moveaxis(x, -1, 0).shape, (5, None, 3, 4))
self.assertEqual(
knp.moveaxis(x, [0, 1], [-1, -2]).shape, (4, 5, 3, None)
)
self.assertEqual(knp.moveaxis(x, [0, 1], [1, 0]).shape, (3, None, 4, 5))
self.assertEqual(
knp.moveaxis(x, [0, 1], [-2, -1]).shape, (4, 5, None, 3)
)
def test_ndim(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.ndim(x).shape, (2,))
def test_ones_like(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.ones_like(x).shape, (None, 3))
self.assertEqual(knp.ones_like(x).dtype, x.dtype)
def test_zeros_like(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.zeros_like(x).shape, (None, 3))
self.assertEqual(knp.zeros_like(x).dtype, x.dtype)
def test_pad(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.pad(x, 1).shape, (None, 5))
self.assertEqual(knp.pad(x, (1, 2)).shape, (None, 6))
self.assertEqual(knp.pad(x, ((1, 2), (3, 4))).shape, (None, 10))
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.pad(x, 1).shape, (None, 5, 5))
self.assertEqual(knp.pad(x, (1, 2)).shape, (None, 6, 6))
self.assertEqual(
knp.pad(x, ((1, 2), (3, 4), (5, 6))).shape, (None, 10, 14)
)
def test_prod(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.prod(x).shape, ())
self.assertEqual(knp.prod(x, axis=0).shape, (3,))
self.assertEqual(knp.prod(x, axis=1, keepdims=True).shape, (None, 1))
def test_ravel(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.ravel(x).shape, (None,))
def test_real(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.real(x).shape, (None, 3))
def test_reciprocal(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.reciprocal(x).shape, (None, 3))
def test_repeat(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.repeat(x, 2).shape, (None,))
self.assertEqual(knp.repeat(x, 3, axis=1).shape, (None, 9))
self.assertEqual(knp.repeat(x, [1, 2], axis=0).shape, (3, 3))
self.assertEqual(knp.repeat(x, 2, axis=0).shape, (None, 3))
def test_reshape(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.reshape(x, (3, 2)).shape, (3, 2))
self.assertEqual(knp.reshape(x, (3, -1)).shape, (3, None))
def test_reshape_sparse(self):
x = KerasTensor([None, 3], sparse=True)
self.assertTrue(knp.reshape(x, (3, 2)).sparse)
self.assertEqual(knp.reshape(x, (3, 2)).shape, (3, 2))
self.assertTrue(knp.reshape(x, (3, -1)).sparse)
self.assertEqual(knp.reshape(x, (3, -1)).shape, (3, None))
def test_roll(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.roll(x, 1).shape, (None, 3))
self.assertEqual(knp.roll(x, 1, axis=1).shape, (None, 3))
self.assertEqual(knp.roll(x, 1, axis=0).shape, (None, 3))
def test_round(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.round(x).shape, (None, 3))
def test_sign(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.sign(x).shape, (None, 3))
def test_sin(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.sin(x).shape, (None, 3))
def test_sinh(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.sinh(x).shape, (None, 3))
def test_size(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.size(x).shape, ())
def test_sort(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.sort(x).shape, (None, 3))
self.assertEqual(knp.sort(x, axis=1).shape, (None, 3))
self.assertEqual(knp.sort(x, axis=0).shape, (None, 3))
def test_split(self):
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.split(x, 2)[0].shape, (None, 3, 3))
self.assertEqual(knp.split(x, 3, axis=1)[0].shape, (None, 1, 3))
self.assertEqual(len(knp.split(x, [1, 3], axis=1)), 3)
self.assertEqual(knp.split(x, [1, 3], axis=1)[0].shape, (None, 1, 3))
self.assertEqual(knp.split(x, [1, 3], axis=1)[1].shape, (None, 2, 3))
self.assertEqual(knp.split(x, [1, 3], axis=1)[2].shape, (None, 0, 3))
def test_sqrt(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.sqrt(x).shape, (None, 3))
def test_stack(self):
x = KerasTensor([None, 3])
y = KerasTensor([None, 3])
self.assertEqual(knp.stack([x, y]).shape, (2, None, 3))
self.assertEqual(knp.stack([x, y], axis=-1).shape, (None, 3, 2))
def test_std(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.std(x).shape, ())
x = KerasTensor([None, 3, 3])
self.assertEqual(knp.std(x, axis=1).shape, (None, 3))
self.assertEqual(knp.std(x, axis=1, keepdims=True).shape, (None, 1, 3))
def test_swapaxes(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.swapaxes(x, 0, 1).shape, (3, None))
def test_tan(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.tan(x).shape, (None, 3))
def test_tanh(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.tanh(x).shape, (None, 3))
def test_tile(self):
x = KerasTensor([None, 3])
self.assertEqual(knp.tile(x, [2]).shape, (None, 6))
self.assertEqual(knp.tile(x, [1, 2]).shape, (None, 6))
self.assertEqual(knp.tile(x, [2, 1, 2]).shape, (2, None, 6))
def test_trace(self):
x = KerasTensor([None, 3, None, 5])
self.assertEqual(knp.trace(x).shape, (None, 5))
self.assertEqual(knp.trace(x, axis1=2, axis2=3).shape, (None, 3))
def test_tril(self):
x = KerasTensor([None, 3, None, 5])
self.assertEqual(knp.tril(x).shape, (None, 3, None, 5))
self.assertEqual(knp.tril(x, k=1).shape, (None, 3, None, 5))
self.assertEqual(knp.tril(x, k=-1).shape, (None, 3, None, 5))
def test_triu(self):
x = KerasTensor([None, 3, None, 5])
self.assertEqual(knp.triu(x).shape, (None, 3, None, 5))
self.assertEqual(knp.triu(x, k=1).shape, (None, 3, None, 5))
self.assertEqual(knp.triu(x, k=-1).shape, (None, 3, None, 5))
def test_vstack(self):
x = KerasTensor([None, 3])
y = KerasTensor([None, 3])
self.assertEqual(knp.vstack([x, y]).shape, (None, 3))
x = KerasTensor([None, 3])
y = KerasTensor([None, None])
self.assertEqual(knp.vstack([x, y]).shape, (None, 3))
class NumpyOneInputOpsStaticShapeTest(testing.TestCase):
def test_mean(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.mean(x).shape, ())
def test_all(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.all(x).shape, ())
def test_any(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.any(x).shape, ())
def test_var(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.var(x).shape, ())
def test_sum(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.sum(x).shape, ())
def test_amax(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.amax(x).shape, ())
def test_amin(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.amin(x).shape, ())
def test_square(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.square(x).shape, (2, 3))
def test_negative(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.negative(x).shape, (2, 3))
def test_abs(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.abs(x).shape, (2, 3))
def test_absolute(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.absolute(x).shape, (2, 3))
def test_squeeze(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.squeeze(x).shape, (2, 3))
x = KerasTensor([2, 1, 3])
self.assertEqual(knp.squeeze(x).shape, (2, 3))
self.assertEqual(knp.squeeze(x, axis=1).shape, (2, 3))
self.assertEqual(knp.squeeze(x, axis=-2).shape, (2, 3))
with self.assertRaises(ValueError):
knp.squeeze(x, axis=0)
def test_squeeze_sparse(self):
x = KerasTensor([2, 3], sparse=True)
self.assertTrue(knp.squeeze(x).sparse)
self.assertEqual(knp.squeeze(x).shape, (2, 3))
x = KerasTensor([2, 1, 3], sparse=True)
self.assertTrue(knp.squeeze(x).sparse)
self.assertEqual(knp.squeeze(x).shape, (2, 3))
self.assertTrue(knp.squeeze(x, axis=1).sparse)
self.assertEqual(knp.squeeze(x, axis=1).shape, (2, 3))
self.assertTrue(knp.squeeze(x, axis=-2).sparse)
self.assertEqual(knp.squeeze(x, axis=-2).shape, (2, 3))
def test_transpose(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.transpose(x).shape, (3, 2))
def test_transpose_sparse(self):
x = KerasTensor([2, 3], sparse=True)
result = knp.transpose(x)
self.assertEqual(result.shape, (3, 2))
self.assertTrue(result.sparse)
def test_arccos(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.arccos(x).shape, (2, 3))
def test_arccosh(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.arccosh(x).shape, (2, 3))
def test_arcsin(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.arcsin(x).shape, (2, 3))
def test_arcsinh(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.arcsinh(x).shape, (2, 3))
def test_arctan(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.arctan(x).shape, (2, 3))
def test_arctanh(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.arctanh(x).shape, (2, 3))
def test_argmax(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.argmax(x).shape, ())
def test_argmin(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.argmin(x).shape, ())
def test_argsort(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.argsort(x).shape, (2, 3))
self.assertEqual(knp.argsort(x, axis=None).shape, (6,))
def test_array(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.array(x).shape, (2, 3))
def test_average(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.average(x).shape, ())
def test_broadcast_to(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.broadcast_to(x, (2, 2, 3)).shape, (2, 2, 3))
with self.assertRaises(ValueError):
x = KerasTensor([3, 3])
knp.broadcast_to(x, (2, 2, 3))
def test_ceil(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.ceil(x).shape, (2, 3))
def test_clip(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.clip(x, 1, 2).shape, (2, 3))
def test_concatenate(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.concatenate([x, y]).shape, (4, 3))
self.assertEqual(knp.concatenate([x, y], axis=1).shape, (2, 6))
with self.assertRaises(ValueError):
            knp.concatenate([x, y], axis=None)
def test_conjugate(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.conjugate(x).shape, (2, 3))
def test_conj(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.conj(x).shape, (2, 3))
def test_copy(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.copy(x).shape, (2, 3))
def test_cos(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.cos(x).shape, (2, 3))
def test_cosh(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.cosh(x).shape, (2, 3))
def test_count_nonzero(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.count_nonzero(x).shape, ())
def test_cumprod(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.cumprod(x).shape, (6,))
def test_cumsum(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.cumsum(x).shape, (6,))
def test_diag(self):
x = KerasTensor([3])
self.assertEqual(knp.diag(x).shape, (3, 3))
self.assertEqual(knp.diag(x, k=3).shape, (6, 6))
self.assertEqual(knp.diag(x, k=-2).shape, (5, 5))
x = KerasTensor([3, 5])
self.assertEqual(knp.diag(x).shape, (3,))
self.assertEqual(knp.diag(x, k=3).shape, (2,))
self.assertEqual(knp.diag(x, k=-2).shape, (1,))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3, 4])
knp.diag(x)
def test_diagonal(self):
x = KerasTensor([3, 3])
self.assertEqual(knp.diagonal(x).shape, (3,))
self.assertEqual(knp.diagonal(x, offset=1).shape, (2,))
x = KerasTensor([3, 5, 5])
self.assertEqual(knp.diagonal(x).shape, (5, 3))
with self.assertRaises(ValueError):
x = KerasTensor([3])
knp.diagonal(x)
def test_dot(self):
x = KerasTensor([2, 3])
y = KerasTensor([3, 2])
z = KerasTensor([4, 3, 2])
self.assertEqual(knp.dot(x, y).shape, (2, 2))
self.assertEqual(knp.dot(x, 2).shape, (2, 3))
self.assertEqual(knp.dot(x, z).shape, (2, 4, 2))
x = KerasTensor([5])
y = KerasTensor([5])
self.assertEqual(knp.dot(x, y).shape, ())
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
knp.dot(x, y)
def test_exp(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.exp(x).shape, (2, 3))
def test_expand_dims(self):
x = KerasTensor([2, 3, 4])
self.assertEqual(knp.expand_dims(x, 0).shape, (1, 2, 3, 4))
self.assertEqual(knp.expand_dims(x, 1).shape, (2, 1, 3, 4))
self.assertEqual(knp.expand_dims(x, -2).shape, (2, 3, 1, 4))
def test_expand_dims_sparse(self):
x = KerasTensor([2, 3, 4], sparse=True)
self.assertTrue(knp.expand_dims(x, 0).sparse)
self.assertEqual(knp.expand_dims(x, 0).shape, (1, 2, 3, 4))
self.assertTrue(knp.expand_dims(x, 1).sparse)
self.assertEqual(knp.expand_dims(x, 1).shape, (2, 1, 3, 4))
self.assertTrue(knp.expand_dims(x, -2).sparse)
self.assertEqual(knp.expand_dims(x, -2).shape, (2, 3, 1, 4))
def test_expm1(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.expm1(x).shape, (2, 3))
def test_flip(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.flip(x).shape, (2, 3))
def test_floor(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.floor(x).shape, (2, 3))
def test_get_item(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.get_item(x, 1).shape, (3,))
x = KerasTensor([5, 3, 2])
self.assertEqual(knp.get_item(x, 3).shape, (3, 2))
        x = KerasTensor([2])
self.assertEqual(knp.get_item(x, 0).shape, ())
def test_hstack(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.hstack([x, y]).shape, (2, 6))
def test_imag(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.imag(x).shape, (2, 3))
def test_isfinite(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.isfinite(x).shape, (2, 3))
def test_isinf(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.isinf(x).shape, (2, 3))
def test_isnan(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.isnan(x).shape, (2, 3))
def test_log(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.log(x).shape, (2, 3))
def test_log10(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.log10(x).shape, (2, 3))
def test_log1p(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.log1p(x).shape, (2, 3))
def test_log2(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.log2(x).shape, (2, 3))
def test_logaddexp(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.logaddexp(x, x).shape, (2, 3))
def test_logical_not(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.logical_not(x).shape, (2, 3))
def test_max(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.max(x).shape, ())
def test_meshgrid(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3, 4])
z = KerasTensor([2, 3, 4, 5])
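        # meshgrid flattens its inputs; the default "xy" indexing yields
        # shape (y.size, x.size) = (24, 6), while "ij" yields (x.size, y.size).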
self.assertEqual(knp.meshgrid(x, y)[0].shape, (24, 6))
self.assertEqual(knp.meshgrid(x, y)[1].shape, (24, 6))
self.assertEqual(knp.meshgrid(x, y, indexing="ij")[0].shape, (6, 24))
self.assertEqual(
knp.meshgrid(x, y, z, indexing="ij")[0].shape, (6, 24, 120)
)
with self.assertRaises(ValueError):
knp.meshgrid(x, y, indexing="kk")
def test_moveaxis(self):
x = KerasTensor([2, 3, 4, 5])
self.assertEqual(knp.moveaxis(x, 0, -1).shape, (3, 4, 5, 2))
self.assertEqual(knp.moveaxis(x, -1, 0).shape, (5, 2, 3, 4))
self.assertEqual(knp.moveaxis(x, [0, 1], [-1, -2]).shape, (4, 5, 3, 2))
self.assertEqual(knp.moveaxis(x, [0, 1], [1, 0]).shape, (3, 2, 4, 5))
self.assertEqual(knp.moveaxis(x, [0, 1], [-2, -1]).shape, (4, 5, 2, 3))
def test_ndim(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.ndim(x).shape, (2,))
def test_ones_like(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.ones_like(x).shape, (2, 3))
self.assertEqual(knp.ones_like(x).dtype, x.dtype)
def test_zeros_like(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.zeros_like(x).shape, (2, 3))
self.assertEqual(knp.zeros_like(x).dtype, x.dtype)
def test_pad(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.pad(x, 1).shape, (4, 5))
self.assertEqual(knp.pad(x, (1, 2)).shape, (5, 6))
self.assertEqual(knp.pad(x, ((1, 2), (3, 4))).shape, (5, 10))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
knp.pad(x, ((1, 2), (3, 4), (5, 6)))
def test_prod(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.prod(x).shape, ())
self.assertEqual(knp.prod(x, axis=0).shape, (3,))
self.assertEqual(knp.prod(x, axis=1).shape, (2,))
def test_ravel(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.ravel(x).shape, (6,))
def test_real(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.real(x).shape, (2, 3))
def test_reciprocal(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.reciprocal(x).shape, (2, 3))
def test_repeat(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.repeat(x, 2).shape, (12,))
self.assertEqual(knp.repeat(x, 3, axis=1).shape, (2, 9))
self.assertEqual(knp.repeat(x, [1, 2], axis=0).shape, (3, 3))
def test_reshape(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.reshape(x, (3, 2)).shape, (3, 2))
self.assertEqual(knp.reshape(x, (3, -1)).shape, (3, 2))
self.assertEqual(knp.reshape(x, (6,)).shape, (6,))
self.assertEqual(knp.reshape(x, (-1,)).shape, (6,))
def test_reshape_sparse(self):
x = KerasTensor([2, 3], sparse=True)
result = knp.reshape(x, (3, 2))
self.assertEqual(result.shape, (3, 2))
self.assertTrue(result.sparse)
result = knp.reshape(x, (3, -1))
self.assertEqual(result.shape, (3, 2))
self.assertTrue(result.sparse)
result = knp.reshape(x, (6,))
self.assertEqual(result.shape, (6,))
self.assertTrue(result.sparse)
result = knp.reshape(x, (-1,))
self.assertEqual(result.shape, (6,))
self.assertTrue(result.sparse)
def test_roll(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.roll(x, 1).shape, (2, 3))
self.assertEqual(knp.roll(x, 1, axis=1).shape, (2, 3))
self.assertEqual(knp.roll(x, 1, axis=0).shape, (2, 3))
def test_round(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.round(x).shape, (2, 3))
def test_sign(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.sign(x).shape, (2, 3))
def test_sin(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.sin(x).shape, (2, 3))
def test_sinh(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.sinh(x).shape, (2, 3))
def test_size(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.size(x).shape, ())
def test_sort(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.sort(x).shape, (2, 3))
self.assertEqual(knp.sort(x, axis=1).shape, (2, 3))
self.assertEqual(knp.sort(x, axis=0).shape, (2, 3))
def test_split(self):
x = KerasTensor([2, 3])
self.assertEqual(len(knp.split(x, 2)), 2)
self.assertEqual(knp.split(x, 2)[0].shape, (1, 3))
self.assertEqual(knp.split(x, 3, axis=1)[0].shape, (2, 1))
self.assertEqual(len(knp.split(x, [1, 3], axis=1)), 3)
self.assertEqual(knp.split(x, [1, 3], axis=1)[0].shape, (2, 1))
self.assertEqual(knp.split(x, [1, 3], axis=1)[1].shape, (2, 2))
self.assertEqual(knp.split(x, [1, 3], axis=1)[2].shape, (2, 0))
with self.assertRaises(ValueError):
knp.split(x, 2, axis=1)
def test_sqrt(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.sqrt(x).shape, (2, 3))
def test_stack(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.stack([x, y]).shape, (2, 2, 3))
self.assertEqual(knp.stack([x, y], axis=-1).shape, (2, 3, 2))
with self.assertRaises(ValueError):
x = KerasTensor([2, 3])
y = KerasTensor([3, 3])
knp.stack([x, y])
def test_std(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.std(x).shape, ())
def test_swapaxes(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.swapaxes(x, 0, 1).shape, (3, 2))
def test_tan(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.tan(x).shape, (2, 3))
def test_tanh(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.tanh(x).shape, (2, 3))
def test_tile(self):
x = KerasTensor([2, 3])
self.assertEqual(knp.tile(x, [2]).shape, (2, 6))
self.assertEqual(knp.tile(x, [1, 2]).shape, (2, 6))
self.assertEqual(knp.tile(x, [2, 1, 2]).shape, (2, 2, 6))
def test_trace(self):
x = KerasTensor([2, 3, 4, 5])
self.assertEqual(knp.trace(x).shape, (4, 5))
self.assertEqual(knp.trace(x, axis1=2, axis2=3).shape, (2, 3))
def test_tril(self):
x = KerasTensor([2, 3, 4, 5])
self.assertEqual(knp.tril(x).shape, (2, 3, 4, 5))
self.assertEqual(knp.tril(x, k=1).shape, (2, 3, 4, 5))
self.assertEqual(knp.tril(x, k=-1).shape, (2, 3, 4, 5))
def test_triu(self):
x = KerasTensor([2, 3, 4, 5])
self.assertEqual(knp.triu(x).shape, (2, 3, 4, 5))
self.assertEqual(knp.triu(x, k=1).shape, (2, 3, 4, 5))
self.assertEqual(knp.triu(x, k=-1).shape, (2, 3, 4, 5))
def test_vstack(self):
x = KerasTensor([2, 3])
y = KerasTensor([2, 3])
self.assertEqual(knp.vstack([x, y]).shape, (4, 3))
class NumpyTwoInputOpsCorrectnessTest(testing.TestCase, parameterized.TestCase):
def test_add(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.add(x, y), np.add(x, y))
self.assertAllClose(knp.add(x, z), np.add(x, z))
self.assertAllClose(knp.Add()(x, y), np.add(x, y))
self.assertAllClose(knp.Add()(x, z), np.add(x, z))
def test_subtract(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.subtract(x, y), np.subtract(x, y))
self.assertAllClose(knp.subtract(x, z), np.subtract(x, z))
self.assertAllClose(knp.Subtract()(x, y), np.subtract(x, y))
self.assertAllClose(knp.Subtract()(x, z), np.subtract(x, z))
def test_multiply(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.multiply(x, y), np.multiply(x, y))
self.assertAllClose(knp.multiply(x, z), np.multiply(x, z))
self.assertAllClose(knp.Multiply()(x, y), np.multiply(x, y))
self.assertAllClose(knp.Multiply()(x, z), np.multiply(x, z))
def test_matmul(self):
x = np.ones([2, 3, 4, 5])
y = np.ones([2, 3, 5, 6])
z = np.ones([5, 6])
self.assertAllClose(knp.matmul(x, y), np.matmul(x, y))
self.assertAllClose(knp.matmul(x, z), np.matmul(x, z))
self.assertAllClose(knp.Matmul()(x, y), np.matmul(x, y))
self.assertAllClose(knp.Matmul()(x, z), np.matmul(x, z))
@parameterized.product(
(
{"x_shape": (5, 3), "y_shape": (3, 4)},
{"x_shape": (2, 5, 3), "y_shape": (2, 3, 4)},
{"x_shape": (2, 2, 5, 3), "y_shape": (2, 2, 3, 4)},
),
dtype=["float16", "float32", "float64", "int32"],
x_sparse=[False, True],
y_sparse=[False, True],
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_matmul_sparse(self, dtype, x_shape, y_shape, x_sparse, y_sparse):
import tensorflow as tf
if x_sparse and y_sparse and dtype in ("float16", "int32"):
pytest.skip(f"Sparse sparse matmul unsupported for {dtype}")
rng = np.random.default_rng(0)
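        # Sparsify inputs by zeroing ~70% of entries with tf.nn.dropout (which
        # also rescales the survivors) before converting to tf.SparseTensor.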
if x_sparse:
x = 4 * rng.standard_normal(x_shape)
x = tf.sparse.from_dense(tf.cast(tf.nn.dropout(x, 0.7), dtype))
x_np = tf.sparse.to_dense(x).numpy()
else:
x = x_np = (4 * rng.standard_normal(x_shape)).astype(dtype)
y = y_np = (4 * rng.standard_normal(y_shape)).astype(dtype)
if y_sparse:
y = 4 * rng.standard_normal(y_shape)
y = tf.sparse.from_dense(tf.cast(tf.nn.dropout(y, 0.7), dtype))
y_np = tf.sparse.to_dense(y).numpy()
else:
y = y_np = (4 * rng.standard_normal(y_shape)).astype(dtype)
atol = 0.1 if dtype == "float16" else 1e-5
self.assertAllClose(knp.matmul(x, y), np.matmul(x_np, y_np), atol=atol)
if x_sparse and y_sparse:
self.assertIsInstance(knp.matmul(x, y), tf.SparseTensor)
def test_power(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.power(x, y), np.power(x, y))
self.assertAllClose(knp.power(x, z), np.power(x, z))
self.assertAllClose(knp.Power()(x, y), np.power(x, y))
self.assertAllClose(knp.Power()(x, z), np.power(x, z))
def test_divide(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.divide(x, y), np.divide(x, y))
self.assertAllClose(knp.divide(x, z), np.divide(x, z))
self.assertAllClose(knp.Divide()(x, y), np.divide(x, y))
self.assertAllClose(knp.Divide()(x, z), np.divide(x, z))
def test_true_divide(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.true_divide(x, y), np.true_divide(x, y))
self.assertAllClose(knp.true_divide(x, z), np.true_divide(x, z))
self.assertAllClose(knp.TrueDivide()(x, y), np.true_divide(x, y))
self.assertAllClose(knp.TrueDivide()(x, z), np.true_divide(x, z))
def test_append(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]], [[4, 5, 6], [3, 2, 1]]])
self.assertAllClose(knp.append(x, y), np.append(x, y))
self.assertAllClose(knp.append(x, y, axis=1), np.append(x, y, axis=1))
self.assertAllClose(knp.append(x, z), np.append(x, z))
self.assertAllClose(knp.Append()(x, y), np.append(x, y))
self.assertAllClose(knp.Append(axis=1)(x, y), np.append(x, y, axis=1))
self.assertAllClose(knp.Append()(x, z), np.append(x, z))
def test_arctan2(self):
x = np.array([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]])
y = np.array([[4.0, 5.0, 6.0], [3.0, 2.0, 1.0]])
self.assertAllClose(knp.arctan2(x, y), np.arctan2(x, y))
self.assertAllClose(knp.Arctan2()(x, y), np.arctan2(x, y))
def test_cross(self):
x1 = np.ones([2, 1, 4, 3])
x2 = np.ones([2, 1, 4, 2])
y1 = np.ones([2, 1, 4, 3])
y2 = np.ones([1, 5, 4, 3])
y3 = np.ones([1, 5, 4, 2])
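        # x2 and y3 hold 2-component vectors; np.cross supports these
        # (returning the scalar z-component for 2x2 pairs), while torch.cross
        # requires 3-component vectors, hence the backend guards below.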
self.assertAllClose(knp.cross(x1, y1), np.cross(x1, y1))
self.assertAllClose(knp.cross(x1, y2), np.cross(x1, y2))
if backend.backend() != "torch":
# API divergence between `torch.cross` and `np.cross`
# `torch.cross` only allows dim 3, `np.cross` allows dim 2 or 3
self.assertAllClose(knp.cross(x1, y3), np.cross(x1, y3))
self.assertAllClose(knp.cross(x2, y3), np.cross(x2, y3))
self.assertAllClose(knp.Cross()(x1, y1), np.cross(x1, y1))
self.assertAllClose(knp.Cross()(x1, y2), np.cross(x1, y2))
if backend.backend() != "torch":
# API divergence between `torch.cross` and `np.cross`
# `torch.cross` only allows dim 3, `np.cross` allows dim 2 or 3
self.assertAllClose(knp.Cross()(x1, y3), np.cross(x1, y3))
self.assertAllClose(knp.Cross()(x2, y3), np.cross(x2, y3))
def test_einsum(self):
x = np.arange(24).reshape([2, 3, 4]).astype("float32")
y = np.arange(24).reshape([2, 4, 3]).astype("float32")
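        # "ijk,lkj->il" contracts over the shared j and k axes, producing a
        # (2, 2) result for these inputs.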
self.assertAllClose(
knp.einsum("ijk,lkj->il", x, y),
np.einsum("ijk,lkj->il", x, y),
)
self.assertAllClose(
knp.einsum("ijk,ikj->i", x, y),
np.einsum("ijk,ikj->i", x, y),
)
self.assertAllClose(
knp.einsum("i...,j...k->...ijk", x, y),
np.einsum("i..., j...k->...ijk", x, y),
)
self.assertAllClose(knp.einsum(",ijk", 5, y), np.einsum(",ijk", 5, y))
self.assertAllClose(
knp.Einsum("ijk,lkj->il")(x, y),
np.einsum("ijk,lkj->il", x, y),
)
self.assertAllClose(
knp.Einsum("ijk,ikj->i")(x, y),
np.einsum("ijk,ikj->i", x, y),
)
self.assertAllClose(
knp.Einsum("i...,j...k->...ijk")(x, y),
np.einsum("i...,j...k->...ijk", x, y),
)
self.assertAllClose(knp.Einsum(",ijk")(5, y), np.einsum(",ijk", 5, y))
def test_full_like(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.full_like(x, 2), np.full_like(x, 2))
self.assertAllClose(
knp.full_like(x, 2, dtype="float32"),
np.full_like(x, 2, dtype="float32"),
)
self.assertAllClose(
knp.full_like(x, np.ones([2, 3])),
np.full_like(x, np.ones([2, 3])),
)
self.assertAllClose(knp.FullLike()(x, 2), np.full_like(x, 2))
self.assertAllClose(
knp.FullLike()(x, 2, dtype="float32"),
np.full_like(x, 2, dtype="float32"),
)
self.assertAllClose(
knp.FullLike()(x, np.ones([2, 3])),
np.full_like(x, np.ones([2, 3])),
)
def test_greater(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.greater(x, y), np.greater(x, y))
self.assertAllClose(knp.greater(x, 2), np.greater(x, 2))
self.assertAllClose(knp.greater(2, x), np.greater(2, x))
self.assertAllClose(knp.Greater()(x, y), np.greater(x, y))
self.assertAllClose(knp.Greater()(x, 2), np.greater(x, 2))
self.assertAllClose(knp.Greater()(2, x), np.greater(2, x))
def test_greater_equal(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(
knp.greater_equal(x, y),
np.greater_equal(x, y),
)
self.assertAllClose(
knp.greater_equal(x, 2),
np.greater_equal(x, 2),
)
self.assertAllClose(
knp.greater_equal(2, x),
np.greater_equal(2, x),
)
self.assertAllClose(
knp.GreaterEqual()(x, y),
np.greater_equal(x, y),
)
self.assertAllClose(
knp.GreaterEqual()(x, 2),
np.greater_equal(x, 2),
)
self.assertAllClose(
knp.GreaterEqual()(2, x),
np.greater_equal(2, x),
)
def test_isclose(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.isclose(x, y), np.isclose(x, y))
self.assertAllClose(knp.isclose(x, 2), np.isclose(x, 2))
self.assertAllClose(knp.isclose(2, x), np.isclose(2, x))
self.assertAllClose(knp.Isclose()(x, y), np.isclose(x, y))
self.assertAllClose(knp.Isclose()(x, 2), np.isclose(x, 2))
self.assertAllClose(knp.Isclose()(2, x), np.isclose(2, x))
def test_less(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.less(x, y), np.less(x, y))
self.assertAllClose(knp.less(x, 2), np.less(x, 2))
self.assertAllClose(knp.less(2, x), np.less(2, x))
self.assertAllClose(knp.Less()(x, y), np.less(x, y))
self.assertAllClose(knp.Less()(x, 2), np.less(x, 2))
self.assertAllClose(knp.Less()(2, x), np.less(2, x))
def test_less_equal(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.less_equal(x, y), np.less_equal(x, y))
self.assertAllClose(knp.less_equal(x, 2), np.less_equal(x, 2))
self.assertAllClose(knp.less_equal(2, x), np.less_equal(2, x))
self.assertAllClose(knp.LessEqual()(x, y), np.less_equal(x, y))
self.assertAllClose(knp.LessEqual()(x, 2), np.less_equal(x, 2))
self.assertAllClose(knp.LessEqual()(2, x), np.less_equal(2, x))
def test_linspace(self):
self.assertAllClose(knp.linspace(0, 10, 5), np.linspace(0, 10, 5))
self.assertAllClose(
knp.linspace(0, 10, 5, endpoint=False),
np.linspace(0, 10, 5, endpoint=False),
)
self.assertAllClose(knp.Linspace(num=5)(0, 10), np.linspace(0, 10, 5))
self.assertAllClose(
knp.Linspace(num=5, endpoint=False)(0, 10),
np.linspace(0, 10, 5, endpoint=False),
)
start = np.zeros([2, 3, 4])
stop = np.ones([2, 3, 4])
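        # With retstep=True, linspace returns a (samples, step) pair, so only
        # element [0] (the samples) is compared below.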
self.assertAllClose(
knp.linspace(start, stop, 5, retstep=True)[0],
np.linspace(start, stop, 5, retstep=True)[0],
)
self.assertAllClose(
backend.convert_to_numpy(
knp.linspace(start, stop, 5, endpoint=False, retstep=True)[0]
),
np.linspace(start, stop, 5, endpoint=False, retstep=True)[0],
)
self.assertAllClose(
backend.convert_to_numpy(
knp.linspace(
start, stop, 5, endpoint=False, retstep=True, dtype="int32"
)[0]
),
np.linspace(
start, stop, 5, endpoint=False, retstep=True, dtype="int32"
)[0],
)
self.assertAllClose(
knp.Linspace(5, retstep=True)(start, stop)[0],
np.linspace(start, stop, 5, retstep=True)[0],
)
self.assertAllClose(
backend.convert_to_numpy(
knp.Linspace(5, endpoint=False, retstep=True)(start, stop)[0]
),
np.linspace(start, stop, 5, endpoint=False, retstep=True)[0],
)
self.assertAllClose(
backend.convert_to_numpy(
knp.Linspace(5, endpoint=False, retstep=True, dtype="int32")(
start, stop
)[0]
),
np.linspace(
start, stop, 5, endpoint=False, retstep=True, dtype="int32"
)[0],
)
def test_logical_and(self):
x = np.array([[True, False], [True, True]])
y = np.array([[False, False], [True, False]])
self.assertAllClose(knp.logical_and(x, y), np.logical_and(x, y))
self.assertAllClose(knp.logical_and(x, True), np.logical_and(x, True))
self.assertAllClose(knp.logical_and(True, x), np.logical_and(True, x))
self.assertAllClose(knp.LogicalAnd()(x, y), np.logical_and(x, y))
self.assertAllClose(knp.LogicalAnd()(x, True), np.logical_and(x, True))
self.assertAllClose(knp.LogicalAnd()(True, x), np.logical_and(True, x))
def test_logical_or(self):
x = np.array([[True, False], [True, True]])
y = np.array([[False, False], [True, False]])
self.assertAllClose(knp.logical_or(x, y), np.logical_or(x, y))
self.assertAllClose(knp.logical_or(x, True), np.logical_or(x, True))
self.assertAllClose(knp.logical_or(True, x), np.logical_or(True, x))
self.assertAllClose(knp.LogicalOr()(x, y), np.logical_or(x, y))
self.assertAllClose(knp.LogicalOr()(x, True), np.logical_or(x, True))
self.assertAllClose(knp.LogicalOr()(True, x), np.logical_or(True, x))
def test_logspace(self):
self.assertAllClose(knp.logspace(0, 10, 5), np.logspace(0, 10, 5))
self.assertAllClose(
knp.logspace(0, 10, 5, endpoint=False),
np.logspace(0, 10, 5, endpoint=False),
)
self.assertAllClose(knp.Logspace(num=5)(0, 10), np.logspace(0, 10, 5))
self.assertAllClose(
knp.Logspace(num=5, endpoint=False)(0, 10),
np.logspace(0, 10, 5, endpoint=False),
)
start = np.zeros([2, 3, 4])
stop = np.ones([2, 3, 4])
self.assertAllClose(
knp.logspace(start, stop, 5, base=10),
np.logspace(start, stop, 5, base=10),
)
self.assertAllClose(
knp.logspace(start, stop, 5, endpoint=False, base=10),
np.logspace(start, stop, 5, endpoint=False, base=10),
)
self.assertAllClose(
knp.Logspace(5, base=10)(start, stop),
np.logspace(start, stop, 5, base=10),
)
self.assertAllClose(
knp.Logspace(5, endpoint=False, base=10)(start, stop),
np.logspace(start, stop, 5, endpoint=False, base=10),
)
def test_maximum(self):
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
self.assertAllClose(knp.maximum(x, y), np.maximum(x, y))
self.assertAllClose(knp.maximum(x, 1), np.maximum(x, 1))
self.assertAllClose(knp.maximum(1, x), np.maximum(1, x))
self.assertAllClose(knp.Maximum()(x, y), np.maximum(x, y))
self.assertAllClose(knp.Maximum()(x, 1), np.maximum(x, 1))
self.assertAllClose(knp.Maximum()(1, x), np.maximum(1, x))
def test_minimum(self):
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
self.assertAllClose(knp.minimum(x, y), np.minimum(x, y))
self.assertAllClose(knp.minimum(x, 1), np.minimum(x, 1))
self.assertAllClose(knp.minimum(1, x), np.minimum(1, x))
self.assertAllClose(knp.Minimum()(x, y), np.minimum(x, y))
self.assertAllClose(knp.Minimum()(x, 1), np.minimum(x, 1))
self.assertAllClose(knp.Minimum()(1, x), np.minimum(1, x))
def test_mod(self):
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
self.assertAllClose(knp.mod(x, y), np.mod(x, y))
self.assertAllClose(knp.mod(x, 1), np.mod(x, 1))
self.assertAllClose(knp.mod(1, x), np.mod(1, x))
self.assertAllClose(knp.Mod()(x, y), np.mod(x, y))
self.assertAllClose(knp.Mod()(x, 1), np.mod(x, 1))
self.assertAllClose(knp.Mod()(1, x), np.mod(1, x))
def test_not_equal(self):
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
self.assertAllClose(knp.not_equal(x, y), np.not_equal(x, y))
self.assertAllClose(knp.not_equal(x, 1), np.not_equal(x, 1))
self.assertAllClose(knp.not_equal(1, x), np.not_equal(1, x))
self.assertAllClose(knp.NotEqual()(x, y), np.not_equal(x, y))
self.assertAllClose(knp.NotEqual()(x, 1), np.not_equal(x, 1))
self.assertAllClose(knp.NotEqual()(1, x), np.not_equal(1, x))
def test_outer(self):
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
self.assertAllClose(knp.outer(x, y), np.outer(x, y))
self.assertAllClose(knp.Outer()(x, y), np.outer(x, y))
x = np.ones([2, 3, 4])
y = np.ones([2, 3, 4, 5, 6])
self.assertAllClose(knp.outer(x, y), np.outer(x, y))
self.assertAllClose(knp.Outer()(x, y), np.outer(x, y))
def test_take(self):
x = np.arange(24).reshape([1, 2, 3, 4])
indices = np.array([0, 1])
self.assertAllClose(knp.take(x, indices), np.take(x, indices))
self.assertAllClose(knp.take(x, 0), np.take(x, 0))
self.assertAllClose(knp.take(x, 0, axis=1), np.take(x, 0, axis=1))
self.assertAllClose(knp.Take()(x, indices), np.take(x, indices))
self.assertAllClose(knp.Take()(x, 0), np.take(x, 0))
self.assertAllClose(knp.Take(axis=1)(x, 0), np.take(x, 0, axis=1))
# test with multi-dimensional indices
rng = np.random.default_rng(0)
x = rng.standard_normal((2, 3, 4, 5))
indices = rng.integers(0, 4, (6, 7))
self.assertAllClose(
knp.take(x, indices, axis=2),
np.take(x, indices, axis=2),
)
# test with negative axis
self.assertAllClose(
knp.take(x, indices, axis=-2),
np.take(x, indices, axis=-2),
)
@parameterized.product(
dtype=[
"float16",
"float32",
"float64",
"uint8",
"int8",
"int16",
"int32",
],
axis=[None, 0, 1, -1],
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_take_sparse(self, dtype, axis):
import tensorflow as tf
rng = np.random.default_rng(0)
x = (4 * rng.standard_normal((3, 4, 5))).astype(dtype)
indices = tf.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=(2, 3)
)
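        # Sparse integer indices should behave like their dense counterparts,
        # so the numpy reference densifies them first.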
self.assertAllClose(
knp.take(x, indices, axis=axis),
np.take(x, tf.sparse.to_dense(indices).numpy(), axis=axis),
)
def test_take_along_axis(self):
x = np.arange(24).reshape([1, 2, 3, 4])
indices = np.ones([1, 4, 1, 1], dtype=np.int32)
self.assertAllClose(
knp.take_along_axis(x, indices, axis=1),
np.take_along_axis(x, indices, axis=1),
)
self.assertAllClose(
knp.TakeAlongAxis(axis=1)(x, indices),
np.take_along_axis(x, indices, axis=1),
)
x = np.arange(12).reshape([1, 1, 3, 4])
indices = np.ones([1, 4, 1, 1], dtype=np.int32)
self.assertAllClose(
knp.take_along_axis(x, indices, axis=2),
np.take_along_axis(x, indices, axis=2),
)
self.assertAllClose(
knp.TakeAlongAxis(axis=2)(x, indices),
np.take_along_axis(x, indices, axis=2),
)
def test_tensordot(self):
x = np.arange(24).reshape([1, 2, 3, 4]).astype("float32")
y = np.arange(24).reshape([3, 4, 1, 2]).astype("float32")
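        # axes=2 contracts the last two axes of x with the first two axes of y
        # (both (3, 4)), leaving a result of shape (1, 2, 1, 2).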
self.assertAllClose(
knp.tensordot(x, y, axes=2), np.tensordot(x, y, axes=2)
)
self.assertAllClose(
knp.tensordot(x, y, axes=([0, 1], [2, 3])),
np.tensordot(x, y, axes=([0, 1], [2, 3])),
)
self.assertAllClose(
knp.Tensordot(axes=2)(x, y),
np.tensordot(x, y, axes=2),
)
self.assertAllClose(
knp.Tensordot(axes=([0, 1], [2, 3]))(x, y),
np.tensordot(x, y, axes=([0, 1], [2, 3])),
)
def test_vdot(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([4.0, 5.0, 6.0])
self.assertAllClose(knp.vdot(x, y), np.vdot(x, y))
self.assertAllClose(knp.Vdot()(x, y), np.vdot(x, y))
def test_where(self):
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
self.assertAllClose(knp.where(x > 1, x, y), np.where(x > 1, x, y))
self.assertAllClose(knp.Where()(x > 1, x, y), np.where(x > 1, x, y))
self.assertAllClose(knp.where(x > 1), np.where(x > 1))
self.assertAllClose(knp.Where()(x > 1), np.where(x > 1))
def test_digitize(self):
x = np.array([0.0, 1.0, 3.0, 1.6])
bins = np.array([0.0, 3.0, 4.5, 7.0])
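        # digitize returns, for each element of x, the index of the bin it
        # falls into; the result dtype is expected to standardize to int32.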
self.assertAllClose(knp.digitize(x, bins), np.digitize(x, bins))
self.assertAllClose(knp.Digitize()(x, bins), np.digitize(x, bins))
self.assertTrue(
standardize_dtype(knp.digitize(x, bins).dtype) == "int32"
)
self.assertTrue(
standardize_dtype(knp.Digitize()(x, bins).dtype) == "int32"
)
x = np.array([0.2, 6.4, 3.0, 1.6])
bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
self.assertAllClose(knp.digitize(x, bins), np.digitize(x, bins))
self.assertAllClose(knp.Digitize()(x, bins), np.digitize(x, bins))
self.assertTrue(
standardize_dtype(knp.digitize(x, bins).dtype) == "int32"
)
self.assertTrue(
standardize_dtype(knp.Digitize()(x, bins).dtype) == "int32"
)
x = np.array([1, 4, 10, 15])
bins = np.array([4, 10, 14, 15])
self.assertAllClose(knp.digitize(x, bins), np.digitize(x, bins))
self.assertAllClose(knp.Digitize()(x, bins), np.digitize(x, bins))
self.assertTrue(
standardize_dtype(knp.digitize(x, bins).dtype) == "int32"
)
self.assertTrue(
standardize_dtype(knp.Digitize()(x, bins).dtype) == "int32"
)
@parameterized.named_parameters(
[
{
"testcase_name": "add",
"op_function": knp.add,
"op_class": knp.Add,
"np_op": np.add,
},
{
"testcase_name": "subtract",
"op_function": knp.subtract,
"op_class": knp.Subtract,
"np_op": np.subtract,
},
{
"testcase_name": "multiply",
"op_function": knp.multiply,
"op_class": knp.Multiply,
"np_op": np.multiply,
"mixed_inputs_produce_sparse_output": True,
},
{
"testcase_name": "minimum",
"op_function": knp.minimum,
"op_class": knp.Minimum,
"np_op": np.minimum,
},
{
"testcase_name": "maximum",
"op_function": knp.maximum,
"op_class": knp.Maximum,
"np_op": np.maximum,
},
]
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_sparse(
self,
op_function,
op_class,
np_op,
mixed_inputs_produce_sparse_output=False,
):
import tensorflow as tf
x = tf.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=(2, 3)
)
x_np = tf.sparse.to_dense(x).numpy()
y = tf.SparseTensor(
indices=[[0, 0], [1, 1]], values=[4.0, 5.0], dense_shape=(2, 3)
)
y_np = tf.sparse.to_dense(y).numpy()
z = np.random.rand(2, 3).astype("float32")
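        # For ops flagged `mixed_inputs_produce_sparse_output` (e.g. multiply),
        # zeros in the sparse operand propagate to the result, so a
        # sparse/dense mix is still expected to produce a tf.SparseTensor.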
# sparse tensor and dense tensor as inputs
if mixed_inputs_produce_sparse_output:
self.assertIsInstance(op_function(x, z), tf.SparseTensor)
self.assertIsInstance(op_class()(x, z), tf.SparseTensor)
self.assertAllClose(op_function(x, z), np_op(x_np, z))
self.assertAllClose(op_class()(x, z), np_op(x_np, z))
# dense tensor and sparse tensor as inputs
if mixed_inputs_produce_sparse_output:
self.assertIsInstance(op_function(z, x), tf.SparseTensor)
self.assertIsInstance(op_class()(z, x), tf.SparseTensor)
self.assertAllClose(op_function(z, x), np_op(z, x_np))
self.assertAllClose(op_class()(z, x), np_op(z, x_np))
# sparse tensor and sparse tensor as inputs
self.assertIsInstance(op_function(x, y), tf.SparseTensor)
self.assertIsInstance(op_class()(x, y), tf.SparseTensor)
self.assertAllClose(op_function(x, y), np_op(x_np, y_np))
self.assertAllClose(op_class()(x, y), np_op(x_np, y_np))
class NumpyOneInputOpsCorrectnessTest(testing.TestCase, parameterized.TestCase):
def test_mean(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.mean(x), np.mean(x))
self.assertAllClose(knp.mean(x, axis=()), np.mean(x, axis=()))
self.assertAllClose(knp.mean(x, axis=1), np.mean(x, axis=1))
self.assertAllClose(knp.mean(x, axis=(1,)), np.mean(x, axis=(1,)))
self.assertAllClose(
knp.mean(x, axis=1, keepdims=True),
np.mean(x, axis=1, keepdims=True),
)
self.assertAllClose(knp.Mean()(x), np.mean(x))
self.assertAllClose(knp.Mean(axis=1)(x), np.mean(x, axis=1))
self.assertAllClose(
knp.Mean(axis=1, keepdims=True)(x),
np.mean(x, axis=1, keepdims=True),
)
def test_all(self):
x = np.array([[True, False, True], [True, True, True]])
self.assertAllClose(knp.all(x), np.all(x))
self.assertAllClose(knp.all(x, axis=()), np.all(x, axis=()))
self.assertAllClose(knp.all(x, axis=1), np.all(x, axis=1))
self.assertAllClose(knp.all(x, axis=(1,)), np.all(x, axis=(1,)))
self.assertAllClose(
knp.all(x, axis=1, keepdims=True),
np.all(x, axis=1, keepdims=True),
)
self.assertAllClose(knp.All()(x), np.all(x))
self.assertAllClose(knp.All(axis=1)(x), np.all(x, axis=1))
self.assertAllClose(
knp.All(axis=1, keepdims=True)(x),
np.all(x, axis=1, keepdims=True),
)
def test_any(self):
x = np.array([[True, False, True], [True, True, True]])
self.assertAllClose(knp.any(x), np.any(x))
self.assertAllClose(knp.any(x, axis=()), np.any(x, axis=()))
self.assertAllClose(knp.any(x, axis=1), np.any(x, axis=1))
self.assertAllClose(knp.any(x, axis=(1,)), np.any(x, axis=(1,)))
self.assertAllClose(
knp.any(x, axis=1, keepdims=True),
np.any(x, axis=1, keepdims=True),
)
self.assertAllClose(knp.Any()(x), np.any(x))
self.assertAllClose(knp.Any(axis=1)(x), np.any(x, axis=1))
self.assertAllClose(
knp.Any(axis=1, keepdims=True)(x),
np.any(x, axis=1, keepdims=True),
)
def test_var(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.var(x), np.var(x))
self.assertAllClose(knp.var(x, axis=()), np.var(x, axis=()))
self.assertAllClose(knp.var(x, axis=1), np.var(x, axis=1))
self.assertAllClose(knp.var(x, axis=(1,)), np.var(x, axis=(1,)))
self.assertAllClose(
knp.var(x, axis=1, keepdims=True),
np.var(x, axis=1, keepdims=True),
)
self.assertAllClose(knp.Var()(x), np.var(x))
self.assertAllClose(knp.Var(axis=1)(x), np.var(x, axis=1))
self.assertAllClose(
knp.Var(axis=1, keepdims=True)(x),
np.var(x, axis=1, keepdims=True),
)
def test_sum(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.sum(x), np.sum(x))
self.assertAllClose(knp.sum(x, axis=()), np.sum(x, axis=()))
self.assertAllClose(knp.sum(x, axis=1), np.sum(x, axis=1))
self.assertAllClose(knp.sum(x, axis=(1,)), np.sum(x, axis=(1,)))
self.assertAllClose(
knp.sum(x, axis=1, keepdims=True),
np.sum(x, axis=1, keepdims=True),
)
self.assertAllClose(knp.Sum()(x), np.sum(x))
self.assertAllClose(knp.Sum(axis=1)(x), np.sum(x, axis=1))
self.assertAllClose(
knp.Sum(axis=1, keepdims=True)(x),
np.sum(x, axis=1, keepdims=True),
)
def test_amax(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.amax(x), np.amax(x))
self.assertAllClose(knp.amax(x, axis=()), np.amax(x, axis=()))
self.assertAllClose(knp.amax(x, axis=1), np.amax(x, axis=1))
self.assertAllClose(knp.amax(x, axis=(1,)), np.amax(x, axis=(1,)))
self.assertAllClose(
knp.amax(x, axis=1, keepdims=True),
np.amax(x, axis=1, keepdims=True),
)
self.assertAllClose(knp.Amax()(x), np.amax(x))
self.assertAllClose(knp.Amax(axis=1)(x), np.amax(x, axis=1))
self.assertAllClose(
knp.Amax(axis=1, keepdims=True)(x),
np.amax(x, axis=1, keepdims=True),
)
def test_amin(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.amin(x), np.amin(x))
self.assertAllClose(knp.amin(x, axis=()), np.amin(x, axis=()))
self.assertAllClose(knp.amin(x, axis=1), np.amin(x, axis=1))
self.assertAllClose(knp.amin(x, axis=(1,)), np.amin(x, axis=(1,)))
self.assertAllClose(
knp.amin(x, axis=1, keepdims=True),
np.amin(x, axis=1, keepdims=True),
)
self.assertAllClose(knp.Amin()(x), np.amin(x))
self.assertAllClose(knp.Amin(axis=1)(x), np.amin(x, axis=1))
self.assertAllClose(
knp.Amin(axis=1, keepdims=True)(x),
np.amin(x, axis=1, keepdims=True),
)
def test_square(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.square(x), np.square(x))
self.assertAllClose(knp.Square()(x), np.square(x))
def test_negative(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.negative(x), np.negative(x))
self.assertAllClose(knp.Negative()(x), np.negative(x))
def test_abs(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.abs(x), np.abs(x))
self.assertAllClose(knp.Abs()(x), np.abs(x))
def test_absolute(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.absolute(x), np.absolute(x))
self.assertAllClose(knp.Absolute()(x), np.absolute(x))
def test_squeeze(self):
x = np.ones([1, 3, 1, 5])
self.assertAllClose(knp.squeeze(x), np.squeeze(x))
self.assertAllClose(knp.squeeze(x, axis=0), np.squeeze(x, axis=0))
self.assertAllClose(knp.Squeeze()(x), np.squeeze(x))
self.assertAllClose(knp.Squeeze(axis=0)(x), np.squeeze(x, axis=0))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_squeeze_sparse(self):
import tensorflow as tf
x = tf.SparseTensor(
indices=[[0, 0, 0, 0], [0, 2, 0, 4]],
values=[1, 2],
dense_shape=(1, 3, 1, 5),
)
x_np = tf.sparse.to_dense(x).numpy()
self.assertAllClose(knp.squeeze(x), np.squeeze(x_np))
self.assertAllClose(knp.squeeze(x, axis=0), np.squeeze(x_np, axis=0))
self.assertAllClose(knp.Squeeze()(x), np.squeeze(x_np))
self.assertAllClose(knp.Squeeze(axis=0)(x), np.squeeze(x_np, axis=0))
def test_transpose(self):
x = np.ones([1, 2, 3, 4, 5])
self.assertAllClose(knp.transpose(x), np.transpose(x))
self.assertAllClose(
knp.transpose(x, axes=(1, 0, 3, 2, 4)),
np.transpose(x, axes=(1, 0, 3, 2, 4)),
)
self.assertAllClose(knp.Transpose()(x), np.transpose(x))
self.assertAllClose(
knp.Transpose(axes=(1, 0, 3, 2, 4))(x),
np.transpose(x, axes=(1, 0, 3, 2, 4)),
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_transpose_sparse(self):
import tensorflow as tf
x = tf.SparseTensor(
indices=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
values=[1, 2],
dense_shape=(1, 2, 3, 4, 5),
)
x_np = tf.sparse.to_dense(x).numpy()
self.assertIsInstance(knp.transpose(x), tf.SparseTensor)
self.assertAllClose(knp.transpose(x), np.transpose(x_np))
self.assertIsInstance(
knp.transpose(x, axes=(1, 0, 3, 2, 4)), tf.SparseTensor
)
self.assertAllClose(
knp.transpose(x, axes=(1, 0, 3, 2, 4)),
np.transpose(x_np, axes=(1, 0, 3, 2, 4)),
)
self.assertIsInstance(knp.Transpose()(x), tf.SparseTensor)
self.assertAllClose(knp.Transpose()(x), np.transpose(x_np))
self.assertIsInstance(
knp.Transpose(axes=(1, 0, 3, 2, 4))(x), tf.SparseTensor
)
self.assertAllClose(
knp.Transpose(axes=(1, 0, 3, 2, 4))(x),
np.transpose(x_np, axes=(1, 0, 3, 2, 4)),
)
def test_arccos(self):
x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]])
self.assertAllClose(knp.arccos(x), np.arccos(x))
self.assertAllClose(knp.Arccos()(x), np.arccos(x))
def test_arccosh(self):
x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]])
self.assertAllClose(knp.arccosh(x), np.arccosh(x))
self.assertAllClose(knp.Arccosh()(x), np.arccosh(x))
def test_arcsin(self):
x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]])
self.assertAllClose(knp.arcsin(x), np.arcsin(x))
self.assertAllClose(knp.Arcsin()(x), np.arcsin(x))
def test_arcsinh(self):
x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]])
self.assertAllClose(knp.arcsinh(x), np.arcsinh(x))
self.assertAllClose(knp.Arcsinh()(x), np.arcsinh(x))
def test_arctan(self):
x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]])
self.assertAllClose(knp.arctan(x), np.arctan(x))
self.assertAllClose(knp.Arctan()(x), np.arctan(x))
def test_arctanh(self):
x = np.array([[1, 0.5, -0.7], [0.9, 0.2, -1]])
self.assertAllClose(knp.arctanh(x), np.arctanh(x))
self.assertAllClose(knp.Arctanh()(x), np.arctanh(x))
def test_argmax(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.argmax(x), np.argmax(x))
self.assertAllClose(knp.argmax(x, axis=1), np.argmax(x, axis=1))
self.assertAllClose(knp.Argmax()(x), np.argmax(x))
self.assertAllClose(knp.Argmax(axis=1)(x), np.argmax(x, axis=1))
def test_argmin(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.argmin(x), np.argmin(x))
self.assertAllClose(knp.argmin(x, axis=1), np.argmin(x, axis=1))
self.assertAllClose(knp.Argmin()(x), np.argmin(x))
self.assertAllClose(knp.Argmin(axis=1)(x), np.argmin(x, axis=1))
def test_argsort(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.argsort(x), np.argsort(x))
self.assertAllClose(knp.argsort(x, axis=1), np.argsort(x, axis=1))
self.assertAllClose(
knp.argsort(x, axis=None),
np.argsort(x, axis=None),
)
self.assertAllClose(knp.Argsort()(x), np.argsort(x))
self.assertAllClose(knp.Argsort(axis=1)(x), np.argsort(x, axis=1))
self.assertAllClose(
knp.Argsort(axis=None)(x),
np.argsort(x, axis=None),
)
def test_array(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.array(x), np.array(x))
self.assertAllClose(knp.Array()(x), np.array(x))
self.assertTrue(backend.is_tensor(knp.array(x)))
self.assertTrue(backend.is_tensor(knp.Array()(x)))
def test_average(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
weights = np.ones([2, 3])
weights_1d = np.ones([3])
self.assertAllClose(knp.average(x), np.average(x))
self.assertAllClose(knp.average(x, axis=()), np.average(x, axis=()))
self.assertAllClose(knp.average(x, axis=1), np.average(x, axis=1))
self.assertAllClose(knp.average(x, axis=(1,)), np.average(x, axis=(1,)))
self.assertAllClose(
knp.average(x, axis=1, weights=weights),
np.average(x, axis=1, weights=weights),
)
self.assertAllClose(
knp.average(x, axis=1, weights=weights_1d),
np.average(x, axis=1, weights=weights_1d),
)
self.assertAllClose(knp.Average()(x), np.average(x))
self.assertAllClose(knp.Average(axis=1)(x), np.average(x, axis=1))
self.assertAllClose(
knp.Average(axis=1)(x, weights=weights),
np.average(x, axis=1, weights=weights),
)
self.assertAllClose(
knp.Average(axis=1)(x, weights=weights_1d),
np.average(x, axis=1, weights=weights_1d),
)
def test_bincount(self):
x = np.array([1, 1, 2, 3, 2, 4, 4, 5])
weights = np.array([0, 0, 3, 2, 1, 1, 4, 2])
minlength = 3
self.assertAllClose(
knp.bincount(x, weights=weights, minlength=minlength),
np.bincount(x, weights=weights, minlength=minlength),
)
self.assertAllClose(
knp.Bincount(weights=weights, minlength=minlength)(x),
np.bincount(x, weights=weights, minlength=minlength),
)
x = np.array([[1, 1, 2, 3, 2, 4, 4, 5]])
weights = np.array([[0, 0, 3, 2, 1, 1, 4, 2]])
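        # Weighted counts per value: 1 -> 0 + 0, 2 -> 3 + 1, 3 -> 2,
        # 4 -> 1 + 4, 5 -> 2.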
expected_output = np.array([[0, 0, 4, 2, 5, 2]])
self.assertAllClose(
knp.bincount(x, weights=weights, minlength=minlength),
expected_output,
)
self.assertAllClose(
knp.Bincount(weights=weights, minlength=minlength)(x),
expected_output,
)
# test with weights=None
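        # Plain occurrence counts: value 1 appears twice, 2 twice, 3 once,
        # 4 twice, 5 once.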
expected_output = np.array([[0, 2, 2, 1, 2, 1]])
self.assertAllClose(
knp.Bincount(weights=None, minlength=minlength)(x),
expected_output,
)
def test_broadcast_to(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(
knp.broadcast_to(x, [2, 2, 3]),
np.broadcast_to(x, [2, 2, 3]),
)
self.assertAllClose(
knp.BroadcastTo([2, 2, 3])(x),
np.broadcast_to(x, [2, 2, 3]),
)
def test_ceil(self):
x = np.array([[1.2, 2.1, -2.5], [2.4, -11.9, -5.5]])
self.assertAllClose(knp.ceil(x), np.ceil(x))
self.assertAllClose(knp.Ceil()(x), np.ceil(x))
def test_clip(self):
x = np.array([[1.2, 2.1, -2.5], [2.4, -11.9, -5.5]])
        self.assertAllClose(knp.clip(x, -2, 2), np.clip(x, -2, 2))
        self.assertAllClose(knp.Clip(0, 1)(x), np.clip(x, 0, 1))
def test_concatenate(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [6, 5, 4]])
z = np.array([[7, 8, 9], [9, 8, 7]])
self.assertAllClose(
knp.concatenate([x, y], axis=0),
np.concatenate([x, y], axis=0),
)
self.assertAllClose(
knp.concatenate([x, y, z], axis=0),
np.concatenate([x, y, z], axis=0),
)
self.assertAllClose(
knp.concatenate([x, y], axis=1),
np.concatenate([x, y], axis=1),
)
self.assertAllClose(
knp.Concatenate(axis=0)([x, y]),
np.concatenate([x, y], axis=0),
)
self.assertAllClose(
knp.Concatenate(axis=0)([x, y, z]),
np.concatenate([x, y, z], axis=0),
)
self.assertAllClose(
knp.Concatenate(axis=1)([x, y]),
np.concatenate([x, y], axis=1),
)
@parameterized.named_parameters(
[
{"testcase_name": "axis_0", "axis": 0},
{"testcase_name": "axis_1", "axis": 1},
]
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_concatenate_sparse(self, axis):
import tensorflow as tf
x = tf.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=(2, 3)
)
x_np = tf.sparse.to_dense(x).numpy()
y = tf.SparseTensor(
indices=[[0, 0], [1, 1]], values=[4.0, 5.0], dense_shape=(2, 3)
)
y_np = tf.sparse.to_dense(y).numpy()
z = np.random.rand(2, 3).astype("float32")
self.assertAllClose(
knp.concatenate([x, z], axis=axis),
np.concatenate([x_np, z], axis=axis),
)
self.assertAllClose(
knp.concatenate([z, x], axis=axis),
np.concatenate([z, x_np], axis=axis),
)
self.assertAllClose(
knp.concatenate([x, y], axis=axis),
np.concatenate([x_np, y_np], axis=axis),
)
self.assertAllClose(
knp.Concatenate(axis=axis)([x, z]),
np.concatenate([x_np, z], axis=axis),
)
self.assertAllClose(
knp.Concatenate(axis=axis)([z, x]),
np.concatenate([z, x_np], axis=axis),
)
self.assertAllClose(
knp.Concatenate(axis=axis)([x, y]),
np.concatenate([x_np, y_np], axis=axis),
)
self.assertIsInstance(
knp.concatenate([x, y], axis=axis), tf.SparseTensor
)
self.assertIsInstance(
knp.Concatenate(axis=axis)([x, y]), tf.SparseTensor
)
def test_conjugate(self):
x = np.array([[1 + 2j, 2 + 3j], [3 + 4j, 4 + 5j]])
self.assertAllClose(knp.conjugate(x), np.conjugate(x))
self.assertAllClose(knp.Conjugate()(x), np.conjugate(x))
def test_conj(self):
x = np.array([[1 + 2j, 2 + 3j], [3 + 4j, 4 + 5j]])
self.assertAllClose(knp.conj(x), np.conj(x))
self.assertAllClose(knp.Conj()(x), np.conj(x))
def test_copy(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.copy(x), np.copy(x))
self.assertAllClose(knp.Copy()(x), np.copy(x))
def test_cos(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.cos(x), np.cos(x))
self.assertAllClose(knp.Cos()(x), np.cos(x))
def test_cosh(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.cosh(x), np.cosh(x))
self.assertAllClose(knp.Cosh()(x), np.cosh(x))
def test_count_nonzero(self):
x = np.array([[0, 2, 3], [3, 2, 0]])
self.assertAllClose(knp.count_nonzero(x), np.count_nonzero(x))
self.assertAllClose(
knp.count_nonzero(x, axis=()), np.count_nonzero(x, axis=())
)
self.assertAllClose(
knp.count_nonzero(x, axis=1),
np.count_nonzero(x, axis=1),
)
self.assertAllClose(
knp.count_nonzero(x, axis=(1,)),
np.count_nonzero(x, axis=(1,)),
)
self.assertAllClose(
knp.CountNonzero()(x),
np.count_nonzero(x),
)
self.assertAllClose(
knp.CountNonzero(axis=1)(x),
np.count_nonzero(x, axis=1),
)
def test_cumprod(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.cumprod(x), np.cumprod(x))
self.assertAllClose(
knp.cumprod(x, axis=0),
np.cumprod(x, axis=0),
)
self.assertAllClose(
knp.cumprod(x, axis=None),
np.cumprod(x, axis=None),
)
self.assertAllClose(knp.Cumprod()(x), np.cumprod(x))
self.assertAllClose(
knp.Cumprod(axis=0)(x),
np.cumprod(x, axis=0),
)
self.assertAllClose(
knp.Cumprod(axis=None)(x),
np.cumprod(x, axis=None),
)
def test_cumsum(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.cumsum(x), np.cumsum(x))
self.assertAllClose(
knp.cumsum(x, axis=0),
np.cumsum(x, axis=0),
)
self.assertAllClose(
knp.cumsum(x, axis=1),
np.cumsum(x, axis=1),
)
self.assertAllClose(knp.Cumsum()(x), np.cumsum(x))
self.assertAllClose(
knp.Cumsum(axis=0)(x),
np.cumsum(x, axis=0),
)
self.assertAllClose(
knp.Cumsum(axis=1)(x),
np.cumsum(x, axis=1),
)
def test_diag(self):
x = np.array([1, 2, 3])
self.assertAllClose(knp.diag(x), np.diag(x))
self.assertAllClose(knp.diag(x, k=1), np.diag(x, k=1))
self.assertAllClose(knp.diag(x, k=-1), np.diag(x, k=-1))
self.assertAllClose(knp.Diag()(x), np.diag(x))
self.assertAllClose(knp.Diag(k=1)(x), np.diag(x, k=1))
self.assertAllClose(knp.Diag(k=-1)(x), np.diag(x, k=-1))
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.diag(x), np.diag(x))
self.assertAllClose(knp.diag(x, k=1), np.diag(x, k=1))
self.assertAllClose(knp.diag(x, k=-1), np.diag(x, k=-1))
self.assertAllClose(knp.Diag()(x), np.diag(x))
self.assertAllClose(knp.Diag(k=1)(x), np.diag(x, k=1))
self.assertAllClose(knp.Diag(k=-1)(x), np.diag(x, k=-1))
def test_diagonal(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.diagonal(x), np.diagonal(x))
self.assertAllClose(
knp.diagonal(x, offset=1),
np.diagonal(x, offset=1),
)
self.assertAllClose(
knp.diagonal(x, offset=-1), np.diagonal(x, offset=-1)
)
self.assertAllClose(knp.Diagonal()(x), np.diagonal(x))
self.assertAllClose(knp.Diagonal(offset=1)(x), np.diagonal(x, offset=1))
self.assertAllClose(
knp.Diagonal(offset=-1)(x), np.diagonal(x, offset=-1)
)
x = np.ones([2, 3, 4, 5])
self.assertAllClose(knp.diagonal(x), np.diagonal(x))
self.assertAllClose(
knp.diagonal(x, offset=1, axis1=2, axis2=3),
np.diagonal(x, offset=1, axis1=2, axis2=3),
)
self.assertAllClose(
knp.diagonal(x, offset=-1, axis1=2, axis2=3),
np.diagonal(x, offset=-1, axis1=2, axis2=3),
)
def test_dot(self):
x = np.arange(24).reshape([2, 3, 4]).astype("float32")
y = np.arange(12).reshape([4, 3]).astype("float32")
z = np.arange(4).astype("float32")
self.assertAllClose(knp.dot(x, y), np.dot(x, y))
self.assertAllClose(knp.dot(x, z), np.dot(x, z))
self.assertAllClose(knp.dot(x, 2), np.dot(x, 2))
self.assertAllClose(knp.Dot()(x, y), np.dot(x, y))
self.assertAllClose(knp.Dot()(x, z), np.dot(x, z))
self.assertAllClose(knp.Dot()(x, 2), np.dot(x, 2))
def test_exp(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.exp(x), np.exp(x))
self.assertAllClose(knp.Exp()(x), np.exp(x))
def test_expand_dims(self):
x = np.ones([2, 3, 4])
self.assertAllClose(knp.expand_dims(x, 0), np.expand_dims(x, 0))
self.assertAllClose(knp.expand_dims(x, 1), np.expand_dims(x, 1))
self.assertAllClose(knp.expand_dims(x, -2), np.expand_dims(x, -2))
self.assertAllClose(knp.ExpandDims(0)(x), np.expand_dims(x, 0))
self.assertAllClose(knp.ExpandDims(1)(x), np.expand_dims(x, 1))
self.assertAllClose(knp.ExpandDims(-2)(x), np.expand_dims(x, -2))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_expand_dims_sparse(self):
import tensorflow as tf
x = tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=[1, 2],
dense_shape=(2, 3),
)
x_np = tf.sparse.to_dense(x).numpy()
self.assertAllClose(knp.expand_dims(x, 0), np.expand_dims(x_np, 0))
self.assertAllClose(knp.expand_dims(x, 1), np.expand_dims(x_np, 1))
self.assertAllClose(knp.expand_dims(x, -2), np.expand_dims(x_np, -2))
self.assertAllClose(knp.ExpandDims(0)(x), np.expand_dims(x_np, 0))
self.assertAllClose(knp.ExpandDims(1)(x), np.expand_dims(x_np, 1))
self.assertAllClose(knp.ExpandDims(-2)(x), np.expand_dims(x_np, -2))
def test_expm1(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.expm1(x), np.expm1(x))
self.assertAllClose(knp.Expm1()(x), np.expm1(x))
def test_flip(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.flip(x), np.flip(x))
self.assertAllClose(knp.flip(x, 0), np.flip(x, 0))
self.assertAllClose(knp.flip(x, 1), np.flip(x, 1))
self.assertAllClose(knp.Flip()(x), np.flip(x))
self.assertAllClose(knp.Flip(0)(x), np.flip(x, 0))
self.assertAllClose(knp.Flip(1)(x), np.flip(x, 1))
def test_floor(self):
x = np.array([[1.1, 2.2, -3.3], [3.3, 2.2, -1.1]])
self.assertAllClose(knp.floor(x), np.floor(x))
self.assertAllClose(knp.Floor()(x), np.floor(x))
def test_hstack(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [6, 5, 4]])
self.assertAllClose(knp.hstack([x, y]), np.hstack([x, y]))
self.assertAllClose(knp.Hstack()([x, y]), np.hstack([x, y]))
x = np.ones([2, 3, 4])
y = np.ones([2, 5, 4])
self.assertAllClose(knp.hstack([x, y]), np.hstack([x, y]))
self.assertAllClose(knp.Hstack()([x, y]), np.hstack([x, y]))
def test_imag(self):
x = np.array([[1 + 1j, 2 + 2j, 3 + 3j], [3 + 3j, 2 + 2j, 1 + 1j]])
self.assertAllClose(knp.imag(x), np.imag(x))
self.assertAllClose(knp.Imag()(x), np.imag(x))
def test_isfinite(self):
x = np.array([[1, 2, np.inf], [np.nan, np.nan, np.nan]])
self.assertAllClose(knp.isfinite(x), np.isfinite(x))
self.assertAllClose(knp.Isfinite()(x), np.isfinite(x))
def test_isinf(self):
x = np.array([[1, 2, np.inf], [np.nan, np.nan, np.nan]])
self.assertAllClose(knp.isinf(x), np.isinf(x))
self.assertAllClose(knp.Isinf()(x), np.isinf(x))
def test_isnan(self):
x = np.array([[1, 2, np.inf], [np.nan, np.nan, np.nan]])
self.assertAllClose(knp.isnan(x), np.isnan(x))
self.assertAllClose(knp.Isnan()(x), np.isnan(x))
def test_log(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.log(x), np.log(x))
self.assertAllClose(knp.Log()(x), np.log(x))
def test_log10(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.log10(x), np.log10(x))
self.assertAllClose(knp.Log10()(x), np.log10(x))
def test_log1p(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.log1p(x), np.log1p(x))
self.assertAllClose(knp.Log1p()(x), np.log1p(x))
def test_log2(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.log2(x), np.log2(x))
self.assertAllClose(knp.Log2()(x), np.log2(x))
def test_logaddexp(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.logaddexp(x, y), np.logaddexp(x, y))
self.assertAllClose(knp.Logaddexp()(x, y), np.logaddexp(x, y))
def test_logical_not(self):
x = np.array([[True, False], [False, True]])
self.assertAllClose(knp.logical_not(x), np.logical_not(x))
self.assertAllClose(knp.LogicalNot()(x), np.logical_not(x))
def test_max(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.max(x), np.max(x))
self.assertAllClose(knp.Max()(x), np.max(x))
self.assertAllClose(knp.max(x, 0), np.max(x, 0))
self.assertAllClose(knp.Max(0)(x), np.max(x, 0))
self.assertAllClose(knp.max(x, 1), np.max(x, 1))
self.assertAllClose(knp.Max(1)(x), np.max(x, 1))
# test max with initial
self.assertAllClose(knp.max(x, initial=4), 4)
# test empty tensor
x = np.array([[]])
self.assertAllClose(knp.max(x, initial=1), np.max(x, initial=1))
self.assertAllClose(
knp.max(x, initial=1, keepdims=True),
np.max(x, initial=1, keepdims=True),
)
def test_min(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.min(x), np.min(x))
self.assertAllClose(knp.Min()(x), np.min(x))
self.assertAllClose(knp.min(x, 0), np.min(x, 0))
self.assertAllClose(knp.Min(0)(x), np.min(x, 0))
self.assertAllClose(knp.min(x, 1), np.min(x, 1))
self.assertAllClose(knp.Min(1)(x), np.min(x, 1))
# test min with initial
self.assertAllClose(knp.min(x, initial=0), 0)
# test empty tensor
x = np.array([[]])
self.assertAllClose(knp.min(x, initial=1), np.min(x, initial=1))
self.assertAllClose(
knp.min(x, initial=1, keepdims=True),
np.min(x, initial=1, keepdims=True),
)
def test_meshgrid(self):
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
z = np.array([7, 8, 9])
self.assertAllClose(knp.meshgrid(x, y), np.meshgrid(x, y))
self.assertAllClose(knp.meshgrid(x, z), np.meshgrid(x, z))
self.assertAllClose(
knp.meshgrid(x, y, z, indexing="ij"),
np.meshgrid(x, y, z, indexing="ij"),
)
self.assertAllClose(knp.Meshgrid()(x, y), np.meshgrid(x, y))
self.assertAllClose(knp.Meshgrid()(x, z), np.meshgrid(x, z))
self.assertAllClose(
knp.Meshgrid(indexing="ij")(x, y, z),
np.meshgrid(x, y, z, indexing="ij"),
)
if backend.backend() == "tensorflow":
            # `jax.numpy.meshgrid` requires 1D arguments, so only exercise
            # non-1D inputs on the TensorFlow backend.
x = np.ones([1, 2, 3])
y = np.ones([4, 5, 6, 6])
z = np.ones([7, 8])
self.assertAllClose(knp.meshgrid(x, y), np.meshgrid(x, y))
self.assertAllClose(knp.meshgrid(x, z), np.meshgrid(x, z))
self.assertAllClose(
knp.meshgrid(x, y, z, indexing="ij"),
np.meshgrid(x, y, z, indexing="ij"),
)
self.assertAllClose(knp.Meshgrid()(x, y), np.meshgrid(x, y))
self.assertAllClose(knp.Meshgrid()(x, z), np.meshgrid(x, z))
self.assertAllClose(
knp.Meshgrid(indexing="ij")(x, y, z),
np.meshgrid(x, y, z, indexing="ij"),
)
def test_moveaxis(self):
x = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
self.assertAllClose(knp.moveaxis(x, 0, -1), np.moveaxis(x, 0, -1))
self.assertAllClose(knp.moveaxis(x, -1, 0), np.moveaxis(x, -1, 0))
self.assertAllClose(
knp.moveaxis(x, (0, 1), (1, 0)),
np.moveaxis(x, (0, 1), (1, 0)),
)
self.assertAllClose(
knp.moveaxis(x, [0, 1, 2], [2, 0, 1]),
np.moveaxis(x, [0, 1, 2], [2, 0, 1]),
)
self.assertAllClose(knp.Moveaxis(-1, 0)(x), np.moveaxis(x, -1, 0))
self.assertAllClose(
knp.Moveaxis((0, 1), (1, 0))(x),
np.moveaxis(x, (0, 1), (1, 0)),
)
self.assertAllClose(
knp.Moveaxis([0, 1, 2], [2, 0, 1])(x),
np.moveaxis(x, [0, 1, 2], [2, 0, 1]),
)
def test_ndim(self):
x = np.array([1, 2, 3])
self.assertEqual(knp.ndim(x), np.ndim(x))
self.assertEqual(knp.Ndim()(x), np.ndim(x))
def test_nonzero(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.nonzero(x), np.nonzero(x))
self.assertAllClose(knp.Nonzero()(x), np.nonzero(x))
def test_ones_like(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.ones_like(x), np.ones_like(x))
self.assertAllClose(knp.OnesLike()(x), np.ones_like(x))
@parameterized.product(
dtype=[
"float16",
"float32",
"float64",
"uint8",
"int8",
"int16",
"int32",
],
mode=["constant", "reflect", "symmetric"],
)
def test_pad(self, dtype, mode):
# 2D
x = np.ones([2, 3], dtype=dtype)
pad_width = ((1, 1), (1, 1))
self.assertAllClose(
knp.pad(x, pad_width, mode=mode), np.pad(x, pad_width, mode=mode)
)
self.assertAllClose(
knp.Pad(pad_width, mode=mode)(x), np.pad(x, pad_width, mode=mode)
)
# 5D (pad last 3D)
x = np.ones([2, 3, 4, 5, 6], dtype=dtype)
pad_width = ((0, 0), (0, 0), (2, 3), (1, 1), (1, 1))
self.assertAllClose(
knp.pad(x, pad_width, mode=mode), np.pad(x, pad_width, mode=mode)
)
self.assertAllClose(
knp.Pad(pad_width, mode=mode)(x), np.pad(x, pad_width, mode=mode)
)
# 5D (pad arbitrary dimensions)
if backend.backend() == "torch" and mode != "constant":
self.skipTest(
"reflect and symmetric padding for arbitary dimensions are not "
"supported by torch"
)
x = np.ones([2, 3, 4, 5, 6], dtype=dtype)
pad_width = ((1, 1), (2, 1), (3, 2), (4, 3), (5, 4))
self.assertAllClose(
knp.pad(x, pad_width, mode=mode), np.pad(x, pad_width, mode=mode)
)
self.assertAllClose(
knp.Pad(pad_width, mode=mode)(x), np.pad(x, pad_width, mode=mode)
)
def test_prod(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.prod(x), np.prod(x))
self.assertAllClose(knp.prod(x, axis=()), np.prod(x, axis=()))
self.assertAllClose(knp.prod(x, axis=1), np.prod(x, axis=1))
self.assertAllClose(knp.prod(x, axis=(1,)), np.prod(x, axis=(1,)))
self.assertAllClose(
knp.prod(x, axis=1, keepdims=True),
np.prod(x, axis=1, keepdims=True),
)
self.assertAllClose(knp.Prod()(x), np.prod(x))
self.assertAllClose(knp.Prod(axis=1)(x), np.prod(x, axis=1))
self.assertAllClose(
knp.Prod(axis=1, keepdims=True)(x),
np.prod(x, axis=1, keepdims=True),
)
def test_ravel(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.ravel(x), np.ravel(x))
self.assertAllClose(knp.Ravel()(x), np.ravel(x))
def test_real(self):
x = np.array([[1, 2, 3 - 3j], [3, 2, 1 + 5j]])
self.assertAllClose(knp.real(x), np.real(x))
self.assertAllClose(knp.Real()(x), np.real(x))
def test_reciprocal(self):
x = np.array([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]])
self.assertAllClose(knp.reciprocal(x), np.reciprocal(x))
self.assertAllClose(knp.Reciprocal()(x), np.reciprocal(x))
def test_repeat(self):
x = np.array([[1, 2], [3, 4]])
self.assertAllClose(knp.repeat(x, 2), np.repeat(x, 2))
self.assertAllClose(knp.repeat(x, 3, axis=1), np.repeat(x, 3, axis=1))
self.assertAllClose(
knp.repeat(x, np.array([1, 2]), axis=-1),
np.repeat(x, np.array([1, 2]), axis=-1),
)
self.assertAllClose(knp.Repeat(2)(x), np.repeat(x, 2))
self.assertAllClose(knp.Repeat(3, axis=1)(x), np.repeat(x, 3, axis=1))
self.assertAllClose(
knp.Repeat(np.array([1, 2]), axis=0)(x),
np.repeat(x, np.array([1, 2]), axis=0),
)
def test_reshape(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.reshape(x, [3, 2]), np.reshape(x, [3, 2]))
self.assertAllClose(knp.Reshape([3, 2])(x), np.reshape(x, [3, 2]))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_reshape_sparse(self):
import tensorflow as tf
x = tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=[1, 2],
dense_shape=(2, 3),
)
x_np = tf.sparse.to_dense(x).numpy()
self.assertIsInstance(knp.reshape(x, [3, 2]), tf.SparseTensor)
self.assertAllClose(knp.reshape(x, [3, 2]), np.reshape(x_np, [3, 2]))
self.assertIsInstance(knp.Reshape([3, 2])(x), tf.SparseTensor)
self.assertAllClose(knp.Reshape([3, 2])(x), np.reshape(x_np, [3, 2]))
def test_roll(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.roll(x, 1), np.roll(x, 1))
self.assertAllClose(knp.roll(x, 1, axis=1), np.roll(x, 1, axis=1))
self.assertAllClose(knp.roll(x, -1, axis=0), np.roll(x, -1, axis=0))
self.assertAllClose(knp.Roll(1)(x), np.roll(x, 1))
self.assertAllClose(knp.Roll(1, axis=1)(x), np.roll(x, 1, axis=1))
self.assertAllClose(knp.Roll(-1, axis=0)(x), np.roll(x, -1, axis=0))
def test_round(self):
x = np.array([[1.1, 2.5, 3.9], [3.2, 2.3, 1.8]])
self.assertAllClose(knp.round(x), np.round(x))
self.assertAllClose(knp.Round()(x), np.round(x))
def test_sign(self):
x = np.array([[1, -2, 3], [-3, 2, -1]])
self.assertAllClose(knp.sign(x), np.sign(x))
self.assertAllClose(knp.Sign()(x), np.sign(x))
def test_sin(self):
x = np.array([[1, -2, 3], [-3, 2, -1]])
self.assertAllClose(knp.sin(x), np.sin(x))
self.assertAllClose(knp.Sin()(x), np.sin(x))
def test_sinh(self):
x = np.array([[1, -2, 3], [-3, 2, -1]])
self.assertAllClose(knp.sinh(x), np.sinh(x))
self.assertAllClose(knp.Sinh()(x), np.sinh(x))
def test_size(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.size(x), np.size(x))
self.assertAllClose(knp.Size()(x), np.size(x))
def test_sort(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.sort(x), np.sort(x))
self.assertAllClose(knp.Sort()(x), np.sort(x))
self.assertAllClose(knp.sort(x, axis=0), np.sort(x, axis=0))
self.assertAllClose(knp.Sort(axis=0)(x), np.sort(x, axis=0))
def test_split(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.split(x, 2), np.split(x, 2))
self.assertAllClose(knp.Split(2)(x), np.split(x, 2))
self.assertAllClose(
knp.split(x, [1, 2], axis=1),
np.split(x, [1, 2], axis=1),
)
self.assertAllClose(
knp.Split([1, 2], axis=1)(x),
np.split(x, [1, 2], axis=1),
)
# test invalid indices_or_sections
with self.assertRaises(Exception):
knp.split(x, 3)
# test zero dimension
x = np.ones(shape=(0,))
self.assertEqual(len(knp.split(x, 2)), 2)
self.assertEqual(len(knp.Split(2)(x)), 2)
def test_sqrt(self):
x = np.array([[1, 4, 9], [16, 25, 36]], dtype="float32")
ref_y = np.sqrt(x)
y = knp.sqrt(x)
self.assertEqual(standardize_dtype(y.dtype), "float32")
self.assertAllClose(y, ref_y)
y = knp.Sqrt()(x)
self.assertEqual(standardize_dtype(y.dtype), "float32")
self.assertAllClose(y, ref_y)
@pytest.mark.skipif(
backend.backend() == "jax", reason="JAX does not support float64."
)
def test_sqrt_float64(self):
x = np.array([[1, 4, 9], [16, 25, 36]], dtype="float64")
ref_y = np.sqrt(x)
y = knp.sqrt(x)
self.assertEqual(standardize_dtype(y.dtype), "float64")
self.assertAllClose(y, ref_y)
y = knp.Sqrt()(x)
self.assertEqual(standardize_dtype(y.dtype), "float64")
self.assertAllClose(y, ref_y)
def test_sqrt_int32(self):
x = np.array([[1, 4, 9], [16, 25, 36]], dtype="int32")
ref_y = np.sqrt(x)
y = knp.sqrt(x)
self.assertEqual(standardize_dtype(y.dtype), "float32")
self.assertAllClose(y, ref_y)
y = knp.Sqrt()(x)
self.assertEqual(standardize_dtype(y.dtype), "float32")
self.assertAllClose(y, ref_y)
def test_stack(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [6, 5, 4]])
self.assertAllClose(knp.stack([x, y]), np.stack([x, y]))
self.assertAllClose(knp.stack([x, y], axis=1), np.stack([x, y], axis=1))
self.assertAllClose(knp.Stack()([x, y]), np.stack([x, y]))
self.assertAllClose(knp.Stack(axis=1)([x, y]), np.stack([x, y], axis=1))
def test_std(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.std(x), np.std(x))
self.assertAllClose(knp.std(x, axis=1), np.std(x, axis=1))
self.assertAllClose(
knp.std(x, axis=1, keepdims=True),
np.std(x, axis=1, keepdims=True),
)
self.assertAllClose(knp.Std()(x), np.std(x))
self.assertAllClose(knp.Std(axis=1)(x), np.std(x, axis=1))
self.assertAllClose(
knp.Std(axis=1, keepdims=True)(x),
np.std(x, axis=1, keepdims=True),
)
def test_swapaxes(self):
x = np.arange(24).reshape([1, 2, 3, 4])
self.assertAllClose(
knp.swapaxes(x, 0, 1),
np.swapaxes(x, 0, 1),
)
self.assertAllClose(
knp.Swapaxes(0, 1)(x),
np.swapaxes(x, 0, 1),
)
def test_tan(self):
x = np.array([[1, -2, 3], [-3, 2, -1]])
self.assertAllClose(knp.tan(x), np.tan(x))
self.assertAllClose(knp.Tan()(x), np.tan(x))
def test_tanh(self):
x = np.array([[1, -2, 3], [-3, 2, -1]])
self.assertAllClose(knp.tanh(x), np.tanh(x))
self.assertAllClose(knp.Tanh()(x), np.tanh(x))
def test_tile(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.tile(x, [2, 3]), np.tile(x, [2, 3]))
self.assertAllClose(knp.Tile([2, 3])(x), np.tile(x, [2, 3]))
def test_trace(self):
x = np.arange(24).reshape([1, 2, 3, 4])
self.assertAllClose(knp.trace(x), np.trace(x))
self.assertAllClose(
knp.trace(x, axis1=2, axis2=3),
np.trace(x, axis1=2, axis2=3),
)
self.assertAllClose(
knp.Trace(axis1=2, axis2=3)(x),
np.trace(x, axis1=2, axis2=3),
)
def test_tril(self):
x = np.arange(24).reshape([1, 2, 3, 4])
self.assertAllClose(knp.tril(x), np.tril(x))
self.assertAllClose(knp.tril(x, -1), np.tril(x, -1))
self.assertAllClose(knp.Tril(-1)(x), np.tril(x, -1))
def test_triu(self):
x = np.arange(24).reshape([1, 2, 3, 4])
self.assertAllClose(knp.triu(x), np.triu(x))
self.assertAllClose(knp.triu(x, -1), np.triu(x, -1))
self.assertAllClose(knp.Triu(-1)(x), np.triu(x, -1))
def test_vstack(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [6, 5, 4]])
self.assertAllClose(knp.vstack([x, y]), np.vstack([x, y]))
self.assertAllClose(knp.Vstack()([x, y]), np.vstack([x, y]))
def test_floordiv(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.floor_divide(x, y), np.floor_divide(x, y))
self.assertAllClose(knp.floor_divide(x, z), np.floor_divide(x, z))
self.assertAllClose(knp.FloorDivide()(x, y), np.floor_divide(x, y))
self.assertAllClose(knp.FloorDivide()(x, z), np.floor_divide(x, z))
def test_xor(self):
x = np.array([[True, False], [True, True]])
y = np.array([[False, False], [True, False]])
self.assertAllClose(knp.logical_xor(x, y), np.logical_xor(x, y))
self.assertAllClose(knp.logical_xor(x, True), np.logical_xor(x, True))
self.assertAllClose(knp.logical_xor(True, x), np.logical_xor(True, x))
self.assertAllClose(knp.LogicalXor()(x, y), np.logical_xor(x, y))
self.assertAllClose(knp.LogicalXor()(x, True), np.logical_xor(x, True))
self.assertAllClose(knp.LogicalXor()(True, x), np.logical_xor(True, x))
class NumpyArrayCreateOpsCorrectnessTest(testing.TestCase):
def test_ones(self):
self.assertAllClose(knp.ones([2, 3]), np.ones([2, 3]))
self.assertAllClose(knp.Ones()([2, 3]), np.ones([2, 3]))
def test_zeros(self):
self.assertAllClose(knp.zeros([2, 3]), np.zeros([2, 3]))
self.assertAllClose(knp.Zeros()([2, 3]), np.zeros([2, 3]))
def test_eye(self):
self.assertAllClose(knp.eye(3), np.eye(3))
self.assertAllClose(knp.eye(3, 4), np.eye(3, 4))
self.assertAllClose(knp.eye(3, 4, 1), np.eye(3, 4, 1))
self.assertAllClose(knp.Eye()(3), np.eye(3))
self.assertAllClose(knp.Eye()(3, 4), np.eye(3, 4))
self.assertAllClose(knp.Eye()(3, 4, 1), np.eye(3, 4, 1))
def test_arange(self):
self.assertAllClose(knp.arange(3), np.arange(3))
self.assertAllClose(knp.arange(3, 7), np.arange(3, 7))
self.assertAllClose(knp.arange(3, 7, 2), np.arange(3, 7, 2))
self.assertAllClose(knp.Arange()(3), np.arange(3))
self.assertAllClose(knp.Arange()(3, 7), np.arange(3, 7))
self.assertAllClose(knp.Arange()(3, 7, 2), np.arange(3, 7, 2))
self.assertEqual(standardize_dtype(knp.arange(3).dtype), "int32")
def test_full(self):
self.assertAllClose(knp.full([2, 3], 0), np.full([2, 3], 0))
self.assertAllClose(knp.full([2, 3], 0.1), np.full([2, 3], 0.1))
self.assertAllClose(
knp.full([2, 3], np.array([1, 4, 5])),
np.full([2, 3], np.array([1, 4, 5])),
)
self.assertAllClose(knp.Full()([2, 3], 0), np.full([2, 3], 0))
self.assertAllClose(knp.Full()([2, 3], 0.1), np.full([2, 3], 0.1))
self.assertAllClose(
knp.Full()([2, 3], np.array([1, 4, 5])),
np.full([2, 3], np.array([1, 4, 5])),
)
def test_identity(self):
self.assertAllClose(knp.identity(3), np.identity(3))
self.assertAllClose(knp.Identity()(3), np.identity(3))
def test_tri(self):
self.assertAllClose(knp.tri(3), np.tri(3))
self.assertAllClose(knp.tri(3, 4), np.tri(3, 4))
self.assertAllClose(knp.tri(3, 4, 1), np.tri(3, 4, 1))
self.assertAllClose(knp.Tri()(3), np.tri(3))
self.assertAllClose(knp.Tri()(3, 4), np.tri(3, 4))
self.assertAllClose(knp.Tri()(3, 4, 1), np.tri(3, 4, 1))
| keras-core/keras_core/ops/numpy_test.py/0 | {
"file_path": "keras-core/keras_core/ops/numpy_test.py",
"repo_id": "keras-core",
"token_count": 74958
} | 46 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.optimizers import optimizer
@keras_core_export(["keras_core.optimizers.Adamax"])
class Adamax(optimizer.Optimizer):
"""Optimizer that implements the Adamax algorithm.
Adamax, a variant of Adam based on the infinity norm, is a first-order
gradient-based optimization method. Due to its capability of adjusting the
    learning rate based on data characteristics, it is suited to learning
    time-variant processes, e.g., speech data with dynamically changing noise
conditions. Default parameters follow those provided in the paper (see
references below).
Initialization:
```python
m = 0 # Initialize initial 1st moment vector
u = 0 # Initialize the exponentially weighted infinity norm
t = 0 # Initialize timestep
```
The update rule for parameter `w` with gradient `g` is described at the end
    of section 7.1 of the paper (see the reference section):
```python
t += 1
    m = beta1 * m + (1 - beta1) * g
u = max(beta2 * u, abs(g))
current_lr = learning_rate / (1 - beta1 ** t)
w = w - current_lr * m / (u + epsilon)
```
Args:
learning_rate: A float, a
`keras_core.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
{{base_optimizer_keyword_args}}
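    A minimal usage sketch (the hyperparameter value is illustrative and
    `model` is assumed to be an already-built Keras model):
    ```python
    import keras_core
    optimizer = keras_core.optimizers.Adamax(learning_rate=0.001)
    model.compile(optimizer=optimizer, loss="mse")
    ```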
Reference:
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
name="adamax",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def build(self, var_list):
"""Initialize optimizer variables.
        Adamax optimizer has 2 types of variables: momentums (denoted as m)
        and the exponentially weighted infinity norm (denoted as u).
Args:
var_list: list of model variables to build Adamax variables on.
"""
if self.built:
return
super().build(var_list)
self._m = []
self._u = []
for var in var_list:
self._m.append(
self.add_variable_from_reference(
reference_variable=var, name="momentum"
)
)
self._u.append(
self.add_variable_from_reference(
reference_variable=var, name="norm"
)
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
beta_1_power = ops.power(
ops.cast(self.beta_1, variable.dtype), local_step
)
m = self._m[self._get_variable_index(variable)]
u = self._u[self._get_variable_index(variable)]
m.assign(m + (gradient - m) * (1 - self.beta_1))
u.assign(ops.maximum(self.beta_2 * u, ops.abs(gradient)))
variable.assign(
variable - (lr * m) / ((1 - beta_1_power) * (u + self.epsilon))
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
}
)
return config
Adamax.__doc__ = Adamax.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| keras-core/keras_core/optimizers/adamax.py/0 | {
"file_path": "keras-core/keras_core/optimizers/adamax.py",
"repo_id": "keras-core",
"token_count": 2133
} | 47 |
## How to contribute code
Follow these steps to submit your code contribution.
[You can find a list of issues for which we are looking for contributors here!](https://github.com/keras-team/keras-cv/labels/contribution-welcome)
### Step 1. Open an issue
Before making any changes, we recommend opening an issue (if one doesn't already
exist) and discussing your proposed changes. This way, we can give you feedback
and validate the proposed changes.
If your code change fixes a bug, please include a
[Colab](https://colab.research.google.com/) notebook that shows
how to reproduce the broken behavior.
If the changes are minor (simple bug fix or documentation fix), then feel free
to open a PR without discussion.
### Step 2. Make code changes
To make code changes, you need to fork the repository. You will need to set up a
development environment and run the unit tests. This is covered in section
"set up environment".
If your code change introduces a new API, please see our
[API Design Guidelines](API_DESIGN.md).
**Notes**
- Make sure to add a new entry to [serialization tests](https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/serialization_test.py#L37) for new layers.
### Step 3. Create a pull request
Once the change is ready, open a pull request from your branch in your fork to
the master branch in [keras-team/keras-cv](https://github.com/keras-team/keras-cv).
### Step 4. Sign the Contributor License Agreement
After creating the pull request, you will need to sign the Google CLA agreement.
The agreement can be found at [https://cla.developers.google.com/clas](https://cla.developers.google.com/clas).
### Step 5. Code review
CI tests will automatically be run directly on your pull request. Their
status will be reported back via GitHub actions.
There may be
several rounds of comments and code changes before the pull request gets
approved by the reviewer.

### Step 6. Merging
Once the pull request is approved, a team member will take care of merging.
## Contributing models
When contributing new models, please validate model performance by providing training results. You can do this using our existing [ImageNet training script](https://github.com/keras-team/keras-cv/blob/master/examples/training/classification/imagenet/basic_training.py) or by contributing a custom training script of your own (see "Contributing training scripts" below). Training results can be added to the training history log with [this script](https://github.com/keras-team/keras-cv/blob/master/shell/weights/update_training_history.py), or shared with the team via Google Drive (we'll need TensorBoard logs as well as weights). Either way, the KerasCV team will need to upload the weights to our GCS bucket for distribution.
For an initial submission, trained weights do not need to exactly match paper-claimed results. As a baseline, let's shoot for 90% of the paper-claimed ImageNet top-1 accuracy. However, we should strive to improve these weights quickly to at least match paper-claimed results.
## Contributing training scripts
KerasCV is working to include a catalog of high-performing model training scripts for the models included in KerasCV.models and is welcoming contributions for these scripts. These training scripts serve as documentation of good training techniques and will be used to train weights that will be offered in KerasCV models through the package.
The KerasCV team will run submitted training scripts to produce weights for KerasCV, and will attribute strong weights to contributors via a training script ranking system. Stay tuned for more details about that.
Incremental improvements to existing training scripts are welcome, provided that they come with evidence of improved validation performance.
You can also open an issue to add weights for a specific model using a pre-existing script! In your issue, provide your training logs and resulting weights. Specify the arguments that were used to run the script, and provide support for those choices. If your weights beat our current weights, they'll become our default pre-trained weights for your model/task in KerasCV.models!
To contribute a new script, start by opening an issue and tagging @ianstenbit to discuss the task, dataset, and/or model for which you'd like to add a script. Once they've taken a look, you can prepare a PR to introduce the new training script.
See [this example script](https://github.com/keras-team/keras-cv/blob/master/examples/training/classification/imagenet/basic_training.py) for training ImageNet classification. Please follow the structure of this training script in contributing your own script. New scripts should either:
- Train a task for which we don't have a training script already
- Include a meaningfully different training approach for a given task
- Introduce a custom training method for a specific model or dataset, based on empirical evidence of efficacy.
When contributing training scripts or proposing runs, please include documentation to support decisions about training including hyperparameter choices. Examples of good documentation would be recent literature or a reference to a hyperparameter search.
Our default training scripts train using ImageNet. Because we cannot distribute this dataset, you will need to modify your dataloading step to load the dataset on your system if you wish to run training yourself. You are also welcome to locally train against a different dataset, provided that you include documentation in your PR supporting the claim that your script will still perform well against ImageNet.
We look forward to delivering great pre-trained models in KerasCV with the help of your contributions!
## Contributing custom ops
We do not plan to accept contributed custom ops due to the maintenance burden that they introduce. If there is a clear need for a specific custom op that should live in KerasCV, please consult the KerasCV team before implementing it, as we expect to reject contributions of custom ops by default.
We currently support only a small handful of ops that run on CPU and are not used at inference time.
If you are updating existing custom ops, you can re-compile the binaries from source using the instructions in the `Tests that require custom ops` section below.
## Set up environment
Setting up your KerasCV development environment requires you to fork the KerasCV repository,
clone the repository, install dependencies, and install KerasCV in editable mode.
You can achieve this by running the following commands:
```shell
gh repo fork keras-team/keras-cv --clone --remote
cd keras-cv
pip install ".[tests]"
pip install -e .
```
The first line relies on having an installation of [the GitHub CLI](https://github.com/cli/cli).
Following these commands you should be able to run the tests using `pytest keras_cv`.
Please report any issues running tests following these steps.
## Run tests
KerasCV is tested using [PyTest](https://docs.pytest.org/en/6.2.x/).
### Run a test file
To run a test file, run `pytest path/to/file` from the root directory of keras_cv.
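For example, to run every test in the CutMix preprocessing layer's test file:
```
pytest keras_cv/layers/preprocessing/cut_mix_test.py
```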
### Run a single test case
To run a single test, you can use `-k=<your_regex>`
to use a regular expression to match the tests you want to run. For example, you
can use the following command to run all the tests in `cut_mix_test.py`
whose names contain `label`:
```
pytest keras_cv/layers/preprocessing/cut_mix_test.py -k="label"
```
### Run all tests
You can run the unit tests for KerasCV by running:
```
pytest keras_cv/
```
## Formatting the Code
We use `flake8`, `isort`, `black` and `clang-format` for code formatting. You can run
the following commands manually every time you want to format your code:
- Run `shell/format.sh` to format your code
- Run `shell/lint.sh` to check the result.
If after running these the CI flow is still failing, try updating `flake8`, `isort`, `black` and `clang-format`.
This can be done by running `pip install --upgrade black`, `pip install --upgrade flake8`,
`pip install --upgrade isort` and `pip install --upgrade clang-format`
Note: The linting checks can be automated by activating
pre-commit hooks with `git config core.hooksPath .github/.githooks`
## Community Guidelines
This project follows [Google's Open Source Community Guidelines](https://opensource.google/conduct/).
| keras-cv/CONTRIBUTING.md/0 | {
"file_path": "keras-cv/CONTRIBUTING.md",
"repo_id": "keras-cv",
"token_count": 2090
} | 48 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import RandomBrightness
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomBrightness(BaseImageAugmentationLayer):
"""A preprocessing layer which randomly adjusts brightness during training.
This layer will randomly increase/reduce the brightness for the input RGB
images.
At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the brightness of the input.
Note that different brightness adjustment factors
    will be applied to each of the images in the batch.
Args:
factor: Float or a list/tuple of 2 floats between -1.0 and 1.0. The
factor is used to determine the lower bound and upper bound of the
brightness adjustment. A float value will be chosen randomly between
the limits. When -1.0 is chosen, the output image will be black, and
when 1.0 is chosen, the image will be fully white. When only one float
        is provided, e.g., 0.2, then -0.2 will be used for lower bound and 0.2
will be used for upper bound.
value_range: Optional list/tuple of 2 floats for the lower and upper limit
of the values of the input data, defaults to [0.0, 255.0]. Can be
changed to e.g. [0.0, 1.0] if the image input has been scaled before
this layer. The brightness adjustment will be scaled to this range, and
the output values will be clipped to this range.
seed: optional integer, for fixed RNG behavior.
Inputs: 3D (HWC) or 4D (NHWC) tensor, with float or int dtype. Input pixel
values can be of any range (e.g. `[0., 1.)` or `[0, 255]`)
Output: 3D (HWC) or 4D (NHWC) tensor with brightness adjusted based on the
`factor`. By default, the layer will output floats. The output value will
be clipped to the range `[0, 255]`, the valid range of RGB colors, and
rescaled based on the `value_range` if needed.
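    A minimal usage sketch (the random input below is illustrative):
    ```python
    images = tf.random.uniform(shape=(4, 32, 32, 3)) * 255.0
    layer = OldRandomBrightness(factor=0.5)
    augmented_images = layer(images, training=True)
    ```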
"""
def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
if isinstance(factor, float) or isinstance(factor, int):
factor = (-factor, factor)
self.factor = preprocessing_utils.parse_factor(
factor, min_value=-1, max_value=1
)
self.value_range = value_range
self.seed = seed
def augment_image(self, image, transformation, **kwargs):
return self._brightness_adjust(image, transformation)
def augment_label(self, label, transformation, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def get_random_transformation(self, **kwargs):
rgb_delta_shape = (1, 1, 1)
random_rgb_delta = self.factor(shape=rgb_delta_shape)
random_rgb_delta = random_rgb_delta * (
self.value_range[1] - self.value_range[0]
)
return random_rgb_delta
def _brightness_adjust(self, image, rgb_delta):
rank = image.shape.rank
if rank != 3:
raise ValueError(
"Expected the input image to be rank 3. Got "
f"inputs.shape = {image.shape}"
)
rgb_delta = tf.cast(rgb_delta, image.dtype)
image += rgb_delta
return tf.clip_by_value(image, self.value_range[0], self.value_range[1])
def get_config(self):
config = {
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RandomBrightnessTest(tf.test.TestCase):
def test_consistency_with_old_impl_rescaled_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape)
layer = RandomBrightness(factor=fixed_factor)
old_layer = OldRandomBrightness(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
def test_consistency_with_old_impl_rgb_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomBrightness(factor=fixed_factor)
old_layer = OldRandomBrightness(factor=fixed_factor)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomBrightness, OldRandomBrightness]
aug_args = {"factor": (0.5)}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_brightness.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_brightness.py",
"repo_id": "keras-cv",
"token_count": 3414
} | 49 |
"""
Title: 3D Object Detection with KerasCV
Author: Ian Stenbit, Zhaoqi Leng (Waymo), Guowang Li (Waymo)
Date created: 2023/04/27
Last modified: 2023/04/27
Description: Use KerasCV to train a 3D object detection model for LIDAR data.
Accelerator: GPU
"""
"""
KerasCV offers a set of APIs to train LIDAR-based 3D object detection models,
including data loading, augmentation, model training, and metric evaluation.
These APIs were designed and implemented in partnership with Waymo.
In this guide, we'll take KerasCV's 3D object detection API for a spin by
training a CenterPillar model on the Waymo Open Dataset, which is a 3D object
detection task for detecting cars and pedestrians for an autonomous vehicle.
"""
"""shell
!pip install --upgrade git+https://github.com/keras-team/keras-cv
!pip install tensorflow==2.11.0
!pip install waymo-open-dataset-tf-2.11.0==1.5.1
"""
import tensorflow as tf
from tensorflow import keras
import keras_cv
from keras_cv.callbacks import WaymoEvaluationCallback
from keras_cv.datasets.waymo import convert_to_center_pillar_inputs
from keras_cv.datasets.waymo import load
from keras_cv.datasets.waymo import transformer
from keras_cv.layers import CenterNetLabelEncoder
from keras_cv.layers import DynamicVoxelization
from keras_cv.models.object_detection_3d import CenterPillarBackbone
from keras_cv.models.object_detection_3d import MultiHeadCenterPillar
from keras_cv.models.object_detection_3d.center_pillar import (
MultiClassDetectionHead,
)
from keras_cv.models.object_detection_3d.center_pillar import (
MultiClassHeatmapDecoder,
)
"""
3D object detection is the process of identifying, classifying,
and localizing objects within a 3D space. Inputs are often in the form of point
clouds, although 2D images are sometimes used as inputs as well. KerasCV
currently supports point cloud inputs for 3D object detection.
Point cloud inputs to 3D object detection models typically come from LIDAR
sensors, and are generally loosely structured.
In KerasCV, we adopt a data format where point clouds are represented as a
dictionary with the following structure:
```python
point_cloud = {
"point_xyz": FloatTensor[batch_size, 3]
"point_features": FloatTensor[batch_size, num_features]
"point_mask": BooleanTensor[batch_size]
}
```
The `point_xyz` field represents the XYZ coordinates of each point in the
point cloud.
The `point_features` field represents the LIDAR features of each point in the
point cloud. Typical features include range, intensity, and elongation.
In KerasCV, 3D box targets for object detection are represented as vertical
pillars rotated with respect to the Z axis. We encode each box as a list (or
Tensor) of 7 floats: the X, Y, and Z coordinates of the box's center, the width,
height, and depth of the box, and the rotation of the box with respect to the
Z axis. (This rotation is referred to as `phi` and is always in radians).
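As a hypothetical illustration (the values below are made up), a car-like box
centered at `(10.0, -2.0, 0.5)` with extents of 4.5, 2.0, and 1.6 and rotated
by 0.3 radians about the Z axis could be encoded as:
```python
# [center_x, center_y, center_z, width, height, depth, phi] -- illustrative only
example_box = [10.0, -2.0, 0.5, 4.5, 2.0, 1.6, 0.3]
```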
KerasCV's first 3D object detection model offering is a center-based model like
the one proposed in https://arxiv.org/pdf/2006.11275.pdf.
Let's get to 3D modelling!
We'll start by loading up the Waymo Open Dataset. KerasCV provides a
`waymo_open_dataset.load` function to load the Waymo Open Dataset into our
data format.
"""
# Note that we can't distribute WOD directly -- you'll need to download it
# from waymo.com/open and put the data somewhere where your training job
# can access it.
data_shard = "./training-data"
dataset = load(data_shard)
# By default, WOD point clouds are globally positioned, but for object detection
# we want them with respect to the vehicle, so we transform them to the vehicle
# frame of reference.
dataset = dataset.map(transformer.transform_to_vehicle_frame)
# Because number of points is dynamic, we pad them to make our inputs batchable.
dataset = dataset.map(transformer.pad_or_trim_tensors)
# Then we can easily reformat the tensors into KerasCV's data format!
dataset = dataset.map(convert_to_center_pillar_inputs)
# We use a small batch size here on CPU. Generally, point clouds can be pretty
# large, so batch sizes are often smaller than in the 2D object detection world.
dataset = dataset.batch(1)
"""
Loading up the Waymo Open Dataset can be a bit tricky, but this makes it pretty
simple!
One important note: Waymo Open Dataset is distributed as TFRecords representing
a Waymo Open Dataset `Frame` proto. This cannot be deserialized into Tensors
inside of the TensorFlow graph, so this can cause CPU throttling during
training.
Therefore, KerasCV offers a utility for transforming Waymo Open Dataset frames
into tf.Example records which can be more efficiently loaded into a TF graph
for later training. The utility can be found at
https://github.com/keras-team/keras-cv/blob/master/examples/training/object_detection_3d/waymo/serialize_records.py
Next up, let's augment our data! In partnership with Waymo, KerasCV offers a
set of state-of-the-art 3D augmentations for LIDAR data and 3D boxes. They
behave like all Keras preprocessing layers, and they're very easy to set up.
"""
augmentations = keras.Sequential(
[
keras_cv.layers.GlobalRandomFlip(),
keras_cv.layers.GlobalRandomRotation(max_rotation_angle_z=3.14),
]
)
dataset = dataset.map(augmentations)
"""
In just a few lines of code, we've augmented our input data using a few of the
3D augmentations offered in KerasCV.
Next, we'll create a `MultiHeadCenterPillar` model to train. These models are
very configurable, and the configuration can be a bit overwhelming at first.
So let's start by defining (and explaining!) some of the configuration.
For a more in-depth understanding of how the model works, check out
https://arxiv.org/pdf/2006.11275.pdf.
"""
"""
Our model will group points into voxels in 3D space, and we need to specify
how large these voxels will be. Here, we define the width, length, and height
of each voxel in the units used by the input data (meters, in the case of
Waymo Open Dataset).
Because we're predicting vertical boxes, it's common to use arbitrarily tall
voxels, so in this case we use 1000 for the z dimension.
"""
voxel_size = [0.32, 0.32, 1000]
"""
For voxelization, we also need to specify the global volume of our voxel space,
which represents the overall target area where we will identify boxes. Here
we use a range of -256 * voxel_size to 256 * voxel_size for the x and y
size, and -20 to 20 for the z size. As a result, we will produce voxel features
in an overall grid of 512x512x1 voxels.
"""
# 81.92 = 256 * 0.32
spatial_size = [-81.92, 81.92, -81.92, 81.92, -20, 20]
"""
After voxelizing points, we'll run the results through a point net, which is
a dense network with a configurable feature size. Here we define this feature
size.
"""
voxelization_feature_size = 128
"""
We'll also want to know a prior for the length, width, and height of each of
the classes we're trying to detect. This is somewhat akin to the concept of
anchor sizes in 2D object detection, but is used for numerical regularization
instead of prediction anchoring in this case.
"""
car_anchor_size = [4.5, 2.0, 1.6]
pedestrian_anchor_size = [0.6, 0.8, 1.8]
"""
Now we can build our model!
We'll define a function to create the model so that we can initialize it inside
of a tf.distribute scope later on.
"""
def build_centerpillar_model():
"""
Our first model component is a voxelization layer. This will be used to
dynamically map coordinates of a point to a voxel in 3D space.
"""
voxelization_layer = DynamicVoxelization(
voxel_size=voxel_size,
spatial_size=spatial_size,
)
"""
Next, we'll need a decoder component to decode predictions into 3D boxes. To
do this, we'll need to specify how many heading bins we're using for each
class, the anchor size for each class, and a pooling size for each class.
"""
# 12 heading bins for cars, 4 for pedestrians.
num_heading_bins = [12, 4]
decoder = MultiClassHeatmapDecoder(
num_classes=2,
num_head_bin=num_heading_bins,
anchor_size=[car_anchor_size, pedestrian_anchor_size],
max_pool_size=[7, 3],
max_num_box=[800, 400],
heatmap_threshold=[0.1, 0.1],
voxel_size=voxel_size,
spatial_size=spatial_size,
)
"""
Finally, we'll create a detection head and then instantiate our full model.
Now we can compile the model and start training!
"""
multiclass_head = MultiClassDetectionHead(
num_classes=2,
num_head_bin=num_heading_bins,
)
model = MultiHeadCenterPillar(
backbone=CenterPillarBackbone.from_preset(
"center_pillar_waymo_open_dataset"
),
voxel_net=voxelization_layer,
multiclass_head=multiclass_head,
prediction_decoder=decoder,
)
return model
"""
Before we start training our model, we'll need to turn our labels into a format
that our model can learn from and later predict.
We do this using a label encoder (much like we do in 2D object detection).
"""
label_encoder = CenterNetLabelEncoder(
voxel_size=voxel_size,
max_radius=[8.0, 8.0, 0],
spatial_size=spatial_size,
num_classes=2,
# The maximum number of target boxes that we should produce per class
# (in this case 1024 for cars and 512 for pedestrians)
top_k_heatmap=[1024, 512],
)
dataset = dataset.map(label_encoder, num_parallel_calls=tf.data.AUTOTUNE)
# Up to this point, our data has been in one dictionary per-batch, but
# now we split it up into a standard x, y tuple for training
def separate_points_and_boxes(y):
x = y["point_clouds"]
del y["point_clouds"]
return x, y
dataset = dataset.map(
separate_points_and_boxes, num_parallel_calls=tf.data.AUTOTUNE
)
"""
Now we can build and compile our model!
"""
car_box_loss = keras_cv.losses.CenterNetBoxLoss(
num_heading_bins=12, anchor_size=car_anchor_size, reduction="sum"
)
pedestrian_box_loss = keras_cv.losses.CenterNetBoxLoss(
num_heading_bins=4, anchor_size=pedestrian_anchor_size, reduction="sum"
)
model = build_centerpillar_model()
model.compile(
optimizer="adam",
heatmap_loss=keras_cv.losses.BinaryPenaltyReducedFocalCrossEntropy(
reduction="sum"
),
box_loss=[car_box_loss, pedestrian_box_loss],
)
"""
Finally, we can train and evaluate our model!
We offer a `WODDetectionEvaluator` callback to easily evaluate Waymo's
detection metrics on an evaluation data set. Note that your evaluation dataset's
labels will be stored in main memory during metric evaluation.
"""
model.fit(
dataset,
epochs=5,
callbacks=[WaymoEvaluationCallback(dataset.take(20).cache())],
)
| keras-cv/examples/training/object_detection_3d/waymo/train_pillars.py/0 | {
"file_path": "keras-cv/examples/training/object_detection_3d/waymo/train_pillars.py",
"repo_id": "keras-cv",
"token_count": 3487
} | 50 |