# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""All TF-Keras metrics."""
# isort: off
import warnings
from tensorflow.python.util.tf_export import keras_export
# Base classes and utilities
from tf_keras.metrics.base_metric import Mean
from tf_keras.metrics.base_metric import MeanMetricWrapper
from tf_keras.metrics.base_metric import MeanTensor
from tf_keras.metrics.base_metric import Metric
from tf_keras.metrics.base_metric import Reduce
from tf_keras.metrics.base_metric import Sum
from tf_keras.metrics.base_metric import SumOverBatchSize
from tf_keras.metrics.base_metric import SumOverBatchSizeMetricWrapper
from tf_keras.metrics.base_metric import clone_metric
from tf_keras.metrics.base_metric import clone_metrics
from tf_keras.saving.legacy import serialization as legacy_serialization
from tf_keras.saving.serialization_lib import deserialize_keras_object
from tf_keras.saving.serialization_lib import serialize_keras_object
from tf_keras.metrics.py_metric import PyMetric
# Individual metric classes
# Accuracy metrics
from tf_keras.metrics.accuracy_metrics import Accuracy
from tf_keras.metrics.accuracy_metrics import BinaryAccuracy
from tf_keras.metrics.accuracy_metrics import CategoricalAccuracy
from tf_keras.metrics.accuracy_metrics import SparseCategoricalAccuracy
from tf_keras.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
from tf_keras.metrics.accuracy_metrics import TopKCategoricalAccuracy
from tf_keras.metrics.accuracy_metrics import accuracy
from tf_keras.metrics.accuracy_metrics import binary_accuracy
from tf_keras.metrics.accuracy_metrics import categorical_accuracy
from tf_keras.metrics.accuracy_metrics import sparse_categorical_accuracy
from tf_keras.metrics.accuracy_metrics import sparse_top_k_categorical_accuracy
from tf_keras.metrics.accuracy_metrics import top_k_categorical_accuracy
# Probabilistic metrics
from tf_keras.metrics.probabilistic_metrics import BinaryCrossentropy
from tf_keras.metrics.probabilistic_metrics import CategoricalCrossentropy
from tf_keras.metrics.probabilistic_metrics import KLDivergence
from tf_keras.metrics.probabilistic_metrics import Poisson
from tf_keras.metrics.probabilistic_metrics import SparseCategoricalCrossentropy
from tf_keras.metrics.probabilistic_metrics import binary_crossentropy
from tf_keras.metrics.probabilistic_metrics import categorical_crossentropy
from tf_keras.metrics.probabilistic_metrics import poisson
from tf_keras.metrics.probabilistic_metrics import kullback_leibler_divergence
from tf_keras.metrics.probabilistic_metrics import (
sparse_categorical_crossentropy,
)
# Regression metrics
from tf_keras.metrics.regression_metrics import CosineSimilarity
from tf_keras.metrics.regression_metrics import LogCoshError
from tf_keras.metrics.regression_metrics import MeanAbsoluteError
from tf_keras.metrics.regression_metrics import MeanAbsolutePercentageError
from tf_keras.metrics.regression_metrics import MeanRelativeError
from tf_keras.metrics.regression_metrics import MeanSquaredError
from tf_keras.metrics.regression_metrics import MeanSquaredLogarithmicError
from tf_keras.metrics.regression_metrics import RootMeanSquaredError
from tf_keras.metrics.regression_metrics import R2Score
from tf_keras.metrics.regression_metrics import cosine_similarity
from tf_keras.metrics.regression_metrics import logcosh
from tf_keras.metrics.regression_metrics import mean_absolute_error
from tf_keras.metrics.regression_metrics import mean_absolute_percentage_error
from tf_keras.metrics.regression_metrics import mean_squared_error
from tf_keras.metrics.regression_metrics import mean_squared_logarithmic_error
# Confusion metrics
from tf_keras.metrics.confusion_metrics import AUC
from tf_keras.metrics.confusion_metrics import FalseNegatives
from tf_keras.metrics.confusion_metrics import FalsePositives
from tf_keras.metrics.confusion_metrics import Precision
from tf_keras.metrics.confusion_metrics import PrecisionAtRecall
from tf_keras.metrics.confusion_metrics import Recall
from tf_keras.metrics.confusion_metrics import RecallAtPrecision
from tf_keras.metrics.confusion_metrics import SensitivityAtSpecificity
from tf_keras.metrics.confusion_metrics import SensitivitySpecificityBase
from tf_keras.metrics.confusion_metrics import SpecificityAtSensitivity
from tf_keras.metrics.confusion_metrics import TrueNegatives
from tf_keras.metrics.confusion_metrics import TruePositives
# F-Scores
from tf_keras.metrics.f_score_metrics import FBetaScore
from tf_keras.metrics.f_score_metrics import F1Score
# IoU metrics
from tf_keras.metrics.iou_metrics import BinaryIoU
from tf_keras.metrics.iou_metrics import IoU
from tf_keras.metrics.iou_metrics import MeanIoU
from tf_keras.metrics.iou_metrics import OneHotIoU
from tf_keras.metrics.iou_metrics import OneHotMeanIoU
# Hinge metrics
from tf_keras.metrics.hinge_metrics import CategoricalHinge
from tf_keras.metrics.hinge_metrics import Hinge
from tf_keras.metrics.hinge_metrics import SquaredHinge
from tf_keras.metrics.hinge_metrics import categorical_hinge
from tf_keras.metrics.hinge_metrics import squared_hinge
from tf_keras.metrics.hinge_metrics import hinge
# Aliases
acc = ACC = accuracy
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
log_cosh = logcosh
cosine_proximity = cosine_similarity
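# The aliases above are plain references, so string lookup via `get()`
# (defined below) and direct attribute access resolve to the same function
# objects. A hedged doctest-style sketch (illustrative, not part of the
# original module):
#
# >>> from tf_keras import metrics
# >>> metrics.mse is metrics.mean_squared_error
# True
# >>> metrics.get("mse") is metrics.mean_squared_error
# True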
@keras_export("keras.metrics.serialize")
def serialize(metric, use_legacy_format=False):
"""Serializes metric function or `Metric` instance.
Args:
metric: A TF-Keras `Metric` instance or a metric function.
Returns:
Metric configuration dictionary.
"""
if metric is None:
return None
if not isinstance(metric, Metric):
warnings.warn(
"The `keras.metrics.serialize()` API should only be used for "
"objects of type `keras.metrics.Metric`. Found an instance of "
f"type {type(metric)}, which may lead to improper serialization."
)
if use_legacy_format:
return legacy_serialization.serialize_keras_object(metric)
return serialize_keras_object(metric)
@keras_export("keras.metrics.deserialize")
def deserialize(config, custom_objects=None, use_legacy_format=False):
"""Deserializes a serialized metric class/function instance.
Args:
config: Metric configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during deserialization.
Returns:
A TF-Keras `Metric` instance or a metric function.
"""
if use_legacy_format:
return legacy_serialization.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="metric function",
)
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="metric function",
)
@keras_export("keras.metrics.get")
def get(identifier):
"""Retrieves a TF-Keras metric as a `function`/`Metric` class instance.
The `identifier` may be the string name of a metric function or class.
>>> metric = tf.keras.metrics.get("categorical_crossentropy")
>>> type(metric)
<class 'function'>
>>> metric = tf.keras.metrics.get("CategoricalCrossentropy")
>>> type(metric)
<class '...metrics.CategoricalCrossentropy'>
    You can also specify the `config` of the metric to this function by
    passing a dict containing `class_name` and `config` as an identifier.
    Note that the `class_name` must map to a `Metric` class.
>>> identifier = {"class_name": "CategoricalCrossentropy",
... "config": {"from_logits": True}}
>>> metric = tf.keras.metrics.get(identifier)
>>> type(metric)
<class '...metrics.CategoricalCrossentropy'>
Args:
        identifier: A metric identifier. One of `None`, the string name of a
            metric function/class, a metric configuration dictionary, a
            metric function, or a metric class instance.

    Returns:
        A TF-Keras metric as a `function`/`Metric` class instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
if isinstance(identifier, dict):
use_legacy_format = "module" not in identifier
return deserialize(identifier, use_legacy_format=use_legacy_format)
elif isinstance(identifier, str):
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
raise ValueError(f"Could not interpret metric identifier: {identifier}")
# ==============================================================================
# End of file: tf-keras/tf_keras/metrics/__init__.py
# ==============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras metrics."""
import json
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import metrics
from tf_keras.testing_infra import test_combinations
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class PoissonTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
self.expected_results = y_pred - np.multiply(y_true, np.log(y_pred))
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
poisson_obj = metrics.Poisson(name="poisson", dtype=tf.int32)
self.assertEqual(poisson_obj.name, "poisson")
self.assertEqual(poisson_obj._dtype, tf.int32)
poisson_obj2 = metrics.Poisson.from_config(poisson_obj.get_config())
self.assertEqual(poisson_obj2.name, "poisson")
self.assertEqual(poisson_obj2._dtype, tf.int32)
def test_unweighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables))
update_op = poisson_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = poisson_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables))
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
result = poisson_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(
(2, 3)
)
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
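# Hedged NumPy reference for the Poisson metric math exercised above (added
# for illustration; not an original test helper). The elementwise values
# `y_pred - y_true * log(y_pred)` are averaged over all elements, matching
# `expected_results` and the unweighted reduction in the tests.
def _poisson_reference(y_true, y_pred):
    results = y_pred - y_true * np.log(y_pred)
    return np.sum(results) / results.size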
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class KLDivergenceTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([0.4, 0.9, 0.12, 0.36, 0.3, 0.4]).reshape((2, 3))
y_true = np.asarray([0.5, 0.8, 0.12, 0.7, 0.43, 0.8]).reshape((2, 3))
self.batch_size = 2
self.expected_results = np.multiply(y_true, np.log(y_true / y_pred))
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
k_obj = metrics.KLDivergence(name="kld", dtype=tf.int32)
self.assertEqual(k_obj.name, "kld")
self.assertEqual(k_obj._dtype, tf.int32)
k_obj2 = metrics.KLDivergence.from_config(k_obj.get_config())
self.assertEqual(k_obj2.name, "kld")
self.assertEqual(k_obj2._dtype, tf.int32)
def test_unweighted(self):
self.setup()
k_obj = metrics.KLDivergence()
self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables))
update_op = k_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = k_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
k_obj = metrics.KLDivergence()
self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables))
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
result = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(
(2, 3)
)
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / (1.2 + 3.4)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
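# Hedged NumPy reference for the KL divergence metric exercised above (added
# for illustration; not an original test helper). Per-sample values
# `sum(y_true * log(y_true / y_pred))` are averaged over the batch, which
# matches the `sum / batch_size` reduction used in the tests.
def _kld_reference(y_true, y_pred):
    per_sample = np.sum(y_true * np.log(y_true / y_pred), axis=-1)
    return np.mean(per_sample)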
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class BinaryCrossentropyTest(tf.test.TestCase):
def test_config(self):
bce_obj = metrics.BinaryCrossentropy(
name="bce", dtype=tf.int32, label_smoothing=0.2
)
self.assertEqual(bce_obj.name, "bce")
self.assertEqual(bce_obj._dtype, tf.int32)
old_config = bce_obj.get_config()
self.assertAllClose(old_config["label_smoothing"], 0.2, 1e-3)
# Check save and restore config
bce_obj2 = metrics.BinaryCrossentropy.from_config(old_config)
self.assertEqual(bce_obj2.name, "bce")
self.assertEqual(bce_obj2._dtype, tf.int32)
new_config = bce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
bce_obj = metrics.BinaryCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
result = bce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [(0 + 15.33) / 2, (0 + 0) / 2]
# Reduced metric = 7.665 / 2
self.assertAllClose(self.evaluate(result), 3.833, atol=1e-3)
def test_unweighted_with_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
y_pred = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
result = bce_obj(y_true, y_pred)
# Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [((100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))),
# ((100 - 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 1 + log(1 + exp(-100))))]
# = [(0 + 0 + 0) / 3, 200 / 3]
# Reduced metric = (0 + 66.666) / 2
self.assertAllClose(self.evaluate(result), 33.333, atol=1e-3)
def test_weighted(self):
bce_obj = metrics.BinaryCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
sample_weight = tf.constant([1.5, 2.0])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [(0 + 15.33) / 2, (0 + 0) / 2]
# Weighted metric = [7.665 * 1.5, 0]
# Reduced metric = 7.665 * 1.5 / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 3.285, atol=1e-3)
def test_weighted_from_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
y_pred = tf.constant([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
sample_weight = tf.constant([2.0, 2.5])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [(0 + 0 + 0) / 3, 200 / 3]
# Weighted metric = [0, 66.666 * 2.5]
# Reduced metric = 66.666 * 2.5 / (2 + 2.5)
self.assertAllClose(self.evaluate(result), 37.037, atol=1e-3)
def test_label_smoothing(self):
logits = tf.constant(((100.0, -100.0, -100.0)))
y_true = tf.constant(((1, 0, 1)))
label_smoothing = 0.1
# Metric: max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Label smoothing: z' = z * (1 - L) + 0.5L
# After label smoothing, label 1 becomes 1 - 0.5L
# label 0 becomes 0.5L
# Applying the above two fns to the given input:
# (100 - 100 * (1 - 0.5 L) + 0 +
# 0 + 100 * (0.5 L) + 0 +
# 0 + 100 * (1 - 0.5 L) + 0) * (1/3)
# = (100 + 50L) * 1/3
bce_obj = metrics.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
result = bce_obj(y_true, logits)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAllClose(expected_value, self.evaluate(result), atol=1e-3)
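# Hedged closed-form sketch for `test_label_smoothing` above (illustration
# only; not an original test helper): with logits saturated at +/-100, the
# label-smoothed BCE reduces to (100 + 50 * L) / 3, so L = 0.1 gives 35.0.
def _smoothed_bce_saturated_logits(label_smoothing):
    return (100.0 + 50.0 * label_smoothing) / 3.0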
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CategoricalCrossentropyTest(tf.test.TestCase):
def test_config(self):
cce_obj = metrics.CategoricalCrossentropy(
name="cce", dtype=tf.int32, label_smoothing=0.2
)
self.assertEqual(cce_obj.name, "cce")
self.assertEqual(cce_obj._dtype, tf.int32)
old_config = cce_obj.get_config()
self.assertAllClose(old_config["label_smoothing"], 0.2, 1e-3)
# Check save and restore config
cce_obj2 = metrics.CategoricalCrossentropy.from_config(old_config)
self.assertEqual(cce_obj2.name, "cce")
self.assertEqual(cce_obj2._dtype, tf.int32)
new_config = cce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
cce_obj = metrics.CategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = cce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# Metric = -sum(y * log(y'), axis = -1)
# = -((log 0.95), (log 0.1))
# = [0.051, 2.302]
# Reduced metric = (0.051 + 2.302) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = cce_obj(y_true, logits)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
# sum(exp(logits), axis=-1) = [8106.802, 2986.394]
# softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# labels * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
# xent = [0.00045, 7.00182]
# Reduced xent = (0.00045 + 7.00182) / 2
self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)
def test_weighted(self):
cce_obj = metrics.CategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = tf.constant([1.5, 2.0])
result = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# Metric = -sum(y * log(y'), axis = -1)
# = -((log 0.95), (log 0.1))
# = [0.051, 2.302]
# Weighted metric = [0.051 * 1.5, 2.302 * 2.]
# Reduced metric = (0.051 * 1.5 + 2.302 * 2.) / 3.5
self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)
def test_weighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = tf.constant([1.5, 2.0])
result = cce_obj(y_true, logits, sample_weight=sample_weight)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# xent = [0.00045, 7.00182]
# weighted xent = [0.000675, 14.00364]
# Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)
def test_label_smoothing(self):
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
label_smoothing = 0.1
# Label smoothing: z' = z * (1 - L) + L/n,
# where L = label smoothing value and n = num classes
# Label value 1 becomes: 1 - L + L/n
# Label value 0 becomes: L/n
# y_true with label_smoothing = [[0.0333, 0.9333, 0.0333],
# [0.0333, 0.0333, 0.9333]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# labels * log(softmax) = [[-0.26641, -0.00042, -0.29971],
# [-0.23316, -0.00006, -6.53479]]
# xent = [0.56654, 6.76801]
# Reduced xent = (0.56654 + 6.76801) / 2
cce_obj = metrics.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
loss = cce_obj(y_true, logits)
self.assertAllClose(self.evaluate(loss), 3.667, atol=1e-3)
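# Hedged NumPy sketch of the categorical label smoothing used above
# (illustration only; not an original test helper): z' = z * (1 - L) + L / n,
# where L is the smoothing value and n the number of classes.
def _smooth_labels(y_true, label_smoothing):
    num_classes = y_true.shape[-1]
    return y_true * (1.0 - label_smoothing) + label_smoothing / num_classes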
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class SparseCategoricalCrossentropyTest(tf.test.TestCase):
def test_config(self):
scce_obj = metrics.SparseCategoricalCrossentropy(
name="scce", dtype=tf.int32
)
self.assertEqual(scce_obj.name, "scce")
self.assertEqual(scce_obj.dtype, tf.int32)
old_config = scce_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config
scce_obj2 = metrics.SparseCategoricalCrossentropy.from_config(
old_config
)
self.assertEqual(scce_obj2.name, "scce")
self.assertEqual(scce_obj2.dtype, tf.int32)
new_config = scce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = scce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# logits = log(y`) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# sum(exp(logits), axis=-1) = [1, 1]
# softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# log(softmax) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
# xent = [0.0513, 2.3026]
# Reduced xent = (0.0513 + 2.3026) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
def test_unweighted_ignore_class(self):
scce_obj = metrics.SparseCategoricalCrossentropy(ignore_class=-1)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([-1, 2])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = scce_obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), 2.3026, atol=1e-3)
def test_unweighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = scce_obj(y_true, logits)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y_true * log(softmax), 1)
# exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
# sum(exp(logits), axis=-1) = [8106.802, 2986.394]
# softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# y_true * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
# xent = [0.00045, 7.00182]
# Reduced xent = (0.00045 + 7.00182) / 2
self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)
def test_weighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = tf.constant([1.5, 2.0])
result = scce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# logits = log(y`) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# sum(exp(logits), axis=-1) = [1, 1]
# softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# log(softmax) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
# xent = [0.0513, 2.3026]
# Weighted xent = [0.051 * 1.5, 2.302 * 2.]
# Reduced xent = (0.051 * 1.5 + 2.302 * 2.) / 3.5
self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)
def test_weighted_ignore_class(self):
scce_obj = metrics.SparseCategoricalCrossentropy(ignore_class=-1)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2, -1])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1], [0.1, 0.8, 0.1]])
sample_weight = tf.constant([1.5, 2.0, 1.5])
result = scce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)
def test_weighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = tf.constant([1.5, 2.0])
result = scce_obj(y_true, logits, sample_weight=sample_weight)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y_true * log(softmax), 1)
# xent = [0.00045, 7.00182]
# weighted xent = [0.000675, 14.00364]
# Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)
def test_axis(self):
scce_obj = metrics.SparseCategoricalCrossentropy(axis=0)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.1], [0.95, 0.8], [0, 0.1]])
result = scce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# logits = log(y`) = [[-2.9957, -2.3026],
# [-0.0513, -0.2231],
# [-16.1181, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 0], [1, 0], [0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# sum(exp(logits)) = [1, 1]
# softmax = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# log(softmax) = [[-2.9957, -2.3026],
# [-0.0513, -0.2231],
# [-16.1181, -2.3026]]
# y * log(softmax) = [[0, 0], [-0.0513, 0], [0, -2.3026]]
# xent = [0.0513, 2.3026]
# Reduced xent = (0.0513 + 2.3026) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
class BinaryTruePositives(metrics.Metric):
def __init__(self, name="binary_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name="tp", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, dtype=self.dtype)
sample_weight = tf.__internal__.ops.broadcast_weights(
sample_weight, values
)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
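# Hedged usage sketch for the custom metric above (illustration only,
# assuming eager execution):
#
# >>> m = BinaryTruePositives()
# >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
# >>> float(m.result())
# 2.0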
if __name__ == "__main__":
tf.test.main()
# ==============================================================================
# End of file: tf-keras/tf_keras/metrics/probabilistic_metrics_test.py
# ==============================================================================
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests keras.Model works properly with mixed precision."""
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl import flags
from absl.testing import parameterized
from tf_keras import backend
from tf_keras import layers
from tf_keras import models
from tf_keras.applications import densenet
from tf_keras.applications import efficientnet
from tf_keras.applications import inception_resnet_v2
from tf_keras.applications import inception_v3
from tf_keras.applications import mobilenet
from tf_keras.applications import nasnet
from tf_keras.applications import resnet
from tf_keras.applications import vgg16
from tf_keras.applications import xception
from tf_keras.engine import base_layer_utils
from tf_keras.engine import input_spec
from tf_keras.engine import sequential
from tf_keras.layers import core
from tf_keras.mixed_precision import loss_scale_optimizer
from tf_keras.mixed_precision import policy
from tf_keras.mixed_precision import test_util as mp_test_util
from tf_keras.optimizers import optimizer_v1
from tf_keras.optimizers import sgd
from tf_keras.optimizers.legacy import gradient_descent
from tf_keras.saving import object_registration
from tf_keras.saving.legacy import save
from tf_keras.saving.serialization_lib import SafeModeScope
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = tf.distribute.get_strategy
def create_mirrored_strategy():
"""Create a MirroredStrategy, using a GPU if it is available."""
if tf.config.list_logical_devices("GPU"):
return tf.distribute.MirroredStrategy(["cpu:0", "gpu:0"])
else:
return tf.distribute.MirroredStrategy(["cpu:0"])
TESTCASES = (
{"testcase_name": "base", "strategy_fn": default_strategy_fn},
{"testcase_name": "distribute", "strategy_fn": create_mirrored_strategy},
)
class KerasModelTest(test_combinations.TestCase):
"""Test mixed precision with TF-Keras models."""
def _skip_if_strategy_unsupported(self, strategy_fn):
if (
strategy_fn != default_strategy_fn
and test_utils.get_model_type() == "subclass"
):
self.skipTest(
"Non-default strategies are unsupported with subclassed models"
)
def _skip_if_save_format_unsupported(self, save_format):
model_type = test_utils.get_model_type()
if save_format == "h5" and model_type == "subclass":
self.skipTest(
"Saving subclassed models with the HDF5 format is unsupported"
)
if (
save_format == "tf"
and model_type == "subclass"
and not tf.executing_eagerly()
):
self.skipTest(
"b/148820505: This combination of features is currently broken."
)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
{"testcase_name": "base", "strategy_fn": default_strategy_fn},
{
"testcase_name": "distribute",
"strategy_fn": create_mirrored_strategy,
},
{
"testcase_name": "operator",
"strategy_fn": create_mirrored_strategy,
"use_operator": True,
},
{
"testcase_name": "regularizer",
"strategy_fn": create_mirrored_strategy,
"use_regularizer": True,
},
{
"testcase_name": "get_config",
"strategy_fn": create_mirrored_strategy,
"get_config": True,
"use_regularizer": True,
},
{
"testcase_name": "saved_model",
"strategy_fn": default_strategy_fn,
"save_format": "tf",
"use_regularizer": True,
},
{
"testcase_name": "saved_model_input_spec",
"strategy_fn": default_strategy_fn,
"save_format": "tf",
"use_regularizer": True,
"use_input_spec": True,
},
{
"testcase_name": "h5",
"strategy_fn": default_strategy_fn,
"save_format": "h5",
"use_regularizer": True,
},
{
"testcase_name": "saved_model_distribute",
"strategy_fn": create_mirrored_strategy,
"save_format": "tf",
"use_regularizer": True,
},
{
"testcase_name": "saved_model_legacy_distribute",
"strategy_fn": create_mirrored_strategy,
"save_format": "tf",
"use_regularizer": True,
"use_legacy_optimizer": True,
},
{
"testcase_name": "saved_model_input_spec_distribute",
"strategy_fn": create_mirrored_strategy,
"save_format": "tf",
"use_regularizer": True,
"use_input_spec": True,
},
{
"testcase_name": "h5_distribute",
"strategy_fn": create_mirrored_strategy,
"save_format": "h5",
"use_regularizer": True,
},
{
"testcase_name": "h5_legacy_distribute",
"strategy_fn": create_mirrored_strategy,
"save_format": "h5",
"use_regularizer": True,
"use_legacy_optimizer": True,
},
)
def test_model(
self,
strategy_fn,
use_operator=False,
use_regularizer=False,
policy_name="mixed_float16",
get_config=False,
save_format=None,
use_input_spec=False,
use_legacy_optimizer=False,
):
self._skip_if_strategy_unsupported(strategy_fn)
self._skip_if_save_format_unsupported(save_format)
if not tf.__internal__.tf2.enabled():
# The non-legacy optimizer is only supported in TF2
use_legacy_optimizer = True
if use_regularizer:
weight_regularizer = mp_test_util.IdentityRegularizer()
activity_regularizer = mp_test_util.ReduceSumRegularizer()
else:
weight_regularizer = activity_regularizer = None
with strategy_fn().scope():
with policy.policy_scope(policy_name):
layer = mp_test_util.MultiplyLayer(
assert_type=tf.float16,
use_operator=use_operator,
regularizer=weight_regularizer,
activity_regularizer=activity_regularizer,
input_shape=(1,),
)
if use_input_spec:
layer.input_spec = input_spec.InputSpec(shape=(None, 1))
model = test_utils.get_model_from_layers(
[layer], input_shape=(1,), input_dtype=tf.float16
)
if get_config:
config = model.get_config()
model = model.__class__.from_config(
config,
custom_objects={
"MultiplyLayer": mp_test_util.MultiplyLayer
},
)
(layer,) = (
layer
for layer in model.layers
if isinstance(layer, mp_test_util.MultiplyLayer)
)
def loss_fn(y_true, y_pred):
del y_true
return tf.reduce_mean(y_pred)
                # Learning rate is small enough that if applied to a float16
                # variable, the variable will not change. So this tests that
                # the learning rate is applied to the float32 variable, not
                # to a float16 copy of it.
learning_rate = 2**-14
if use_legacy_optimizer:
opt = gradient_descent.SGD(learning_rate)
else:
opt = sgd.SGD(learning_rate)
# Use a fixed loss scale, as this test will fail if gradients
# are skipped for a step due to dynamic loss scaling.
opt = loss_scale_optimizer.BaseLossScaleOptimizer(
opt, dynamic=False, initial_scale=8
)
model.compile(
opt,
loss=loss_fn,
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 2 ** -14 subtracted
# from it.
expected = 1 - 2**-14
if use_regularizer:
# Weight and activity regularizer each add another 2 ** -14 to the
# gradient.
expected -= 2 * 2**-14
self.assertEqual(backend.eval(layer.v), expected)
if save_format:
with object_registration.CustomObjectScope(
{
"MultiplyLayer": mp_test_util.MultiplyLayer,
"loss_fn": loss_fn,
}
):
self._test_saving(model, dataset, save_format, use_regularizer)
def _test_saving(self, model, dataset, save_format, use_regularizer):
# Save and load model, asserting variable does not change
save_path = os.path.join(self.get_temp_dir(), "model")
model.save(save_path, save_format=save_format)
model = save.load_model(save_path)
(layer,) = (
layer
for layer in model.layers
if "MultiplyLayer" in layer.__class__.__name__
)
expected = 1 - 2**-14
if use_regularizer:
expected -= 2 * 2**-14
self.assertEqual(backend.eval(layer.v), expected)
# Continue training, and assert variable is correct value
model.fit(dataset)
new_expected = expected - 2**-14
if use_regularizer:
new_expected -= 2 * 2**-14
self.assertEqual(backend.eval(layer.v), new_expected)
# Load saved model again, and assert variable is previous value
model = save.load_model(save_path)
(layer,) = (
layer
for layer in model.layers
if "MultiplyLayer" in layer.__class__.__name__
)
self.assertEqual(backend.eval(layer.v), expected)
# Ensure various dtype-related aspects of the layer are correct
self.assertEqual(layer.dtype, "float32")
self.assertEqual(layer.dtype_policy.name, "mixed_float16")
self.assertEqual(layer.v.dtype, "float32")
self.assertEqual(layer(np.ones((2, 1))).dtype, "float16")
self.assertEqual(type(model.dtype_policy), policy.Policy)
if tf.__internal__.tf2.enabled():
self.assertEqual(
layer.get_config()["dtype"],
{
"module": "keras.mixed_precision",
"class_name": "Policy",
"config": {"name": "mixed_float16"},
"registered_name": None,
},
)
else:
self.assertEqual(
layer.get_config()["dtype"],
{
"class_name": "Policy",
"config": {"name": "mixed_float16"},
},
)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
{"testcase_name": "base", "strategy_fn": default_strategy_fn},
{
"testcase_name": "distribute",
"strategy_fn": create_mirrored_strategy,
},
)
def test_fixed_loss_scaling(self, strategy_fn):
# The non-legacy optimizer is only supported in TF2
use_legacy_optimizer = not tf.__internal__.tf2.enabled()
# Note: We do not test mixed precision in this method, only loss
# scaling.
loss_scale = 8.0
batch_size = 4
with strategy_fn().scope():
x = layers.Input(shape=(1,), batch_size=batch_size)
layer = mp_test_util.MultiplyLayer()
y = layer(x)
# The gradient of 'y' at this point is 1. With loss scaling, the
# gradient is 'loss_scale'. We divide by the batch size since the
# loss is averaged across batch elements.
expected_gradient = loss_scale / batch_size
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
[expected_gradient]
)
)
y = core.Lambda(identity_with_grad_check_fn)(y)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return tf.reduce_mean(y_pred)
if use_legacy_optimizer:
opt = gradient_descent.SGD(1.0)
else:
opt = sgd.SGD(1.0)
opt = loss_scale_optimizer.BaseLossScaleOptimizer(
opt, dynamic=False, initial_scale=loss_scale
)
model.compile(
opt, loss=loss_fn, run_eagerly=test_utils.should_run_eagerly()
)
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
# Variable starts at 1, and should have gradient of 1 subtracted from
# it.
expected = 0
self.assertEqual(backend.eval(layer.v), expected)
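    # Hedged sketch (illustration only): outside `Model.fit`, the same fixed
    # loss scaling can be applied manually with the public API, assuming TF2
    # eager execution and a `tf.GradientTape` named `tape`:
    #
    #   opt = tf.keras.mixed_precision.LossScaleOptimizer(
    #       tf.keras.optimizers.SGD(1.0), dynamic=False, initial_scale=8.0
    #   )
    #   scaled_loss = opt.get_scaled_loss(loss)
    #   scaled_grads = tape.gradient(scaled_loss, model.trainable_variables)
    #   grads = opt.get_unscaled_gradients(scaled_grads)
    #   opt.apply_gradients(zip(grads, model.trainable_variables))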
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
{"testcase_name": "base", "strategy_fn": default_strategy_fn},
{
"testcase_name": "distribute",
"strategy_fn": create_mirrored_strategy,
},
{
"testcase_name": "loss_scaling",
"strategy_fn": create_mirrored_strategy,
"use_loss_scaling": True,
},
)
def test_advanced_model(self, strategy_fn, use_loss_scaling=False):
# The advanced model tests mixed-precision-related features that would
# occur in a resnet50 model. It tests a model that has:
# * Multiple layers, some which use auto-cast variables and some which
# do not
# * Regularization on some variables and not others.
# * A fixed loss scale (if use_loss_scaling is True)
strategy = strategy_fn()
if use_loss_scaling:
loss_scale = 8.0
learning_rate = 2**-14
# The non-legacy optimizer is only supported in TF2
use_legacy_optimizer = not tf.__internal__.tf2.enabled()
with strategy.scope():
with policy.policy_scope(policy.Policy("mixed_float16")):
x = layers.Input(shape=(1,), batch_size=2)
layer1 = mp_test_util.MultiplyLayer(
assert_type=tf.float16,
regularizer=mp_test_util.IdentityRegularizer(),
use_operator=True,
)
layer2 = mp_test_util.MultiplyLayerWithoutAutoCast(
assert_type=tf.float16, use_operator=True
)
layer3 = mp_test_util.MultiplyLayer(
assert_type=tf.float16, use_operator=False
)
layer4 = mp_test_util.MultiplyLayerWithoutAutoCast(
assert_type=tf.float16,
regularizer=mp_test_util.IdentityRegularizer(),
use_operator=False,
)
y = layer1(x)
y = layer2(y)
y = layer3(y)
y = layer4(y)
if use_loss_scaling:
# The gradient of 'y' at this point is 1. With loss scaling,
# the gradient is 'loss_scale'. We divide by the batch size
# of 2 since the loss is averaged across batch elements.
expected_gradient = loss_scale / 2
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=tf.float16,
expected_gradient=[expected_gradient],
)
)
y = core.Lambda(identity_with_grad_check_fn)(y)
model = models.Model(inputs=x, outputs=y)
def loss_fn(y_true, y_pred):
del y_true
return tf.reduce_mean(y_pred)
if use_legacy_optimizer:
opt = gradient_descent.SGD(learning_rate)
else:
opt = sgd.SGD(learning_rate)
if use_loss_scaling:
opt = loss_scale_optimizer.BaseLossScaleOptimizer(
opt, dynamic=False, initial_scale=loss_scale
)
model.compile(
opt,
loss=loss_fn,
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones((2, 1))
y = np.ones((2, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
model.fit(dataset)
for layer in (layer1, layer2, layer3, layer4):
if layer.losses:
# Layer has weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - 2 * learning_rate)
else:
# Layer does not have weight regularizer
self.assertEqual(backend.eval(layer.v), 1 - learning_rate)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@parameterized.named_parameters(
{"testcase_name": "base", "strategy_fn": default_strategy_fn},
{
"testcase_name": "distribute",
"strategy_fn": create_mirrored_strategy,
},
{
"testcase_name": "get_config",
"strategy_fn": create_mirrored_strategy,
"get_config": True,
},
)
def test_dynamic_loss_scaling(self, strategy_fn, get_config=False):
strategy = strategy_fn()
initial_loss_scale = 2.0
batch_size = 4
expected_gradient = backend.variable(
[initial_loss_scale / batch_size], dtype=tf.float16
)
# If this variable is set to True, the model below will have NaN
# gradients
have_nan_gradients = backend.variable(False, dtype=tf.bool)
with strategy.scope():
opt = sgd.SGD(1.0)
opt = loss_scale_optimizer.BaseLossScaleOptimizer(
opt, initial_scale=initial_loss_scale, dynamic_growth_steps=2
)
with policy.policy_scope("mixed_float16"):
x = layers.Input(
shape=(1,), batch_size=batch_size, dtype=tf.float16
)
layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)
y = layer(x)
identity_with_nan_grads = (
mp_test_util.create_identity_with_nan_gradients_fn(
have_nan_gradients
)
)
y = core.Lambda(identity_with_nan_grads)(y)
identity_with_grad_check_fn = (
mp_test_util.create_identity_with_grad_check_fn(
expected_dtype=tf.float16,
expected_gradient=expected_gradient,
)
)
y = core.Lambda(identity_with_grad_check_fn)(y)
model = models.Model(inputs=x, outputs=y)
if get_config:
config = model.get_config()
with SafeModeScope(safe_mode=False):
model = model.__class__.from_config(
config,
custom_objects={
"MultiplyLayer": mp_test_util.MultiplyLayer
},
)
(layer,) = (
layer
for layer in model.layers
if isinstance(layer, mp_test_util.MultiplyLayer)
)
def loss_fn(y_true, y_pred):
del y_true
return tf.reduce_mean(y_pred)
model.compile(
opt,
loss=loss_fn,
run_eagerly=test_utils.should_run_eagerly(),
)
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
y = np.ones((batch_size, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
model.fit(dataset)
        # The variable starts at 1 and has a gradient of 1, so it will go
        # down by 1 each step.
self.assertEqual(backend.eval(layer.v), 0)
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -1)
# There have been two steps without NaNs, so the loss scale will double
backend.set_value(
expected_gradient, backend.get_value(expected_gradient * 2)
)
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -2)
# Next test with NaN gradients.
backend.set_value(have_nan_gradients, True)
model.fit(dataset)
# Variable should not be updated
self.assertEqual(backend.eval(layer.v), -2)
# Test with finite gradients again
backend.set_value(have_nan_gradients, False)
# The loss scale will be halved due to the NaNs, so the gradient will
# also be halved
backend.set_value(
expected_gradient, backend.get_value(expected_gradient / 2)
)
model.fit(dataset)
self.assertEqual(backend.eval(layer.v), -3)
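    # Hedged summary sketch (illustration only): with `initial_scale=2` and
    # `dynamic_growth_steps=2`, the loss scale doubles after every two
    # finite-gradient steps and halves on non-finite gradients, which is the
    # schedule the assertions above step through, e.g.:
    #
    #   lso = tf.keras.mixed_precision.LossScaleOptimizer(
    #       tf.keras.optimizers.SGD(), initial_scale=2.0,
    #       dynamic_growth_steps=2)
    #   lso.loss_scale       # starts at 2.0
    #   lso.dynamic_counter  # counts finite steps since the last change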
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_compile_wraps_with_loss_scale_optimizer(self):
x = layers.Input(shape=(1,))
y = mp_test_util.MultiplyLayer()(x)
# The non-legacy optimizer is only supported in TF2
use_legacy_optimizer = (
not tf.__internal__.tf2.enabled() or not tf.executing_eagerly()
)
with policy.policy_scope("mixed_float16"):
# Test optimizer is automatically wrapped with LSO
model = models.Model(x, y)
if use_legacy_optimizer:
optimizer = gradient_descent.SGD(1.0)
else:
optimizer = sgd.SGD(1.0)
model.compile(optimizer, "mse")
self.assertIsInstance(
model.optimizer, loss_scale_optimizer.BaseLossScaleOptimizer
)
self.assertEqual(
backend.get_value(model.optimizer.learning_rate), 1.0
)
# Test optimizer specified as string is automatically wrapped in LSO
model = models.Model(x, y)
model.compile("sgd", "mse")
self.assertIsInstance(
model.optimizer, loss_scale_optimizer.BaseLossScaleOptimizer
)
# Test if an LSO is passed, optimizer is not automatically wrapped
# with another LSO
model = models.Model(x, y)
if use_legacy_optimizer:
optimizer = gradient_descent.SGD(1.0)
else:
optimizer = sgd.SGD(1.0)
optimizer = loss_scale_optimizer.BaseLossScaleOptimizer(
optimizer, dynamic_growth_steps=2
)
model.compile(optimizer, "mse")
self.assertIsInstance(
model.optimizer, loss_scale_optimizer.BaseLossScaleOptimizer
)
self.assertEqual(model.optimizer.dynamic_growth_steps, 2)
with policy.policy_scope("mixed_bfloat16"):
# Test mixed_bfloat16 models are not automatically wrapped with LSO
model = models.Model(x, y)
if use_legacy_optimizer:
optimizer = gradient_descent.SGD(1.0)
else:
optimizer = sgd.SGD(1.0)
model.compile(optimizer, "mse")
self.assertNotIsInstance(
model.optimizer, loss_scale_optimizer.BaseLossScaleOptimizer
)
self.assertIsInstance(
model.optimizer,
gradient_descent.SGD if use_legacy_optimizer else sgd.SGD,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_pass_invalid_optimizer_with_loss_scaling(self):
with policy.policy_scope(policy.Policy("mixed_float16")):
x = layers.Input(shape=(1,))
y = mp_test_util.MultiplyLayer()(x)
model = models.Model(x, y)
if tf.executing_eagerly():
error_msg = "Use a `tf.keras` Optimizer instead"
else:
error_msg = 'optimizer" must be an instance of '
with self.assertRaisesRegex(ValueError, error_msg):
model.compile(optimizer_v1.SGD(1.0), "mse")
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_functional_model_loss_dtype(self):
with policy.policy_scope("float16"):
x = layers.Input(shape=(1,))
y = mp_test_util.MultiplyLayer()(x)
model = models.Model(x, y)
model.add_loss(tf.cast(y, "float32"))
# The loss should not be casted to the policy's dtype.
self.assertEqual(model.losses[0].dtype, "float32")
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
{
"testcase_name": "base",
"strategy_fn": default_strategy_fn,
},
{
"testcase_name": "distribute",
"strategy_fn": create_mirrored_strategy,
},
{
"testcase_name": "base_h5",
"strategy_fn": default_strategy_fn,
"h5": True,
},
{
"testcase_name": "distribute_h5",
"strategy_fn": create_mirrored_strategy,
"h5": True,
},
)
def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False):
with strategy_fn().scope():
with policy.policy_scope("mixed_float16"):
x = layers.Input(shape=(1,), batch_size=2)
layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)
y = layer(x)
model = models.Model(inputs=x, outputs=y)
model.set_weights([np.array(100.0)])
x = np.ones((2, 1))
self.assertAllClose(backend.get_value(model(x)), x * 100.0)
suffix = ".h5" if h5 else ""
weights_file = os.path.join(self.get_temp_dir(), "weights" + suffix)
model.save_weights(weights_file)
model.set_weights([np.array(200.0)])
self.assertAllClose(backend.get_value(model(x)), x * 200.0)
model.load_weights(weights_file)
self.assertAllClose(backend.get_value(model(x)), x * 100.0)
self.assertEqual(model.get_weights(), [np.array(100.0)])
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
{
"testcase_name": "base",
"strategy_fn": default_strategy_fn,
},
{
"testcase_name": "distribute",
"strategy_fn": create_mirrored_strategy,
},
{
"testcase_name": "distribute_legacy",
"strategy_fn": create_mirrored_strategy,
"use_legacy_optimizer": True,
},
{
"testcase_name": "different_var_name",
"strategy_fn": default_strategy_fn,
"var_name": "w",
},
{
"testcase_name": "different_var_name_distribute",
"strategy_fn": create_mirrored_strategy,
"var_name": "w",
},
)
def test_save_slot_variables_with_autocast_vars(
self, strategy_fn, var_name="v", use_legacy_optimizer=False
):
if not tf.__internal__.tf2.enabled():
# The non-legacy optimizer is only supported in TF2
use_legacy_optimizer = True
p = policy.Policy("mixed_float16")
with strategy_fn().scope(), policy.policy_scope(p):
x = layers.Input(shape=(2,), batch_size=2)
# Having a var_name other than 'v' tests that a fixed bug
# (b/134713714) does not reoccur. The bug was that a crash would
# occur when saving a checkpoint where an AutoCastVariable with a
# slot variable would have a different name than the layer
# attribute's name (layer.v in this case).
layer = mp_test_util.MultiplyLayer(
assert_type=tf.float16, var_name=var_name
)
y = layer(x)
model = models.Model(inputs=x, outputs=y)
if use_legacy_optimizer:
opt = gradient_descent.SGD(1.0, 1.0)
else:
opt = sgd.SGD(1.0, 1.0)
opt = loss_scale_optimizer.BaseLossScaleOptimizer(
opt, dynamic=False, initial_scale=1
)
model.compile(
optimizer=opt,
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
def get_momentum_slot():
if use_legacy_optimizer:
return opt.get_slot(layer.v, "momentum")
else:
return opt.inner_optimizer.momentums[0]
model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)
weights_file = os.path.join(self.get_temp_dir(), "weights")
model.save_weights(weights_file)
saved_slot = backend.get_value(get_momentum_slot())
model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)
new_slot = backend.get_value(get_momentum_slot())
self.assertNotEqual(new_slot, saved_slot)
model.load_weights(weights_file)
restored_slot = backend.get_value(get_momentum_slot())
self.assertEqual(restored_slot, saved_slot)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(*TESTCASES)
def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn):
strategy = strategy_fn()
if (
isinstance(strategy, tf.distribute.MirroredStrategy)
and not tf.executing_eagerly()
):
# TODO(b/121381184): Enable running the test in this case.
return
# The non-legacy optimizer is only supported in TF2
use_legacy_optimizer = not tf.__internal__.tf2.enabled()
# Create and run model.
with strategy.scope():
x = layers.Input(shape=(2,), batch_size=2, dtype=tf.float32)
y = mp_test_util.MultiplyLayer(assert_type=tf.float32)(x)
model = models.Model(inputs=x, outputs=y)
if use_legacy_optimizer:
opt = gradient_descent.SGD(1.0)
else:
opt = sgd.SGD(1.0)
opt = loss_scale_optimizer.BaseLossScaleOptimizer(
opt, initial_scale=1.0, dynamic_growth_steps=2.0
)
model.compile(
optimizer=opt,
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(opt.loss_scale), 2)
self.assertEqual(backend.get_value(opt.dynamic_counter), 1)
# Save model weights.
save_prefix = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_prefix)
# Run model again for 1 step (2 examples with a batch size of 2)
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
self.assertEqual(backend.get_value(opt.loss_scale), 4)
self.assertEqual(backend.get_value(opt.dynamic_counter), 0)
# Load model weights and ensure loss scale weights are restored.
model.load_weights(save_prefix)
self.assertEqual(backend.get_value(opt.loss_scale), 2)
self.assertEqual(backend.get_value(opt.dynamic_counter), 1)
@test_combinations.run_all_keras_modes
def test_restore_old_loss_scale_checkpoint(self):
# Ensure a checkpoint from TF 2.2 can be loaded. The checkpoint format
# of LossScaleOptimizer changed, but old checkpoints can still be loaded
# into the legacy optimizers.
opt = gradient_descent.SGD(0.1, momentum=0.1)
opt = loss_scale_optimizer.LossScaleOptimizer(opt)
model = sequential.Sequential(
[
core.Dense(
2,
)
]
)
# The checkpoint and expected values were obtained from the program in
# testdata/BUILD.
ckpt_dir = os.path.join(
flags.FLAGS["test_srcdir"].value,
"org_keras/tf_keras",
"mixed_precision/testdata/lso_ckpt_tf2.2",
)
# ckpt_dir = test.test_src_dir_path(
# 'python/tf_keras/mixed_precision/testdata/lso_ckpt_tf2.2')
model.load_weights(os.path.join(ckpt_dir, "ckpt"))
model.compile(opt, "mse", run_eagerly=test_utils.should_run_eagerly())
model(np.zeros((2, 2))) # Create model weights
opt._create_all_weights(model.weights)
expected_kernel = np.array(
[[9.229685, 10.901115], [10.370763, 9.757362]]
)
expected_slot = np.array([[10.049943, 9.917691], [10.049943, 9.917691]])
self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)
self.assertAllClose(
self.evaluate(opt.get_slot(model.weights[0], "momentum")),
expected_slot,
)
self.assertEqual(self.evaluate(opt.loss_scale), 32768)
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
# Check restoring works even after the model is compiled and the weights
# have been created.
model.fit(np.random.normal(size=(2, 2)), np.random.normal(size=(2, 2)))
self.assertNotAllClose(self.evaluate(model.weights[0]), expected_kernel)
self.assertNotAllClose(
self.evaluate(opt.get_slot(model.weights[0], "momentum")),
expected_slot,
)
model.load_weights(os.path.join(ckpt_dir, "ckpt"))
self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)
self.assertAllClose(
self.evaluate(opt.get_slot(model.weights[0], "momentum")),
expected_slot,
)
self.assertEqual(self.evaluate(opt.loss_scale), 32768)
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
def test_restore_old_saved_model(self):
saved_model_dir = os.path.join(
flags.FLAGS["test_srcdir"].value,
"org_keras/tf_keras",
"mixed_precision/testdata/lso_savedmodel_tf2.2",
)
model = save.load_model(saved_model_dir)
expected_kernel = np.array(
[[9.229685, 10.901115], [10.370763, 9.757362]]
)
self.assertAllClose(backend.eval(model.weights[0]), expected_kernel)
self.assertEqual(
type(model.optimizer), loss_scale_optimizer.LossScaleOptimizer
)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
{
"testcase_name": "base",
"strategy_fn": default_strategy_fn,
},
{
"testcase_name": "distribute",
"strategy_fn": create_mirrored_strategy,
},
{
"testcase_name": "base_h5",
"strategy_fn": default_strategy_fn,
"h5": True,
},
{
"testcase_name": "distribute_h5",
"strategy_fn": create_mirrored_strategy,
"h5": True,
},
)
def test_save_model_with_dynamic_loss_scaling(self, strategy_fn, h5=False):
# TODO(reedwm): Support and test saving model with a mixed_[b]float16
# policy as well.
strategy = strategy_fn()
if (
isinstance(strategy, tf.distribute.MirroredStrategy)
and not tf.executing_eagerly()
):
# TODO(b/121381184): Enable running the test in this case.
return
# Create and run model.
with strategy.scope():
x = layers.Input(shape=(2,), batch_size=2, dtype=tf.float32)
y = mp_test_util.MultiplyLayer()(x)
model = models.Model(inputs=x, outputs=y)
# Only test the legacy optimizer. The new optimizer does not
# support saving optimizer weights.
opt = gradient_descent.SGD(1.0)
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, initial_scale=1.0, dynamic_growth_steps=2.0
)
model.compile(
optimizer=opt,
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.ones((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(opt.loss_scale), 2)
self.assertEqual(backend.get_value(opt.dynamic_counter), 1)
(weight,) = model.trainable_weights
orig_weight = backend.get_value(weight)
# Save model weights.
save_path = os.path.join(self.get_temp_dir(), "model")
model.save(save_path, save_format="h5" if h5 else "tf")
# Run model again for 1 step (2 examples with a batch size of 2)
model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)
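        # The extra step completes another growth window, so the loss scale
        # doubles again (2 -> 4) and the counter resets to 0.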
new_weight = backend.get_value(weight)
self.assertNotEqual(new_weight, orig_weight)
self.assertEqual(backend.get_value(opt.loss_scale), 4)
self.assertEqual(backend.get_value(opt.dynamic_counter), 0)
# Load model weights and ensure loss scale weights are restored.
model = save.load_model(
save_path,
custom_objects={"MultiplyLayer": mp_test_util.MultiplyLayer},
)
(weight,) = model.trainable_weights
loaded_weight = backend.get_value(weight)
self.assertEqual(loaded_weight, orig_weight)
# Currently the loss scale isn't always saved when the model is saved
# with Model.save(). So we assert the loss scale either has the value
# when it was saved, or the value it was initialized with.
# TODO(reedwm): Always save/restore the loss scale with Model.save().
self.assertIn(backend.get_value(model.optimizer.loss_scale), (1, 2))
self.assertIn(
backend.get_value(model.optimizer.dynamic_counter), (0, 1)
)
# Test optimizer attributes and type
self.assertEqual(model.optimizer.initial_scale, 1.0)
self.assertEqual(model.optimizer.dynamic_growth_steps, 2.0)
self.assertEqual(
type(model.optimizer), loss_scale_optimizer.LossScaleOptimizer
)
class ApplicationModelTest(test_combinations.TestCase):
"""Tests that application models can be built with mixed precision.
This does not test that such models can be trained in mixed precision, as
doing so takes too much time for a unit test.
"""
@parameterized.named_parameters(
("densenet", densenet.DenseNet121),
("efficientnet", efficientnet.EfficientNetB0),
("inception_resnet_v2", inception_resnet_v2.InceptionResNetV2),
("inception_v3", inception_v3.InceptionV3),
("mobilenet", mobilenet.MobileNet),
("nasnet", nasnet.NASNetMobile),
("vgg16", vgg16.VGG16),
("xception", xception.Xception),
("resnet50", resnet.ResNet50),
)
def test_application_model(self, app):
# Run on CPU since model weights may exhaust GPU memory
with policy.policy_scope("mixed_float16"), tf.device("/CPU:0"):
app(weights=None)
if __name__ == "__main__":
base_layer_utils.enable_v2_dtype_behavior()
tf.test.main()
| tf-keras/tf_keras/mixed_precision/model_test.py/0 | {
"file_path": "tf-keras/tf_keras/mixed_precision/model_test.py",
"repo_id": "tf-keras",
"token_count": 20570
} | 191 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `models.py` (model cloning, mainly)."""
import functools
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import backend
from tf_keras import metrics
from tf_keras import models
from tf_keras.optimizers import optimizer_v1
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
class TestModel(keras.Model):
"""A model subclass."""
def __init__(self, n_outputs=4, trainable=True):
"""A test class with one dense layer and number of outputs as a
variable."""
super().__init__()
self.layer1 = keras.layers.Dense(n_outputs)
self.n_outputs = tf.Variable(n_outputs, trainable=trainable)
def call(self, x):
return self.layer1(x)
def _get_layers(input_shape=(4,), add_input_layer=False):
if add_input_layer:
model_layers = [
keras.layers.InputLayer(input_shape=input_shape),
keras.layers.Dense(4),
]
elif input_shape:
model_layers = [keras.layers.Dense(4, input_shape=input_shape)]
else:
model_layers = [keras.layers.Dense(4)]
model_layers += [
keras.layers.BatchNormalization(),
keras.layers.Dropout(0.5),
keras.layers.Dense(4),
]
return model_layers
def _get_model(input_shape=(4,)):
model_layers = _get_layers(input_shape=None, add_input_layer=False)
return test_utils.get_model_from_layers(
model_layers, input_shape=input_shape
)
class TestModelCloning(test_combinations.TestCase):
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
{
"testcase_name": "has_input_layer",
"input_shape": (4,),
"add_input_layer": True,
"share_weights": False,
},
{
"testcase_name": "no_input_layer",
"input_shape": None,
"add_input_layer": False,
"share_weights": False,
},
{
"testcase_name": "has_input_layer_share_weights",
"input_shape": (4,),
"add_input_layer": True,
"share_weights": True,
},
{
"testcase_name": "no_input_layer_share_weights",
"input_shape": None,
"add_input_layer": False,
"share_weights": True,
},
]
)
def test_clone_sequential_model(
self, input_shape, add_input_layer, share_weights
):
if share_weights:
clone_fn = functools.partial(
keras.models._clone_sequential_model,
layer_fn=models.share_weights,
)
else:
clone_fn = keras.models.clone_model
val_a = np.random.random((10, 4))
model = models.Sequential(_get_layers(input_shape, add_input_layer))
# Sanity check
self.assertEqual(
isinstance(
list(
model._flatten_layers(include_self=False, recursive=False)
)[0],
keras.layers.InputLayer,
),
add_input_layer,
)
self.assertEqual(model._is_graph_network, add_input_layer)
# With placeholder creation -- clone model should have an InputLayer
# if the original model has one.
new_model = clone_fn(model)
self.assertEqual(
isinstance(
list(
new_model._flatten_layers(
include_self=False, recursive=False
)
)[0],
keras.layers.InputLayer,
),
add_input_layer,
)
self.assertEqual(new_model._is_graph_network, model._is_graph_network)
if (
input_shape
and not tf.compat.v1.executing_eagerly_outside_functions()
):
            # Update ops from batch norm need to be included
            # (BatchNormalization contributes two: moving mean and variance).
self.assertGreaterEqual(len(new_model.updates), 2)
# On top of new tensor -- clone model should always have an InputLayer.
input_a = keras.Input(shape=(4,), name="a")
new_model = clone_fn(model, input_tensors=input_a)
self.assertIsInstance(
list(
new_model._flatten_layers(include_self=False, recursive=False)
)[0],
keras.layers.InputLayer,
)
        # The new model's inputs should have the properties of the new input
        # tensor.
        if tf.__internal__.tf2.enabled():
            # In TF1, the new model's input name would be "a:0" instead.
self.assertEqual(new_model.input_names[0], input_a.name)
self.assertEqual(new_model.inputs[0].shape, input_a.shape)
self.assertTrue(new_model._is_graph_network)
# On top of new, non-Keras tensor -- clone model should always have an
# InputLayer.
if not tf.executing_eagerly():
# TODO(b/121277734):Skip Eager contexts, as Input() layers raise an
# error saying they should not be used with EagerTensors
input_a = keras.backend.variable(val_a)
new_model = clone_fn(model, input_tensors=input_a)
self.assertIsInstance(
list(
new_model._flatten_layers(
include_self=False, recursive=False
)
)[0],
keras.layers.InputLayer,
)
self.assertTrue(new_model._is_graph_network)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
{"testcase_name": "clone_weights", "share_weights": False},
{"testcase_name": "share_weights", "share_weights": True},
]
)
def test_clone_functional_model(self, share_weights):
if share_weights:
clone_fn = functools.partial(
keras.models._clone_functional_model,
layer_fn=models.share_weights,
)
else:
clone_fn = keras.models.clone_model
val_a = np.random.random((10, 4))
val_b = np.random.random((10, 4))
val_out = np.random.random((10, 4))
input_a = keras.Input(shape=(4,))
input_b = keras.Input(shape=(4,))
dense_1 = keras.layers.Dense(
4,
)
dense_2 = keras.layers.Dense(
4,
)
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
x_a = keras.layers.BatchNormalization()(x_a)
x_b = dense_1(input_b)
x_a = dense_2(x_a)
outputs = keras.layers.add([x_a, x_b])
model = keras.models.Model([input_a, input_b], outputs)
# With placeholder creation
new_model = clone_fn(model)
if not tf.compat.v1.executing_eagerly_outside_functions():
self.assertGreaterEqual(len(new_model.updates), 2)
new_model.compile(
test_utils.get_v2_optimizer("rmsprop"),
"mse",
run_eagerly=test_utils.should_run_eagerly(),
)
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new tensors
input_a = keras.Input(shape=(4,), name="a")
input_b = keras.Input(shape=(4,), name="b")
new_input_tensors = [input_a, input_b]
new_model = keras.models.clone_model(
model, input_tensors=new_input_tensors
)
if not tf.compat.v1.executing_eagerly_outside_functions():
self.assertLen(new_model.updates, 2)
new_model.compile(
test_utils.get_v2_optimizer("rmsprop"),
"mse",
run_eagerly=test_utils.should_run_eagerly(),
)
new_model.train_on_batch([val_a, val_b], val_out)
# New model should use provided input tensors
self.assertListEqual(new_model.inputs, new_input_tensors)
# On top of new, non-Keras tensors
if not tf.executing_eagerly():
# TODO(b/121277734):Skip Eager contexts, as Input() layers raise an
# error saying they should not be used with EagerTensors
input_a = keras.backend.variable(val_a)
input_b = keras.backend.variable(val_b)
new_model = clone_fn(model, input_tensors=[input_a, input_b])
self.assertGreaterEqual(len(new_model.updates), 2)
new_model.compile(
test_utils.get_v2_optimizer("rmsprop"),
"mse",
run_eagerly=test_utils.should_run_eagerly(),
)
new_model.train_on_batch(None, val_out)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
{"testcase_name": "clone_weights", "share_weights": False},
{"testcase_name": "share_weights", "share_weights": True},
]
)
def test_clone_functional_with_masking(self, share_weights):
if share_weights:
clone_fn = functools.partial(
keras.models._clone_functional_model,
layer_fn=models.share_weights,
)
else:
clone_fn = keras.models.clone_model
x = np.array([[[1.0], [1.0]], [[0.0], [0.0]]])
inputs = keras.Input((2, 1))
outputs = keras.layers.Masking(mask_value=0)(inputs)
outputs = keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer="one")
)(outputs)
model = keras.Model(inputs, outputs)
model = clone_fn(model)
model.compile(
loss="mse",
optimizer=test_utils.get_v2_optimizer("adam"),
run_eagerly=test_utils.should_run_eagerly(),
)
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(float(loss), 0.0)
def test_clone_rnn(self):
        # Test cloning a model whose RNN cells are passed as a list. This
        # exercises a few "fancier" features such as the `Bidirectional`
        # wrapper and `StackedRNNCells` under the hood.
inputs = keras.Input(shape=(3, 3))
cells = [
keras.layers.LSTMCell(
units=32,
enable_caching_device=True,
implementation=2,
activation="relu",
)
]
rnn = keras.layers.RNN(cells, return_sequences=True)
outputs = keras.layers.Bidirectional(rnn)(inputs)
outputs = keras.layers.Dense(12, activation="softmax", name="scores")(
outputs
)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
loss=keras.losses.CategoricalCrossentropy(),
optimizer=keras.optimizers.legacy.rmsprop.RMSprop(lr=0.01),
metrics=["accuracy"],
)
keras.models.clone_model(model)
def test_model_cloning_invalid_use_cases(self):
seq_model = keras.models.Sequential()
seq_model.add(keras.layers.Dense(4, input_shape=(4,)))
x = keras.Input((4,))
y = keras.layers.Dense(4)(x)
fn_model = keras.models.Model(x, y)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(seq_model)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(None)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(fn_model)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(
seq_model, input_tensors=[x, x]
)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=y)
def test_functional_cloning_does_not_create_unnecessary_placeholders(self):
with tf.Graph().as_default():
x = keras.Input((4,))
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
graph = tf.Graph()
with graph.as_default():
x = tf.ones((10, 4))
_ = keras.models.clone_model(model, input_tensors=[x])
has_placeholder = _has_placeholder(graph)
self.assertFalse(has_placeholder)
def test_sequential_cloning_does_not_create_unnecessary_placeholders(self):
with tf.Graph().as_default():
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,)))
graph = tf.Graph()
with graph.as_default():
x = tf.ones((10, 4))
_ = keras.models.clone_model(model, input_tensors=[x])
has_placeholder = _has_placeholder(graph)
self.assertFalse(has_placeholder)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
{"testcase_name": "clone_weights", "share_weights": False},
{"testcase_name": "share_weights", "share_weights": True},
]
)
def test_functional_cloning_with_tensor_kwarg(self, share_weights):
"""Test that cloning works with models that use Tensor kwargs."""
if share_weights:
clone_fn = functools.partial(
keras.models.clone_model, clone_function=models.share_weights
)
else:
clone_fn = keras.models.clone_model
class LayerWithTensorKwarg(keras.layers.Layer):
def call(self, inputs, tensor=None):
if tensor is not None:
return inputs * tf.cast(tensor, tf.float32)
else:
return inputs
        inputs = keras.layers.Input(shape=(3,))
t = tf.sequence_mask(tf.shape(inputs)[1])
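        # For the (None, 3) input, tf.sequence_mask(3) is [True, True, True],
        # so the tensor kwarg multiplies the inputs by ones and the added
        # loss below equals the sum of the inputs.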
model = keras.models.Model(inputs, LayerWithTensorKwarg()(inputs, t))
model.add_loss(tf.reduce_sum(model.outputs))
input_arr = np.random.random((1, 3)).astype(np.float32)
clone = clone_fn(model)
if tf.executing_eagerly():
clone(input_arr)
loss = clone.losses[0]
else:
with self.session() as sess:
clone(input_arr)
if share_weights:
self.skipTest(
"Weight sharing with inputs in call **kwargs does "
"not work correctly in v1"
)
else:
feed_dict = {clone.input: input_arr}
loss = sess.run(clone.losses[0], feed_dict=feed_dict)
self.assertAllClose(np.sum(input_arr), loss)
def _has_placeholder(graph):
ops_types = [op.type for op in graph.get_operations()]
return any("Placeholder" in s for s in ops_types)
class CheckpointingTests(test_combinations.TestCase):
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_optimizer_dependency(self):
model = _get_model()
opt = tf.compat.v1.train.AdamOptimizer(0.01)
model.compile(
optimizer=opt,
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
x=np.array([[1.0, 2.0, 3.0, 4.0]]),
y=np.array([[1.0, 1.0, 1.0, 1.0]]),
epochs=2,
)
save_prefix = os.path.join(self.get_temp_dir(), "ckpt")
beta1_power, _ = opt._get_beta_accumulators()
self.evaluate(beta1_power.assign(12.0))
model.save_weights(save_prefix)
self.evaluate(beta1_power.assign(13.0))
model.load_weights(save_prefix)
self.assertEqual(12.0, self.evaluate(beta1_power))
@test_combinations.run_all_keras_modes
class TestModelBackend(test_combinations.TestCase):
def test_model_backend_float64_use_cases(self):
# Test case for GitHub issue 19318
floatx = keras.backend.floatx()
keras.backend.set_floatx("float64")
x = keras.Input((5,))
y = keras.layers.Dense(1)(x)
model = keras.models.Model(x, y)
model.compile(
test_utils.get_v2_optimizer("rmsprop"),
"mse",
run_eagerly=test_utils.should_run_eagerly(),
)
keras.backend.set_floatx(floatx)
class TestCloneAndBuildModel(test_combinations.TestCase):
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_clone_and_build_non_compiled_model(self):
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
model = _get_model()
with self.assertRaisesRegex(ValueError, "has not been compiled"):
models.clone_and_build_model(model, compile_clone=True)
is_subclassed = test_utils.get_model_type() == "subclass"
# With placeholder creation
new_model = models.clone_and_build_model(
model, compile_clone=False, in_place_reset=is_subclassed
)
with self.assertRaisesRegex(RuntimeError, "must compile"):
new_model.evaluate(inp, out)
with self.assertRaisesRegex(RuntimeError, "must compile"):
new_model.train_on_batch(inp, out)
new_model.compile(
test_utils.get_v2_optimizer("rmsprop"),
"mse",
run_eagerly=test_utils.should_run_eagerly(),
)
new_model.train_on_batch(inp, out)
# Create new tensors for inputs.
input_a = keras.Input(shape=(4,))
new_model = models.clone_and_build_model(
model,
input_tensors=input_a,
compile_clone=False,
in_place_reset=is_subclassed,
)
with self.assertRaisesRegex(RuntimeError, "must compile"):
new_model.evaluate(inp, out)
with self.assertRaisesRegex(RuntimeError, "must compile"):
new_model.train_on_batch(inp, out)
new_model.compile(
test_utils.get_v2_optimizer("rmsprop"),
"mse",
run_eagerly=test_utils.should_run_eagerly(),
)
new_model.train_on_batch(inp, out)
def _assert_same_compile_params(self, model):
"""Assert that two models have the same compile parameters."""
self.assertEqual("mse", model.loss)
self.assertIsInstance(
model.optimizer,
(
optimizer_v1.RMSprop,
keras.optimizers.legacy.rmsprop.RMSprop,
),
)
def _clone_and_build_test_helper(self, model, model_type):
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
is_subclassed = model_type == "subclass"
# With placeholder creation
new_model = models.clone_and_build_model(
model, compile_clone=True, in_place_reset=is_subclassed
)
self._assert_same_compile_params(new_model)
new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out)
# Create new tensors for inputs.
input_a = keras.Input(shape=(4,), name="a")
new_model = models.clone_and_build_model(
model,
input_tensors=input_a,
compile_clone=True,
in_place_reset=is_subclassed,
)
self._assert_same_compile_params(new_model)
new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out)
new_model = models.clone_and_build_model(
model,
input_tensors=input_a,
target_tensors=None,
compile_clone=True,
in_place_reset=is_subclassed,
)
self._assert_same_compile_params(new_model)
new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_clone_and_build_compiled(self):
model = _get_model()
model.compile(
test_utils.get_v2_optimizer("rmsprop"),
"mse",
metrics=["acc", metrics.categorical_accuracy],
run_eagerly=test_utils.should_run_eagerly(),
)
self._clone_and_build_test_helper(model, test_utils.get_model_type())
@test_combinations.run_all_keras_modes
def test_clone_and_build_sequential_without_inputs_defined(self):
model = models.Sequential(_get_layers(input_shape=None))
model.compile(
test_utils.get_v2_optimizer("rmsprop"),
"mse",
metrics=["acc", metrics.categorical_accuracy],
run_eagerly=test_utils.should_run_eagerly(),
)
self._clone_and_build_test_helper(model, "sequential")
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
model.train_on_batch(inp, out)
self._clone_and_build_test_helper(model, "sequential")
def assert_optimizer_iterations_increases(self, optimizer):
model = _get_model()
model.compile(
optimizer,
"mse",
metrics=["acc", metrics.categorical_accuracy],
run_eagerly=test_utils.should_run_eagerly(),
)
global_step = keras.backend.variable(123, dtype=tf.int64)
clone_model = models.clone_and_build_model(
model,
compile_clone=True,
optimizer_iterations=global_step,
in_place_reset=(test_utils.get_model_type() == "subclass"),
)
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
clone_model.train_on_batch(inp, out)
self.assertEqual(backend.eval(global_step), 124)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_replace_tf_optimizer_iterations_variable(self):
if tf.executing_eagerly():
self.skipTest("v1 optimizers not supported with eager.")
self.assert_optimizer_iterations_increases(
tf.compat.v1.train.AdamOptimizer(0.01)
)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
def test_replace_keras_optimizer_iterations_variable(self):
self.assert_optimizer_iterations_increases("adam")
def test_clone_optimizer_in_different_graph(self):
with tf.Graph().as_default():
with self.session():
model = test_utils.get_small_sequential_mlp(3, 4)
optimizer = keras.optimizers.legacy.adam.Adam()
model.compile(
optimizer,
"mse",
metrics=["acc", metrics.categorical_accuracy],
)
model.fit(
x=np.array([[1.0, 2.0, 3.0, 4.0]]),
y=np.array([[1.0, 1.0, 1.0, 1.0]]),
epochs=1,
)
optimizer_config = optimizer.get_config()
with tf.Graph().as_default():
with self.session():
with self.assertRaisesRegex(
ValueError, "Cannot use the given session"
):
models.clone_and_build_model(model, compile_clone=True)
# The optimizer_config object allows the model to be cloned in a
# different graph.
models.clone_and_build_model(
model, compile_clone=True, optimizer_config=optimizer_config
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/models/cloning_test.py/0 | {
"file_path": "tf-keras/tf_keras/models/cloning_test.py",
"repo_id": "tf-keras",
"token_count": 11996
} | 192 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adadelta Optimizer."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.optimizers.legacy import adadelta
from tf_keras.testing_infra import test_combinations
_DATA_TYPES = [tf.half, tf.float32, tf.float64, tf.complex64, tf.complex128]
class AdadeltaOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def doTestBasic(self, use_resource=False, use_callable_params=False):
num_updates = 4 # number of ADADELTA steps to perform
for dtype in _DATA_TYPES:
for grad in [0.2, 0.1, 0.01]:
for lr in [1.0, 0.5, 0.1]:
var0_init = [1.0, 2.0]
var1_init = [3.0, 4.0]
if use_resource:
var0 = tf.Variable(var0_init, dtype=dtype)
var1 = tf.Variable(var1_init, dtype=dtype)
else:
var0 = tf.Variable(var0_init, dtype=dtype)
var1 = tf.Variable(var1_init, dtype=dtype)
grads = tf.constant([grad, grad], dtype=dtype)
accum = 0.0
accum_update = 0.0
# ADADELTA gradient optimizer
rho = 0.95
epsilon = 1e-8
if use_callable_params:
adadelta_opt = adadelta.Adadelta(
learning_rate=lambda: lr,
rho=lambda: rho,
epsilon=epsilon,
)
else:
adadelta_opt = adadelta.Adadelta(
learning_rate=lr, rho=rho, epsilon=epsilon
)
if not tf.executing_eagerly():
adadelta_update = adadelta_opt.apply_gradients(
zip([grads, grads], [var0, var1])
)
self.evaluate(
tf.compat.v1.global_variables_initializer()
)
# Assign slots
slot = [None] * 2
slot_update = [None] * 2
slot[0] = adadelta_opt.get_slot(var0, "accum_grad")
self.assertEqual(slot[0].shape, var0.shape)
slot_update[0] = adadelta_opt.get_slot(
var0, "accum_var"
)
self.assertEqual(slot_update[0].shape, var0.shape)
slot[1] = adadelta_opt.get_slot(var1, "accum_grad")
self.assertEqual(slot[1].shape, var1.shape)
slot_update[1] = adadelta_opt.get_slot(
var1, "accum_var"
)
self.assertEqual(slot_update[1].shape, var1.shape)
# Fetch params to validate initial values
self.assertAllClose(var0_init, self.evaluate(var0))
self.assertAllClose(var1_init, self.evaluate(var1))
update = [None] * num_updates
tot_update = 0
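                    # Reference update (Zeiler 2012, ADADELTA):
                    #   accum <- rho*accum + (1-rho)*g**2
                    #   delta = sqrt(accum_update+eps)/sqrt(accum+eps) * g
                    #   accum_update <- rho*accum_update + (1-rho)*delta**2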
for step in range(num_updates):
# Run adadelta update for comparison
if not tf.executing_eagerly():
self.evaluate(adadelta_update)
else:
adadelta_opt.apply_gradients(
zip([grads, grads], [var0, var1])
)
# Perform initial update without previous accum values
accum = accum * rho + (grad**2) * (1 - rho)
update[step] = (
np.sqrt(accum_update + epsilon)
* (1.0 / np.sqrt(accum + epsilon))
* grad
)
accum_update = accum_update * rho + (
update[step] ** 2
) * (1.0 - rho)
tot_update += update[step] * lr
if not tf.executing_eagerly():
# Check that the accumulators have been updated
# TODO(lxuechen): This is hard to test in eager mode
for slot_idx in range(2):
self.assertAllCloseAccordingToType(
np.array(
[accum, accum],
dtype=dtype.as_numpy_dtype(0),
),
self.evaluate(slot[slot_idx]),
rtol=1e-5,
)
self.assertAllCloseAccordingToType(
np.array(
[accum_update, accum_update],
dtype=dtype.as_numpy_dtype(0),
),
self.evaluate(slot_update[slot_idx]),
rtol=1e-5,
)
# Check that the parameters have been updated
self.assertAllCloseAccordingToType(
np.array(
[
var0_init[0] - tot_update,
var0_init[1] - tot_update,
],
dtype=dtype.as_numpy_dtype(0),
),
self.evaluate(var0),
rtol=1e-5,
)
self.assertAllCloseAccordingToType(
np.array(
[
var1_init[0] - tot_update,
var1_init[1] - tot_update,
],
dtype=dtype.as_numpy_dtype(0),
),
self.evaluate(var1),
rtol=1e-5,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testBasicCallableParams(self):
self.doTestBasic(use_resource=True, use_callable_params=True)
def testMinimizeSparseResourceVariable(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
x = tf.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = tf.matmul(
tf.compat.v1.nn.embedding_lookup([var0], [0]), x
)
return pred * pred
sgd_op = adadelta.Adadelta(1.0, 1.0, 1.0).minimize(
loss, var_list=[var0]
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType(
[[1.0, 2.0]], self.evaluate(var0)
)
                # Run 1 step of Adadelta
self.evaluate(sgd_op)
# Validate updated params
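                # With lr=rho=epsilon=1.0 the accumulators stay at 0, so the
                # step is ~grad. Here pred = [1, 2] @ [[4], [5]] = 14 and
                # d(pred**2)/d(var0) = 2 * 14 * [4, 5] = [112, 140], giving
                # [1 - 112, 2 - 140] = [-111, -138].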
self.assertAllCloseAccordingToType(
[[-111, -138]], self.evaluate(var0)
)
def testConstructAdadeltaWithLR(self):
opt = adadelta.Adadelta(lr=1.0, rho=0.9, epsilon=1.0)
opt_2 = adadelta.Adadelta(
learning_rate=0.1, rho=0.9, epsilon=1.0, lr=1.0
)
opt_3 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1.0)
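        # The deprecated `lr` alias takes precedence over `learning_rate`
        # when both are passed, so opt_2 ends up with lr == 1.0.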
self.assertIsInstance(opt.lr, tf.Variable)
self.assertIsInstance(opt_2.lr, tf.Variable)
self.assertIsInstance(opt_3.lr, tf.Variable)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
def testConstructAdadeltaWithEpsilonValues(self):
opt = adadelta.Adadelta(epsilon=None)
self.assertEqual(opt.epsilon, 1e-7)
opt = adadelta.Adadelta(epsilon=1e-8)
self.assertEqual(opt.epsilon, 1e-8)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/optimizers/legacy/adadelta_test.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/adadelta_test.py",
"repo_id": "tf-keras",
"token_count": 5921
} | 193 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop."""
import copy
import itertools
import math
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.optimizers.legacy import rmsprop
from tf_keras.optimizers.schedules import learning_rate_schedule
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# isort: off
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
_DATA_TYPES = [tf.half, tf.float32, tf.float64, tf.complex64, tf.complex128]
_TEST_PARAM_VALUES = [
# learning_rate, rho, momentum, epsilon, centered
[0.05, 0.9, 0.0, 1e-3, True],
[0.05, 0.9, 0.0, 1e-3, False],
[0.1, 0.9, 0.0, 1e-3, True],
[0.01, 0.9, 0.0, 1e-5, True],
[0.01, 0.9, 0.9, 1e-5, True],
]
_TESTPARAMS = [
[data_type] + values
for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
]
class RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def _rmsprop_update_numpy(
self, var, g, mg, rms, mom, lr, rho, momentum, epsilon, centered
):
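        # NumPy reference for one RMSprop step. Note the epsilon placement
        # mirrors the implementation: inside the sqrt when momentum is used,
        # outside it otherwise.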
rms_t = rms * rho + (1 - rho) * g * g
if centered:
mg_t = mg * rho + (1 - rho) * g
denom_t = rms_t - mg_t * mg_t
else:
mg_t = mg
denom_t = rms_t
if momentum > 0.0:
mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))
var_t = var - mom_t
else:
mom_t = mom
var_t = var - lr * g / (np.sqrt(denom_t) + epsilon)
return var_t, mg_t, rms_t, mom_t
def _sparse_rmsprop_update_numpy(
self,
var,
gindexs,
gvalues,
mg,
rms,
mom,
lr,
rho,
momentum,
epsilon,
centered,
):
mg_t = copy.deepcopy(mg)
rms_t = copy.deepcopy(rms)
mom_t = copy.deepcopy(mom)
var_t = copy.deepcopy(var)
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue
if centered:
mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue
denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex]
else:
denom_t = rms_t[gindex]
if momentum > 0.0:
mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(
denom_t + epsilon
)
var_t[gindex] = var[gindex] - mom_t[gindex]
else:
mom_t[gindex] = mom[gindex]
var_t[gindex] = var[gindex] - lr * gvalue / (
np.sqrt(denom_t) + epsilon
)
return var_t, mg_t, rms_t, mom_t
def testDense(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for (
dtype,
learning_rate,
rho,
momentum,
epsilon,
centered,
) in _TESTPARAMS:
with tf.compat.v1.get_default_graph().as_default(), test_utils.use_gpu(): # noqa: E501
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, dtype=dtype)
var1 = tf.Variable(var1_np, dtype=dtype)
grads0 = tf.constant(grads0_np, dtype=dtype)
grads1 = tf.constant(grads1_np, dtype=dtype)
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered,
)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
mg1 = opt.get_slot(var1, "mg")
else:
mg0 = None
mg1 = None
if momentum > 0.0:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
(
var0_np,
mg0_np,
rms0_np,
mom0_np,
) = self._rmsprop_update_numpy(
var0_np,
grads0_np,
mg0_np,
rms0_np,
mom0_np,
learning_rate,
rho,
momentum,
epsilon,
centered,
)
(
var1_np,
mg1_np,
rms1_np,
mom1_np,
) = self._rmsprop_update_numpy(
var1_np,
grads1_np,
mg1_np,
rms1_np,
mom1_np,
learning_rate,
rho,
momentum,
epsilon,
centered,
)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(
mg0_np, self.evaluate(mg0)
)
self.assertAllCloseAccordingToType(
mg1_np, self.evaluate(mg1)
)
if momentum > 0.0:
self.assertAllCloseAccordingToType(
mom0_np, self.evaluate(mom0)
)
self.assertAllCloseAccordingToType(
mom1_np, self.evaluate(mom1)
)
self.assertAllCloseAccordingToType(
rms0_np, self.evaluate(rms0)
)
self.assertAllCloseAccordingToType(
rms1_np, self.evaluate(rms1)
)
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testDenseWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
var0_np = np.array([1.0, 2.0])
grads0_np = np.array([0.1, 0.2])
var1_np = np.array([3.0, 4.0])
grads1_np = np.array([0.01, 0.2])
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 0.01
rho = 0.9
momentum = 0.0
epsilon = 1e-7
centered = False
decay = 0.5
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered,
decay=decay,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(tf.compat.v1.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.0:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0])
mg1_np = np.array([0.0, 0.0])
rms0_np = np.array([0.0, 0.0])
rms1_np = np.array([0.0, 0.0])
mom0_np = np.array([0.0, 0.0])
mom1_np = np.array([0.0, 0.0])
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
            # Run 2 steps of RMSprop
for t in range(2):
self.evaluate(update)
lr = learning_rate / (1 + decay * t)
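                # The legacy `decay` argument applies inverse time decay,
                # matching the InverseTimeDecay schedule in the next test.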
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np,
grads0_np,
mg0_np,
rms0_np,
mom0_np,
lr,
rho,
momentum,
epsilon,
centered,
)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np,
grads1_np,
mg1_np,
rms1_np,
mom1_np,
lr,
rho,
momentum,
epsilon,
centered,
)
# Validate updated params
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.0:
self.assertAllCloseAccordingToType(
mom0_np, self.evaluate(mom0)
)
self.assertAllCloseAccordingToType(
mom1_np, self.evaluate(mom1)
)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDenseWithLearningRateInverseTimeDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
var0_np = np.array([1.0, 2.0])
grads0_np = np.array([0.1, 0.2])
var1_np = np.array([3.0, 4.0])
grads1_np = np.array([0.01, 0.2])
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 0.01
rho = 0.9
momentum = 0.0
epsilon = 1e-7
centered = False
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay
)
opt = rmsprop.RMSprop(
learning_rate=lr_schedule,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(tf.compat.v1.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.0:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0])
mg1_np = np.array([0.0, 0.0])
rms0_np = np.array([0.0, 0.0])
rms1_np = np.array([0.0, 0.0])
mom0_np = np.array([0.0, 0.0])
mom1_np = np.array([0.0, 0.0])
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
            # Run 2 steps of RMSprop
for t in range(2):
self.evaluate(update)
lr = learning_rate / (1 + decay * t)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np,
grads0_np,
mg0_np,
rms0_np,
mom0_np,
lr,
rho,
momentum,
epsilon,
centered,
)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np,
grads1_np,
mg1_np,
rms1_np,
mom1_np,
lr,
rho,
momentum,
epsilon,
centered,
)
# Validate updated params
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.0:
self.assertAllCloseAccordingToType(
mom0_np, self.evaluate(mom0)
)
self.assertAllCloseAccordingToType(
mom1_np, self.evaluate(mom1)
)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testMinimizeSparseResourceVariable(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
x = tf.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = tf.matmul(
tf.compat.v1.nn.embedding_lookup([var0], [0]), x
)
return pred * pred
sgd_op = rmsprop.RMSprop(
learning_rate=1.0,
rho=0.0,
momentum=0.0,
epsilon=0.0,
centered=False,
).minimize(loss, var_list=[var0])
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType(
[[1.0, 2.0]], self.evaluate(var0)
)
                # Run 1 step of RMSprop
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType(
[[0.0, 1.0]], self.evaluate(var0), atol=0.01
)
def testMinimizeSparseResourceVariableCentered(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
x = tf.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = tf.matmul(
tf.compat.v1.nn.embedding_lookup([var0], [0]), x
)
return pred * pred
sgd_op = rmsprop.RMSprop(
learning_rate=1.0,
rho=0.0,
momentum=0.0,
epsilon=1.0,
centered=True,
).minimize(loss, var_list=[var0])
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType(
[[1.0, 2.0]], self.evaluate(var0)
)
                # Run 1 step of RMSprop
self.evaluate(sgd_op)
# Validate updated params
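                # With rho=0: rms = g**2 and mg = g, so the centered
                # denominator rms - mg**2 is 0 and the step is roughly
                # lr * g, where g = 2 * 14 * [4, 5] = [112, 140].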
self.assertAllCloseAccordingToType(
[[-111, -138]], self.evaluate(var0), atol=0.01
)
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for (
dtype,
learning_rate,
rho,
momentum,
epsilon,
centered,
) in _TESTPARAMS:
with tf.compat.v1.get_default_graph().as_default(), test_utils.use_gpu(): # noqa: E501
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np),
tf.constant(grads0_np_indices),
tf.constant([1]),
)
grads1_np_indices = np.array([1], dtype=np.int32)
grads1 = tf.IndexedSlices(
tf.constant(grads1_np),
tf.constant(grads1_np_indices),
tf.constant([1]),
)
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered,
)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
else:
mg0 = None
mg1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.0:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
(
var0_np,
mg0_np,
rms0_np,
mom0_np,
) = self._sparse_rmsprop_update_numpy(
var0_np,
grads0_np_indices,
grads0_np,
mg0_np,
rms0_np,
mom0_np,
learning_rate,
rho,
momentum,
epsilon,
centered,
)
(
var1_np,
mg1_np,
rms1_np,
mom1_np,
) = self._sparse_rmsprop_update_numpy(
var1_np,
grads1_np_indices,
grads1_np,
mg1_np,
rms1_np,
mom1_np,
learning_rate,
rho,
momentum,
epsilon,
centered,
)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(
mg0_np, self.evaluate(mg0)
)
self.assertAllCloseAccordingToType(
mg1_np, self.evaluate(mg1)
)
self.assertAllCloseAccordingToType(
rms0_np, self.evaluate(rms0)
)
self.assertAllCloseAccordingToType(
rms1_np, self.evaluate(rms1)
)
if momentum > 0.0:
self.assertAllCloseAccordingToType(
mom0_np, self.evaluate(mom0)
)
self.assertAllCloseAccordingToType(
mom1_np, self.evaluate(mom1)
)
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testCallableParams(self):
for dtype in _DATA_TYPES:
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
rho = lambda: 0.9
momentum = lambda: 0.0
epsilon = 1.0
opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
            # Step 1: rms starts at zero and epsilon is 1.0, so
            # sqrt(rms + epsilon) is ~1 and we see roughly a normal update:
            # v -= grad * learning_rate
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array(
[
1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),
]
),
self.evaluate(var0),
)
self.assertAllCloseAccordingToType(
np.array(
[
3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),
]
),
self.evaluate(var1),
)
# Step 2: the root mean square accumulators contain the previous
# update.
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array(
[
1.0
- (0.1 * 2.0 / math.sqrt(0.001 + 1.0))
- (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),
2.0
- (0.1 * 2.0 / math.sqrt(0.001 + 1.0))
- (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),
]
),
self.evaluate(var0),
)
self.assertAllCloseAccordingToType(
np.array(
[
3.0
- (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))
- (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),
4.0
- (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))
- (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),
]
),
self.evaluate(var1),
)
def testConstructRMSpropWithLR(self):
opt = rmsprop.RMSprop(lr=1.0)
opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0)
opt_3 = rmsprop.RMSprop(learning_rate=0.1)
self.assertIsInstance(opt.lr, tf.Variable)
self.assertIsInstance(opt_2.lr, tf.Variable)
self.assertIsInstance(opt_3.lr, tf.Variable)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testSlotsUniqueEager(self):
v1 = tf.Variable(1.0)
v2 = tf.Variable(1.0)
opt = rmsprop.RMSprop(1.0, momentum=0.0, centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
        # There should be the iterations variable, plus one unique slot
        # variable each for v1 and v2 (3 in total).
self.assertLen(set({id(v) for v in opt.variables()}), 3)
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)
)
opt = rmsprop.RMSprop(learning_rate=1.0, momentum=0.2, centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
        # There should be the iterations variable, plus two unique slot
        # variables each for v1 and v2 (5 in total).
self.assertLen(set({id(v) for v in opt.variables()}), 5)
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)
)
opt = rmsprop.RMSprop(learning_rate=1.0, momentum=0.2, centered=True)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
        # There should be the iterations variable, plus three unique slot
        # variables each for v1 and v2 (7 in total).
self.assertLen(set({id(v) for v in opt.variables()}), 7)
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)
)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testMomentumProperValue(self):
with self.assertRaisesRegex(
ValueError,
r"`momentum` must be between \[0, 1\]. "
r"Received: momentum=2.5 \(of type <class "
r"\'float\'>\).",
):
rmsprop.RMSprop(1.0, momentum=2.5, centered=False)
@test_combinations.generate(
    test_combinations.combine(mode=["graph", "eager"])
)
class SlotColocationTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([True, False])
@tf_test_utils.run_gpu_only
def testRunMinimizeOnGPUForCPUVariables(self, use_resource):
with tf.device("/device:CPU:0"):
if use_resource:
var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)
else:
var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)
def loss():
return 5 * var0 + 3 * var1
opt = rmsprop.RMSprop(
learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0
)
# Fetch params to validate initial values
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step through optimizer on GPU.
        # Slot variables are created the first time the optimizer is used on
        # a variable. This tests that slot variables will be colocated with
        # the base variable.
with tf.device("/device:GPU:0"):
# Note that for eager execution, minimize expects a function instead
# of a Tensor.
opt_op = opt.minimize(loss, [var0, var1])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
        # Validate updated params; all variables should have decreased.
self.assertTrue(
all(v < 0.0 for v in self.evaluate(var0)),
msg=f"updated variables: {self.evaluate(var0)}",
)
self.assertTrue(
all(v < 2.0 for v in self.evaluate(var1)),
msg=f"updated variables: {self.evaluate(var1)}",
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/optimizers/legacy/rmsprop_test.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/rmsprop_test.py",
"repo_id": "tf-keras",
"token_count": 19127
} | 194 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SGD optimizer implementation."""
import tensorflow.compat.v2 as tf
from tf_keras.optimizers import optimizer
from tf_keras.saving.object_registration import register_keras_serializable
# isort: off
from tensorflow.python.util.tf_export import keras_export
@register_keras_serializable()
@keras_export(
"keras.optimizers.SGD",
"keras.optimizers.experimental.SGD",
"keras.dtensor.experimental.optimizers.SGD",
v1=[],
)
class SGD(optimizer.Optimizer):
r"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum` is 0:
```python
w = w - learning_rate * g
```
Update rule when `momentum` is larger than 0:
```python
velocity = momentum * velocity - learning_rate * g
w = w + velocity
```
When `nesterov=True`, this rule becomes:
```python
velocity = momentum * velocity - learning_rate * g
w = w + momentum * velocity - learning_rate * g
```
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use. The
            learning rate. Defaults to 0.01.
momentum: float hyperparameter >= 0 that accelerates gradient descent in
the relevant direction and dampens oscillations.
Defaults to 0, i.e., vanilla gradient descent.
nesterov: boolean. Whether to apply Nesterov momentum.
Defaults to `False`.
{{base_optimizer_keyword_args}}
Usage:
>>> opt = tf.keras.optimizers.SGD(learning_rate=0.1)
>>> var = tf.Variable(1.0)
>>> loss = lambda: (var ** 2)/2.0 # d(loss)/d(var1) = var1
>>> opt.minimize(loss, [var])
>>> # Step is `- learning_rate * grad`
>>> var.numpy()
0.9
>>> opt = tf.keras.optimizers.SGD(0.1, momentum=0.9)
>>> var = tf.Variable(1.0)
>>> val0 = var.value()
>>> loss = lambda: (var ** 2)/2.0 # d(loss)/d(var1) = var1
>>> # First step is `- learning_rate * grad`
>>> opt.minimize(loss, [var])
>>> val1 = var.value()
>>> (val0 - val1).numpy()
0.1
>>> # On later steps, step-size increases because of momentum
>>> opt.minimize(loss, [var])
>>> val2 = var.value()
>>> (val1 - val2).numpy()
0.18
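    (The second step is `momentum * prev_step + learning_rate * grad
    = 0.9 * 0.1 + 0.1 * 0.9 = 0.18`.)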
Reference:
- For `nesterov=True`, See [Sutskever et al., 2013](
http://proceedings.mlr.press/v28/sutskever13.pdf).
"""
def __init__(
self,
learning_rate=0.01,
momentum=0.0,
nesterov=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
jit_compile=True,
name="SGD",
**kwargs
):
super().__init__(
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
jit_compile=jit_compile,
**kwargs
)
self._learning_rate = self._build_learning_rate(learning_rate)
self.momentum = momentum
self.nesterov = nesterov
if isinstance(momentum, (int, float)) and (
momentum < 0 or momentum > 1
):
raise ValueError("`momentum` must be between [0, 1].")
def build(self, var_list):
"""Initialize optimizer variables.
SGD optimizer has one variable `momentums`, only set if `self.momentum`
is not 0.
Args:
var_list: list of model variables to build SGD variables on.
"""
super().build(var_list)
if hasattr(self, "_built") and self._built:
return
self.momentums = []
for var in var_list:
self.momentums.append(
self.add_variable_from_reference(
model_variable=var, variable_name="m"
)
)
self._built = True
def update_step(self, gradient, variable):
"""Update step given gradient and the associated model variable."""
lr = tf.cast(self.learning_rate, variable.dtype)
m = None
var_key = self._var_key(variable)
momentum = tf.cast(self.momentum, variable.dtype)
m = self.momentums[self._index_dict[var_key]]
# TODO(b/204321487): Add nesterov acceleration.
if isinstance(gradient, tf.IndexedSlices):
# Sparse gradients.
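            # The velocity update is done in two phases so it stays sparse:
            # decay m by `momentum`, then scatter-add -lr * g into the
            # touched rows, i.e. m <- momentum * m - lr * g.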
add_value = tf.IndexedSlices(
-gradient.values * lr, gradient.indices
)
if m is not None:
m.assign(m * momentum)
m.scatter_add(add_value)
if self.nesterov:
variable.scatter_add(add_value)
variable.assign_add(m * momentum)
else:
variable.assign_add(m)
else:
variable.scatter_add(add_value)
else:
# Dense gradients
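            # The dense path folds the same rule into a single assign:
            # m <- momentum * m - lr * g.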
if m is not None:
m.assign(-gradient * lr + m * momentum)
if self.nesterov:
variable.assign_add(-gradient * lr + m * momentum)
else:
variable.assign_add(m)
else:
variable.assign_add(-gradient * lr)
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
self._learning_rate
),
"momentum": self.momentum,
"nesterov": self.nesterov,
}
)
return config
SGD.__doc__ = SGD.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| tf-keras/tf_keras/optimizers/sgd.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/sgd.py",
"repo_id": "tf-keras",
"token_count": 3081
} | 195 |
# Description:
# Contains TF-Keras protobufs
load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = [
"//tf_keras:friends",
],
licenses = ["notice"], # Apache 2.0
)
py_proto_library(
name = "saved_metadata_proto_py_pb2",
srcs = ["saved_metadata.proto"],
deps = [":versions_proto_py_pb2"],
)
py_proto_library(
name = "projector_config_proto_py_pb2",
srcs = ["projector_config.proto"],
)
py_proto_library(
name = "versions_proto_py_pb2",
srcs = ["versions.proto"],
)
| tf-keras/tf_keras/protobuf/BUILD/0 | {
"file_path": "tf-keras/tf_keras/protobuf/BUILD",
"repo_id": "tf-keras",
"token_count": 282
} | 196 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Description:
# TF-Keras saving and loading files for SavedModel.
# buildifier: disable=same-origin-load
# Placeholder: load unaliased py_binary
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = [
"//tf_keras/layers/rnn:__pkg__",
"//tf_keras/saving:__subpackages__",
],
licenses = ["notice"],
)
py_library(
name = "order_preserving_set",
srcs = ["order_preserving_set.py"],
)
py_library(
name = "utils",
srcs = ["utils.py"],
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/engine:base_layer_utils",
"//tf_keras/utils:layer_utils",
],
)
py_library(
name = "saved_model",
srcs = [
"base_serialization.py",
"constants.py",
"json_utils.py",
"layer_serialization.py",
"load.py",
"metric_serialization.py",
"model_serialization.py",
"network_serialization.py",
"save.py",
"save_impl.py",
"serialized_attributes.py",
],
srcs_version = "PY3",
deps = [
":order_preserving_set",
":utils",
"//:expect_tensorflow_installed",
"//tf_keras/utils:generic_utils",
],
)
tf_py_test(
name = "revive_test",
size = "medium",
srcs = ["revive_test.py"],
python_version = "PY3",
shard_count = 8,
tags = [
"no_windows", # b/158005583
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "json_utils_test",
size = "small",
srcs = ["json_utils_test.py"],
python_version = "PY3",
deps = [
":saved_model",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
],
)
tf_py_test(
name = "saved_model_test",
size = "medium",
srcs = ["saved_model_test.py"],
python_version = "PY3",
shard_count = 4,
tags = [
"no_oss", # TODO(keras-team): Fails in OSS.
"no_pip", # TODO(b/202022379)
"no_rocm",
"no_windows",
"notsan", #TODO(b/181771982): it is flaky
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
py_binary(
name = "create_test_saved_model",
srcs = ["create_test_saved_model.py"],
python_version = "PY3",
srcs_version = "PY3",
tags = ["no_oss"],
deps = [
"//:expect_absl_installed", # absl:app
"//:expect_tensorflow_installed",
"//tf_keras:regularizers",
"//tf_keras/testing_infra:test_utils",
"//third_party/py/absl/flags",
],
)
tf_py_test(
name = "determinism_test",
srcs = ["determinism_test.py"],
data = [":create_test_saved_model.par"],
python_version = "PY3",
tags = ["no_oss"],
deps = [
"//:expect_tensorflow_installed",
],
)
| tf-keras/tf_keras/saving/legacy/saved_model/BUILD/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/BUILD",
"repo_id": "tf-keras",
"token_count": 1862
} | 197 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras legacy SavedModel saving."""
import os
import tensorflow.compat.v2 as tf
from absl import logging
from tf_keras import backend
from tf_keras.protobuf import saved_metadata_pb2
from tf_keras.protobuf import versions_pb2
from tf_keras.saving.legacy import saving_utils
from tf_keras.saving.legacy import serialization
from tf_keras.saving.legacy.saved_model import constants
from tf_keras.saving.legacy.saved_model import save_impl
from tf_keras.saving.legacy.saved_model import utils
from tf_keras.utils.generic_utils import LazyLoader
from tf_keras.utils.io_utils import ask_to_proceed_with_overwrite
# isort: off
from tensorflow.python.saved_model import save as save_lib
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
base_layer = LazyLoader("base_layer", globals(), "tf_keras.engine.base_layer")
training_lib = LazyLoader("training_lib", globals(), "tf_keras.engine.training")
def save(
model,
filepath,
overwrite,
include_optimizer,
signatures=None,
options=None,
save_traces=True,
):
"""Saves a model as a SavedModel to the filepath.
Args:
model: TF-Keras model instance to be saved.
filepath: String path to save the model.
        overwrite: Whether to overwrite the existing filepath.
include_optimizer: If True, save the model's optimizer state.
signatures: Signatures to save with the SavedModel. Applicable to the 'tf'
format only. Please see the `signatures` argument in
`tf.saved_model.save` for details.
options: (only applies to SavedModel format) `tf.saved_model.SaveOptions`
object that specifies options for saving to SavedModel.
save_traces: (only applies to SavedModel format) When enabled, the
SavedModel will store the function traces for each layer. This
can be disabled, so that only the configs of each layer are stored.
Disabling this will decrease serialization time and file size, but
it requires that all custom layers/models implement a
`get_config()` method. Defaults to `True`.
Raises:
ValueError: if the model's inputs have not been defined.
"""
# If file exists and should not be overwritten.
if not overwrite and os.path.exists(filepath):
proceed = ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
if save_traces:
if save_impl.should_skip_serialization(model):
saving_utils.raise_model_input_error(model)
if not include_optimizer:
orig_optimizer = model.optimizer
model.optimizer = None
# TODO(b/180760306) Change to del model.optimizer if Layer's __delattr__
# calls AutoTrackable's __delattr__.
model._delete_tracking("optimizer")
# Trace all functions and signatures with `training=0` instead of using an
# already-set learning phase placeholder.
# This is needed for compatibility reasons until learning phase setting
# is removed from the public apis.
with serialization.SharedObjectSavingScope():
with backend.deprecated_internal_learning_phase_scope(0):
with utils.keras_option_scope(save_traces):
saved_nodes, node_paths = save_lib.save_and_return_nodes(
model, filepath, signatures, options
)
# Save all metadata to a separate file in the SavedModel directory.
metadata = generate_keras_metadata(saved_nodes, node_paths)
with tf.io.gfile.GFile(
tf.io.gfile.join(filepath, constants.SAVED_METADATA_PATH), "wb"
) as w:
w.write(metadata.SerializeToString(deterministic=True))
if not include_optimizer:
model.optimizer = orig_optimizer
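# A hedged usage sketch (assumes a built TF-Keras `model`; this is roughly
# what `model.save(path, save_format="tf")` dispatches to internally):
#   save(model, "/tmp/my_saved_model", overwrite=True,
#        include_optimizer=True, save_traces=True)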
def generate_keras_metadata(saved_nodes, node_paths):
"""Constructs a KerasMetadata proto with the metadata of each object."""
metadata = saved_metadata_pb2.SavedMetadata()
for node_id, node in enumerate(saved_nodes):
if isinstance(node, base_layer.Layer):
path = node_paths[node]
if not path:
node_path = "root"
else:
node_path = f"root.{'.'.join([ref.name for ref in path])}"
metadata.nodes.add(
node_id=node_id,
node_path=node_path,
version=versions_pb2.VersionDef(
producer=2, min_consumer=1, bad_consumers=[]
),
identifier=node._object_identifier,
metadata=node._tracking_metadata,
)
# Log warning if the node's class name conflicts with a Keras
# built-in object.
class_name = node.__class__.__name__
from tf_keras.layers import serialization as layers_serialization
builtin_layer = layers_serialization.get_builtin_layer(class_name)
if builtin_layer:
if not isinstance(node, builtin_layer):
logging.warning(
"%s has the same name '%s' as a built-in TF-Keras "
"object. Consider renaming %s to avoid naming "
"conflicts when loading with "
"`tf.keras.models.load_model`. "
"If renaming is not possible, pass "
"the object in the `custom_objects` "
"parameter of the load "
"function.",
node,
class_name,
node.__class__,
)
return metadata
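# Illustration of the node_path values produced above: the root object maps
# to "root", and nested trackables join their reference names with dots,
# e.g. a layer tracked as `dense` directly on the model gets the path
# "root.dense" (exact names depend on the model's trackable graph).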
| tf-keras/tf_keras/saving/legacy/saved_model/save.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/save.py",
"repo_id": "tf-keras",
"token_count": 2551
} | 198 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for serialization_lib."""
import json
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.saving import serialization_lib
from tf_keras.saving.legacy import serialization as legacy_serialization
from tf_keras.testing_infra import test_utils
def custom_fn(x):
return x**2
class CustomLayer(keras.layers.Layer):
def __init__(self, factor):
super().__init__()
self.factor = factor
def call(self, x):
return x * self.factor
def get_config(self):
return {"factor": self.factor}
class NestedCustomLayer(keras.layers.Layer):
def __init__(self, factor, dense=None, activation=None):
super().__init__()
self.factor = factor
if dense is None:
self.dense = keras.layers.Dense(1, activation=custom_fn)
else:
self.dense = serialization_lib.deserialize_keras_object(dense)
if activation is None:
self.activation = keras.layers.Activation("relu")
else:
self.activation = serialization_lib.deserialize_keras_object(
activation
)
def call(self, x):
return self.dense(x * self.factor)
def get_config(self):
return {
"factor": self.factor,
"dense": self.dense,
"activation": self.activation,
}
class WrapperLayer(keras.layers.Layer):
def __init__(self, layer, **kwargs):
super().__init__(**kwargs)
self.layer = layer
def call(self, x):
return self.layer(x)
def get_config(self):
config = super().get_config()
return {"layer": self.layer, **config}
@test_utils.run_v2_only
class SerializationLibTest(tf.test.TestCase, parameterized.TestCase):
def roundtrip(self, obj, custom_objects=None, safe_mode=True):
serialized = serialization_lib.serialize_keras_object(obj)
json_data = json.dumps(serialized)
json_data = json.loads(json_data)
deserialized = serialization_lib.deserialize_keras_object(
json_data, custom_objects=custom_objects, safe_mode=safe_mode
)
reserialized = serialization_lib.serialize_keras_object(deserialized)
return serialized, deserialized, reserialized
@parameterized.named_parameters(
("str", "hello"),
("bytes", b"hello"),
("nparray_int", np.array([0, 1])),
("nparray_float", np.array([0.0, 1.0])),
("nparray_item", np.float32(1.0)),
("plain_types_list", ["hello", 0, "world", 1.0, True]),
("plain_types_dict", {"1": "hello", "2": 0, "3": True}),
("plain_types_nested_dict", {"1": "hello", "2": [True, False]}),
)
def test_simple_objects(self, obj):
serialized, _, reserialized = self.roundtrip(obj)
self.assertEqual(serialized, reserialized)
def test_builtin_layers(self):
serialized, _, reserialized = self.roundtrip(keras.layers.Dense(3))
self.assertEqual(serialized, reserialized)
def test_tensors_and_tensorshape(self):
x = tf.random.normal((2, 2), dtype="float64")
obj = {"x": x}
_, new_obj, _ = self.roundtrip(obj)
self.assertAllClose(x, new_obj["x"], atol=1e-5)
obj = {"x.shape": x.shape}
_, new_obj, _ = self.roundtrip(obj)
self.assertListEqual(x.shape.as_list(), new_obj["x.shape"])
def test_custom_fn(self):
obj = {"activation": custom_fn}
serialized, _, reserialized = self.roundtrip(
obj, custom_objects={"custom_fn": custom_fn}
)
self.assertEqual(serialized, reserialized)
# Test inside layer
dense = keras.layers.Dense(1, activation=custom_fn)
dense.build((None, 2))
_, new_dense, _ = self.roundtrip(
dense, custom_objects={"custom_fn": custom_fn}
)
x = tf.random.normal((2, 2))
y1 = dense(x)
_ = new_dense(x)
new_dense.set_weights(dense.get_weights())
y2 = new_dense(x)
self.assertAllClose(y1, y2, atol=1e-5)
def test_custom_layer(self):
layer = CustomLayer(factor=2)
x = tf.random.normal((2, 2))
y1 = layer(x)
_, new_layer, _ = self.roundtrip(
layer, custom_objects={"CustomLayer": CustomLayer}
)
y2 = new_layer(x)
self.assertAllClose(y1, y2, atol=1e-5)
layer = NestedCustomLayer(factor=2)
x = tf.random.normal((2, 2))
y1 = layer(x)
_, new_layer, _ = self.roundtrip(
layer,
custom_objects={
"NestedCustomLayer": NestedCustomLayer,
"custom_fn": custom_fn,
},
)
_ = new_layer(x)
new_layer.set_weights(layer.get_weights())
y2 = new_layer(x)
self.assertAllClose(y1, y2, atol=1e-5)
def test_lambda_fn(self):
obj = {"activation": lambda x: x**2}
with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
self.roundtrip(obj, safe_mode=True)
_, new_obj, _ = self.roundtrip(obj, safe_mode=False)
self.assertEqual(obj["activation"](3), new_obj["activation"](3))
def test_lambda_layer(self):
lmbda = keras.layers.Lambda(lambda x: x**2)
with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
self.roundtrip(lmbda, safe_mode=True)
_, new_lmbda, _ = self.roundtrip(lmbda, safe_mode=False)
x = tf.random.normal((2, 2))
y1 = lmbda(x)
y2 = new_lmbda(x)
self.assertAllClose(y1, y2, atol=1e-5)
def test_safe_mode_scope(self):
lmbda = keras.layers.Lambda(lambda x: x**2)
with serialization_lib.SafeModeScope(safe_mode=True):
with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
self.roundtrip(lmbda)
with serialization_lib.SafeModeScope(safe_mode=False):
_, new_lmbda, _ = self.roundtrip(lmbda)
x = tf.random.normal((2, 2))
y1 = lmbda(x)
y2 = new_lmbda(x)
self.assertAllClose(y1, y2, atol=1e-5)
def test_tensorspec(self):
inputs = keras.Input(type_spec=tf.TensorSpec((2, 2), tf.float32))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
_, new_model, _ = self.roundtrip(model)
x = tf.random.normal((2, 2))
y1 = model(x)
new_model.set_weights(model.get_weights())
y2 = new_model(x)
self.assertAllClose(y1, y2, atol=1e-5)
    def test_shared_inner_layer(self):
input_1 = keras.Input((2,))
input_2 = keras.Input((2,))
shared_layer = keras.layers.Dense(1)
output_1 = shared_layer(input_1)
wrapper_layer = WrapperLayer(shared_layer)
output_2 = wrapper_layer(input_2)
model = keras.Model([input_1, input_2], [output_1, output_2])
_, new_model, _ = self.roundtrip(
model, custom_objects={"WrapperLayer": WrapperLayer}
)
self.assertIs(model.layers[2], model.layers[3].layer)
self.assertIs(new_model.layers[2], new_model.layers[3].layer)
def test_functional_subclass(self):
class PlainFunctionalSubclass(keras.Model):
pass
inputs = keras.Input((2,))
outputs = keras.layers.Dense(1)(inputs)
model = PlainFunctionalSubclass(inputs, outputs)
x = tf.random.normal((2, 2))
y1 = model(x)
_, new_model, _ = self.roundtrip(
model,
custom_objects={"PlainFunctionalSubclass": PlainFunctionalSubclass},
)
new_model.set_weights(model.get_weights())
y2 = new_model(x)
self.assertAllClose(y1, y2, atol=1e-5)
self.assertIsInstance(new_model, PlainFunctionalSubclass)
class FunctionalSubclassWCustomInit(keras.Model):
def __init__(self, num_units=1, **kwargs):
inputs = keras.Input((2,))
outputs = keras.layers.Dense(num_units)(inputs)
super().__init__(inputs, outputs)
model = FunctionalSubclassWCustomInit(num_units=2)
x = tf.random.normal((2, 2))
y1 = model(x)
_, new_model, _ = self.roundtrip(
model,
custom_objects={
"FunctionalSubclassWCustomInit": FunctionalSubclassWCustomInit
},
)
new_model.set_weights(model.get_weights())
y2 = new_model(x)
self.assertAllClose(y1, y2, atol=1e-5)
self.assertIsInstance(new_model, FunctionalSubclassWCustomInit)
def test_shared_object(self):
class MyLayer(keras.layers.Layer):
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
if isinstance(activation, dict):
self.activation = (
serialization_lib.deserialize_keras_object(activation)
)
else:
self.activation = activation
def call(self, x):
return self.activation(x)
def get_config(self):
config = super().get_config()
config["activation"] = self.activation
return config
class SharedActivation:
def __call__(self, x):
return x**2
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
return cls()
shared_act = SharedActivation()
layer_1 = MyLayer(activation=shared_act)
layer_2 = MyLayer(activation=shared_act)
layers = [layer_1, layer_2]
with serialization_lib.ObjectSharingScope():
serialized, new_layers, reserialized = self.roundtrip(
layers,
custom_objects={
"MyLayer": MyLayer,
"SharedActivation": SharedActivation,
},
)
self.assertIn("shared_object_id", serialized[0]["config"]["activation"])
obj_id = serialized[0]["config"]["activation"]
self.assertIn("shared_object_id", serialized[1]["config"]["activation"])
self.assertEqual(obj_id, serialized[1]["config"]["activation"])
self.assertIs(layers[0].activation, layers[1].activation)
self.assertIs(new_layers[0].activation, new_layers[1].activation)
def test_legacy_internal_object(self):
from tf_keras.layers.rnn.legacy_cells import (
LSTMCell, # pylint: disable=C6204
)
# tf.nn.rnn_cell.LSTMCell belongs to keras.__internal__.legacy namespace
cell = LSTMCell(32)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer="rmsprop", loss="mse")
x_in = np.random.random((3, 5, 5))
y_out_1 = model.predict(x_in)
weights = model.get_weights()
# serialize and deserialize
config = serialization_lib.serialize_keras_object(layer)
layer = serialization_lib.deserialize_keras_object(
config,
custom_objects={"LSTMCell": LSTMCell},
)
# Restore RNN cell into model with weights
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_out_2 = model.predict(x_in)
self.assertAllClose(y_out_1, y_out_2, atol=1e-5)
@keras.utils.register_keras_serializable()
class MyDense(keras.layers.Layer):
def __init__(
self,
units,
*,
kernel_regularizer=None,
kernel_initializer=None,
**kwargs
):
super().__init__(**kwargs)
self._units = units
self._kernel_regularizer = kernel_regularizer
self._kernel_initializer = kernel_initializer
def get_config(self):
return dict(
units=self._units,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
**super().get_config()
)
def build(self, input_shape):
unused_batch_size, input_units = input_shape
self._kernel = self.add_weight(
"kernel",
[input_units, self._units],
dtype=tf.float32,
regularizer=self._kernel_regularizer,
initializer=self._kernel_initializer,
)
def call(self, inputs):
return tf.matmul(inputs, self._kernel)
@keras.utils.register_keras_serializable()
class MyWrapper(keras.layers.Layer):
def __init__(self, wrapped, **kwargs):
super().__init__(**kwargs)
self._wrapped = wrapped
def get_config(self):
return dict(wrapped=self._wrapped, **super().get_config())
@classmethod
def from_config(cls, config):
config["wrapped"] = keras.utils.deserialize_keras_object(
config["wrapped"]
)
return cls(**config)
def call(self, inputs):
return self._wrapped(inputs)
@test_utils.run_v2_only
class JsonSerializationTest(tf.test.TestCase, parameterized.TestCase):
def test_serialize_deserialize_custom_layer_json(self):
reg = keras.regularizers.L2(0.101)
ini = keras.initializers.Constant(1.0)
dense = MyDense(4, kernel_regularizer=reg, kernel_initializer=ini)
inputs = keras.layers.Input(shape=[3])
outputs = dense(inputs)
model = keras.Model(inputs, outputs)
model_json = model.to_json()
model2 = keras.models.model_from_json(model_json)
self.assertEqual(model_json, model2.to_json())
def test_serialize_deserialize_custom_layer_with_wrapper_json(self):
reg = keras.regularizers.L2(0.101)
ini = keras.initializers.Constant(1.0)
dense = MyDense(4, kernel_regularizer=reg, kernel_initializer=ini)
wrapper = MyWrapper(dense)
inputs = keras.layers.Input(shape=[3])
outputs = wrapper(inputs)
model = keras.Model(inputs, outputs)
model_json = model.to_json()
model2 = keras.models.model_from_json(model_json)
self.assertEqual(model_json, model2.to_json())
@test_utils.run_v2_only
class BackwardsCompatibilityTest(tf.test.TestCase, parameterized.TestCase):
def assert_old_format_can_be_deserialized(self, obj, custom_objects=None):
old_config = legacy_serialization.serialize_keras_object(obj)
revived = serialization_lib.deserialize_keras_object(
old_config, custom_objects=custom_objects
)
new_config_1 = serialization_lib.serialize_keras_object(obj)
new_config_2 = serialization_lib.serialize_keras_object(revived)
self.assertEqual(new_config_1, new_config_2)
def test_backwards_compatibility_with_old_serialized_format(self):
optimizer = keras.optimizers.Adam(learning_rate=0.1)
self.assert_old_format_can_be_deserialized(
optimizer, custom_objects=vars(keras.optimizers)
)
activation = keras.activations.relu
self.assert_old_format_can_be_deserialized(
activation, custom_objects=vars(keras.activations)
)
initializer = keras.initializers.VarianceScaling(scale=2.0)
self.assert_old_format_can_be_deserialized(
initializer, custom_objects=vars(keras.initializers)
)
regularizer = keras.regularizers.L2(0.3)
self.assert_old_format_can_be_deserialized(
regularizer, custom_objects=vars(keras.regularizers)
)
constraint = keras.constraints.UnitNorm()
self.assert_old_format_can_be_deserialized(
constraint, custom_objects=vars(keras.constraints)
)
layer = keras.layers.Dense(2)
self.assert_old_format_can_be_deserialized(
layer, custom_objects=vars(keras.layers)
)
layer = keras.layers.MultiHeadAttention(2, 4)
self.assert_old_format_can_be_deserialized(
layer, custom_objects=vars(keras.layers)
)
# Custom objects
layer = CustomLayer(2)
self.assert_old_format_can_be_deserialized(
layer, custom_objects={"CustomLayer": CustomLayer}
)
layer = keras.layers.Dense(1, activation=custom_fn)
self.assert_old_format_can_be_deserialized(
layer, custom_objects={**vars(keras.layers), "custom_fn": custom_fn}
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/saving/serialization_lib_test.py/0 | {
"file_path": "tf-keras/tf_keras/saving/serialization_lib_test.py",
"repo_id": "tf-keras",
"token_count": 8026
} | 199 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
# isort: off
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training.saver import (
export_meta_graph,
)
class ConvertVariablesToConstantsTest(tf.test.TestCase):
def _get_tensors(self, sess, tensor_list):
"""Returns a list of Tensor objects from the Session."""
return [
sess.graph.get_tensor_by_name(tensor.name) for tensor in tensor_list
]
def _get_tensor_names(self, tensors):
"""Returns a list of string names for the tensors specified."""
return [tensor.name.split(":")[0] for tensor in tensors]
def _evaluate_graph_def(self, graph_def, inputs, outputs, input_data):
"""Evaluates the GraphDef using Sessions."""
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name="")
sess = tf.compat.v1.Session(graph=graph)
input_tensors = self._get_tensors(sess, inputs)
output_tensors = self._get_tensors(sess, outputs)
return sess.run(
output_tensors, feed_dict=dict(zip(input_tensors, input_data))
)
def _ensure_no_variables_in_graph(self, graph_def):
"""Ensures there are no variables in the graph."""
for node in graph_def.node:
self.assertNotIn(
node.op,
["Variable", "VariableV2", "VarHandleOp", "ReadVariableOp"],
)
def _test_converted_keras_model(
self, model, constant_graph_def, input_data
):
"""Compares the converted TF-Keras model."""
expected_value = model.predict(input_data)
actual_value = self._evaluate_graph_def(
constant_graph_def, model.inputs, model.outputs, [input_data]
)
np.testing.assert_almost_equal(
np.array([expected_value]), actual_value, 5
)
def _inline_functions(self, graph_def, arrays):
meta_graph = export_meta_graph(graph_def=graph_def)
fetch_collection = meta_graph_pb2.CollectionDef()
for name in arrays:
fetch_collection.node_list.value.append(name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
# Initialize RewriterConfig with everything disabled except function
# inlining.
config = tf.compat.v1.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
rewrite_options.optimizers.append("function")
return tf_optimizer.OptimizeGraph(config, meta_graph)
def testWithEmbeddings(self):
"""Freezes a graph with embeddings."""
state_input = keras.layers.Input(
shape=(1,), name="state_input", dtype="int32"
)
output = keras.layers.Embedding(
output_dim=16, input_dim=100, input_length=1, name="state"
)(state_input)
model = keras.models.Model(inputs=[state_input], outputs=[output])
model.compile(
loss={"state": "sparse_categorical_crossentropy"}, optimizer="adam"
)
# Freeze the graph.
sess = keras.backend.get_session()
variable_graph_def = sess.graph_def
output_tensor = self._get_tensor_names(model.outputs)
constant_graph_def = (
tf.compat.v1.graph_util.convert_variables_to_constants(
sess, variable_graph_def, output_tensor
)
)
# Validate converted graph.
input_data = np.array(np.random.random_sample([1, 1]), dtype=np.int32)
self._ensure_no_variables_in_graph(constant_graph_def)
self._test_converted_keras_model(model, constant_graph_def, input_data)
def testKerasBatchNorm(self):
"""Freezes a graph with TF-Keras batch norm."""
inputs = keras.layers.Input(shape=(128, 128, 1))
batch_norm = keras.layers.BatchNormalization()(inputs)
model = keras.models.Model(inputs, batch_norm, name="test")
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
tensor_names = [tensor.name for tensor in model.inputs + model.outputs]
# Freeze the graph.
sess = keras.backend.get_session()
variable_graph_def = sess.graph_def
variable_graph_def = self._inline_functions(
variable_graph_def, tensor_names
)
output_tensor = self._get_tensor_names(model.outputs)
constant_graph_def = (
tf.compat.v1.graph_util.convert_variables_to_constants(
sess, variable_graph_def, output_tensor
)
)
# Validate converted graph.
input_data = np.array(
np.random.random_sample([1, 128, 128, 1]), dtype=np.int32
)
self._ensure_no_variables_in_graph(constant_graph_def)
self._test_converted_keras_model(model, constant_graph_def, input_data)
def testLSTM(self):
"""Freezes a TF-Keras LSTM."""
model = keras.models.Sequential(
[keras.layers.LSTM(units=10, input_shape=(10, 10))]
)
tensor_names = [tensor.name for tensor in model.inputs + model.outputs]
# Freeze the model.
sess = keras.backend.get_session()
variable_graph_def = sess.graph_def
variable_graph_def = self._inline_functions(
variable_graph_def, tensor_names
)
output_tensor = self._get_tensor_names(model.outputs)
constant_graph_def = (
tf.compat.v1.graph_util.convert_variables_to_constants(
sess, variable_graph_def, output_tensor
)
)
# Validate converted graph.
input_data = np.array(
np.random.random_sample([10, 10, 10]), dtype=np.int32
)
self._ensure_no_variables_in_graph(constant_graph_def)
self._test_converted_keras_model(model, constant_graph_def, input_data)
if __name__ == "__main__":
tf.compat.v1.disable_eager_execution()
tf.test.main()
| tf-keras/tf_keras/tests/graph_util_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/graph_util_test.py",
"repo_id": "tf-keras",
"token_count": 2969
} | 200 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object-based saving which use tf.train.* optimizers."""
import functools
import os
import tensorflow.compat.v2 as tf
from tf_keras.engine import training
from tf_keras.layers import core
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# isort: off
from tensorflow.python.checkpoint import (
checkpoint as trackable_utils,
)
from tensorflow.python.eager import context
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
class NonLayerTrackable(tf.Module):
def __init__(self):
super().__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[]
)
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super().__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class CheckpointingTests(test_combinations.TestCase):
@tf_test_utils.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = tf.constant([[3.0]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should
# not go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
optimizer_step = tf.compat.v1.train.get_or_create_global_step()
root_trackable = tf.train.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step
)
if tf.executing_eagerly():
optimizer.minimize(
lambda: model(input_value), global_step=optimizer_step
)
optimizer.minimize(
lambda: other_model(input_value), global_step=optimizer_step
)
else:
train_op = optimizer.minimize(
model(input_value), global_step=optimizer_step
)
optimizer.minimize(
other_model(input_value), global_step=optimizer_step
)
self.evaluate(trackable_utils.gather_initializers(root_trackable))
self.evaluate(train_op)
(
named_variables,
serialized_graph,
_,
) = tf.__internal__.tracking.ObjectGraphView(
root_trackable
).serialize_object_graph()
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"optimizer_step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
# The optimizer creates two non-slot variables
"optimizer/beta1_power",
"optimizer/beta2_power",
# Slot variables
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names
]
named_variables = {v.name: v for v in named_variables}
self.assertEqual(
len(expected_checkpoint_names), len(named_variables.keys())
)
# Check that we've created the right full_names of objects (not
# exhaustive)
expected_names = {
"optimizer_step" + suffix: "global_step",
"model/_second/kernel" + suffix: "my_model/dense_1/kernel",
"model/_named_dense/kernel" + suffix: "my_model/dense/kernel",
"optimizer/beta1_power" + suffix: "beta1_power",
"optimizer/beta2_power" + suffix: "beta2_power",
}
for nodes in serialized_graph.nodes:
for attribute in nodes.attributes:
expected_name = expected_names.pop(
attribute.checkpoint_key, None
)
if expected_name is not None:
self.assertEqual(expected_name, attribute.full_name)
self.assertEmpty(expected_names)
# Spot check the generated protocol buffers.
self.assertEqual(
"optimizer", serialized_graph.nodes[0].children[1].local_name
)
optimizer_node = serialized_graph.nodes[
serialized_graph.nodes[0].children[1].node_id
]
self.assertEqual("beta1_power", optimizer_node.children[0].local_name)
self.assertEqual(
"beta1_power",
serialized_graph.nodes[optimizer_node.children[0].node_id]
.attributes[0]
.full_name,
)
self.assertEqual(
"my_model/dense/kernel",
serialized_graph.nodes[
optimizer_node.slot_variables[0].original_variable_node_id
]
.attributes[0]
.full_name,
)
# We strip off the :0 suffix, as variable.name-based saving does.
self.assertEqual(
"my_model/dense/kernel/Adam",
serialized_graph.nodes[
optimizer_node.slot_variables[0].slot_variable_node_id
]
.attributes[0]
.full_name,
)
self.assertEqual(
"my_model/dense/kernel/Adam:0",
optimizer.get_slot(var=model._named_dense.kernel, name="m").name,
)
self.assertEqual(
"model/_named_dense/kernel" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0].original_variable_node_id
]
.attributes[0]
.checkpoint_key,
)
self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
self.assertEqual(
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0].slot_variable_node_id
]
.attributes[0]
.checkpoint_key,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testSaveRestore(self):
with self.test_session():
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
root_trackable = tf.train.Checkpoint(
optimizer=optimizer, model=model
)
input_value = tf.constant([[3.0]])
if tf.executing_eagerly():
optimizer.minimize(lambda: model(input_value))
else:
train_op = optimizer.minimize(model(input_value))
# TODO(allenl): Make initialization more pleasant when graph
# building.
root_trackable.save_counter
self.evaluate(
trackable_utils.gather_initializers(root_trackable)
)
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(
tf.compat.v1.assign(model._named_dense.variables[1], [42.0])
)
m_bias_slot = optimizer.get_slot(
model._named_dense.variables[1], "m"
)
self.evaluate(tf.compat.v1.assign(m_bias_slot, [1.5]))
save_path = root_trackable.save(file_prefix=prefix)
self.evaluate(
tf.compat.v1.assign(model._named_dense.variables[1], [43.0])
)
self.evaluate(tf.compat.v1.assign(root_trackable.save_counter, 3))
optimizer_variables = self.evaluate(optimizer.variables())
self.evaluate(tf.compat.v1.assign(m_bias_slot, [-2.0]))
# Immediate restoration
status = root_trackable.restore(
save_path=save_path
).assert_consumed()
status.run_restore_ops()
self.assertAllEqual(
[42.0], self.evaluate(model._named_dense.variables[1])
)
self.assertAllEqual(1, self.evaluate(root_trackable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not tf.executing_eagerly():
# Restore-on-create is only supported when executing eagerly
return
on_create_model = MyModel()
on_create_optimizer = tf.compat.v1.train.AdamOptimizer(
0.001,
# Preserve beta1_power and beta2_power when applying gradients
# so we can test that they've been restored correctly.
beta1=1.0,
beta2=1.0,
)
on_create_root = tf.train.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model
)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
status.assert_nontrivial_match()
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
on_create_model(tf.constant([[3.0]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual(
[42.0], self.evaluate(on_create_model._named_dense.variables[1])
)
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m"
)
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
self.assertAllEqual(
optimizer_variables[2:],
self.evaluate(on_create_optimizer.variables()),
)
dummy_var = tf.Variable([1.0])
on_create_optimizer.minimize(loss=dummy_var.read_value)
status.assert_existing_objects_matched()
status.assert_consumed()
(
beta1_power,
beta2_power,
) = on_create_optimizer._get_beta_accumulators()
self.assertAllEqual(
optimizer_variables[0], self.evaluate(beta1_power)
)
self.assertAllEqual(
optimizer_variables[1], self.evaluate(beta2_power)
)
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step(),
)
root.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = tf.constant([[3.0]])
optimizer.minimize(
lambda: model(input_value),
global_step=root.optimizer_step,
)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual(
(training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy(),
)
def testEagerDistributionStrategy(self):
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
def _train_fn(optimizer, model, root):
input_value = tf.constant([[3.0]])
optimizer.minimize(
functools.partial(model, input_value),
global_step=root.optimizer_step,
)
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
for training_continuation in range(3):
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
root.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
strategy.extended.call_for_each_replica(
functools.partial(_train_fn, optimizer, model, root)
)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual(
(training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy(),
)
def testGraphDistributionStrategy(self):
self.skipTest("b/121381184")
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
def _train_fn(optimizer, model, root):
input_value = tf.constant([[3.0]])
return optimizer.minimize(
functools.partial(model, input_value),
global_step=root.optimizer_step,
)
for training_continuation in range(3):
with tf.Graph().as_default():
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
status = root.restore(
tf.train.latest_checkpoint(checkpoint_directory)
)
train_op = strategy.extended.call_for_each_replica(
functools.partial(_train_fn, optimizer, model, root)
)
with self.session() as session:
if training_continuation > 0:
status.assert_consumed()
status.initialize_or_restore()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual(
(training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy(),
)
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with tf.Graph().as_default():
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
root = tf.compat.v1.train.Checkpoint(
optimizer=optimizer,
model=model,
global_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
input_value = tf.constant([[3.0]])
train_op = optimizer.minimize(
model(input_value), global_step=root.global_step
)
checkpoint_path = tf.train.latest_checkpoint(
checkpoint_directory
)
with self.session(
graph=tf.compat.v1.get_default_graph()
) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
else:
status.assert_consumed()
status.assert_existing_objects_matched()
for _ in range(num_training_steps):
session.run(train_op)
root.save(
file_prefix=checkpoint_prefix, session=session
)
self.assertEqual(
(training_continuation + 1) * num_training_steps,
session.run(root.global_step),
)
self.assertEqual(
training_continuation + 1,
session.run(root.save_counter),
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph()
# creation.
with self.test_session():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
for training_continuation in range(3):
with test_utils.device(should_use_gpu=True):
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
global_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
manager = tf.train.CheckpointManager(
root, checkpoint_directory, max_to_keep=1
)
status = root.restore(save_path=manager.latest_checkpoint)
input_value = tf.constant([[3.0]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step,
)
if not tf.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
manager.save()
self.assertEqual(
(training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step),
)
self.assertEqual(
training_continuation + 1,
self.evaluate(root.save_counter),
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testWithDefun(self):
with self.test_session():
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with test_utils.device(should_use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = tf.compat.v1.train.AdamOptimizer(0.0)
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
global_step=tf.compat.v1.train.get_or_create_global_step(), # noqa: E501
)
checkpoint_path = tf.train.latest_checkpoint(
checkpoint_directory
)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@tf.function
def _call_model(x):
return model(x)
with tf.GradientTape() as tape:
loss = _call_model(tf.constant([[3.0]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(
zip(gradients, model.variables),
global_step=root.global_step,
)
if not tf.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose(
[[42.0]], self.evaluate(model.variables[0])
)
else:
self.evaluate(model.variables[0].assign([[42.0]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual(
(training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step),
)
self.assertEqual(
training_continuation + 1,
self.evaluate(root.save_counter),
)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super().__init__()
self.w = tf.Variable(0.0)
self.b = tf.Variable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
model = Model()
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with tf.GradientTape() as tape:
loss = (tf.constant(1.0) - model(tf.constant(1.0))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)]
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_initialize_if_not_restoring(self):
with self.test_session():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer_only_prefix = os.path.join(checkpoint_directory, "opt")
with test_utils.device(should_use_gpu=True):
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
root = tf.train.Checkpoint(
# Do not save the optimizer with the checkpoint.
model=model,
global_step=tf.compat.v1.train.get_or_create_global_step(),
)
optimizer_checkpoint = tf.train.Checkpoint(optimizer=optimizer)
checkpoint_path = tf.train.latest_checkpoint(
checkpoint_directory
)
status = root.restore(save_path=checkpoint_path)
input_value = tf.constant([[3.0]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step,
)
if not tf.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
self.evaluate([v.initializer for v in optimizer.variables()])
train_fn()
model_save_path = root.save(file_prefix=checkpoint_prefix)
self.evaluate(optimizer.variables()[0].assign(42.0))
optimizer_save_path = optimizer_checkpoint.save(
optimizer_only_prefix
)
# Restore into a graph with the optimizer
with test_utils.device(should_use_gpu=True):
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
global_step=tf.compat.v1.train.get_or_create_global_step(),
)
status = root.restore(save_path=model_save_path)
input_value = tf.constant([[3.0]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step,
)
if not tf.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
train_fn()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
# Make sure initialization doesn't clobber later restores
with test_utils.device(should_use_gpu=True):
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001, beta1=1.0)
root = tf.train.Checkpoint(
optimizer=optimizer,
model=model,
global_step=tf.compat.v1.train.get_or_create_global_step(),
)
opt_root = tf.train.Checkpoint(optimizer=optimizer)
status = root.restore(save_path=model_save_path)
init_only_optimizer_status = opt_root.restore(save_path=None)
optimizer_status = opt_root.restore(
save_path=optimizer_save_path
)
input_value = tf.constant([[3.0]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step,
)
if not tf.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
optimizer_status.run_restore_ops()
status.initialize_or_restore()
init_only_optimizer_status.initialize_or_restore()
train_fn()
self.assertEqual(42.0, self.evaluate(optimizer.variables()[0]))
class CheckpointCompatibilityTests(test_combinations.TestCase):
def _initialized_model(self):
input_value = tf.constant([[3.0]])
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
optimizer_step = tf.compat.v1.train.get_or_create_global_step()
root_trackable = tf.train.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step
)
train_op = optimizer.minimize(
functools.partial(model, input_value), global_step=optimizer_step
)
self.evaluate(trackable_utils.gather_initializers(root_trackable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.0]))
self.evaluate(
optimizer.get_slot(var=model._named_dense.bias, name="m").assign(
[2.0]
)
)
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.0))
return root_trackable
def _set_sentinels(self, root_trackable):
self.evaluate(root_trackable.model._named_dense.bias.assign([101.0]))
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m"
).assign([102.0])
)
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.0))
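    # Sentinel pattern used by these helpers: _initialized_model() writes
    # known-good values (1.0 / 2.0 / 3.0), _set_sentinels() clobbers them
    # with obviously-wrong values (101.0 / 102.0 / 103.0), and
    # _check_sentinels() asserts the known-good values are back after a
    # restore.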
def _check_sentinels(self, root_trackable):
self.assertAllEqual(
[1.0], self.evaluate(root_trackable.model._named_dense.bias)
)
self.assertAllEqual(
[2.0],
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m"
)
),
)
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.assertAllEqual(3.0, self.evaluate(beta1_power))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = tf.Graph()
with save_graph.as_default(), self.session(
graph=save_graph
) as session:
root = self._initialized_model()
name_saver = tf.compat.v1.train.Saver()
return name_saver.save(
sess=session,
save_path=checkpoint_prefix,
global_step=root.optimizer_step,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_utils.device(should_use_gpu=True):
with self.test_session():
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = tf.train.Checkpoint(root=root)
self._set_sentinels(root)
status = object_saver.read(save_path)
if tf.executing_eagerly():
self._check_sentinels(root)
if tf.executing_eagerly():
status.assert_consumed()
status.assert_existing_objects_matched()
status.assert_nontrivial_match()
else:
# When graph building, we haven't read any keys, so we don't
# know whether the restore will be complete.
with self.assertRaisesRegex(AssertionError, "not restored"):
status.assert_consumed()
with self.assertRaisesRegex(AssertionError, "not restored"):
status.assert_existing_objects_matched()
with self.assertRaisesRegex(AssertionError, "not restored"):
status.assert_nontrivial_match()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.read(save_path)
status.initialize_or_restore()
self._check_sentinels(root)
# Check that there is no error when keys are missing from the
# name-based checkpoint.
root.not_in_name_checkpoint = tf.Variable([1.0])
status = object_saver.read(save_path)
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = tf.Graph()
with save_graph.as_default(), self.session(graph=save_graph):
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with tf.__internal__.eager_context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with tf.__internal__.eager_context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = tf.Graph()
with save_graph.as_default(), self.session(graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
tf.test.main()
| tf-keras/tf_keras/tests/tracking_util_with_v1_optimizers_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/tracking_util_with_v1_optimizers_test.py",
"repo_id": "tf-keras",
"token_count": 18719
} | 201 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for control flow.
This file is copied from tensorflow/python/ops/control_flow_util.py.
"""
import tensorflow.compat.v2 as tf
def InXlaContext(graph):
ctxt = graph._get_control_flow_context()
return GetContainingXLAContext(ctxt) is not None
def GraphOrParentsInXlaContext(graph):
while True:
if InXlaContext(graph):
return True
try:
graph = graph.outer_graph
except AttributeError:
return False
def IsInWhileLoop(op):
ctxt = op._get_control_flow_context()
return GetContainingWhileContext(ctxt) is not None
def GetContainingWhileContext(ctxt, stop_ctxt=None):
"""Returns the first ancestor WhileContext of `ctxt`.
Returns `ctxt` if `ctxt` is a WhileContext, or None if `ctxt` is not in a
while loop.
Args:
ctxt: ControlFlowContext
stop_ctxt: ControlFlowContext, optional. If provided, the search will end
if it sees stop_ctxt.
Returns:
`ctxt` if `ctxt` is a WhileContext, the most nested WhileContext
    containing `ctxt`, or None if `ctxt` is not in a while loop. If
    `stop_ctxt` is not `None`, the traversal stops at `stop_ctxt` and
    returns it.
"""
while ctxt:
if ctxt.IsWhileContext() or ctxt == stop_ctxt:
return ctxt
ctxt = ctxt.outer_context
return None
def GetContainingXLAContext(ctxt):
"""Returns the first ancestor XLAContext of `ctxt`.
    Returns `ctxt` if `ctxt` is an XLAContext, or None if `ctxt` is not in an
    XLA context.
Args:
ctxt: ControlFlowContext
Returns:
    `ctxt` if `ctxt` is an XLAContext, the most nested XLAContext containing
    `ctxt`, or None if `ctxt` is not in an XLA context.
"""
while ctxt:
if ctxt.IsXLAContext():
return ctxt
ctxt = ctxt.outer_context
return None
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Args:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if isinstance(pred, tf.Variable):
return tf.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)
return tf.__internal__.smart_cond.smart_cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name
)
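# Hedged examples: a static Python-bool predicate is constant-folded to a
# single branch, while a tf.Variable predicate always builds a tf.cond:
#   smart_cond(True, lambda: tf.constant(1), lambda: tf.constant(0))   # 1
#   smart_cond(tf.Variable(False),
#              lambda: tf.constant(1), lambda: tf.constant(0))  # tf.cond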
def constant_value(pred):
"""Return the bool value for `pred`, or None if `pred` had a dynamic value.
Args:
pred: A scalar, either a Python bool or a TensorFlow boolean variable
or tensor, or the Python integer 1 or 0.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
TypeError: If `pred` is not a Variable, Tensor or bool, or Python
integer 1 or 0.
"""
if isinstance(pred, tf.Tensor):
return tf.get_static_value(pred)
if pred in {0, 1}: # Accept 1/0 as valid boolean values
return bool(pred)
if isinstance(pred, bool):
return pred
if isinstance(pred, tf.Variable):
return None
raise TypeError(
"`pred` must be a Tensor, or a Python bool, or 1 or 0. "
f"Received: {type(pred)}"
)
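# Examples following directly from the branches above:
#   constant_value(True)               -> True
#   constant_value(0)                  -> False
#   constant_value(tf.Variable(True))  -> None  (dynamic value)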
| tf-keras/tf_keras/utils/control_flow_util.py/0 | {
"file_path": "tf-keras/tf_keras/utils/control_flow_util.py",
"repo_id": "tf-keras",
"token_count": 1647
} | 202 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image_utils."""
import io
import os
import pathlib
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import image_utils
@test_utils.run_v2_only
class TestImageUtils(test_combinations.TestCase):
def test_smart_resize(self):
test_input = np.random.random((20, 40, 3))
output = image_utils.smart_resize(test_input, size=(50, 50))
self.assertIsInstance(output, np.ndarray)
self.assertListEqual(list(output.shape), [50, 50, 3])
output = image_utils.smart_resize(test_input, size=(10, 10))
self.assertListEqual(list(output.shape), [10, 10, 3])
output = image_utils.smart_resize(test_input, size=(100, 50))
self.assertListEqual(list(output.shape), [100, 50, 3])
output = image_utils.smart_resize(test_input, size=(5, 15))
self.assertListEqual(list(output.shape), [5, 15, 3])
@parameterized.named_parameters(
("size1", (50, 50)),
("size2", (10, 10)),
("size3", (100, 50)),
("size4", (5, 15)),
)
def test_smart_resize_tf_dataset(self, size):
test_input_np = np.random.random((2, 20, 40, 3))
test_ds = tf.data.Dataset.from_tensor_slices(test_input_np)
resize = lambda img: image_utils.smart_resize(img, size=size)
test_ds = test_ds.map(resize)
for sample in test_ds.as_numpy_iterator():
self.assertIsInstance(sample, np.ndarray)
self.assertListEqual(list(sample.shape), [size[0], size[1], 3])
def test_smart_resize_batch(self):
img = np.random.random((2, 20, 40, 3))
out = image_utils.smart_resize(img, size=(20, 20))
self.assertListEqual(list(out.shape), [2, 20, 20, 3])
self.assertAllClose(out, img[:, :, 10:-10, :])
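        # smart_resize center-crops to the target aspect ratio before
        # resizing, which is why 10 columns are dropped from each side of
        # the 40-pixel-wide inputs above.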
def test_smart_resize_errors(self):
with self.assertRaisesRegex(ValueError, "a tuple of 2 integers"):
image_utils.smart_resize(
np.random.random((20, 20, 2)), size=(10, 5, 3)
)
with self.assertRaisesRegex(ValueError, "incorrect rank"):
image_utils.smart_resize(np.random.random((2, 4)), size=(10, 5))
with self.assertRaisesRegex(ValueError, "incorrect rank"):
image_utils.smart_resize(
np.random.random((2, 4, 4, 5, 3)), size=(10, 5)
)
@test_utils.run_v2_only
class TestImageLoading(test_combinations.TestCase):
def test_load_img(self):
tmpdir = self.create_tempdir()
filename_rgb = os.path.join(tmpdir.full_path, "rgb_utils.png")
filename_rgba = os.path.join(tmpdir.full_path, "rgba_utils.png")
filename_grayscale_8bit = os.path.join(
tmpdir.full_path, "grayscale_8bit_utils.png"
)
filename_grayscale_16bit = os.path.join(
tmpdir.full_path, "grayscale_16bit_utils.tiff"
)
filename_grayscale_32bit = os.path.join(
tmpdir.full_path, "grayscale_32bit_utils.tiff"
)
original_rgb_array = np.array(
255 * np.random.rand(100, 100, 3), dtype=np.uint8
)
original_rgb = image_utils.array_to_img(original_rgb_array, scale=False)
original_rgb.save(filename_rgb)
original_rgba_array = np.array(
255 * np.random.rand(100, 100, 4), dtype=np.uint8
)
original_rgba = image_utils.array_to_img(
original_rgba_array, scale=False
)
original_rgba.save(filename_rgba)
original_grayscale_8bit_array = np.array(
255 * np.random.rand(100, 100, 1), dtype=np.uint8
)
original_grayscale_8bit = image_utils.array_to_img(
original_grayscale_8bit_array, scale=False
)
original_grayscale_8bit.save(filename_grayscale_8bit)
original_grayscale_16bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)),
dtype=np.int16,
)
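        # Note: drawing from the int32 range and casting to int16 wraps the
        # values; only the dtype matters for this round-trip test.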
original_grayscale_16bit = image_utils.array_to_img(
original_grayscale_16bit_array, scale=False, dtype="int16"
)
original_grayscale_16bit.save(filename_grayscale_16bit)
original_grayscale_32bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)),
dtype=np.int32,
)
original_grayscale_32bit = image_utils.array_to_img(
original_grayscale_32bit_array, scale=False, dtype="int32"
)
original_grayscale_32bit.save(filename_grayscale_32bit)
# Test that loaded image is exactly equal to original.
loaded_im = image_utils.load_img(filename_rgb)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_rgb_array.shape)
self.assertAllClose(loaded_im_array, original_rgb_array)
loaded_im = image_utils.load_img(filename_rgba, color_mode="rgba")
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_rgba_array.shape)
self.assertAllClose(loaded_im_array, original_rgba_array)
loaded_im = image_utils.load_img(filename_rgb, color_mode="grayscale")
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(
loaded_im_array.shape,
(original_rgb_array.shape[0], original_rgb_array.shape[1], 1),
)
loaded_im = image_utils.load_img(
filename_grayscale_8bit, color_mode="grayscale"
)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(
loaded_im_array.shape, original_grayscale_8bit_array.shape
)
self.assertAllClose(loaded_im_array, original_grayscale_8bit_array)
loaded_im = image_utils.load_img(
filename_grayscale_16bit, color_mode="grayscale"
)
loaded_im_array = image_utils.img_to_array(loaded_im, dtype="int16")
self.assertEqual(
loaded_im_array.shape, original_grayscale_16bit_array.shape
)
self.assertAllClose(loaded_im_array, original_grayscale_16bit_array)
# test casting int16 image to float32
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertAllClose(loaded_im_array, original_grayscale_16bit_array)
loaded_im = image_utils.load_img(
filename_grayscale_32bit, color_mode="grayscale"
)
loaded_im_array = image_utils.img_to_array(loaded_im, dtype="int32")
self.assertEqual(
loaded_im_array.shape, original_grayscale_32bit_array.shape
)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
# test casting int32 image to float32
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
# Test that nothing is changed when target size is equal to original.
loaded_im = image_utils.load_img(filename_rgb, target_size=(100, 100))
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_rgb_array.shape)
self.assertAllClose(loaded_im_array, original_rgb_array)
loaded_im = image_utils.load_img(
filename_rgba, color_mode="rgba", target_size=(100, 100)
)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, original_rgba_array.shape)
self.assertAllClose(loaded_im_array, original_rgba_array)
loaded_im = image_utils.load_img(
filename_rgb, color_mode="grayscale", target_size=(100, 100)
)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(
loaded_im_array.shape,
            (original_rgb_array.shape[0], original_rgb_array.shape[1], 1),
)
loaded_im = image_utils.load_img(
filename_grayscale_8bit,
color_mode="grayscale",
target_size=(100, 100),
)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(
loaded_im_array.shape, original_grayscale_8bit_array.shape
)
self.assertAllClose(loaded_im_array, original_grayscale_8bit_array)
loaded_im = image_utils.load_img(
filename_grayscale_16bit,
color_mode="grayscale",
target_size=(100, 100),
)
loaded_im_array = image_utils.img_to_array(loaded_im, dtype="int16")
self.assertEqual(
loaded_im_array.shape, original_grayscale_16bit_array.shape
)
self.assertAllClose(loaded_im_array, original_grayscale_16bit_array)
loaded_im = image_utils.load_img(
filename_grayscale_32bit,
color_mode="grayscale",
target_size=(100, 100),
)
loaded_im_array = image_utils.img_to_array(loaded_im, dtype="int32")
self.assertEqual(
loaded_im_array.shape, original_grayscale_32bit_array.shape
)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
# Test down-sampling with bilinear interpolation.
loaded_im = image_utils.load_img(filename_rgb, target_size=(25, 25))
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 3))
loaded_im = image_utils.load_img(
filename_rgba, color_mode="rgba", target_size=(25, 25)
)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 4))
loaded_im = image_utils.load_img(
filename_rgb, color_mode="grayscale", target_size=(25, 25)
)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image_utils.load_img(
filename_grayscale_8bit,
color_mode="grayscale",
target_size=(25, 25),
)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image_utils.load_img(
filename_grayscale_16bit,
color_mode="grayscale",
target_size=(25, 25),
)
loaded_im_array = image_utils.img_to_array(loaded_im, dtype="int16")
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image_utils.load_img(
filename_grayscale_32bit,
color_mode="grayscale",
target_size=(25, 25),
)
loaded_im_array = image_utils.img_to_array(loaded_im, dtype="int32")
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
# Test down-sampling with nearest neighbor interpolation.
loaded_im_nearest = image_utils.load_img(
filename_rgb, target_size=(25, 25), interpolation="nearest"
)
loaded_im_array_nearest = image_utils.img_to_array(loaded_im_nearest)
self.assertEqual(loaded_im_array_nearest.shape, (25, 25, 3))
self.assertTrue(np.any(loaded_im_array_nearest != loaded_im_array))
loaded_im_nearest = image_utils.load_img(
filename_rgba,
color_mode="rgba",
target_size=(25, 25),
interpolation="nearest",
)
loaded_im_array_nearest = image_utils.img_to_array(loaded_im_nearest)
self.assertEqual(loaded_im_array_nearest.shape, (25, 25, 4))
self.assertTrue(np.any(loaded_im_array_nearest != loaded_im_array))
loaded_im = image_utils.load_img(
filename_grayscale_8bit,
color_mode="grayscale",
target_size=(25, 25),
interpolation="nearest",
)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image_utils.load_img(
filename_grayscale_16bit,
color_mode="grayscale",
target_size=(25, 25),
interpolation="nearest",
)
loaded_im_array = image_utils.img_to_array(loaded_im, dtype="int16")
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
loaded_im = image_utils.load_img(
filename_grayscale_32bit,
color_mode="grayscale",
target_size=(25, 25),
interpolation="nearest",
)
loaded_im_array = image_utils.img_to_array(loaded_im, dtype="int32")
self.assertEqual(loaded_im_array.shape, (25, 25, 1))
# Test different path type
with open(filename_grayscale_32bit, "rb") as f:
            path_ = io.BytesIO(f.read())  # io.BytesIO
loaded_im = image_utils.load_img(path_, color_mode="grayscale")
loaded_im_array = image_utils.img_to_array(loaded_im, dtype=np.int32)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
path_ = filename_grayscale_32bit # str
loaded_im = image_utils.load_img(path_, color_mode="grayscale")
loaded_im_array = image_utils.img_to_array(loaded_im, dtype=np.int32)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
path_ = filename_grayscale_32bit.encode() # bytes
loaded_im = image_utils.load_img(path_, color_mode="grayscale")
loaded_im_array = image_utils.img_to_array(loaded_im, dtype=np.int32)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
path_ = pathlib.Path(
os.path.join(tmpdir.full_path, "grayscale_32bit_utils.tiff")
)
loaded_im = image_utils.load_img(path_, color_mode="grayscale")
loaded_im_array = image_utils.img_to_array(loaded_im, dtype=np.int32)
self.assertAllClose(loaded_im_array, original_grayscale_32bit_array)
        # An unsupported interpolation only raises when resizing is actually
        # requested; without a target_size the argument is ignored.
loaded_im = image_utils.load_img(
filename_rgb, interpolation="unsupported"
)
with self.assertRaises(ValueError):
loaded_im = image_utils.load_img(
filename_rgb, target_size=(25, 25), interpolation="unsupported"
)
# Check that the aspect ratio of a square is the same
filename_red_square = os.path.join(
tmpdir.full_path, "red_square_utils.png"
)
arr = np.zeros((50, 100, 3), dtype=np.uint8) # rectangle image 100x50
arr[20:30, 45:55, 0] = 255 # red square 10x10
red_square_array = np.array(arr)
red_square = image_utils.array_to_img(red_square_array, scale=False)
red_square.save(filename_red_square)
loaded_im = image_utils.load_img(
filename_red_square, target_size=(25, 25), keep_aspect_ratio=True
)
loaded_im_array = image_utils.img_to_array(loaded_im)
self.assertEqual(loaded_im_array.shape, (25, 25, 3))
red_channel_arr = loaded_im_array[:, :, 0].astype(bool)
square_width = np.sum(np.sum(red_channel_arr, axis=0))
square_height = np.sum(np.sum(red_channel_arr, axis=1))
aspect_ratio_result = square_width / square_height
# original square had 1:1 ratio
self.assertNear(aspect_ratio_result, 1.0, 0.01)
def test_array_to_img_and_img_to_array(self):
height, width = 10, 8
# Test the data format
# Test RGB 3D
x = np.random.random((3, height, width))
img = image_utils.array_to_img(x, data_format="channels_first")
self.assertEqual(img.size, (width, height))
x = image_utils.img_to_array(img, data_format="channels_first")
self.assertEqual(x.shape, (3, height, width))
# Test RGBA 3D
x = np.random.random((4, height, width))
img = image_utils.array_to_img(x, data_format="channels_first")
self.assertEqual(img.size, (width, height))
x = image_utils.img_to_array(img, data_format="channels_first")
self.assertEqual(x.shape, (4, height, width))
# Test 2D
x = np.random.random((1, height, width))
img = image_utils.array_to_img(x, data_format="channels_first")
self.assertEqual(img.size, (width, height))
x = image_utils.img_to_array(img, data_format="channels_first")
self.assertEqual(x.shape, (1, height, width))
# grayscale 32-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (1, height, width)),
dtype=np.int32,
)
img = image_utils.array_to_img(x, data_format="channels_first")
self.assertEqual(img.size, (width, height))
x = image_utils.img_to_array(img, data_format="channels_first")
self.assertEqual(x.shape, (1, height, width))
# Test tf data format
# Test RGB 3D
x = np.random.random((height, width, 3))
img = image_utils.array_to_img(x, data_format="channels_last")
self.assertEqual(img.size, (width, height))
x = image_utils.img_to_array(img, data_format="channels_last")
self.assertEqual(x.shape, (height, width, 3))
# Test RGBA 3D
x = np.random.random((height, width, 4))
img = image_utils.array_to_img(x, data_format="channels_last")
self.assertEqual(img.size, (width, height))
x = image_utils.img_to_array(img, data_format="channels_last")
self.assertEqual(x.shape, (height, width, 4))
# Test 2D
x = np.random.random((height, width, 1))
img = image_utils.array_to_img(x, data_format="channels_last")
self.assertEqual(img.size, (width, height))
x = image_utils.img_to_array(img, data_format="channels_last")
self.assertEqual(x.shape, (height, width, 1))
# grayscale 16-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int16,
)
img = image_utils.array_to_img(x, data_format="channels_last")
self.assertEqual(img.size, (width, height))
x = image_utils.img_to_array(img, data_format="channels_last")
self.assertEqual(x.shape, (height, width, 1))
# grayscale 32-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int32,
)
img = image_utils.array_to_img(x, data_format="channels_last")
self.assertEqual(img.size, (width, height))
x = image_utils.img_to_array(img, data_format="channels_last")
self.assertEqual(x.shape, (height, width, 1))
# Test invalid use case
with self.assertRaises(ValueError):
x = np.random.random((height, width)) # not 3D
img = image_utils.array_to_img(x, data_format="channels_first")
with self.assertRaises(ValueError):
x = np.random.random((height, width, 3))
# unknown data_format
img = image_utils.array_to_img(x, data_format="channels")
with self.assertRaises(ValueError):
            # neither RGB, RGBA, nor grayscale
x = np.random.random((height, width, 5))
img = image_utils.array_to_img(x, data_format="channels_last")
with self.assertRaises(ValueError):
x = np.random.random((height, width, 3))
# unknown data_format
img = image_utils.img_to_array(x, data_format="channels")
with self.assertRaises(ValueError):
            # neither RGB, RGBA, nor grayscale
x = np.random.random((height, width, 5, 3))
img = image_utils.img_to_array(x, data_format="channels_last")
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/image_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/image_utils_test.py",
"repo_id": "tf-keras",
"token_count": 9508
} | 203 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for np_utils."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.testing_infra import test_combinations
from tf_keras.utils import np_utils
NUM_CLASSES = 5
class TestNPUtils(test_combinations.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CLASSES)),
((3,), (3, NUM_CLASSES)),
((4, 3), (4, 3, NUM_CLASSES)),
((5, 4, 3), (5, 4, 3, NUM_CLASSES)),
((3, 1), (3, NUM_CLASSES)),
((3, 2, 1), (3, 2, NUM_CLASSES)),
]
)
def test_to_categorical(self, shape, expected_shape):
label = np.random.randint(0, NUM_CLASSES, shape)
one_hot = np_utils.to_categorical(label, NUM_CLASSES)
# Check shape
self.assertEqual(one_hot.shape, expected_shape)
# Make sure there is only one 1 in a row
self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
# Get original labels back from one hots
self.assertTrue(
np.all(np.argmax(one_hot, -1).reshape(label.shape) == label)
)
    def test_to_categorical_without_num_classes(self):
label = [0, 2, 5]
one_hot = np_utils.to_categorical(label)
self.assertEqual(one_hot.shape, (3, 5 + 1))
@parameterized.parameters(
[
((1,), (1, NUM_CLASSES - 1)),
((3,), (3, NUM_CLASSES - 1)),
((4, 3), (4, 3, NUM_CLASSES - 1)),
((5, 4, 3), (5, 4, 3, NUM_CLASSES - 1)),
((3, 1), (3, NUM_CLASSES - 1)),
((3, 2, 1), (3, 2, NUM_CLASSES - 1)),
]
)
def test_to_ordinal(self, shape, expected_shape):
label = np.random.randint(0, NUM_CLASSES, shape)
ordinal = np_utils.to_ordinal(label, NUM_CLASSES)
# Check shape
self.assertEqual(ordinal.shape, expected_shape)
# Make sure all the values are either 0 or 1
self.assertTrue(np.all(np.logical_or(ordinal == 0, ordinal == 1)))
# Get original labels back from ordinal matrix
self.assertTrue(
np.all(ordinal.cumprod(-1).sum(-1).reshape(label.shape) == label)
)
def test_to_ordinal_without_num_classes(self):
label = [0, 2, 5]
one_hot = np_utils.to_ordinal(label)
self.assertEqual(one_hot.shape, (3, 5))
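# For reference (a sketch, not part of the original tests): with five
# classes, to_ordinal encodes label 3 as [1, 1, 1, 0] -- entry k is 1 iff
# label > k -- which is why cumprod(-1).sum(-1) recovers the label above.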
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/np_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/np_utils_test.py",
"repo_id": "tf-keras",
"token_count": 1326
} | 204 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to TF-Keras exception stack trace prettifying."""
import inspect
import os
import sys
import traceback
import types
import tensorflow.compat.v2 as tf
_EXCLUDED_PATHS = (
os.path.abspath(os.path.join(__file__, "..", "..")),
os.path.join("tensorflow", "python"),
)
def include_frame(fname):
for exclusion in _EXCLUDED_PATHS:
if exclusion in fname:
return False
return True
def _process_traceback_frames(tb):
"""Iterate through traceback frames and return a new, filtered traceback."""
last_tb = None
tb_list = list(traceback.walk_tb(tb))
for f, line_no in reversed(tb_list):
if include_frame(f.f_code.co_filename):
last_tb = types.TracebackType(last_tb, f, f.f_lasti, line_no)
if last_tb is None and tb_list:
# If no frames were kept during filtering, create a new traceback
# from the outermost function.
f, line_no = tb_list[-1]
last_tb = types.TracebackType(last_tb, f, f.f_lasti, line_no)
return last_tb
def filter_traceback(fn):
"""Filter out Keras-internal stack trace frames in exceptions raised by
fn."""
if sys.version_info.major != 3 or sys.version_info.minor < 7:
return fn
def error_handler(*args, **kwargs):
if not tf.debugging.is_traceback_filtering_enabled():
return fn(*args, **kwargs)
filtered_tb = None
try:
return fn(*args, **kwargs)
except Exception as e:
filtered_tb = _process_traceback_frames(e.__traceback__)
# To get the full stack trace, call:
# `tf.debugging.disable_traceback_filtering()`
raise e.with_traceback(filtered_tb) from None
finally:
del filtered_tb
return tf.__internal__.decorator.make_decorator(fn, error_handler)
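# Example (a minimal sketch; `_user_fn` is a hypothetical function): frames
# originating from TF-Keras internals are stripped from the traceback of the
# re-raised error, unless `tf.debugging.disable_traceback_filtering()` has
# been called.
@filter_traceback
def _user_fn(x):
    if x < 0:
        raise ValueError("x must be non-negative")
    return x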
def inject_argument_info_in_traceback(fn, object_name=None):
"""Add information about call argument values to an error message.
Arguments:
fn: Function to wrap. Exceptions raised by the this function will be
re-raised with additional information added to the error message,
displaying the values of the different arguments that the function
was called with.
object_name: String, display name of the class/function being called,
e.g. `'layer "layer_name" (LayerClass)'`.
Returns:
A wrapped version of `fn`.
"""
def error_handler(*args, **kwargs):
signature = None
bound_signature = None
try:
return fn(*args, **kwargs)
except Exception as e:
if hasattr(e, "_keras_call_info_injected"):
# Only inject info for the innermost failing call
raise e
signature = inspect.signature(fn)
try:
                # Bind the received arguments to the signature so they can
                # be reported by name below.
bound_signature = signature.bind(*args, **kwargs)
except TypeError:
# Likely unbindable arguments
raise e
# Add argument context
arguments_context = []
for arg in list(signature.parameters.values()):
if arg.name in bound_signature.arguments:
value = tf.nest.map_structure(
format_argument_value,
bound_signature.arguments[arg.name],
)
else:
value = arg.default
arguments_context.append(f" β’ {arg.name}={value}")
if arguments_context:
arguments_context = "\n".join(arguments_context)
# Get original error message and append information to it.
if isinstance(e, tf.errors.OpError):
message = e.message
elif e.args:
# Canonically, the 1st argument in an exception is the error
# message. This works for all built-in Python exceptions.
message = e.args[0]
else:
message = ""
display_name = f"{object_name if object_name else fn.__name__}"
message = (
f"Exception encountered when calling {display_name}.\n\n"
f"{message}\n\n"
f"Call arguments received by {display_name}:\n"
f"{arguments_context}"
)
# Reraise exception, with added context
if isinstance(e, tf.errors.OpError):
new_e = e.__class__(e.node_def, e.op, message, e.error_code)
else:
try:
# For standard exceptions such as ValueError, TypeError,
# etc.
new_e = e.__class__(message)
except TypeError:
# For any custom error that doesn't have a standard
# signature.
new_e = RuntimeError(message)
new_e._keras_call_info_injected = True
else:
new_e = e
raise new_e.with_traceback(e.__traceback__) from None
finally:
del signature
del bound_signature
return tf.__internal__.decorator.make_decorator(fn, error_handler)
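# Example (a minimal sketch; `_scale` is a hypothetical function): if the
# wrapped callable raises, the error message is extended with the received
# call arguments, each formatted by `format_argument_value` below.
def _scale(x, factor=2.0):
    return x * factor
_safe_scale = inject_argument_info_in_traceback(_scale, object_name="_scale")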
def format_argument_value(value):
if isinstance(value, tf.Tensor):
# Simplified representation for eager / graph tensors
# to keep messages readable
return f"tf.Tensor(shape={value.shape}, dtype={value.dtype.name})"
return repr(value)
| tf-keras/tf_keras/utils/traceback_utils.py/0 | {
"file_path": "tf-keras/tf_keras/utils/traceback_utils.py",
"repo_id": "tf-keras",
"token_count": 2848
} | 205 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import List
from typing import Optional
from typing import Type
from typing import Union
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow import nest
from autokeras import blocks
from autokeras import graph as graph_module
from autokeras import pipeline
from autokeras import tuners
from autokeras.engine import head as head_module
from autokeras.engine import node as node_module
from autokeras.engine import tuner
from autokeras.nodes import Input
from autokeras.utils import data_utils
from autokeras.utils import utils
TUNER_CLASSES = {
"bayesian": tuners.BayesianOptimization,
"random": tuners.RandomSearch,
"hyperband": tuners.Hyperband,
"greedy": tuners.Greedy,
}
def get_tuner_class(tuner):
if isinstance(tuner, str) and tuner in TUNER_CLASSES:
return TUNER_CLASSES.get(tuner)
else:
raise ValueError(
'Expected the tuner argument to be one of "greedy", '
'"random", "hyperband", or "bayesian", '
"but got {tuner}".format(tuner=tuner)
)
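# For reference (a sketch): get_tuner_class("hyperband") returns
# tuners.Hyperband, while an unrecognized name raises the ValueError above.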
class AutoModel(object):
"""A Model defined by inputs and outputs.
AutoModel combines a HyperModel and a Tuner to tune the HyperModel.
The user can use it in a similar way to a Keras model since it
also has `fit()` and `predict()` methods.
The AutoModel has two use cases. In the first case, the user only specifies
the input nodes and output heads of the AutoModel. The AutoModel infers the
    rest of the model. In the second case, the user can specify the high-level
architecture of the AutoModel by connecting the Blocks with the functional
API, which is the same as the Keras
[functional API](https://www.tensorflow.org/guide/keras/functional).
# Example
```python
# The user only specifies the input nodes and output heads.
import autokeras as ak
ak.AutoModel(
inputs=[ak.ImageInput(), ak.TextInput()],
outputs=[ak.ClassificationHead(), ak.RegressionHead()]
)
```
```python
# The user specifies the high-level architecture.
import autokeras as ak
image_input = ak.ImageInput()
image_output = ak.ImageBlock()(image_input)
text_input = ak.TextInput()
text_output = ak.TextBlock()(text_input)
output = ak.Merge()([image_output, text_output])
classification_output = ak.ClassificationHead()(output)
regression_output = ak.RegressionHead()(output)
ak.AutoModel(
inputs=[image_input, text_input],
outputs=[classification_output, regression_output]
)
```
# Arguments
inputs: A list of Node instances.
The input node(s) of the AutoModel.
outputs: A list of Node or Head instances.
The output node(s) or head(s) of the AutoModel.
project_name: String. The name of the AutoModel. Defaults to
'auto_model'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to
100.
directory: String. The path to a directory for storing the search
outputs. Defaults to None, which would create a folder with the
name of the AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
tuner: String or subclass of AutoTuner. If string, it should be one of
'greedy', 'bayesian', 'hyperband' or 'random'. It can also be a
subclass of AutoTuner. Defaults to 'greedy'.
overwrite: Boolean. Defaults to `False`. If `False`, reloads an existing
project of the same name if one is found. Otherwise, overwrites the
project.
seed: Int. Random seed.
max_model_size: Int. Maximum number of scalars in the parameters of a
model. Models larger than this are rejected.
**kwargs: Any arguments supported by keras_tuner.Tuner.
"""
def __init__(
self,
inputs: Union[Input, List[Input]],
outputs: Union[head_module.Head, node_module.Node, list],
project_name: str = "auto_model",
max_trials: int = 100,
directory: Union[str, Path, None] = None,
objective: str = "val_loss",
tuner: Union[str, Type[tuner.AutoTuner]] = "greedy",
overwrite: bool = False,
seed: Optional[int] = None,
max_model_size: Optional[int] = None,
**kwargs
):
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
self.seed = seed
if seed:
np.random.seed(seed)
tf.random.set_seed(seed)
# TODO: Support passing a tuner instance.
# Initialize the hyper_graph.
graph = self._build_graph()
if isinstance(tuner, str):
tuner = get_tuner_class(tuner)
self.tuner = tuner(
hypermodel=graph,
overwrite=overwrite,
objective=objective,
max_trials=max_trials,
directory=directory,
seed=self.seed,
project_name=project_name,
max_model_size=max_model_size,
**kwargs
)
self.overwrite = overwrite
self._heads = [output_node.in_blocks[0] for output_node in self.outputs]
@property
def objective(self):
return self.tuner.objective
@property
def max_trials(self):
return self.tuner.max_trials
@property
def directory(self):
return self.tuner.directory
@property
def project_name(self):
return self.tuner.project_name
def _assemble(self):
"""Assemble the Blocks based on the input output nodes."""
inputs = nest.flatten(self.inputs)
outputs = nest.flatten(self.outputs)
middle_nodes = [
input_node.get_block()(input_node) for input_node in inputs
]
# Merge the middle nodes.
if len(middle_nodes) > 1:
output_node = blocks.Merge()(middle_nodes)
else:
output_node = middle_nodes[0]
outputs = nest.flatten(
[output_blocks(output_node) for output_blocks in outputs]
)
return graph_module.Graph(inputs=inputs, outputs=outputs)
def _build_graph(self):
# Using functional API.
if all(
[isinstance(output, node_module.Node) for output in self.outputs]
):
graph = graph_module.Graph(inputs=self.inputs, outputs=self.outputs)
# Using input/output API.
elif all(
[isinstance(output, head_module.Head) for output in self.outputs]
):
# Clear session to reset get_uid(). The names of the blocks will
# start to count from 1 for new blocks in a new AutoModel
# afterwards. When initializing multiple AutoModel with Task API,
# if not counting from 1 for each of the AutoModel, the predefined
            # hp values in task-specific tuners would not match the names.
keras.backend.clear_session()
graph = self._assemble()
self.outputs = graph.outputs
keras.backend.clear_session()
return graph
def fit(
self,
x=None,
y=None,
batch_size=32,
epochs=None,
callbacks=None,
validation_split=0.2,
validation_data=None,
verbose=1,
**kwargs
):
"""Search for the best model and hyperparameters for the AutoModel.
It will search for the best model based on the performances on
validation data.
# Arguments
x: numpy.ndarray or tensorflow.Dataset. Training data x.
y: numpy.ndarray or tensorflow.Dataset. Training data y.
batch_size: Int. Number of samples per gradient update. Defaults to
32.
epochs: Int. The number of epochs to train each model during the
search. If unspecified, by default we train for a maximum of
1000 epochs, but we stop training if the validation loss stops
improving for 10 epochs (unless you specified an EarlyStopping
callback as part of the callbacks argument, in which case the
EarlyStopping callback you specified will determine early
stopping).
callbacks: List of Keras callbacks to apply during training and
validation.
validation_split: Float between 0 and 1. Defaults to 0.2.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate the loss and any model
metrics on this data at the end of each epoch. The validation
data is selected from the last samples in the `x` and `y` data
provided, before shuffling. This argument is not supported when
`x` is a dataset. The best model found would be fit on the
entire dataset including the validation data.
validation_data: Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained
on this data. `validation_data` will override
`validation_split`. The type of the validation data should be
the same as the training data. The best model found would be
fit on the training dataset without the validation data.
verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar,
2 = one line per epoch. Note that the progress bar is not
particularly useful when logged to a file, so verbose=2 is
                recommended when not running interactively (e.g., in a production
environment). Controls the verbosity of both KerasTuner search
and
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit)
**kwargs: Any arguments supported by
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit).
# Returns
history: A Keras History object corresponding to the best model.
Its History.history attribute is a record of training
loss values and metrics values at successive epochs, as well as
validation loss values and validation metrics values (if
applicable).
"""
# Check validation information.
if not validation_data and not validation_split:
raise ValueError(
"Either validation_data or a non-zero validation_split "
"should be provided."
)
if validation_data:
validation_split = 0
dataset, validation_data = self._convert_to_dataset(
x=x, y=y, validation_data=validation_data, batch_size=batch_size
)
self._analyze_data(dataset)
self._build_hyper_pipeline(dataset)
# Split the data with validation_split.
if validation_data is None and validation_split:
dataset, validation_data = data_utils.split_dataset(
dataset, validation_split
)
history = self.tuner.search(
x=dataset,
epochs=epochs,
callbacks=callbacks,
validation_data=validation_data,
validation_split=validation_split,
verbose=verbose,
**kwargs
)
return history
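    # Example (a sketch; `x` and `y` are assumed NumPy arrays matching the
    # declared inputs and outputs):
    #     history = auto_model.fit(x, y, validation_split=0.15, epochs=3)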
def _adapt(self, dataset, hms, batch_size):
if isinstance(dataset, tf.data.Dataset):
sources = data_utils.unzip_dataset(dataset)
else:
sources = nest.flatten(dataset)
adapted = []
for source, hm in zip(sources, hms):
source = hm.get_adapter().adapt(source, batch_size)
adapted.append(source)
if len(adapted) == 1:
return adapted[0]
return tf.data.Dataset.zip(tuple(adapted))
def _check_data_format(self, dataset, validation=False, predict=False):
"""Check if the dataset has the same number of IOs with the model."""
if validation:
in_val = " in validation_data"
if isinstance(dataset, tf.data.Dataset):
x = dataset
y = None
else:
x, y = dataset
else:
in_val = ""
x, y = dataset
if isinstance(x, tf.data.Dataset) and y is not None:
raise ValueError(
"Expected y to be None when x is "
"tf.data.Dataset{in_val}.".format(in_val=in_val)
)
if isinstance(x, tf.data.Dataset):
if not predict:
x_shapes, y_shapes = data_utils.dataset_shape(x)
x_shapes = nest.flatten(x_shapes)
y_shapes = nest.flatten(y_shapes)
else:
x_shapes = nest.flatten(data_utils.dataset_shape(x))
else:
x_shapes = [a.shape for a in nest.flatten(x)]
if not predict:
y_shapes = [a.shape for a in nest.flatten(y)]
if len(x_shapes) != len(self.inputs):
raise ValueError(
"Expected x{in_val} to have {input_num} arrays, "
"but got {data_num}".format(
in_val=in_val,
input_num=len(self.inputs),
data_num=len(x_shapes),
)
)
if not predict and len(y_shapes) != len(self.outputs):
raise ValueError(
"Expected y{in_val} to have {output_num} arrays, "
"but got {data_num}".format(
in_val=in_val,
output_num=len(self.outputs),
data_num=len(y_shapes),
)
)
def _analyze_data(self, dataset):
input_analysers = [node.get_analyser() for node in self.inputs]
output_analysers = [head.get_analyser() for head in self._heads]
analysers = input_analysers + output_analysers
for x, y in dataset:
x = nest.flatten(x)
y = nest.flatten(y)
for item, analyser in zip(x + y, analysers):
analyser.update(item)
for analyser in analysers:
analyser.finalize()
for hm, analyser in zip(self.inputs + self._heads, analysers):
hm.config_from_analyser(analyser)
def _build_hyper_pipeline(self, dataset):
self.tuner.hyper_pipeline = pipeline.HyperPipeline(
inputs=[node.get_hyper_preprocessors() for node in self.inputs],
outputs=[head.get_hyper_preprocessors() for head in self._heads],
)
self.tuner.hypermodel.hyper_pipeline = self.tuner.hyper_pipeline
def _convert_to_dataset(self, x, y, validation_data, batch_size):
"""Convert the data to tf.data.Dataset."""
# TODO: Handle other types of input, zip dataset, tensor, dict.
# Convert training data.
self._check_data_format((x, y))
if isinstance(x, tf.data.Dataset):
dataset = x
x = dataset.map(lambda x, y: x)
y = dataset.map(lambda x, y: y)
x = self._adapt(x, self.inputs, batch_size)
y = self._adapt(y, self._heads, batch_size)
dataset = tf.data.Dataset.zip((x, y))
# Convert validation data
if validation_data:
self._check_data_format(validation_data, validation=True)
if isinstance(validation_data, tf.data.Dataset):
x = validation_data.map(lambda x, y: x)
y = validation_data.map(lambda x, y: y)
else:
x, y = validation_data
x = self._adapt(x, self.inputs, batch_size)
y = self._adapt(y, self._heads, batch_size)
validation_data = tf.data.Dataset.zip((x, y))
return dataset, validation_data
def _has_y(self, dataset):
"""Remove y from the tf.data.Dataset if exists."""
shapes = data_utils.dataset_shape(dataset)
# Only one or less element in the first level.
if len(shapes) <= 1:
return False
# The first level has more than 1 element.
# The nest has 2 levels.
for shape in shapes:
if isinstance(shape, tuple):
return True
# The nest has one level.
# It matches the single IO case.
return (
len(shapes) == 2
and len(self.inputs) == 1
and len(self.outputs) == 1
)
def predict(self, x, batch_size=32, verbose=1, **kwargs):
"""Predict the output for a given testing data.
# Arguments
x: Any allowed types according to the input node. Testing data.
batch_size: Number of samples per batch.
If unspecified, batch_size will default to 32.
verbose: Verbosity mode. 0 = silent, 1 = progress bar.
Controls the verbosity of
[keras.Model.predict](https://tensorflow.org/api_docs/python/tf/keras/Model#predict)
**kwargs: Any arguments supported by keras.Model.predict.
# Returns
A list of numpy.ndarray objects or a single numpy.ndarray.
The predicted results.
"""
if isinstance(x, tf.data.Dataset) and self._has_y(x):
x = x.map(lambda x, y: x)
self._check_data_format((x, None), predict=True)
dataset = self._adapt(x, self.inputs, batch_size)
pipeline = self.tuner.get_best_pipeline()
model = self.tuner.get_best_model()
dataset = pipeline.transform_x(dataset)
dataset = tf.data.Dataset.zip((dataset, dataset))
y = utils.predict_with_adaptive_batch_size(
model=model,
batch_size=batch_size,
x=dataset,
verbose=verbose,
**kwargs
)
return pipeline.postprocess(y)
def evaluate(self, x, y=None, batch_size=32, verbose=1, **kwargs):
"""Evaluate the best model for the given data.
# Arguments
x: Any allowed types according to the input node. Testing data.
y: Any allowed types according to the head. Testing targets.
Defaults to None.
batch_size: Number of samples per batch.
If unspecified, batch_size will default to 32.
verbose: Verbosity mode. 0 = silent, 1 = progress bar.
Controls the verbosity of
[keras.Model.evaluate](http://tensorflow.org/api_docs/python/tf/keras/Model#evaluate)
**kwargs: Any arguments supported by keras.Model.evaluate.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs and/or
metrics). The attribute model.metrics_names will give you the
display labels for the scalar outputs.
"""
self._check_data_format((x, y))
if isinstance(x, tf.data.Dataset):
dataset = x
x = dataset.map(lambda x, y: x)
y = dataset.map(lambda x, y: y)
x = self._adapt(x, self.inputs, batch_size)
y = self._adapt(y, self._heads, batch_size)
dataset = tf.data.Dataset.zip((x, y))
pipeline = self.tuner.get_best_pipeline()
dataset = pipeline.transform(dataset)
model = self.tuner.get_best_model()
return utils.evaluate_with_adaptive_batch_size(
model=model,
batch_size=batch_size,
x=dataset,
verbose=verbose,
**kwargs
)
def export_model(self):
"""Export the best Keras Model.
# Returns
keras.Model instance. The best model found during the search, loaded
with trained weights.
"""
return self.tuner.get_best_model()
| autokeras/autokeras/auto_model.py/0 | {
"file_path": "autokeras/autokeras/auto_model.py",
"repo_id": "autokeras",
"token_count": 9331
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import keras_tuner
import pytest
import autokeras as ak
from autokeras import graph as graph_module
def test_input_output_disconnect():
input_node1 = ak.Input()
output_node = input_node1
_ = ak.DenseBlock()(output_node)
input_node = ak.Input()
output_node = input_node
output_node = ak.DenseBlock()(output_node)
output_node = ak.RegressionHead()(output_node)
with pytest.raises(ValueError) as info:
graph_module.Graph(inputs=input_node1, outputs=output_node)
assert "Inputs and outputs not connected." in str(info.value)
def test_hyper_graph_cycle():
input_node1 = ak.Input()
input_node2 = ak.Input()
output_node1 = ak.DenseBlock()(input_node1)
output_node2 = ak.DenseBlock()(input_node2)
output_node = ak.Merge()([output_node1, output_node2])
head = ak.RegressionHead()
output_node = head(output_node)
head.outputs = output_node1
with pytest.raises(ValueError) as info:
graph_module.Graph(
inputs=[input_node1, input_node2], outputs=output_node
)
assert "The network has a cycle." in str(info.value)
def test_input_missing():
input_node1 = ak.Input()
input_node2 = ak.Input()
output_node1 = ak.DenseBlock()(input_node1)
output_node2 = ak.DenseBlock()(input_node2)
output_node = ak.Merge()([output_node1, output_node2])
output_node = ak.RegressionHead()(output_node)
with pytest.raises(ValueError) as info:
graph_module.Graph(inputs=input_node1, outputs=output_node)
assert "A required input is missing for HyperModel" in str(info.value)
def test_graph_basics():
input_node = ak.Input(shape=(30,))
output_node = input_node
output_node = ak.DenseBlock()(output_node)
output_node = ak.RegressionHead(shape=(1,))(output_node)
model = graph_module.Graph(inputs=input_node, outputs=output_node).build(
keras_tuner.HyperParameters()
)
assert model.input_shape == (None, 30)
assert model.output_shape == (None, 1)
def test_adamw_optimizer():
input_node = ak.Input(shape=(30,))
output_node = input_node
output_node = ak.DenseBlock()(output_node)
output_node = ak.RegressionHead(shape=(1,))(output_node)
hp = keras_tuner.HyperParameters()
hp.Choice("optimizer", ["adam", "sgd", "adam_weight_decay"], default="adam")
hp.values["optimizer"] = "adam_weight_decay"
graph = graph_module.Graph(inputs=input_node, outputs=output_node)
graph.num_samples = 10000
graph.inputs[0].batch_size = 32
graph.epochs = 10
model = graph.build(hp)
assert model.input_shape == (None, 30)
assert model.output_shape == (None, 1)
def test_graph_save_load(tmp_path):
input1 = ak.Input()
input2 = ak.Input()
output1 = ak.DenseBlock()(input1)
output2 = ak.ConvBlock()(input2)
output = ak.Merge()([output1, output2])
output1 = ak.RegressionHead()(output)
output2 = ak.ClassificationHead()(output)
graph = graph_module.Graph(
inputs=[input1, input2],
outputs=[output1, output2],
)
path = os.path.join(tmp_path, "graph")
graph.save(path)
graph = graph_module.load_graph(path)
assert len(graph.inputs) == 2
assert len(graph.outputs) == 2
assert isinstance(graph.inputs[0].out_blocks[0], ak.DenseBlock)
assert isinstance(graph.inputs[1].out_blocks[0], ak.ConvBlock)
def test_merge():
input_node1 = ak.Input(shape=(30,))
input_node2 = ak.Input(shape=(40,))
output_node1 = ak.DenseBlock()(input_node1)
output_node2 = ak.DenseBlock()(input_node2)
output_node = ak.Merge()([output_node1, output_node2])
output_node = ak.RegressionHead(shape=(1,))(output_node)
model = graph_module.Graph(
inputs=[input_node1, input_node2], outputs=output_node
).build(keras_tuner.HyperParameters())
assert model.input_shape == [(None, 30), (None, 40)]
assert model.output_shape == (None, 1)
def test_save_custom_metrics_loss(tmp_path):
def custom_metric(y_pred, y_true):
return 1
def custom_loss(y_pred, y_true):
return y_pred - y_true
head = ak.ClassificationHead(
loss=custom_loss, metrics=["accuracy", custom_metric]
)
input_node = ak.Input()
output_node = head(input_node)
graph = graph_module.Graph(input_node, output_node)
path = os.path.join(tmp_path, "graph")
graph.save(path)
new_graph = graph_module.load_graph(
path,
custom_objects={
"custom_metric": custom_metric,
"custom_loss": custom_loss,
},
)
assert new_graph.blocks[0].metrics[1](0, 0) == 1
assert new_graph.blocks[0].loss(3, 2) == 1
def test_cat_to_num_with_img_input_error():
input_node = ak.ImageInput()
output_node = ak.CategoricalToNumerical()(input_node)
with pytest.raises(TypeError) as info:
graph_module.Graph(input_node, outputs=output_node).compile()
assert "CategoricalToNumerical can only be used" in str(info.value)
def test_graph_can_init_with_one_missing_output():
input_node = ak.ImageInput()
output_node = ak.ConvBlock()(input_node)
output_node = ak.RegressionHead()(output_node)
ak.ClassificationHead()(output_node)
graph_module.Graph(input_node, output_node)
| autokeras/autokeras/graph_test.py/0 | {
"file_path": "autokeras/autokeras/graph_test.py",
"repo_id": "autokeras",
"token_count": 2292
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from autokeras.engine import preprocessor
class Encoder(preprocessor.TargetPreprocessor):
"""Transform labels to encodings.
# Arguments
labels: A list of labels of any type. The labels to be encoded.
"""
def __init__(self, labels, **kwargs):
super().__init__(**kwargs)
self.labels = [
label.decode("utf-8") if isinstance(label, bytes) else str(label)
for label in labels
]
def get_config(self):
return {"labels": self.labels}
def fit(self, dataset):
return
def transform(self, dataset):
"""Transform labels to integer encodings.
# Arguments
dataset: tf.data.Dataset. The dataset to be transformed.
# Returns
tf.data.Dataset. The transformed dataset.
"""
keys_tensor = tf.constant(self.labels)
vals_tensor = tf.constant(list(range(len(self.labels))))
table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1
)
return dataset.map(lambda x: table.lookup(tf.reshape(x, [-1])))
class OneHotEncoder(Encoder):
def transform(self, dataset):
"""Transform labels to one-hot encodings.
# Arguments
dataset: tf.data.Dataset. The dataset to be transformed.
# Returns
tf.data.Dataset. The transformed dataset.
"""
dataset = super().transform(dataset)
eye = tf.eye(len(self.labels))
dataset = dataset.map(lambda x: tf.nn.embedding_lookup(eye, x))
return dataset
def postprocess(self, data):
"""Transform probabilities back to labels.
# Arguments
data: numpy.ndarray. The output probabilities of the classification
head.
# Returns
numpy.ndarray. The original labels.
"""
return np.array(
list(
map(
lambda x: self.labels[x],
np.argmax(np.array(data), axis=1),
)
)
).reshape(-1, 1)
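# Example (a minimal sketch; `_demo_one_hot` is hypothetical):
def _demo_one_hot():
    encoder = OneHotEncoder(labels=["cat", "dog"])
    dataset = tf.data.Dataset.from_tensor_slices(tf.constant(["dog", "cat"]))
    encoded = encoder.transform(dataset)  # yields [[0., 1.]], then [[1., 0.]]
    probabilities = np.array([[0.2, 0.8]])
    return encoded, encoder.postprocess(probabilities)  # -> [["dog"]]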
class LabelEncoder(Encoder):
"""Transform the labels to integer encodings."""
def transform(self, dataset):
"""Transform labels to integer encodings.
# Arguments
dataset: tf.data.Dataset. The dataset to be transformed.
# Returns
tf.data.Dataset. The transformed dataset.
"""
dataset = super().transform(dataset)
dataset = dataset.map(lambda x: tf.expand_dims(x, axis=-1))
return dataset
def postprocess(self, data):
"""Transform probabilities back to labels.
# Arguments
data: numpy.ndarray. The output probabilities of the classification
head.
# Returns
numpy.ndarray. The original labels.
"""
return np.array(
list(map(lambda x: self.labels[int(round(x[0]))], np.array(data)))
).reshape(-1, 1)
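# Example (a sketch; `_demo_label_encoder` is hypothetical): transform maps
# "dog" to [[1]], and postprocess rounds scores back to labels.
def _demo_label_encoder():
    encoder = LabelEncoder(labels=["cat", "dog"])
    return encoder.postprocess(np.array([[0.8]]))  # -> [["dog"]]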
| autokeras/autokeras/preprocessors/encoders.py/0 | {
"file_path": "autokeras/autokeras/preprocessors/encoders.py",
"repo_id": "autokeras",
"token_count": 1524
} | 2 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
import tensorflow as tf
from tensorflow import keras
from autokeras import auto_model
from autokeras import blocks
from autokeras import nodes as input_module
from autokeras.engine import tuner
from autokeras.tuners import greedy
from autokeras.tuners import task_specific
from autokeras.utils import types
class SupervisedImagePipeline(auto_model.AutoModel):
def __init__(self, outputs, **kwargs):
super().__init__(
inputs=input_module.ImageInput(), outputs=outputs, **kwargs
)
class ImageClassifier(SupervisedImagePipeline):
"""AutoKeras image classification class.
# Arguments
num_classes: Int. Defaults to None. If None, it will be inferred from
the data.
multi_label: Boolean. Defaults to False.
        loss: A Keras loss function. Defaults to 'binary_crossentropy' or
            'categorical_crossentropy' based on the number of classes.
        metrics: A list of Keras metrics. Defaults to 'accuracy'.
project_name: String. The name of the AutoModel.
Defaults to 'image_classifier'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to
100.
directory: String. The path to a directory for storing the search
outputs. Defaults to None, which would create a folder with the
name of the AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
tuner: String or subclass of AutoTuner. If string, it should be one of
'greedy', 'bayesian', 'hyperband' or 'random'. It can also be a
subclass of AutoTuner. If left unspecified, it uses a task specific
tuner, which first evaluates the most commonly used models for the
task before exploring other models.
overwrite: Boolean. Defaults to `False`. If `False`, reloads an existing
project of the same name if one is found. Otherwise, overwrites the
project.
seed: Int. Random seed.
max_model_size: Int. Maximum number of scalars in the parameters of a
model. Models larger than this are rejected.
**kwargs: Any arguments supported by AutoModel.
"""
def __init__(
self,
num_classes: Optional[int] = None,
multi_label: bool = False,
loss: types.LossType = None,
metrics: Optional[types.MetricsType] = None,
project_name: str = "image_classifier",
max_trials: int = 100,
directory: Union[str, Path, None] = None,
objective: str = "val_loss",
tuner: Union[str, Type[tuner.AutoTuner]] = None,
overwrite: bool = False,
seed: Optional[int] = None,
max_model_size: Optional[int] = None,
**kwargs
):
if tuner is None:
tuner = task_specific.ImageClassifierTuner
super().__init__(
outputs=blocks.ClassificationHead(
num_classes=num_classes,
multi_label=multi_label,
loss=loss,
metrics=metrics,
),
max_trials=max_trials,
directory=directory,
project_name=project_name,
objective=objective,
tuner=tuner,
overwrite=overwrite,
seed=seed,
max_model_size=max_model_size,
**kwargs
)
def fit(
self,
x: Optional[types.DatasetType] = None,
y: Optional[types.DatasetType] = None,
epochs: Optional[int] = None,
callbacks: Optional[List[keras.callbacks.Callback]] = None,
validation_split: Optional[float] = 0.2,
validation_data: Union[
tf.data.Dataset, Tuple[types.DatasetType, types.DatasetType], None
] = None,
**kwargs
):
"""Search for the best model and hyperparameters for the AutoModel.
It will search for the best model based on the performances on
validation data.
# Arguments
x: numpy.ndarray or tensorflow.Dataset. Training data x. The shape
of the data should be (samples, width, height) or (samples,
width, height, channels).
y: numpy.ndarray or tensorflow.Dataset. Training data y. It can be
raw labels, one-hot encoded if more than two classes, or binary
encoded for binary classification.
epochs: Int. The number of epochs to train each model during the
search. If unspecified, by default we train for a maximum of
1000 epochs, but we stop training if the validation loss stops
improving for 10 epochs (unless you specified an EarlyStopping
callback as part of the callbacks argument, in which case the
EarlyStopping callback you specified will determine early
stopping).
callbacks: List of Keras callbacks to apply during training and
validation.
validation_split: Float between 0 and 1. Defaults to 0.2. Fraction
of the training data to be used as validation data. The model
will set apart this fraction of the training data, will not
train on it, and will evaluate the loss and any model metrics on
this data at the end of each epoch. The validation data is
selected from the last samples in the `x` and `y` data provided,
before shuffling. This argument is not supported when `x` is a
dataset. The best model found would be fit on the entire dataset
including the validation data.
validation_data: Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained
on this data. `validation_data` will override
`validation_split`. The type of the validation data should be
the same as the training data. The best model found would be
fit on the training dataset without the validation data.
**kwargs: Any arguments supported by
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit).
# Returns
history: A Keras History object corresponding to the best model.
Its History.history attribute is a record of training loss
values and metrics values at successive epochs, as well as
validation loss values and validation metrics values (if
applicable).
"""
history = super().fit(
x=x,
y=y,
epochs=epochs,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
**kwargs
)
return history
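# Example (a minimal sketch; `x_train` and `y_train` are assumed NumPy
# arrays of images and integer labels, not defined here):
def _demo_image_classifier(x_train, y_train):
    clf = ImageClassifier(max_trials=2, overwrite=True)
    clf.fit(x_train, y_train, epochs=1)
    return clf.predict(x_train)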
class ImageRegressor(SupervisedImagePipeline):
"""AutoKeras image regression class.
# Arguments
output_dim: Int. The number of output dimensions. Defaults to None.
If None, it will be inferred from the data.
        loss: A Keras loss function. Defaults to 'mean_squared_error'.
        metrics: A list of Keras metrics. Defaults to 'mean_squared_error'.
project_name: String. The name of the AutoModel.
Defaults to 'image_regressor'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to
100.
directory: String. The path to a directory for storing the search
outputs. Defaults to None, which would create a folder with the
name of the AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
tuner: String or subclass of AutoTuner. If string, it should be one of
'greedy', 'bayesian', 'hyperband' or 'random'. It can also be a
subclass of AutoTuner. If left unspecified, it uses a task specific
tuner, which first evaluates the most commonly used models for the
task before exploring other models.
overwrite: Boolean. Defaults to `False`. If `False`, reloads an existing
project of the same name if one is found. Otherwise, overwrites the
project.
seed: Int. Random seed.
max_model_size: Int. Maximum number of scalars in the parameters of a
model. Models larger than this are rejected.
**kwargs: Any arguments supported by AutoModel.
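    # Example
    A minimal usage sketch (illustrative only: the array shapes, sample
    count and `max_trials` below are placeholders, not defaults):
    ```python
    import numpy as np
    import autokeras as ak

    x = np.random.rand(100, 32, 32, 3)
    y = np.random.rand(100, 1)
    reg = ak.ImageRegressor(max_trials=2)
    reg.fit(x, y, epochs=1)
    ```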
"""
def __init__(
self,
output_dim: Optional[int] = None,
loss: types.LossType = "mean_squared_error",
metrics: Optional[types.MetricsType] = None,
project_name: str = "image_regressor",
max_trials: int = 100,
directory: Union[str, Path, None] = None,
objective: str = "val_loss",
tuner: Union[str, Type[tuner.AutoTuner]] = None,
overwrite: bool = False,
seed: Optional[int] = None,
max_model_size: Optional[int] = None,
**kwargs
):
if tuner is None:
tuner = greedy.Greedy
super().__init__(
outputs=blocks.RegressionHead(
output_dim=output_dim, loss=loss, metrics=metrics
),
max_trials=max_trials,
directory=directory,
project_name=project_name,
objective=objective,
tuner=tuner,
overwrite=overwrite,
seed=seed,
max_model_size=max_model_size,
**kwargs
)
def fit(
self,
x: Optional[types.DatasetType] = None,
y: Optional[types.DatasetType] = None,
epochs: Optional[int] = None,
callbacks: Optional[List[keras.callbacks.Callback]] = None,
validation_split: Optional[float] = 0.2,
validation_data: Union[
types.DatasetType, Tuple[types.DatasetType], None
] = None,
**kwargs
):
"""Search for the best model and hyperparameters for the AutoModel.
        It will search for the best model based on the performance on the
        validation data.
# Arguments
x: numpy.ndarray or tensorflow.Dataset. Training data x. The shape
of the data should be (samples, width, height) or (samples,
width, height, channels).
            y: numpy.ndarray or tensorflow.Dataset. Training data y. The
                targets passed to the head must be a tf.data.Dataset,
                np.ndarray, pd.DataFrame or pd.Series. It can be single-column
                or multi-column. The values should all be numerical.
epochs: Int. The number of epochs to train each model during the
search. If unspecified, by default we train for a maximum of
1000 epochs, but we stop training if the validation loss stops
improving for 10 epochs (unless you specified an EarlyStopping
callback as part of the callbacks argument, in which case the
EarlyStopping callback you specified will determine early
stopping).
callbacks: List of Keras callbacks to apply during training and
validation.
validation_split: Float between 0 and 1. Defaults to 0.2. Fraction
of the training data to be used as validation data. The model
will set apart this fraction of the training data, will not
train on it, and will evaluate the loss and any model metrics on
this data at the end of each epoch. The validation data is
selected from the last samples in the `x` and `y` data provided,
before shuffling. This argument is not supported when `x` is a
dataset. The best model found would be fit on the entire
dataset including the validation data.
validation_data: Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained
on this data. `validation_data` will override
`validation_split`. The type of the validation data should be
the same as the training data. The best model found would be
fit on the training dataset without the validation data.
**kwargs: Any arguments supported by
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit).
# Returns
history: A Keras History object corresponding to the best model.
Its History.history attribute is a record of training
loss values and metrics values at successive epochs, as well as
validation loss values and validation metrics values (if
applicable).
"""
history = super().fit(
x=x,
y=y,
epochs=epochs,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
**kwargs
)
return history
class ImageSegmenter(SupervisedImagePipeline):
"""AutoKeras image segmentation class.
# Arguments
num_classes: Int. Defaults to None. If None, it will be inferred from
the data.
loss: A Keras loss function. Defaults to use 'binary_crossentropy' or
'categorical_crossentropy' based on the number of classes.
metrics: A list of metrics used to measure the accuracy of the model,
default to 'accuracy'.
project_name: String. The name of the AutoModel.
Defaults to 'image_segmenter'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to
100.
directory: String. The path to a directory for storing the search
outputs. Defaults to None, which would create a folder with the
name of the AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
tuner: String or subclass of AutoTuner. If string, it should be one of
'greedy', 'bayesian', 'hyperband' or 'random'. It can also be a
subclass of AutoTuner. If left unspecified, it uses a task specific
tuner, which first evaluates the most commonly used models for the
task before exploring other models.
overwrite: Boolean. Defaults to `False`. If `False`, reloads an existing
project of the same name if one is found. Otherwise, overwrites the
project.
seed: Int. Random seed.
**kwargs: Any arguments supported by AutoModel.
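    # Example
    A minimal usage sketch (illustrative shapes; assumes `ImageSegmenter`
    is exported at the package top level in the installed version):
    ```python
    import numpy as np
    import autokeras as ak

    x = np.random.rand(8, 64, 64, 3)
    y = np.random.randint(0, 2, size=(8, 64, 64, 1))
    seg = ak.ImageSegmenter(max_trials=1)
    seg.fit(x, y, epochs=1)
    ```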
"""
def __init__(
self,
num_classes: Optional[int] = None,
loss: types.LossType = None,
metrics: Optional[types.MetricsType] = None,
project_name: str = "image_segmenter",
max_trials: int = 100,
directory: Union[str, Path, None] = None,
objective: str = "val_loss",
tuner: Union[str, Type[tuner.AutoTuner]] = None,
overwrite: bool = False,
seed: Optional[int] = None,
**kwargs
):
if tuner is None:
tuner = greedy.Greedy
super().__init__(
outputs=blocks.SegmentationHead(
num_classes=num_classes, loss=loss, metrics=metrics
),
max_trials=max_trials,
directory=directory,
project_name=project_name,
objective=objective,
tuner=tuner,
overwrite=overwrite,
seed=seed,
**kwargs
)
def fit(
self,
x: Optional[types.DatasetType] = None,
y: Optional[types.DatasetType] = None,
epochs: Optional[int] = None,
callbacks: Optional[List[keras.callbacks.Callback]] = None,
validation_split: Optional[float] = 0.2,
validation_data: Union[
types.DatasetType, Tuple[types.DatasetType], None
] = None,
**kwargs
):
"""Search for the best model and hyperparameters for the AutoModel.
        It will search for the best model based on the performance on the
        validation data.
# Arguments
x: numpy.ndarray or tensorflow.Dataset. Training image dataset x.
The shape of the data should be (samples, width, height) or
(samples, width, height, channels).
            y: numpy.ndarray or tensorflow.Dataset. Training image dataset y.
                It should be a tensor whose height and width match those of
                x. Each element in the tensor is the label of the
                corresponding pixel.
epochs: Int. The number of epochs to train each model during the
search. If unspecified, by default we train for a maximum of
1000 epochs, but we stop training if the validation loss stops
improving for 10 epochs (unless you specified an EarlyStopping
callback as part of the callbacks argument, in which case the
EarlyStopping callback you specified will determine early
stopping).
callbacks: List of Keras callbacks to apply during training and
validation.
validation_split: Float between 0 and 1. Defaults to 0.2. Fraction
of the training data to be used as validation data. The model
will set apart this fraction of the training data, will not
train on it, and will evaluate the loss and any model metrics on
this data at the end of each epoch. The validation data is
selected from the last samples in the `x` and `y` data provided,
before shuffling. This argument is not supported when `x` is a
dataset. The best model found would be fit on the entire
dataset including the validation data.
validation_data: Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained
on this data. `validation_data` will override
`validation_split`. The type of the validation data should be
the same as the training data. The best model found would be
fit on the training dataset without the validation data.
**kwargs: Any arguments supported by
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit).
# Returns
history: A Keras History object corresponding to the best model.
Its History.history attribute is a record of training
loss values and metrics values at successive epochs, as well as
validation loss values and validation metrics values (if
applicable).
"""
history = super().fit(
x=x,
y=y,
epochs=epochs,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
**kwargs
)
return history
class ImageObjectDetector(SupervisedImagePipeline):
"""AutoKeras image object detector class.
# Arguments
num_classes: Int. Defaults to None. If None, it will be inferred from
the data.
multi_label: Boolean. Defaults to False.
loss: A Keras loss function. Defaults to use 'binary_crossentropy' or
'categorical_crossentropy' based on the number of classes.
metrics: A list of Keras metrics. Defaults to use 'accuracy'.
        project_name: String. The name of the AutoModel.
            Defaults to 'image_object_detector'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to
100.
directory: String. The path to a directory for storing the search
outputs. Defaults to None, which would create a folder with the
name of the AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
tuner: String or subclass of AutoTuner. If string, it should be one of
'greedy', 'bayesian', 'hyperband' or 'random'. It can also be a
subclass of AutoTuner. If left unspecified, it uses a task specific
tuner, which first evaluates the most commonly used models for the
task before exploring other models.
overwrite: Boolean. Defaults to `False`. If `False`, reloads an existing
project of the same name if one is found. Otherwise, overwrites the
project.
seed: Int. Random seed.
max_model_size: Int. Maximum number of scalars in the parameters of a
model. Models larger than this are rejected.
**kwargs: Any arguments supported by AutoModel.
"""
def __init__(
self,
num_classes: Optional[int] = None,
multi_label: bool = False,
loss: types.LossType = None,
metrics: Optional[types.MetricsType] = None,
        project_name: str = "image_object_detector",
max_trials: int = 100,
directory: Union[str, Path, None] = None,
objective: str = "val_loss",
tuner: Union[str, Type[tuner.AutoTuner]] = None,
overwrite: bool = False,
seed: Optional[int] = None,
max_model_size: Optional[int] = None,
**kwargs
):
pass # pragma: no cover
def fit(
self,
x: Optional[types.DatasetType] = None,
y: Optional[types.DatasetType] = None,
epochs: Optional[int] = None,
callbacks: Optional[List[keras.callbacks.Callback]] = None,
validation_split: Optional[float] = 0.2,
validation_data: Union[
tf.data.Dataset, Tuple[types.DatasetType, types.DatasetType], None
] = None,
**kwargs
):
"""Search for the best model and hyperparameters for the AutoModel.
        It will search for the best model based on the performance on the
        validation data.
# Arguments
            x: numpy.ndarray or tensorflow.Dataset. Training data x. The shape
                of the data should be (samples, width, height) or (samples,
                width, height, channels). If it is a tensorflow.Dataset, only
                x is used, and each sample contains an image together with its
                corresponding (bboxes, classIDs).
            y: numpy.ndarray. Training data y. These are the tuples of
                bounding boxes and their corresponding class IDs w.r.t. the
                images in x. Each bounding box is defined by 4 values
                [ymin, xmin, ymax, xmax]. Box coordinates are measured from
                the top-left image corner, are 0-indexed, and are normalized
                to the image sides, i.e. in [0, 1]. The shape of the bounding
                boxes should be (None, 4), and the shape of the classIDs
                should be (None,) in each tuple, where None represents the
                number of bounding boxes in a single image.
epochs: Int. The number of epochs to train each model during the
search. If unspecified, by default we train for a maximum of
1000 epochs, but we stop training if the validation loss stops
improving for 10 epochs (unless you specified an EarlyStopping
callback as part of the callbacks argument, in which case the
EarlyStopping callback you specified will determine early
stopping).
callbacks: List of Keras callbacks to apply during training and
validation.
validation_split: Float between 0 and 1. Defaults to 0.2. Fraction
of the training data to be used as validation data. The model
will set apart this fraction of the training data, will not
train on it, and will evaluate the loss and any model metrics on
this data at the end of each epoch. The validation data is
selected from the last samples in the `x` and `y` data provided,
before shuffling. This argument is not supported when `x` is a
dataset. The best model found would be fit on the entire
dataset including the validation data.
validation_data: Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained
on this data. `validation_data` will override
`validation_split`. The type of the validation data should be
the same as the training data. The best model found would be
fit on the training dataset without the validation data.
**kwargs: Any arguments supported by
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit).
"""
pass # pragma: no cover
def predict(self, x, **kwargs):
"""Predict the output for a given testing data.
# Arguments
x: numpy.ndarray or tensorflow.Dataset. Testing data x. The shape of
the data should be (samples, width, height) or (samples, width,
height, channels).
**kwargs: Any arguments supported by keras.Model.predict.
# Returns
        labels: [batch_size, 3] shaped tensor containing tuples of (bboxes,
            classIDs, scores) for each image in the testing data x, where
            each bounding box is defined by 4 values [ymin, xmin, ymax,
            xmax]. Box coordinates are measured from the top-left image
            corner, are 0-indexed, and are normalized to the image sides,
            i.e. in [0, 1]. The shape of the bounding boxes should be
            (None, 4), and the shape of the classIDs should be (None,) in
            each tuple, where None represents the number of bounding boxes
            detected in an image. The scores denote the probability with
            which a class is detected in the corresponding bounding box.
"""
pass # pragma: no cover
| autokeras/autokeras/tasks/image.py/0 | {
"file_path": "autokeras/autokeras/tasks/image.py",
"repo_id": "autokeras",
"token_count": 11395
} | 3 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import timeit
class Experiment(object):
def __init__(self, name, tmp_dir="tmp_dir"):
self.name = name
self.tmp_dir = tmp_dir
def get_auto_model(self):
raise NotImplementedError
@staticmethod
def load_data():
raise NotImplementedError
def run_once(self):
(x_train, y_train), (x_test, y_test) = self.load_data()
auto_model = self.get_auto_model()
start_time = timeit.default_timer()
auto_model.fit(x_train, y_train)
stop_time = timeit.default_timer()
accuracy = auto_model.evaluate(x_test, y_test)[1]
total_time = stop_time - start_time
return total_time, accuracy
def run(self, repeat_times=1):
total_times = []
metric_values = []
for i in range(repeat_times):
total_time, metric = self.run_once()
total_times.append(total_time)
metric_values.append(metric)
self.tear_down()
return total_times, metric_values
def tear_down(self):
shutil.rmtree(self.tmp_dir)
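# Illustrative subclass sketch (hypothetical, not part of the suite;
# `autokeras` and the Keras MNIST loader are assumed to be available and are
# not imports of this module):
#
#     import autokeras as ak
#     from tensorflow.keras.datasets import mnist
#
#     class MnistExperiment(Experiment):
#         def get_auto_model(self):
#             return ak.ImageClassifier(max_trials=1, directory=self.tmp_dir)
#
#         @staticmethod
#         def load_data():
#             return mnist.load_data()
#
#     times, metrics = MnistExperiment("mnist").run(repeat_times=1)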
| autokeras/benchmark/experiments/experiment.py/0 | {
"file_path": "autokeras/benchmark/experiments/experiment.py",
"repo_id": "autokeras",
"token_count": 653
} | 4 |
<jupyter_start><jupyter_code>!pip install autokeras
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import load_model
import autokeras as ak<jupyter_output><empty_output><jupyter_text>You can easily export the best model found by AutoKeras as a Keras Model. The following example uses [ImageClassifier](/image_classifier) as an example. All the tasks and the [AutoModel](/auto_model/automodel-class) have this [export_model](/auto_model/export_model-method) function.<jupyter_code>print(tf.__version__)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Initialize the image classifier.
clf = ak.ImageClassifier(
overwrite=True, max_trials=1
)  # Try only 1 model. (Increase accordingly.)
# Feed the image classifier with training data.
clf.fit(x_train, y_train, epochs=1)  # Increase the number of epochs to improve the model.
# Export as a Keras Model.
model = clf.export_model()
print(type(model)) # <class 'tensorflow.python.keras.engine.training.Model'>
try:
model.save("model_autokeras", save_format="tf")
except Exception:
model.save("model_autokeras.h5")
loaded_model = load_model("model_autokeras", custom_objects=ak.CUSTOM_OBJECTS)
predicted_y = loaded_model.predict(tf.expand_dims(x_test, -1))
print(predicted_y)<jupyter_output><empty_output> | autokeras/docs/ipynb/export.ipynb/0 | {
"file_path": "autokeras/docs/ipynb/export.ipynb",
"repo_id": "autokeras",
"token_count": 469
} | 5 |
import importlib
import inspect
import os
import re
def count_leading_spaces(s):
ws = re.search(r"\S", s)
if ws:
return ws.start()
else:
return 0
def insert_in_file(markdown_text, file_path):
"""Save module page.
Either insert content into existing page,
or create page otherwise."""
if file_path.exists():
template = file_path.read_text(encoding="utf-8")
if "{{autogenerated}}" not in template:
raise RuntimeError(
f"Template found for {file_path} but missing "
f"{{autogenerated}} tag."
)
markdown_text = template.replace("{{autogenerated}}", markdown_text)
print("...inserting autogenerated content into template:", file_path)
else:
print("...creating new page with autogenerated content:", file_path)
os.makedirs(file_path.parent, exist_ok=True)
file_path.write_text(markdown_text, encoding="utf-8")
def code_snippet(snippet):
return f"```python\n{snippet}\n```\n"
def make_source_link(cls, project_url):
if isinstance(project_url, dict):
base_module = cls.__module__.split(".")[0]
project_url = project_url[base_module]
path = cls.__module__.replace(".", "/")
line = inspect.getsourcelines(cls)[-1]
return (
f'<span style="float:right;">'
f"[[source]]({project_url}/{path}.py#L{line})"
f"</span>"
)
def format_classes_list(classes, page_name):
for i in range(len(classes)):
if not isinstance(classes[i], (list, tuple)):
classes[i] = (classes[i], [])
for class_, class_methods in classes:
if not inspect.isclass(class_):
# TODO: add a test for this
raise TypeError(
f"{class_} was given in the class list "
f"of {page_name} but {class_} is not a Python class."
)
return classes
def get_class_from_method(meth):
"""See
https://stackoverflow.com/questions/3589311/
get-defining-class-of-unbound-method-object-in-python-3/
25959545#25959545
"""
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return cls
meth = meth.__func__ # fallback to __qualname__ parsing
if inspect.isfunction(meth):
cls = getattr(
inspect.getmodule(meth),
meth.__qualname__.split(".<locals>", 1)[0].rsplit(".", 1)[0],
)
if isinstance(cls, type):
return cls
return getattr(
meth, "__objclass__", None
) # handle special descriptor objects
def ismethod(function):
return get_class_from_method(function) is not None
def import_object(string: str):
"""Import an object from a string.
The object can be a function, class or method.
For example: `'keras.layers.Dense.get_weights'` is valid.
"""
last_object_got = None
seen_names = []
for name in string.split("."):
seen_names.append(name)
try:
last_object_got = importlib.import_module(".".join(seen_names))
except ModuleNotFoundError:
last_object_got = getattr(last_object_got, name)
return last_object_got
def get_type(object_) -> str:
if inspect.isclass(object_):
return "class"
elif ismethod(object_):
return "method"
elif inspect.isfunction(object_):
return "function"
else:
raise TypeError(
f"{object_} is detected as neither a class, a method nor"
f"a function."
)
def insert_in_string(target, string_to_insert, start, end):
target_start_cut = target[:start]
target_end_cut = target[end:]
return target_start_cut + string_to_insert + target_end_cut
def remove_indentation(string):
string = string.replace("\n ", "\n")
if string[:4] == " ":
string = string[4:]
return string
def get_dotted_path(class_):
return f"{class_.__module__}.{class_.__qualname__}"
| autokeras/docs/keras_autodoc/utils.py/0 | {
"file_path": "autokeras/docs/keras_autodoc/utils.py",
"repo_id": "autokeras",
"token_count": 1792
} | 6 |
# Benchmarks
We track the performance of the latest AutoKeras release on the benchmark datasets.
Tested on a single NVIDIA Tesla V100 GPU.
| Name | API | Metric | Results | GPU Days |
| - | - | - | - | - |
| [MNIST](http://yann.lecun.com/exdb/mnist/) | ImageClassifier| Accuracy | 99.04% | 0.51 |
| [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) | ImageClassifier| Accuracy | 97.10% | 1.8 |
| [IMDB](https://ai.stanford.edu/~amaas/data/sentiment/) | TextClassifier | Accuracy | 93.93% | 1.2 |
| [Titanic](https://www.tensorflow.org/datasets/catalog/titanic) | StructuredDataClassifier | Accuracy | 82.20% | 0.007 |
| [California Housing](https://scikit-learn.org/stable/datasets/index.html#california-housing-dataset) | StructuredDataRegression | MSE | 0.23 | 0.06 | | autokeras/docs/templates/benchmarks.md/0 | {
"file_path": "autokeras/docs/templates/benchmarks.md",
"repo_id": "autokeras",
"token_count": 290
} | 7 |
"""
Regression tasks estimate a numeric variable, such as the price of a house or
voter turnout.
This example is adapted from a
[notebook](https://gist.github.com/mapmeld/98d1e9839f2d1f9c4ee197953661ed07)
which estimates a person's age from their image, trained on the
[IMDB-WIKI](https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/) photographs
of famous
people.
First, prepare your image data in a numpy.ndarray or tensorflow.Dataset format.
Each image must have the same shape, meaning each has the same width, height,
and color channels as other images in the set.
"""
from datetime import datetime
from datetime import timedelta
import numpy as np
import pandas as pd
import tensorflow as tf
from google.colab import drive
from PIL import Image
from scipy.io import loadmat
import autokeras as ak
"""
### Connect your Google Drive for Data
"""
drive.mount("/content/drive")
"""
### Install AutoKeras and TensorFlow
Download the master branch to your Google Drive for this tutorial. In general,
you can use *pip install autokeras* .
"""
"""shell
!pip install -v "/content/drive/My Drive/AutoKeras-dev/autokeras-master.zip"
!pip uninstall keras-tuner
!pip install git+git://github.com/keras-team/keras-tuner.git@d2d69cba21a0b482a85ce2a38893e2322e139c01
"""
"""shell
!pip install tensorflow==2.2.0
"""
"""
###**Import IMDB Celeb images and metadata**
"""
"""shell
!mkdir "./drive/My Drive/mlin/celebs"
"""
"""shell
! wget -O "./drive/My Drive/mlin/celebs/imdb_0.tar" https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/imdb_0.tar
"""
"""shell
! cd "./drive/My Drive/mlin/celebs" && tar -xf imdb_0.tar
! rm "./drive/My Drive/mlin/celebs/imdb_0.tar"
"""
"""
Uncomment and run the cell below if you need to re-run the later cells and
don't want to install everything again from the beginning.
"""
# ! cd ./drive/My\ Drive/mlin/celebs.
"""shell
! ls "./drive/My Drive/mlin/celebs/imdb/"
"""
"""shell
! wget https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/imdb_meta.tar
! tar -xf imdb_meta.tar
! rm imdb_meta.tar
"""
"""
###**Converting from MATLAB date to actual Date-of-Birth**
"""
def datenum_to_datetime(datenum):
"""
Convert Matlab datenum into Python datetime.
"""
days = datenum % 1
hours = days % 1 * 24
minutes = hours % 1 * 60
seconds = minutes % 1 * 60
try:
return (
datetime.fromordinal(int(datenum))
+ timedelta(days=int(days))
+ timedelta(hours=int(hours))
+ timedelta(minutes=int(minutes))
+ timedelta(seconds=round(seconds))
- timedelta(days=366)
)
except Exception:
return datenum_to_datetime(700000)
print(datenum_to_datetime(734963))
"""
### **Opening MatLab file to Pandas DataFrame**
"""
x = loadmat("imdb/imdb.mat")
mdata = x["imdb"] # variable in mat file
mdtype = mdata.dtype # dtypes of structures are "unsized objects"
ndata = {n: mdata[n][0, 0] for n in mdtype.names}
columns = [n for n, v in ndata.items()]
rows = []
for col in range(0, 10):
values = list(ndata.items())[col]
for num, val in enumerate(values[1][0], start=0):
if col == 0:
rows.append([])
if num > 0:
if columns[col] == "dob":
rows[num].append(datenum_to_datetime(int(val)))
elif columns[col] == "photo_taken":
rows[num].append(datetime(year=int(val), month=6, day=30))
else:
rows[num].append(val)
dt = map(lambda row: np.array(row), np.array(rows[1:]))
df = pd.DataFrame(data=dt, index=range(0, len(rows) - 1), columns=columns)
print(df.head())
print(columns)
print(df["full_path"])
"""
### **Calculating age at time photo was taken**
"""
df["age"] = (df["photo_taken"] - df["dob"]).astype("int") / 31558102e9
print(df["age"])
"""
### **Creating dataset**
* We sample 200 of the images which were included in this first download.
* Images are resized to 128x128 to standardize shape and conserve memory
* RGB images are converted to grayscale to standardize shape
* Ages are converted to ints
"""
def df2numpy(train_set):
images = []
for img_path in train_set["full_path"]:
img = (
Image.open("./drive/My Drive/mlin/celebs/imdb/" + img_path[0])
.resize((128, 128))
.convert("L")
)
images.append(np.asarray(img, dtype="int32"))
image_inputs = np.array(images)
ages = train_set["age"].astype("int").to_numpy()
return image_inputs, ages
train_set = df[df["full_path"] < "02"].sample(200)
train_imgs, train_ages = df2numpy(train_set)
test_set = df[df["full_path"] < "02"].sample(100)
test_imgs, test_ages = df2numpy(test_set)
"""
### **Training using AutoKeras**
"""
# Initialize the image regressor
reg = ak.ImageRegressor(max_trials=15) # AutoKeras tries 15 different models.
# Find the best model for the given training data
reg.fit(train_imgs, train_ages)
# Predict with the chosen model:
# predict_y = reg.predict(test_imgs)  # Uncomment if required
# Evaluate the chosen model with testing data
print(reg.evaluate(train_imgs, train_ages))
"""
### **Validation Data**
By default, AutoKeras uses the last 20% of the training data as validation
data. As shown in the example below, you can use validation_split to specify
the percentage.
"""
reg.fit(
train_imgs,
train_ages,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=3,
)
"""
You can also use your own validation set instead of splitting it from the
training data with validation_data.
"""
split = 150  # Hold out the last 50 of the 200 sampled training images.
x_val = train_imgs[split:]
y_val = train_ages[split:]
x_train = train_imgs[:split]
y_train = train_ages[:split]
reg.fit(
x_train,
y_train,
# Use your own validation set.
validation_data=(x_val, y_val),
epochs=3,
)
"""
### **Customized Search Space**
For advanced users, you may customize your search space by using AutoModel
instead of ImageRegressor. You can configure the ImageBlock for some high-level
configurations, e.g., block_type for the type of neural network to search,
normalize for whether to do data normalization, augment for whether to do data
augmentation. You can also choose not to specify these arguments, which would
leave the different choices to be tuned automatically. See the following
example for detail.
"""
input_node = ak.ImageInput()
output_node = ak.ImageBlock(
# Only search ResNet architectures.
block_type="resnet",
# Normalize the dataset.
normalize=True,
# Do not do data augmentation.
augment=False,
)(input_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=10)
reg.fit(x_train, y_train, epochs=3)
"""
The usage of AutoModel is similar to the functional API of Keras. Basically,
you are building a graph whose edges are blocks and whose nodes are
intermediate outputs of blocks. To add an edge from input_node to
output_node, write output_node = ak.some_block(input_node). You can also use
more fine-grained blocks to customize the search space even further. See the
following example.
"""
input_node = ak.ImageInput()
output_node = ak.Normalization()(input_node)
output_node = ak.ImageAugmentation(translation_factor=0.3)(output_node)
output_node = ak.ResNetBlock(version="v2")(output_node)
output_node = ak.RegressionHead()(output_node)
clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=10)
clf.fit(x_train, y_train, epochs=3)
"""
### **Data Format**
"""
"""
The AutoKeras ImageRegressor is quite flexible for the data format.
For the image, it accepts data formats both with and without the channel
dimension. The images in the IMDB-Wiki dataset do not have a channel dimension.
Each image is a matrix with shape (128, 128). AutoKeras also accepts images
with a channel dimension at last, e.g., (32, 32, 3), (28, 28, 1).
For the regression targets, AutoKeras accepts plain numerical labels, i.e. a
vector of integers or floats (here, the ages).
So if you prepare your data in the following way, the ImageRegressor should
still work.
"""
# Reshape the images to have the channel dimension.
train_imgs = train_imgs.reshape(train_imgs.shape + (1,))
test_imgs = test_imgs.reshape(test_imgs.shape + (1,))
print(train_imgs.shape) # (200, 128, 128, 1)
print(test_imgs.shape) # (100, 128, 128, 1)
print(train_ages[:3])
"""
We also support using the tf.data.Dataset format for the training data. In
this case, the images would have to be 3-dimensional, and the numerical
targets (here, the ages) are wrapped together with them into the Dataset.
"""
train_set = tf.data.Dataset.from_tensor_slices(((train_imgs,), (train_ages,)))
test_set = tf.data.Dataset.from_tensor_slices(((test_imgs,), (test_ages,)))
reg = ak.ImageRegressor(max_trials=15)
# Feed the tensorflow Dataset to the classifier.
reg.fit(train_set)
# Predict with the best model.
predicted_y = reg.predict(test_set)
# Evaluate the best model with testing data.
print(reg.evaluate(test_set))
"""
## References
[Main Reference
Notebook](https://gist.github.com/mapmeld/98d1e9839f2d1f9c4ee197953661ed07),
[Dataset](https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/),
[ImageRegressor](/image_regressor),
[ResNetBlock](/block/#resnetblock-class),
[ImageInput](/node/#imageinput-class),
[AutoModel](/auto_model/#automodel-class),
[ImageBlock](/block/#imageblock-class),
[Normalization](/preprocessor/#normalization-class),
[ImageAugmentation](/preprocessor/#image-augmentation-class),
[RegressionHead](/head/#regressionhead-class).
"""
| autokeras/examples/celeb_age.py/0 | {
"file_path": "autokeras/examples/celeb_age.py",
"repo_id": "autokeras",
"token_count": 3530
} | 8 |
pytest --cov-report xml:cov.xml --cov autokeras "$1"
"file_path": "autokeras/shell/coverage.sh",
"repo_id": "autokeras",
"token_count": 21
} | 9 |
# Keras Applications
⚠️ This GitHub repository is now deprecated -- All Keras Applications models have moved into the core Keras repository and the TensorFlow pip package. All code changes and discussion should move to the Keras repository.
For users looking for a place to start using premade models, consult the [Keras API documentation](https://keras.io/api/applications/).
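For reference, a minimal sketch of the migrated API (class and argument names
as in the current `tf.keras` docs; the weights are downloaded on first use):

```python
from tensorflow.keras.applications import VGG16

model = VGG16(weights="imagenet")  # ImageNet-pretrained classifier
model.summary()
```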
| keras-applications/README.md/0 | {
"file_path": "keras-applications/README.md",
"repo_id": "keras-applications",
"token_count": 90
} | 10 |
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
preprocess_input = imagenet_utils.preprocess_input
WEIGHTS_PATH = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.1/'
'vgg16_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.1/'
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')
def VGG16(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the VGG16 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(64, (3, 3),
activation='relu',
padding='same',
name='block1_conv1')(img_input)
x = layers.Conv2D(64, (3, 3),
activation='relu',
padding='same',
name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = layers.Conv2D(128, (3, 3),
activation='relu',
padding='same',
name='block2_conv1')(x)
x = layers.Conv2D(128, (3, 3),
activation='relu',
padding='same',
name='block2_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv1')(x)
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv2')(x)
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv1')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv2')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv1')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv2')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = layers.Flatten(name='flatten')(x)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
x = layers.Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='vgg16')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = keras_utils.get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='64373286793e3c8b2b4e3219cbf3544b')
else:
weights_path = keras_utils.get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='6d6bbae143d832006294945121d1f1fc')
model.load_weights(weights_path)
if backend.backend() == 'theano':
keras_utils.convert_all_kernels_in_model(model)
elif weights is not None:
model.load_weights(weights)
return model
| keras-applications/keras_applications/vgg16.py/0 | {
"file_path": "keras-applications/keras_applications/vgg16.py",
"repo_id": "keras-applications",
"token_count": 4338
} | 11 |
<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="wy-breadcrumbs">
<li><a href="{{ nav.homepage.url|url }}">Docs</a> »</li>
{% if page %}
{% for doc in page.ancestors %}
{% if doc.link %}
<li><a href="{{ doc.link|e }}">{{ doc.title }}</a> »</li>
{% else %}
<li>{{ doc.title }} »</li>
{% endif %}
{% endfor %}
{% endif %}
{% if page %}<li>{{ page.title }}</li>{% endif %}
<li class="wy-breadcrumbs-aside">
{%- block repo %}
{% if page and page.edit_url %}
<a href="https://github.com/keras-team/keras/tree/master/docs"
{%- if config.repo_name|lower == 'github' %}
class="icon icon-github"
{%- elif config.repo_name|lower == 'bitbucket' %}
class="icon icon-bitbucket"
{%- elif config.repo_name|lower == 'gitlab' %}
class="icon icon-gitlab"
{% endif %}> Edit on {{ config.repo_name }}</a>
{% endif %}
{%- endblock %}
</li>
</ul>
{% if config.theme.prev_next_buttons_location|lower in ['top', 'both']
and page and (page.next_page or page.previous_page) %}
<div class="rst-breadcrumbs-buttons" role="navigation" aria-label="breadcrumb navigation">
{%- if page.next_page %}
<a href="{{ page.next_page.url|url }}" class="btn btn-neutral float-right" title="{{ page.next_page.title }}">Next <span class="icon icon-circle-arrow-right"></span></a>
{%- endif %}
{%- if page.previous_page %}
<a href="{{ page.previous_page.url|url }}" class="btn btn-neutral" title="{{ page.previous_page.title }}"><span class="icon icon-circle-arrow-left"></span> Previous</a>
{%- endif %}
</div>
{% endif %}
<hr/>
</div>
| keras-contrib/contrib_docs/theme/breadcrumbs.html/0 | {
"file_path": "keras-contrib/contrib_docs/theme/breadcrumbs.html",
"repo_id": "keras-contrib",
"token_count": 815
} | 12 |
'''
Trains a DenseNet-40-12 model on the CIFAR-10 Dataset.
Gets a 94.84% accuracy score after 100 epochs.
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
from keras import backend as K
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras_contrib.applications import DenseNet
batch_size = 64
nb_classes = 10
epochs = 100
img_rows, img_cols = 32, 32
img_channels = 3
# Parameters for the DenseNet model builder
if K.image_data_format() == 'channels_first':
img_dim = (img_channels, img_rows, img_cols)
else:
img_dim = (img_rows, img_cols, img_channels)
depth = 40
nb_dense_block = 3
growth_rate = 12
nb_filter = 16
dropout_rate = 0.0 # 0.0 for data augmentation
# Create the model (without loading weights)
model = DenseNet(depth=depth, nb_dense_block=nb_dense_block,
growth_rate=growth_rate, nb_filter=nb_filter,
dropout_rate=dropout_rate,
input_shape=img_dim,
weights=None)
print('Model created')
model.summary()
optimizer = Adam(lr=1e-3) # Using Adam instead of SGD to speed up training
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])
print('Finished compiling')
(trainX, trainY), (testX, testY) = cifar10.load_data()
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX /= 255.
testX /= 255.
Y_train = np_utils.to_categorical(trainY, nb_classes)
Y_test = np_utils.to_categorical(testY, nb_classes)
generator = ImageDataGenerator(rotation_range=15,
width_shift_range=5. / 32,
height_shift_range=5. / 32)
generator.fit(trainX, seed=0)
weights_file = 'DenseNet-40-12-CIFAR-10.h5'
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1),
cooldown=0, patience=10, min_lr=0.5e-6)
early_stopper = EarlyStopping(monitor='val_acc', min_delta=1e-4, patience=20)
model_checkpoint = ModelCheckpoint(weights_file, monitor='val_acc',
save_best_only=True,
save_weights_only=True, mode='auto')
callbacks = [lr_reducer, early_stopper, model_checkpoint]
model.fit_generator(generator.flow(trainX, Y_train, batch_size=batch_size),
steps_per_epoch=len(trainX) // batch_size,
epochs=epochs,
callbacks=callbacks,
validation_data=(testX, Y_test),
verbose=2)
scores = model.evaluate(testX, Y_test, batch_size=batch_size)
print('Test loss : ', scores[0])
print('Test accuracy : ', scores[1])
| keras-contrib/examples/cifar10_densenet.py/0 | {
"file_path": "keras-contrib/examples/cifar10_densenet.py",
"repo_id": "keras-contrib",
"token_count": 1265
} | 13 |
from keras import backend as K
# We import all keras backend functions here,
# so that files in this repo can import both
# core and contrib backend functions with a
# single import statement.
if K.backend() == 'theano':
from .theano_backend import *
elif K.backend() == 'tensorflow':
from .tensorflow_backend import *
elif K.backend() == 'cntk':
from .cntk_backend import *
| keras-contrib/keras_contrib/backend/__init__.py/0 | {
"file_path": "keras-contrib/keras_contrib/backend/__init__.py",
"repo_id": "keras-contrib",
"token_count": 129
} | 14 |
from __future__ import absolute_import
from .convaware import ConvolutionAware
| keras-contrib/keras_contrib/initializers/__init__.py/0 | {
"file_path": "keras-contrib/keras_contrib/initializers/__init__.py",
"repo_id": "keras-contrib",
"token_count": 20
} | 15 |
from keras.layers import Layer, InputSpec
from keras import initializers, regularizers, constraints
from keras import backend as K
class InstanceNormalization(Layer):
"""Instance normalization layer.
Normalize the activations of the previous layer at each step,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
# Arguments
axis: Integer, the axis that should be normalized
(typically the features axis).
For instance, after a `Conv2D` layer with
`data_format="channels_first"`,
set `axis=1` in `InstanceNormalization`.
Setting `axis=None` will normalize all values in each
instance of the batch.
Axis 0 is the batch dimension. `axis` cannot be set to 0 to avoid errors.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
            When the next layer is linear or piecewise-linear
            (e.g. `nn.relu`), this can be disabled, since the scaling
            will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a Sequential model.
# Output shape
Same shape as input.
# References
- [Layer Normalization](https://arxiv.org/abs/1607.06450)
- [Instance Normalization: The Missing Ingredient for Fast Stylization](
https://arxiv.org/abs/1607.08022)
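    # Example
    A minimal sketch (the axis choice and input shape are illustrative):
    ```python
    from keras.models import Sequential
    from keras_contrib.layers import InstanceNormalization

    model = Sequential()
    model.add(InstanceNormalization(axis=-1, input_shape=(64, 64, 3)))
    ```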
"""
def __init__(self,
axis=None,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
super(InstanceNormalization, self).__init__(**kwargs)
self.supports_masking = True
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
def build(self, input_shape):
ndim = len(input_shape)
if self.axis == 0:
raise ValueError('Axis cannot be zero')
if (self.axis is not None) and (ndim == 2):
raise ValueError('Cannot specify axis for rank 1 tensor')
self.input_spec = InputSpec(ndim=ndim)
if self.axis is None:
shape = (1,)
else:
shape = (input_shape[self.axis],)
if self.scale:
self.gamma = self.add_weight(shape=shape,
name='gamma',
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(shape=shape,
name='beta',
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint)
else:
self.beta = None
self.built = True
def call(self, inputs, training=None):
input_shape = K.int_shape(inputs)
reduction_axes = list(range(0, len(input_shape)))
if self.axis is not None:
del reduction_axes[self.axis]
del reduction_axes[0]
mean = K.mean(inputs, reduction_axes, keepdims=True)
stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
normed = (inputs - mean) / stddev
broadcast_shape = [1] * len(input_shape)
if self.axis is not None:
broadcast_shape[self.axis] = input_shape[self.axis]
if self.scale:
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
normed = normed * broadcast_gamma
if self.center:
broadcast_beta = K.reshape(self.beta, broadcast_shape)
normed = normed + broadcast_beta
return normed
def get_config(self):
config = {
'axis': self.axis,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
base_config = super(InstanceNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-contrib/keras_contrib/layers/normalization/instancenormalization.py/0 | {
"file_path": "keras-contrib/keras_contrib/layers/normalization/instancenormalization.py",
"repo_id": "keras-contrib",
"token_count": 2809
} | 16 |
import numpy as np
from keras import backend as K
all_metrics = []
all_sparse_metrics = []
def validate_metric(metric):
y_a = K.variable(np.random.random((6, 7)))
y_b = K.variable(np.random.random((6, 7)))
output = metric(y_a, y_b)
assert K.eval(output).shape == ()
| keras-contrib/keras_contrib/tests/metrics.py/0 | {
"file_path": "keras-contrib/keras_contrib/tests/metrics.py",
"repo_id": "keras-contrib",
"token_count": 118
} | 17 |
import pytest
import numpy as np
import os
import shutil
from keras.utils import to_categorical
from keras.layers import Layer, Input, Dense, Dropout, BatchNormalization
from keras_contrib.utils.test_utils import to_list, unpack_singleton
from keras_contrib.utils.test_utils import get_test_data
from keras import Model
from keras import backend as K
from keras_contrib.callbacks import TensorBoardGrouped
input_dim = 2
num_hidden = 4
num_classes = 2
batch_size = 5
train_samples = 20
test_samples = 20
def data_generator(x, y, batch_size):
x = to_list(x)
y = to_list(y)
max_batch_index = len(x[0]) // batch_size
i = 0
while 1:
x_batch = [array[i * batch_size: (i + 1) * batch_size] for array in x]
x_batch = unpack_singleton(x_batch)
y_batch = [array[i * batch_size: (i + 1) * batch_size] for array in y]
y_batch = unpack_singleton(y_batch)
yield x_batch, y_batch
i += 1
i = i % max_batch_index
# Changing the default arguments of get_test_data.
def get_data_callbacks(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes):
return get_test_data(num_train=num_train,
num_test=num_test,
input_shape=input_shape,
classification=classification,
num_classes=num_classes)
def test_TensorBoard(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
y_test = to_categorical(y_test)
y_train = to_categorical(y_train)
class DummyStatefulMetric(Layer):
def __init__(self, name='dummy_stateful_metric', **kwargs):
super(DummyStatefulMetric, self).__init__(name=name, **kwargs)
self.stateful = True
self.state = K.variable(value=0, dtype='int32')
def reset_states(self):
pass
def __call__(self, y_true, y_pred):
return self.state
inp = Input((input_dim,))
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
hidden = BatchNormalization()(hidden)
output = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy', DummyStatefulMetric()])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [TensorBoardGrouped(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
batch_size=5)]
# fit without validation data
model.fit(X_train, y_train, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=0),
epochs=3)
# fit with validation data and accuracy
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=0), epochs=2)
# fit generator without validation data
train_generator = data_generator(X_train, y_train, batch_size)
model.fit_generator(train_generator, len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
train_generator = data_generator(X_train, y_train, batch_size)
model.fit_generator(train_generator, len(X_train), epochs=2,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/callbacks/tensorboard_test.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/callbacks/tensorboard_test.py",
"repo_id": "keras-contrib",
"token_count": 1877
} | 18 |
import pytest
from keras_contrib.losses import jaccard_distance
from keras_contrib.utils.test_utils import is_tf_keras
from keras import backend as K
import numpy as np
@pytest.mark.xfail(is_tf_keras,
reason='TODO fix this.',
strict=True)
def test_jaccard_distance():
# all_right, almost_right, half_right, all_wrong
y_true = np.array([[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0],
[0, 0, 1., 0.]])
y_pred = np.array([[0, 0, 1, 0], [0, 0, 0.9, 0], [0, 0, 0.1, 0],
[1, 1, 0.1, 1.]])
r = jaccard_distance(
K.variable(y_true),
K.variable(y_pred), )
if K.is_keras_tensor(r):
assert K.int_shape(r) == (4, )
all_right, almost_right, half_right, all_wrong = K.eval(r)
assert all_right == 0, 'should converge on zero'
assert all_right < almost_right
assert almost_right < half_right
assert half_right < all_wrong
def test_jaccard_distance_shapes_3d():
y_a = K.variable(np.random.random((5, 6, 7)))
y_b = K.variable(np.random.random((5, 6, 7)))
objective_output = jaccard_distance(y_a, y_b)
assert K.eval(objective_output).shape == (5, 6)
def test_jaccard_distance_shapes_2d():
y_a = K.variable(np.random.random((6, 7)))
y_b = K.variable(np.random.random((6, 7)))
objective_output = jaccard_distance(y_a, y_b)
assert K.eval(objective_output).shape == (6, )
| keras-contrib/tests/keras_contrib/losses/jaccard_test.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/losses/jaccard_test.py",
"repo_id": "keras-contrib",
"token_count": 678
} | 19 |
"""Benchmark attention layers.
To run benchmarks, see the following command for an example, please change the
flag to your custom value:
```
python3 -m benchmarks.layer_benchmark.attention_benchmark \
--benchmark_name=benchmark_attention \
--num_samples=2048 \
--batch_size=256 \
--jit_compile=True
```
"""
from absl import app
from absl import flags
from benchmarks.layer_benchmark.base_benchmark import LayerBenchmark
FLAGS = flags.FLAGS
def benchmark_attention(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Attention"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 64], [256, 64]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_multi_head_attention(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "MultiHeadAttention"
init_args = {
"num_heads": 4,
"key_dim": 16,
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 64], [256, 64], [256, 64]],
flat_call_inputs=True,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_additive_attention(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "AdditiveAttention"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 64], [256, 64], [256, 64]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
BENCHMARK_NAMES = {
"benchmark_attention": benchmark_attention,
"benchmark_multi_head_attention": benchmark_multi_head_attention,
"benchmark_additive_attention": benchmark_additive_attention,
}
def main(_):
benchmark_name = FLAGS.benchmark_name
num_samples = FLAGS.num_samples
batch_size = FLAGS.batch_size
jit_compile = FLAGS.jit_compile
if benchmark_name is None:
for name, benchmark_fn in BENCHMARK_NAMES.items():
benchmark_fn(num_samples, batch_size, jit_compile)
return
if benchmark_name not in BENCHMARK_NAMES:
raise ValueError(
f"Invalid benchmark name: {benchmark_name}, `benchmark_name` must "
f"be one of {BENCHMARK_NAMES.keys()}"
)
benchmark_fn = BENCHMARK_NAMES[benchmark_name]
benchmark_fn(num_samples, batch_size, jit_compile)
if __name__ == "__main__":
app.run(main)
| keras-core/benchmarks/layer_benchmark/attention_benchmark.py/0 | {
"file_path": "keras-core/benchmarks/layer_benchmark/attention_benchmark.py",
"repo_id": "keras-core",
"token_count": 1373
} | 20 |
import time
import numpy as np
import torch
def train_loop(model, train_loader, num_epochs, optimizer, loss_fn, framework):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
start = None
average_batch_time_per_epoch = []
for _ in range(num_epochs):
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(train_loader):
if batch_idx == 1:
start = time.time()
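            # Timing starts at the second batch so that one-time warmup costs
            # (data loading, CUDA initialization, kernel compilation) do not
            # skew the average.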
inputs = inputs.to(device)
targets = targets.to(device)
# Forward pass
outputs = model(inputs)
loss = loss_fn(outputs, targets)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
end = time.time()
average_batch_time_per_epoch.append(
(end - start) / (len(train_loader) - 1)
)
average_time = np.mean(average_batch_time_per_epoch)
print(f"Time per batch in {framework}: {average_time:.2f}")
| keras-core/benchmarks/torch_ctl_benchmark/benchmark_utils.py/0 | {
"file_path": "keras-core/benchmarks/torch_ctl_benchmark/benchmark_utils.py",
"repo_id": "keras-core",
"token_count": 510
} | 21 |
"""
Title: Sequence to sequence learning for performing number addition
Author: [Smerity](https://twitter.com/Smerity) and others
Date created: 2015/08/17
Last modified: 2020/04/17
Description: A model that learns to add strings of numbers, e.g. "535+61" -> "596".
Accelerator: GPU
"""
"""
## Introduction
In this example, we train a model to learn to add two numbers, provided as strings.
**Example:**
- Input: "535+61"
- Output: "596"
Input may optionally be reversed, which was shown to increase performance in many tasks
in: [Learning to Execute](http://arxiv.org/abs/1410.4615) and
[Sequence to Sequence Learning with Neural Networks](http://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf).
Theoretically, sequence order inversion introduces shorter term dependencies between
source and target for this problem.
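For example, with reversal enabled the padded query "12+345 " is presented
to the model as " 543+21".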
**Results:**
For two digits (reversed):
+ One layer LSTM (128 HN), 5k training examples = 99% train/test accuracy in 55 epochs
Three digits (reversed):
+ One layer LSTM (128 HN), 50k training examples = 99% train/test accuracy in 100 epochs
Four digits (reversed):
+ One layer LSTM (128 HN), 400k training examples = 99% train/test accuracy in 20 epochs
Five digits (reversed):
+ One layer LSTM (128 HN), 550k training examples = 99% train/test accuracy in 30 epochs
"""
"""
## Setup
"""
import keras_core as keras
from keras_core import layers
import numpy as np
# Parameters for the model and dataset.
TRAINING_SIZE = 50000
DIGITS = 3
REVERSE = True
# Maximum length of input is 'int + int' (e.g., '345+678'). Maximum length of
# int is DIGITS.
MAXLEN = DIGITS + 1 + DIGITS
"""
## Generate the data
"""
class CharacterTable:
"""Given a set of characters:
+ Encode them to a one-hot integer representation
+ Decode the one-hot or integer representation to their character output
+ Decode a vector of probabilities to their character output
"""
def __init__(self, chars):
"""Initialize character table.
# Arguments
chars: Characters that can appear in the input.
"""
self.chars = sorted(set(chars))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
def encode(self, C, num_rows):
"""One-hot encode given string C.
# Arguments
C: string, to be encoded.
num_rows: Number of rows in the returned one-hot encoding. This is
used to keep the # of rows for each data the same.
"""
x = np.zeros((num_rows, len(self.chars)))
for i, c in enumerate(C):
x[i, self.char_indices[c]] = 1
return x
def decode(self, x, calc_argmax=True):
"""Decode the given vector or 2D array to their character output.
# Arguments
x: A vector or a 2D array of probabilities or one-hot representations;
or a vector of character indices (used with `calc_argmax=False`).
calc_argmax: Whether to find the character index with maximum
probability, defaults to `True`.
"""
if calc_argmax:
x = x.argmax(axis=-1)
return "".join(self.indices_char[x] for x in x)
# All the numbers, plus sign and space for padding.
chars = "0123456789+ "
ctable = CharacterTable(chars)
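# For example (illustrative): ctable.encode("1+2 ", 4) returns a (4, 12)
# one-hot array, which ctable.decode() maps back to "1+2 ".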
questions = []
expected = []
seen = set()
print("Generating data...")
while len(questions) < TRAINING_SIZE:
f = lambda: int(
"".join(
np.random.choice(list("0123456789"))
for i in range(np.random.randint(1, DIGITS + 1))
)
)
a, b = f(), f()
# Skip any addition questions we've already seen
    # Also skip commutative duplicates such that a+b == b+a (hence the
    # sorting).
key = tuple(sorted((a, b)))
if key in seen:
continue
seen.add(key)
# Pad the data with spaces such that it is always MAXLEN.
q = "{}+{}".format(a, b)
query = q + " " * (MAXLEN - len(q))
ans = str(a + b)
# Answers can be of maximum size DIGITS + 1.
ans += " " * (DIGITS + 1 - len(ans))
if REVERSE:
# Reverse the query, e.g., '12+345 ' becomes ' 543+21'. (Note the
# space used for padding.)
query = query[::-1]
questions.append(query)
expected.append(ans)
print("Total questions:", len(questions))
"""
## Vectorize the data
"""
print("Vectorization...")
x = np.zeros((len(questions), MAXLEN, len(chars)), dtype=bool)
y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=bool)
for i, sentence in enumerate(questions):
x[i] = ctable.encode(sentence, MAXLEN)
for i, sentence in enumerate(expected):
y[i] = ctable.encode(sentence, DIGITS + 1)
# Shuffle (x, y) in unison as the later parts of x will almost all be larger
# digits.
indices = np.arange(len(y))
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
# Explicitly set apart 10% for validation data that we never train over.
split_at = len(x) - len(x) // 10
(x_train, x_val) = x[:split_at], x[split_at:]
(y_train, y_val) = y[:split_at], y[split_at:]
print("Training Data:")
print(x_train.shape)
print(y_train.shape)
print("Validation Data:")
print(x_val.shape)
print(y_val.shape)
"""
## Build the model
"""
print("Build model...")
num_layers = 1 # Try to add more LSTM layers!
model = keras.Sequential()
# "Encode" the input sequence using a LSTM, producing an output of size 128.
# Note: In a situation where your input sequences have a variable length,
# use input_shape=(None, num_feature).
model.add(layers.Input((MAXLEN, len(chars))))
model.add(layers.LSTM(128))
# As the decoder RNN's input, repeatedly provide the last output of the
# encoder RNN for each time step. Repeat 'DIGITS + 1' times as that's the maximum
# length of output, e.g., when DIGITS=3, max output is 999+999=1998.
model.add(layers.RepeatVector(DIGITS + 1))
# The decoder RNN could be multiple layers stacked or a single layer.
for _ in range(num_layers):
    # By setting return_sequences to True, return not only the last output but
    # all the outputs so far in the form of (num_samples, timesteps,
    # output_dim). This is necessary as the Dense layer below is applied to
    # every temporal slice, so it expects the first dimension to be the
    # timesteps.
model.add(layers.LSTM(128, return_sequences=True))
# Apply a dense layer to every temporal slice of the input. For each step
# of the output sequence, decide which character should be chosen.
model.add(layers.Dense(len(chars), activation="softmax"))
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
model.summary()
"""
## Train the model
"""
epochs = 30
batch_size = 32
# Train the model each generation and show predictions against the validation
# dataset.
for epoch in range(1, epochs + 1):
print()
print("Iteration", epoch)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=1,
validation_data=(x_val, y_val),
)
# Select 10 samples from the validation set at random so we can visualize
# errors.
for i in range(10):
ind = np.random.randint(0, len(x_val))
rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
preds = np.argmax(model.predict(rowx), axis=-1)
q = ctable.decode(rowx[0])
correct = ctable.decode(rowy[0])
guess = ctable.decode(preds[0], calc_argmax=False)
print("Q", q[::-1] if REVERSE else q, end=" ")
print("T", correct, end=" ")
if correct == guess:
print("β " + guess)
else:
print("β " + guess)
"""
You'll get to 99+% validation accuracy after ~30 epochs.
Example available on HuggingFace.
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/addition-lstm) | [](https://huggingface.co/spaces/keras-io/addition-lstm) |
"""
| keras-core/examples/keras_io/nlp/addition_rnn.py/0 | {
"file_path": "keras-core/examples/keras_io/nlp/addition_rnn.py",
"repo_id": "keras-core",
"token_count": 3090
} | 22 |
"""
Title: Deep Dream
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2016/01/13
Last modified: 2020/05/02
Description: Generating Deep Dreams with Keras.
Accelerator: GPU
"""
"""
## Introduction
"Deep dream" is an image-filtering technique which consists of taking an image
classification model, and running gradient ascent over an input image to
try to maximize the activations of specific layers (and sometimes, specific units in
specific layers) for this input. It produces hallucination-like visuals.
It was first introduced by Alexander Mordvintsev from Google in July 2015.
Process:
- Load the original image.
- Define a number of processing scales ("octaves"),
from smallest to largest.
- Resize the original image to the smallest scale.
- For every scale, starting with the smallest (i.e. current one):
- Run gradient ascent
- Upscale image to the next scale
- Reinject the detail that was lost at upscaling time
- Stop when we are back to the original size.
To obtain the detail lost during upscaling, we simply
take the original image, shrink it down, upscale it,
and compare the result to the (resized) original image.
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
import keras_core as keras
from keras_core.applications import inception_v3
base_image_path = keras.utils.get_file(
"sky.jpg", "https://i.imgur.com/aGBdQyK.jpg"
)
result_prefix = "sky_dream"
# These are the names of the layers
# for which we try to maximize activation,
# as well as their weight in the final loss
# we try to maximize.
# You can tweak these settings to obtain new visual effects.
layer_settings = {
"mixed4": 1.0,
"mixed5": 1.5,
"mixed6": 2.0,
"mixed7": 2.5,
}
# Playing with these hyperparameters will also allow you to achieve new effects
step = 0.01 # Gradient ascent step size
num_octave = 3 # Number of scales at which to run gradient ascent
octave_scale = 1.4 # Size ratio between scales
iterations = 20 # Number of ascent steps per scale
max_loss = 15.0
"""
This is our base image:
"""
from IPython.display import Image, display
display(Image(base_image_path))
"""
Let's set up some image preprocessing/deprocessing utilities:
"""
def preprocess_image(image_path):
# Util function to open, resize and format pictures
# into appropriate arrays.
img = keras.utils.load_img(image_path)
img = keras.utils.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = inception_v3.preprocess_input(img)
return img
def deprocess_image(x):
# Util function to convert a NumPy array into a valid image.
x = x.reshape((x.shape[1], x.shape[2], 3))
# Undo inception v3 preprocessing
x /= 2.0
x += 0.5
x *= 255.0
# Convert to uint8 and clip to the valid range [0, 255]
x = np.clip(x, 0, 255).astype("uint8")
return x
"""
## Compute the Deep Dream loss
First, build a feature extraction model to retrieve the activations of our target layers
given an input image.
"""
# Build an InceptionV3 model loaded with pre-trained ImageNet weights
model = inception_v3.InceptionV3(weights="imagenet", include_top=False)
# Get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict(
[
(layer.name, layer.output)
for layer in [model.get_layer(name) for name in layer_settings.keys()]
]
)
# Set up a model that returns the activation values for every target layer
# (as a dict)
feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)
"""
The actual loss computation is very simple:
"""
def compute_loss(input_image):
features = feature_extractor(input_image)
# Initialize the loss
loss = tf.zeros(shape=())
for name in features.keys():
coeff = layer_settings[name]
activation = features[name]
# We avoid border artifacts by only involving non-border pixels in the loss.
scaling = tf.reduce_prod(tf.cast(tf.shape(activation), "float32"))
loss += (
coeff
* tf.reduce_sum(tf.square(activation[:, 2:-2, 2:-2, :]))
/ scaling
)
return loss
"""
## Set up the gradient ascent loop for one octave
"""
@tf.function
def gradient_ascent_step(img, learning_rate):
with tf.GradientTape() as tape:
tape.watch(img)
loss = compute_loss(img)
# Compute gradients.
grads = tape.gradient(loss, img)
# Normalize gradients.
grads /= tf.maximum(tf.reduce_mean(tf.abs(grads)), 1e-6)
img += learning_rate * grads
return loss, img
def gradient_ascent_loop(img, iterations, learning_rate, max_loss=None):
for i in range(iterations):
loss, img = gradient_ascent_step(img, learning_rate)
if max_loss is not None and loss > max_loss:
break
print("... Loss value at step %d: %.2f" % (i, loss))
return img
"""
## Run the training loop, iterating over different octaves
"""
original_img = preprocess_image(base_image_path)
original_shape = original_img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
shape = tuple([int(dim / (octave_scale**i)) for dim in original_shape])
successive_shapes.append(shape)
successive_shapes = successive_shapes[::-1]
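# For instance, a hypothetical 600x600 input with octave_scale=1.4 and
# num_octave=3 yields shapes (306, 306), (428, 428), (600, 600),
# processed smallest first.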
shrunk_original_img = tf.image.resize(original_img, successive_shapes[0])
img = tf.identity(original_img) # Make a copy
for i, shape in enumerate(successive_shapes):
print("Processing octave %d with shape %s" % (i, shape))
img = tf.image.resize(img, shape)
img = gradient_ascent_loop(
img, iterations=iterations, learning_rate=step, max_loss=max_loss
)
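    # Reinject the detail lost at upscaling time: the difference between the
    # original image resized to this scale and the shrunk-then-upscaled copy
    # is exactly the high-frequency content that resizing discarded.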
upscaled_shrunk_original_img = tf.image.resize(shrunk_original_img, shape)
same_size_original = tf.image.resize(original_img, shape)
lost_detail = same_size_original - upscaled_shrunk_original_img
img += lost_detail
shrunk_original_img = tf.image.resize(original_img, shape)
keras.utils.save_img(result_prefix + ".png", deprocess_image(img.numpy()))
"""
Display the result.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/deep-dream)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/deep-dream).
"""
display(Image(result_prefix + ".png"))
| keras-core/examples/keras_io/tensorflow/generative/deep_dream.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/generative/deep_dream.py",
"repo_id": "keras-core",
"token_count": 2198
} | 23 |
"""
Title: Actor Critic Method
Author: [Apoorv Nandan](https://twitter.com/NandanApoorv)
Converted to Keras Core by: [Muhammad Anas Raza](https://anasrz.com)
Date created: 2020/05/13
Last modified: 2023/07/19
Description: Implement Actor Critic Method in CartPole environment.
Accelerator: NONE
"""
"""
## Introduction
This script shows an implementation of the Actor Critic method on the CartPole-v1 environment.
### Actor Critic Method
As an agent takes actions and moves through an environment, it learns to map
the observed state of the environment to two possible outputs:
1. Recommended action: A probability value for each action in the action space.
The part of the agent responsible for this output is called the **actor**.
2. Estimated rewards in the future: Sum of all rewards it expects to receive in the
future. The part of the agent responsible for this output is the **critic**.
Agent and Critic learn to perform their tasks, such that the recommended actions
from the actor maximize the rewards.
### CartPole-V1
A pole is attached to a cart placed on a frictionless track. The agent has to apply
force to move the cart. It is rewarded for every time step the pole
remains upright. The agent, therefore, must learn to keep the pole from falling over.
### References
- [CartPole](http://www.derongliu.org/adp/adp-cdrom/Barto1983.pdf)
- [Actor Critic Method](https://hal.inria.fr/hal-00840470/document)
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras_core as keras
from keras_core import layers
import gym
import numpy as np
import tensorflow as tf
# Configuration parameters for the whole setup
seed = 42
gamma = 0.99 # Discount factor for past rewards
max_steps_per_episode = 10000
env = gym.make("CartPole-v1", new_step_api=True) # Create the environment
env.reset(seed=seed)
eps = np.finfo(
np.float32
).eps.item() # Smallest number such that 1.0 + eps != 1.0
"""
## Implement Actor Critic network
This network learns two functions:
1. Actor: This takes as input the state of our environment and returns a
probability value for each action in its action space.
2. Critic: This takes as input the state of our environment and returns
an estimate of total rewards in the future.
In our implementation, they share the initial layer.
"""
num_inputs = 4
num_actions = 2
num_hidden = 128
inputs = layers.Input(shape=(num_inputs,))
common = layers.Dense(num_hidden, activation="relu")(inputs)
action = layers.Dense(num_actions, activation="softmax")(common)
critic = layers.Dense(1)(common)
model = keras.Model(inputs=inputs, outputs=[action, critic])
"""
## Train
"""
optimizer = keras.optimizers.Adam(learning_rate=0.01)
huber_loss = keras.losses.Huber()
action_probs_history = []
critic_value_history = []
rewards_history = []
running_reward = 0
episode_count = 0
while True: # Run until solved
state = env.reset()
episode_reward = 0
with tf.GradientTape() as tape:
for timestep in range(1, max_steps_per_episode):
# env.render(); Adding this line would show the attempts
# of the agent in a pop up window.
state = tf.convert_to_tensor(state)
state = tf.expand_dims(state, 0)
# Predict action probabilities and estimated future rewards
# from environment state
action_probs, critic_value = model(state)
critic_value_history.append(critic_value[0, 0])
# Sample action from action probability distribution
action = np.random.choice(num_actions, p=np.squeeze(action_probs))
action_probs_history.append(tf.math.log(action_probs[0, action]))
# Apply the sampled action in our environment
state, reward, done, _, _ = env.step(action)
rewards_history.append(reward)
episode_reward += reward
if done:
break
# Update running reward to check condition for solving
running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward
# Calculate expected value from rewards
# - At each timestep what was the total reward received after that timestep
# - Rewards in the past are discounted by multiplying them with gamma
# - These are the labels for our critic
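        # For example, with gamma = 0.99 and rewards_history = [1, 1, 1],
        # returns = [1 + 0.99 + 0.99**2, 1 + 0.99, 1] = [2.9701, 1.99, 1.0].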
returns = []
discounted_sum = 0
for r in rewards_history[::-1]:
discounted_sum = r + gamma * discounted_sum
returns.insert(0, discounted_sum)
# Normalize
returns = np.array(returns)
returns = (returns - np.mean(returns)) / (np.std(returns) + eps)
returns = returns.tolist()
# Calculating loss values to update our network
history = zip(action_probs_history, critic_value_history, returns)
actor_losses = []
critic_losses = []
for log_prob, value, ret in history:
# At this point in history, the critic estimated that we would get a
# total reward = `value` in the future. We took an action with log probability
            # of `log_prob` and ended up receiving a total reward = `ret`.
# The actor must be updated so that it predicts an action that leads to
# high rewards (compared to critic's estimate) with high probability.
diff = ret - value
actor_losses.append(-log_prob * diff) # actor loss
# The critic must be updated so that it predicts a better estimate of
# the future rewards.
critic_losses.append(
huber_loss(tf.expand_dims(value, 0), tf.expand_dims(ret, 0))
)
# Backpropagation
loss_value = sum(actor_losses) + sum(critic_losses)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# Clear the loss and reward history
action_probs_history.clear()
critic_value_history.clear()
rewards_history.clear()
# Log details
episode_count += 1
if episode_count % 10 == 0:
template = "running reward: {:.2f} at episode {}"
print(template.format(running_reward, episode_count))
if running_reward > 195: # Condition to consider the task solved
print("Solved at episode {}!".format(episode_count))
break
"""
## Visualizations
In early stages of training:

In later stages of training:

"""
| keras-core/examples/keras_io/tensorflow/rl/actor_critic_cartpole.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/rl/actor_critic_cartpole.py",
"repo_id": "keras-core",
"token_count": 2376
} | 24 |
"""
Title: Metric learning for image similarity search
Author: [Mat Kelcey](https://twitter.com/mat_kelcey)
Date created: 2020/06/05
Last modified: 2020/06/09
Description: Example of using similarity metric learning on CIFAR-10 images.
Accelerator: GPU
"""
"""
## Overview
Metric learning aims to train models that can embed inputs into a high-dimensional space
such that "similar" inputs, as defined by the training scheme, are located close to each
other. These models once trained can produce embeddings for downstream systems where such
similarity is useful; examples include as a ranking signal for search or as a form of
pretrained embedding model for another supervised problem.
For a more detailed overview of metric learning see:
* [What is metric learning?](http://contrib.scikit-learn.org/metric-learn/introduction.html)
* ["Using crossentropy for metric learning" tutorial](https://www.youtube.com/watch?v=Jb4Ewl5RzkI)
"""
"""
## Setup
"""
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from collections import defaultdict
from PIL import Image
from sklearn.metrics import ConfusionMatrixDisplay
import keras_core as keras
from keras_core import layers
from keras_core.datasets import cifar10
"""
## Dataset
For this example we will be using the
[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset.
"""
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
y_train = np.squeeze(y_train)
x_test = x_test.astype("float32") / 255.0
y_test = np.squeeze(y_test)
"""
To get a sense of the dataset we can visualise a grid of 25 random examples.
"""
height_width = 32
def show_collage(examples):
box_size = height_width + 2
num_rows, num_cols = examples.shape[:2]
collage = Image.new(
mode="RGB",
size=(num_cols * box_size, num_rows * box_size),
color=(250, 250, 250),
)
for row_idx in range(num_rows):
for col_idx in range(num_cols):
array = (np.array(examples[row_idx, col_idx]) * 255).astype(
np.uint8
)
collage.paste(
Image.fromarray(array), (col_idx * box_size, row_idx * box_size)
)
# Double size for visualisation.
collage = collage.resize((2 * num_cols * box_size, 2 * num_rows * box_size))
return collage
# Show a collage of 5x5 random images.
sample_idxs = np.random.randint(0, 50000, size=(5, 5))
examples = x_train[sample_idxs]
show_collage(examples)
"""
Metric learning provides training data not as explicit `(X, y)` pairs but instead uses
multiple instances that are related in the way we want to express similarity. In our
example we will use instances of the same class to represent similarity; a single
training instance will not be one image, but a pair of images of the same class. When
referring to the images in this pair we'll use the common metric learning names of the
`anchor` (a randomly chosen image) and the `positive` (another randomly chosen image of
the same class).
To facilitate this we need to build a form of lookup that maps from classes to the
instances of that class. When generating data for training we will sample from this
lookup.
"""
class_idx_to_train_idxs = defaultdict(list)
for y_train_idx, y in enumerate(y_train):
class_idx_to_train_idxs[y].append(y_train_idx)
class_idx_to_test_idxs = defaultdict(list)
for y_test_idx, y in enumerate(y_test):
class_idx_to_test_idxs[y].append(y_test_idx)
"""
For this example we are using the simplest approach to training; a batch will consist of
`(anchor, positive)` pairs spread across the classes. The goal of learning will be to
move the anchor and positive pairs closer together and further away from other instances
in the batch. In this case the batch size will be dictated by the number of classes; for
CIFAR-10 this is 10.
"""
num_classes = 10
class AnchorPositivePairs(keras.utils.PyDataset):
    def __init__(self, num_batches):
        super().__init__()
        self.num_batches = num_batches
def __len__(self):
        return self.num_batches
def __getitem__(self, _idx):
x = np.empty(
(2, num_classes, height_width, height_width, 3), dtype=np.float32
)
for class_idx in range(num_classes):
examples_for_class = class_idx_to_train_idxs[class_idx]
anchor_idx = random.choice(examples_for_class)
positive_idx = random.choice(examples_for_class)
while positive_idx == anchor_idx:
positive_idx = random.choice(examples_for_class)
x[0, class_idx] = x_train[anchor_idx]
x[1, class_idx] = x_train[positive_idx]
return x
"""
We can visualise a batch in another collage. The top row shows randomly chosen anchors
from the 10 classes, the bottom row shows the corresponding 10 positives.
"""
examples = next(iter(AnchorPositivePairs(num_batches=1)))
show_collage(examples)
"""
## Embedding model
We define a custom model with a `train_step` that first embeds both anchors and positives
and then uses their pairwise dot products as logits for a softmax.
"""
class EmbeddingModel(keras.Model):
def train_step(self, data):
# Note: Workaround for open issue, to be removed.
if isinstance(data, tuple):
data = data[0]
anchors, positives = data[0], data[1]
with tf.GradientTape() as tape:
# Run both anchors and positives through model.
anchor_embeddings = self(anchors, training=True)
positive_embeddings = self(positives, training=True)
# Calculate cosine similarity between anchors and positives. As they have
            # been normalised this is just the pairwise dot products.
similarities = tf.einsum(
"ae,pe->ap", anchor_embeddings, positive_embeddings
)
# Since we intend to use these as logits we scale them by a temperature.
            # This value would normally be chosen as a hyperparameter.
temperature = 0.2
similarities /= temperature
# We use these similarities as logits for a softmax. The labels for
            # this call are just the sequence [0, 1, 2, ..., num_classes - 1] since we
# want the main diagonal values, which correspond to the anchor/positive
# pairs, to be high. This loss will move embeddings for the
# anchor/positive pairs together and move all other pairs apart.
sparse_labels = tf.range(num_classes)
loss = self.compute_loss(y=sparse_labels, y_pred=similarities)
# Calculate gradients and apply via optimizer.
gradients = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
# Update and return metrics (specifically the one for the loss value).
for metric in self.metrics:
metric.update_state(sparse_labels, similarities)
return {m.name: m.result() for m in self.metrics}
"""
Next we describe the architecture that maps from an image to an embedding. This model
simply consists of a sequence of 2d convolutions followed by global pooling with a final
linear projection to an embedding space. As is common in metric learning we normalise the
embeddings so that we can use simple dot products to measure similarity. For simplicity
this model is intentionally small.
"""
inputs = layers.Input(shape=(height_width, height_width, 3))
x = layers.Conv2D(filters=32, kernel_size=3, strides=2, activation="relu")(
inputs
)
x = layers.Conv2D(filters=64, kernel_size=3, strides=2, activation="relu")(x)
x = layers.Conv2D(filters=128, kernel_size=3, strides=2, activation="relu")(x)
x = layers.GlobalAveragePooling2D()(x)
embeddings = layers.Dense(units=8, activation=None)(x)
embeddings = keras.layers.UnitNormalization()(embeddings)
model = EmbeddingModel(inputs, embeddings)
"""
Finally we run the training. On a Google Colab GPU instance this takes about a minute.
"""
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
history = model.fit(AnchorPositivePairs(num_batches=1000), epochs=20)
plt.plot(history.history["loss"])
plt.show()
"""
## Testing
We can review the quality of this model by applying it to the test set and considering
near neighbours in the embedding space.
First we embed the test set and calculate all near neighbours. Recall that since the
embeddings are unit length we can calculate cosine similarity via dot products.
"""
near_neighbours_per_example = 10
embeddings = model.predict(x_test)
gram_matrix = np.einsum("ae,be->ab", embeddings, embeddings)
near_neighbours = np.argsort(gram_matrix.T)[
:, -(near_neighbours_per_example + 1) :
]
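# `argsort` sorts ascending, so the last columns hold the most similar
# examples; the `+ 1` accounts for each example matching itself.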
"""
As a visual check of these embeddings we can build a collage of the near neighbours for 5
random examples. The first column of the image below is a randomly selected image, the
following 10 columns show the nearest neighbours in order of similarity.
"""
num_collage_examples = 5
examples = np.empty(
(
num_collage_examples,
near_neighbours_per_example + 1,
height_width,
height_width,
3,
),
dtype=np.float32,
)
for row_idx in range(num_collage_examples):
examples[row_idx, 0] = x_test[row_idx]
anchor_near_neighbours = reversed(near_neighbours[row_idx][:-1])
for col_idx, nn_idx in enumerate(anchor_near_neighbours):
examples[row_idx, col_idx + 1] = x_test[nn_idx]
show_collage(examples)
"""
We can also get a quantified view of the performance by considering the correctness of
near neighbours in terms of a confusion matrix.
Let us sample 10 examples from each of the 10 classes and consider their near neighbours
as a form of prediction; that is, does the example and its near neighbours share the same
class?
We observe that each animal class does generally well, and is confused the most with the
other animal classes. The vehicle classes follow the same pattern.
"""
confusion_matrix = np.zeros((num_classes, num_classes))
# For each class.
for class_idx in range(num_classes):
# Consider 10 examples.
example_idxs = class_idx_to_test_idxs[class_idx][:10]
for y_test_idx in example_idxs:
# And count the classes of its near neighbours.
for nn_idx in near_neighbours[y_test_idx][:-1]:
nn_class_idx = y_test[nn_idx]
confusion_matrix[class_idx, nn_class_idx] += 1
# Display a confusion matrix.
labels = [
"Airplane",
"Automobile",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
]
disp = ConfusionMatrixDisplay(
confusion_matrix=confusion_matrix, display_labels=labels
)
disp.plot(
include_values=True, cmap="viridis", ax=None, xticks_rotation="vertical"
)
plt.show()
| keras-core/examples/keras_io/tensorflow/vision/metric_learning.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/vision/metric_learning.py",
"repo_id": "keras-core",
"token_count": 4003
} | 25 |
import copy
import math
from keras_core import backend
from keras_core import layers
from keras_core.api_export import keras_core_export
from keras_core.applications import imagenet_utils
from keras_core.models import Functional
from keras_core.ops import operation_utils
from keras_core.utils import file_utils
BASE_WEIGHTS_PATH = "https://storage.googleapis.com/keras-applications/"
WEIGHTS_HASHES = {
"b0": (
"902e53a9f72be733fc0bcb005b3ebbac",
"50bc09e76180e00e4465e1a485ddc09d",
),
"b1": (
"1d254153d4ab51201f1646940f018540",
"74c4e6b3e1f6a1eea24c589628592432",
),
"b2": (
"b15cce36ff4dcbd00b6dd88e7857a6ad",
"111f8e2ac8aa800a7a99e3239f7bfb39",
),
"b3": (
"ffd1fdc53d0ce67064dc6a9c7960ede0",
"af6d107764bb5b1abb91932881670226",
),
"b4": (
"18c95ad55216b8f92d7e70b3a046e2fc",
"ebc24e6d6c33eaebbd558eafbeedf1ba",
),
"b5": (
"ace28f2a6363774853a83a0b21b9421a",
"38879255a25d3c92d5e44e04ae6cec6f",
),
"b6": (
"165f6e37dce68623721b423839de8be5",
"9ecce42647a20130c1f39a5d4cb75743",
),
"b7": (
"8c03f828fec3ef71311cd463b6759d99",
"cbcfe4450ddf6f3ad90b1b398090fe4a",
),
}
DEFAULT_BLOCKS_ARGS = [
{
"kernel_size": 3,
"repeats": 1,
"filters_in": 32,
"filters_out": 16,
"expand_ratio": 1,
"id_skip": True,
"strides": 1,
"se_ratio": 0.25,
},
{
"kernel_size": 3,
"repeats": 2,
"filters_in": 16,
"filters_out": 24,
"expand_ratio": 6,
"id_skip": True,
"strides": 2,
"se_ratio": 0.25,
},
{
"kernel_size": 5,
"repeats": 2,
"filters_in": 24,
"filters_out": 40,
"expand_ratio": 6,
"id_skip": True,
"strides": 2,
"se_ratio": 0.25,
},
{
"kernel_size": 3,
"repeats": 3,
"filters_in": 40,
"filters_out": 80,
"expand_ratio": 6,
"id_skip": True,
"strides": 2,
"se_ratio": 0.25,
},
{
"kernel_size": 5,
"repeats": 3,
"filters_in": 80,
"filters_out": 112,
"expand_ratio": 6,
"id_skip": True,
"strides": 1,
"se_ratio": 0.25,
},
{
"kernel_size": 5,
"repeats": 4,
"filters_in": 112,
"filters_out": 192,
"expand_ratio": 6,
"id_skip": True,
"strides": 2,
"se_ratio": 0.25,
},
{
"kernel_size": 3,
"repeats": 1,
"filters_in": 192,
"filters_out": 320,
"expand_ratio": 6,
"id_skip": True,
"strides": 1,
"se_ratio": 0.25,
},
]
CONV_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 2.0,
"mode": "fan_out",
"distribution": "truncated_normal",
},
}
DENSE_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 1.0 / 3.0,
"mode": "fan_out",
"distribution": "uniform",
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For EfficientNet, input preprocessing is included as part of the model
(as a `Rescaling` layer), and thus
`keras_core.applications.efficientnet.preprocess_input` is actually a
pass-through function. EfficientNet models expect their inputs to be float
tensors of pixels with values in the `[0-255]` range.
Args:
include_top: Whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: One of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded.
Defaults to `"imagenet"`.
input_tensor: Optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False.
        It should have exactly 3 input channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`. Defaults to `None`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified. 1000 is how many
ImageNet classes there are. Defaults to `1000`.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to `'softmax'`.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A model instance.
"""
IMAGENET_STDDEV_RGB = [0.229, 0.224, 0.225]
def EfficientNet(
width_coefficient,
depth_coefficient,
default_size,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
activation="swish",
blocks_args="default",
model_name="efficientnet",
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the EfficientNet architecture.
Args:
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_size: integer, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
activation: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False.
            It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A model instance.
"""
if blocks_args == "default":
blocks_args = DEFAULT_BLOCKS_ARGS
if not (weights in {"imagenet", None} or file_utils.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded."
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
'If using `weights="imagenet"` with `include_top`'
" as true, `classes` should be 1000"
)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
def round_filters(filters, divisor=depth_divisor):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(
divisor, int(filters + divisor / 2) // divisor * divisor
)
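        # For example, with width_coefficient=1.1 and divisor=8, 32 filters
        # scale to 35.2 and round to 32 (the nearest multiple of 8).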
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
# Build stem
x = img_input
x = layers.Rescaling(1.0 / 255.0)(x)
x = layers.Normalization(axis=bn_axis)(x)
if weights == "imagenet":
        # Note that the normalization layer uses square value of STDDEV as the
        # variance for the layer: result = (input - mean) / sqrt(var)
        # However, the original implementation uses (input - mean) / var to
# normalize the input, we need to divide another sqrt(var) to match the
# original implementation.
# See https://github.com/tensorflow/tensorflow/issues/49930 for more
# details
x = layers.Rescaling(
[1.0 / math.sqrt(stddev) for stddev in IMAGENET_STDDEV_RGB]
)(x)
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, 3), name="stem_conv_pad"
)(x)
x = layers.Conv2D(
round_filters(32),
3,
strides=2,
padding="valid",
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name="stem_conv",
)(x)
x = layers.BatchNormalization(axis=bn_axis, name="stem_bn")(x)
x = layers.Activation(activation, name="stem_activation")(x)
# Build blocks
blocks_args = copy.deepcopy(blocks_args)
b = 0
blocks = float(sum(round_repeats(args["repeats"]) for args in blocks_args))
for i, args in enumerate(blocks_args):
assert args["repeats"] > 0
# Update block input and output filters based on depth multiplier.
args["filters_in"] = round_filters(args["filters_in"])
args["filters_out"] = round_filters(args["filters_out"])
for j in range(round_repeats(args.pop("repeats"))):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
args["strides"] = 1
args["filters_in"] = args["filters_out"]
x = block(
x,
activation,
drop_connect_rate * b / blocks,
name=f"block{i + 1}{chr(j + 97)}_",
**args,
)
b += 1
# Build top
x = layers.Conv2D(
round_filters(1280),
1,
padding="same",
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name="top_conv",
)(x)
x = layers.BatchNormalization(axis=bn_axis, name="top_bn")(x)
x = layers.Activation(activation, name="top_activation")(x)
if include_top:
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name="top_dropout")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes,
activation=classifier_activation,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
name="predictions",
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = operation_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Functional(inputs, x, name=model_name)
# Load weights.
if weights == "imagenet":
if include_top:
file_suffix = ".h5"
file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
else:
file_suffix = "_notop.h5"
file_hash = WEIGHTS_HASHES[model_name[-2:]][1]
file_name = model_name + file_suffix
weights_path = file_utils.get_file(
file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir="models",
file_hash=file_hash,
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def block(
inputs,
activation="swish",
drop_rate=0.0,
name="",
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
expand_ratio=1,
se_ratio=0.0,
id_skip=True,
):
"""An inverted residual block.
Args:
inputs: input tensor.
activation: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
        id_skip: boolean, whether to apply the identity skip connection.
Returns:
output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = layers.Conv2D(
filters,
1,
padding="same",
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "expand_conv",
)(inputs)
x = layers.BatchNormalization(axis=bn_axis, name=name + "expand_bn")(x)
x = layers.Activation(activation, name=name + "expand_activation")(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name=name + "dwconv_pad",
)(x)
conv_pad = "valid"
else:
conv_pad = "same"
x = layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
name=name + "dwconv",
)(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + "bn")(x)
x = layers.Activation(activation, name=name + "activation")(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
if bn_axis == 1:
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
se = layers.Reshape(se_shape, name=name + "se_reshape")(se)
se = layers.Conv2D(
filters_se,
1,
padding="same",
activation=activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "se_reduce",
)(se)
se = layers.Conv2D(
filters,
1,
padding="same",
activation="sigmoid",
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "se_expand",
)(se)
x = layers.multiply([x, se], name=name + "se_excite")
# Output phase
x = layers.Conv2D(
filters_out,
1,
padding="same",
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "project_conv",
)(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + "project_bn")(x)
if id_skip and strides == 1 and filters_in == filters_out:
if drop_rate > 0:
x = layers.Dropout(
drop_rate, noise_shape=(None, 1, 1, 1), name=name + "drop"
)(x)
x = layers.add([x, inputs], name=name + "add")
return x
@keras_core_export(
[
"keras_core.applications.efficientnet.EfficientNetB0",
"keras_core.applications.EfficientNetB0",
]
)
def EfficientNetB0(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
**kwargs,
):
return EfficientNet(
1.0,
1.0,
224,
0.2,
model_name="efficientnetb0",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs,
)
@keras_core_export(
[
"keras_core.applications.efficientnet.EfficientNetB1",
"keras_core.applications.EfficientNetB1",
]
)
def EfficientNetB1(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
**kwargs,
):
return EfficientNet(
1.0,
1.1,
240,
0.2,
model_name="efficientnetb1",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs,
)
@keras_core_export(
[
"keras_core.applications.efficientnet.EfficientNetB2",
"keras_core.applications.EfficientNetB2",
]
)
def EfficientNetB2(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
**kwargs,
):
return EfficientNet(
1.1,
1.2,
260,
0.3,
model_name="efficientnetb2",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs,
)
@keras_core_export(
[
"keras_core.applications.efficientnet.EfficientNetB3",
"keras_core.applications.EfficientNetB3",
]
)
def EfficientNetB3(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
**kwargs,
):
return EfficientNet(
1.2,
1.4,
300,
0.3,
model_name="efficientnetb3",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs,
)
@keras_core_export(
[
"keras_core.applications.efficientnet.EfficientNetB4",
"keras_core.applications.EfficientNetB4",
]
)
def EfficientNetB4(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
**kwargs,
):
return EfficientNet(
1.4,
1.8,
380,
0.4,
model_name="efficientnetb4",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs,
)
@keras_core_export(
[
"keras_core.applications.efficientnet.EfficientNetB5",
"keras_core.applications.EfficientNetB5",
]
)
def EfficientNetB5(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
**kwargs,
):
return EfficientNet(
1.6,
2.2,
456,
0.4,
model_name="efficientnetb5",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs,
)
@keras_core_export(
[
"keras_core.applications.efficientnet.EfficientNetB6",
"keras_core.applications.EfficientNetB6",
]
)
def EfficientNetB6(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
**kwargs,
):
return EfficientNet(
1.8,
2.6,
528,
0.5,
model_name="efficientnetb6",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs,
)
@keras_core_export(
[
"keras_core.applications.efficientnet.EfficientNetB7",
"keras_core.applications.EfficientNetB7",
]
)
def EfficientNetB7(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
**kwargs,
):
return EfficientNet(
2.0,
3.1,
600,
0.5,
model_name="efficientnetb7",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs,
)
EfficientNetB0.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB0")
EfficientNetB1.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB1")
EfficientNetB2.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB2")
EfficientNetB3.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB3")
EfficientNetB4.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB4")
EfficientNetB5.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB5")
EfficientNetB6.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB6")
EfficientNetB7.__doc__ = BASE_DOCSTRING.format(name="EfficientNetB7")
@keras_core_export("keras_core.applications.efficientnet.preprocess_input")
def preprocess_input(x, data_format=None):
"""A placeholder method for backward compatibility.
The preprocessing logic has been included in the efficientnet model
implementation. Users are no longer required to call this method to
    normalize the input data. This method does nothing and is only kept as a
    placeholder to align the API surface between old and new versions of the model.
Args:
x: A floating point `numpy.array` or a tensor.
data_format: Optional data format of the image tensor/array. `None`
means the global setting `keras_core.backend.image_data_format()`
is used (unless you changed it, it uses `"channels_last"`).
Defaults to `None`.
Returns:
Unchanged `numpy.array` or tensor.
"""
return x
@keras_core_export("keras_core.applications.efficientnet.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| keras-core/keras_core/applications/efficientnet.py/0 | {
"file_path": "keras-core/keras_core/applications/efficientnet.py",
"repo_id": "keras-core",
"token_count": 11767
} | 26 |
from keras_core.backend.common import backend_utils
from keras_core.backend.common.variables import AutocastScope
from keras_core.backend.common.variables import KerasVariable
from keras_core.backend.common.variables import get_autocast_scope
from keras_core.backend.common.variables import is_float_dtype
from keras_core.backend.common.variables import is_int_dtype
from keras_core.backend.common.variables import standardize_dtype
from keras_core.backend.common.variables import standardize_shape
from keras_core.random import random
| keras-core/keras_core/backend/common/__init__.py/0 | {
"file_path": "keras-core/keras_core/backend/common/__init__.py",
"repo_id": "keras-core",
"token_count": 165
} | 27 |
from keras_core.backend.jax import core
from keras_core.backend.jax import distribution_lib
from keras_core.backend.jax import image
from keras_core.backend.jax import math
from keras_core.backend.jax import nn
from keras_core.backend.jax import numpy
from keras_core.backend.jax import random
from keras_core.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras_core.backend.jax.core import Variable
from keras_core.backend.jax.core import cast
from keras_core.backend.jax.core import compute_output_spec
from keras_core.backend.jax.core import cond
from keras_core.backend.jax.core import convert_to_numpy
from keras_core.backend.jax.core import convert_to_tensor
from keras_core.backend.jax.core import is_tensor
from keras_core.backend.jax.core import scatter
from keras_core.backend.jax.core import shape
from keras_core.backend.jax.core import stop_gradient
from keras_core.backend.jax.core import vectorized_map
from keras_core.backend.jax.rnn import cudnn_ok
from keras_core.backend.jax.rnn import gru
from keras_core.backend.jax.rnn import lstm
from keras_core.backend.jax.rnn import rnn
| keras-core/keras_core/backend/jax/__init__.py/0 | {
"file_path": "keras-core/keras_core/backend/jax/__init__.py",
"repo_id": "keras-core",
"token_count": 401
} | 28 |
import jax
import numpy as np
from jax import lax
from jax import numpy as jnp
from keras_core.backend import standardize_data_format
from keras_core.backend import standardize_dtype
from keras_core.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_jax,
)
from keras_core.backend.config import epsilon
from keras_core.backend.numpy.core import cast
from keras_core.backend.numpy.core import is_tensor
from keras_core.utils.module_utils import scipy
def relu(x):
return np.maximum(x, 0.0)
def relu6(x):
return np.clip(x, 0.0, 6.0)
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def tanh(x):
return np.tanh(x)
def softplus(x):
return np.log(1.0 + np.exp(x))
def softsign(x):
return x / (1.0 + np.abs(x))
def silu(x):
return x * (1.0 / (1.0 + np.exp(-x)))
def log_sigmoid(x):
return np.log(1.0 / (1.0 + np.exp(-x)))
def leaky_relu(x, negative_slope=0.2):
return np.maximum(x, negative_slope * x)
def hard_sigmoid(x):
x = (x / 6.0) + 0.5
return np.where(x <= 0.0, 0.0, np.where(x >= 1.0, 1.0, x))
def elu(x, alpha=1.0):
return np.where(x >= 0.0, x, alpha * (np.exp(x) - 1.0))
def selu(
x,
alpha=1.6732632423543772848170429916717,
scale=1.0507009873554804934193349852946,
):
return scale * np.where(x >= 0.0, x, alpha * (np.exp(x) - 1.0))
def gelu(x, approximate=True):
if approximate:
return (
0.5
* x
* (
1.0
+ np.tanh(
np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))
)
)
)
else:
return x * scipy.stats.norm.cdf(x)
def softmax(x, axis=None):
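    # Subtract the per-axis max before exponentiating for numerical
    # stability; softmax is invariant to shifting its inputs by a constant.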
exp_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
return exp_x / np.sum(exp_x, axis=axis, keepdims=True)
def log_softmax(x, axis=None):
max_x = np.max(x, axis=axis, keepdims=True)
logsumexp = np.log(np.exp(x - max_x).sum(axis=axis, keepdims=True))
return x - max_x - logsumexp
def _convert_to_spatial_operand(
x,
num_spatial_dims,
data_format="channels_last",
include_batch_and_channels=True,
):
# Helper function that converts an operand to a spatial operand.
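    # For example, pool_size=2 with num_spatial_dims=2 and "channels_last"
    # becomes (1, 2, 2, 1).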
x = (x,) * num_spatial_dims if isinstance(x, int) else x
if not include_batch_and_channels:
return x
if data_format == "channels_last":
x = (1,) + x + (1,)
else:
x = (1,) + (1,) + x
return x
def _pool(
inputs,
initial_value,
reduce_fn,
pool_size,
strides=None,
padding="valid",
):
"""Helper function to define pooling functions.
Args:
inputs: input data of shape `N+2`.
initial_value: the initial value for the reduction.
reduce_fn: a reduce function of the form `(T, T) -> T`.
pool_size: a sequence of `N` integers, representing the window size to
reduce over.
strides: a sequence of `N` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: either the string `same` or `valid`.
Returns:
The output of the reduction for each window slice.
"""
if padding not in ("same", "valid"):
raise ValueError(
f"Invalid padding '{padding}', must be 'same' or 'valid'."
)
padding = padding.upper()
return np.array(
lax.reduce_window(
inputs,
initial_value,
reduce_fn,
pool_size,
strides,
padding,
)
)
def max_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
pool_size = _convert_to_spatial_operand(
pool_size, num_spatial_dims, data_format
)
strides = pool_size if strides is None else strides
strides = _convert_to_spatial_operand(
strides, num_spatial_dims, data_format
)
return _pool(inputs, -jnp.inf, lax.max, pool_size, strides, padding)
def average_pool(
inputs,
pool_size,
strides,
padding,
data_format=None,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
pool_size = _convert_to_spatial_operand(
pool_size, num_spatial_dims, data_format
)
strides = pool_size if strides is None else strides
strides = _convert_to_spatial_operand(
strides, num_spatial_dims, data_format
)
pooled = _pool(inputs, 0.0, lax.add, pool_size, strides, padding)
if padding == "valid":
# Avoid the extra reduce_window.
return pooled / np.prod(pool_size)
else:
# Count the number of valid entries at each input point, then use that
# for computing average. Assumes that any two arrays of same shape will
# be padded the same. Avoid broadcasting on axis where pooling is
# skipped.
shape = [
(a if b != 1 else 1) for (a, b) in zip(inputs.shape, pool_size)
]
window_counts = _pool(
jnp.ones(shape, inputs.dtype),
0.0,
lax.add,
pool_size,
strides,
padding,
)
return pooled / window_counts
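# Hedged usage sketch for the two pooling wrappers above (relies on the same
# `jax` dependency this module already imports). Under "same" padding the
# divisor varies per position, which is why `average_pool` counts the valid
# entries instead of dividing by the full window size.
def _pooling_demo():
    x = np.arange(9, dtype="float32").reshape(1, 9, 1)
    # "valid": floor((9 - 3) / 3) + 1 = 3 output positions.
    out = max_pool(x, pool_size=3, strides=3, padding="valid")
    assert out.shape == (1, 3, 1)
    # "same" with strides=2: ceil(9 / 2) = 5 output positions.
    y = average_pool(x, 3, 2, "same")
    assert y.shape == (1, 5, 1)
    # The first window covers one padded slot plus entries 0 and 1, so the
    # average is (0 + 1) / 2 = 0.5 rather than 1 / 3.
    assert np.isclose(float(y[0, 0, 0]), 0.5)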
def _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format="channels_last",
transpose=False,
):
"""Create a `lax.ConvDimensionNumbers` for the given inputs."""
num_dims = num_spatial_dims + 2
if data_format == "channels_last":
spatial_dims = tuple(range(1, num_dims - 1))
inputs_dn = (0, num_dims - 1) + spatial_dims
else:
spatial_dims = tuple(range(2, num_dims))
inputs_dn = (0, 1) + spatial_dims
if transpose:
kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2))
else:
kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2))
return lax.ConvDimensionNumbers(
lhs_spec=inputs_dn, rhs_spec=kernel_dn, out_spec=inputs_dn
)
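# Hedged illustration: for a 2D convolution (`num_dims == 4`) the helper
# above produces the axis permutations below. Each spec lists axis positions
# in (batch, feature, *spatial) order, per `jax.lax.ConvDimensionNumbers`.
def _dimension_numbers_demo():
    dn = _convert_to_lax_conv_dimension_numbers(2, "channels_last")
    assert dn.lhs_spec == (0, 3, 1, 2)  # inputs NHWC: N=0, C=3, H=1, W=2
    assert dn.rhs_spec == (3, 2, 0, 1)  # kernel HWIO: O=3, I=2, H=0, W=1
    assert dn.out_spec == (0, 3, 1, 2)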
def conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
dimension_numbers = _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format,
transpose=False,
)
strides = _convert_to_spatial_operand(
strides,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
dilation_rate = _convert_to_spatial_operand(
dilation_rate,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
if data_format == "channels_last":
channels = inputs.shape[-1]
else:
channels = inputs.shape[1]
kernel_in_channels = kernel.shape[-2]
if channels % kernel_in_channels > 0:
raise ValueError(
"The number of input channels must be evenly divisible by "
f"kernel's in_channels. Received input channels {channels} and "
f"kernel in_channels {kernel_in_channels}. "
)
feature_group_count = channels // kernel_in_channels
return np.array(
jax.lax.conv_general_dilated(
inputs,
kernel if is_tensor(kernel) else kernel.numpy(),
strides,
padding,
rhs_dilation=dilation_rate,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
)
)
def depthwise_conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
dimension_numbers = _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format,
transpose=False,
)
strides = _convert_to_spatial_operand(
strides,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
dilation_rate = _convert_to_spatial_operand(
dilation_rate,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
feature_group_count = (
inputs.shape[-1] if data_format == "channels_last" else inputs.shape[1]
)
kernel = jnp.reshape(
kernel if is_tensor(kernel) else kernel.numpy(),
kernel.shape[:-2] + (1, feature_group_count * kernel.shape[-1]),
)
return np.array(
jax.lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
rhs_dilation=dilation_rate,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
)
)
def separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
depthwise_conv_output = depthwise_conv(
inputs,
depthwise_kernel,
strides,
padding,
data_format,
dilation_rate,
)
return conv(
depthwise_conv_output,
pointwise_kernel,
strides=1,
padding="valid",
data_format=data_format,
dilation_rate=dilation_rate,
)
def conv_transpose(
inputs,
kernel,
strides=1,
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
padding_values = compute_conv_transpose_padding_args_for_jax(
input_shape=inputs.shape,
kernel_shape=kernel.shape,
strides=strides,
padding=padding,
output_padding=output_padding,
dilation_rate=dilation_rate,
)
dimension_numbers = _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format,
transpose=False,
)
strides = _convert_to_spatial_operand(
strides,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
dilation_rate = _convert_to_spatial_operand(
dilation_rate,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
return np.array(
jax.lax.conv_transpose(
inputs,
kernel if is_tensor(kernel) else kernel.numpy(),
strides,
padding=padding_values,
rhs_dilation=dilation_rate,
dimension_numbers=dimension_numbers,
transpose_kernel=True,
)
)
def one_hot(x, num_classes, axis=-1, dtype="float32"):
input_shape = x.shape
# Shrink the last dimension if the shape is (..., 1).
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
x = x.reshape(-1)
if not num_classes:
num_classes = np.max(x) + 1
batch_size = x.shape[0]
categorical = np.zeros((batch_size, num_classes), dtype=dtype)
valid_indices = x >= 0
categorical[np.arange(batch_size)[valid_indices], x[valid_indices]] = 1
# First, reshape the array with the extra dimension at the end
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
# Then, move this new dimension to the right place (according to axis)
if axis != -1:
categorical = np.moveaxis(categorical, -1, axis)
return categorical
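# Hedged example: negative indices are treated as "no class" and yield an
# all-zero row, while `axis` relocates the new classes dimension.
def _one_hot_demo():
    x = np.array([0, 2, -1])
    out = one_hot(x, num_classes=3)
    assert out.shape == (3, 3)
    assert np.allclose(out[2], 0.0)  # the -1 entry sets no class
    moved = one_hot(x, num_classes=3, axis=0)
    assert moved.shape == (3, 3)
    assert np.allclose(moved, out.T)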
def multi_hot(x, num_classes, axis=-1, dtype="float32"):
reduction_axis = 1 if len(x.shape) > 1 else 0
outputs = np.max(
one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype),
axis=reduction_axis,
)
return outputs
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
target = np.array(target)
output = np.array(output)
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if len(target.shape) < 1:
raise ValueError(
"Arguments `target` and `output` must be at least rank 1. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_prob = log_softmax(output, axis=axis)
else:
output = output / np.sum(output, axis, keepdims=True)
output = np.clip(output, epsilon(), 1.0 - epsilon())
log_prob = np.log(output)
return -np.sum(target * log_prob, axis=axis)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
target = np.array(target, dtype="int32")
output = np.array(output)
if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
target = np.squeeze(target, axis=-1)
if len(output.shape) < 1:
raise ValueError(
"Argument `output` must be at least rank 1. "
"Received: "
f"output.shape={output.shape}"
)
if target.shape != output.shape[:-1]:
raise ValueError(
"Arguments `target` and `output` must have the same shape "
"up until the last dimension: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_prob = log_softmax(output, axis=axis)
else:
output = output / np.sum(output, axis, keepdims=True)
output = np.clip(output, epsilon(), 1.0 - epsilon())
log_prob = np.log(output)
target = one_hot(target, output.shape[axis], axis=axis)
return -np.sum(target * log_prob, axis=axis)
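# Hedged consistency check: with integer targets, the sparse variant above
# agrees with the dense `categorical_crossentropy` applied to one-hot
# targets, since both reduce to -log p(label).
def _crossentropy_demo():
    logits = np.array([[2.0, 1.0, 0.1], [0.5, 2.5, 0.0]])
    labels = np.array([0, 1])
    sparse = sparse_categorical_crossentropy(labels, logits, from_logits=True)
    dense = categorical_crossentropy(
        one_hot(labels, 3), logits, from_logits=True
    )
    assert np.allclose(sparse, dense)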
def binary_crossentropy(target, output, from_logits=False):
target = np.array(target)
output = np.array(output)
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
output = sigmoid(output)
output = np.clip(output, epsilon(), 1.0 - epsilon())
bce = target * np.log(output)
bce += (1.0 - target) * np.log(1.0 - output)
return -bce
def moments(x, axes, keepdims=False):
axes = tuple(axes) if isinstance(axes, list) else axes
# The dynamic range of float16 is too limited for statistics. As a
# workaround, we simply perform the operations on float32 and convert back
# to float16
need_cast = False
ori_dtype = standardize_dtype(x.dtype)
if ori_dtype == "float16":
need_cast = True
x = cast(x, "float32")
mean = np.mean(x, axes, keepdims=True)
    # The variance is computed using $Var = E[|x|^2] - |E[x]|^2$. It is
    # faster than a two-pass computation but less numerically stable.
variance = np.mean(np.square(x), axis=axes, keepdims=True) - np.square(mean)
if not keepdims:
mean = np.squeeze(mean, axes)
variance = np.squeeze(variance, axes)
if need_cast:
        # Avoid overflow and underflow when casting from float32 back to
        # float16.
mean = np.clip(mean, np.finfo(np.float16).min, np.finfo(np.float16).max)
variance = np.clip(
variance, np.finfo(np.float16).min, np.finfo(np.float16).max
)
mean = cast(mean, ori_dtype)
variance = cast(variance, ori_dtype)
return mean, variance
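# Hedged sanity check: the one-pass shortcut Var = E[x^2] - E[x]^2 used above
# matches NumPy's two-pass variance for well-scaled inputs.
def _moments_demo():
    x = np.random.normal(size=(4, 8)).astype("float32")
    mean, variance = moments(x, axes=(0, 1))
    assert np.allclose(mean, np.mean(x))
    assert np.allclose(variance, np.var(x), atol=1e-5)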
| keras-core/keras_core/backend/numpy/nn.py/0 | {
"file_path": "keras-core/keras_core/backend/numpy/nn.py",
"repo_id": "keras-core",
"token_count": 7165
} | 29 |
import tensorflow as tf
from tensorflow.experimental import numpy as tfnp
from keras_core.backend.common import standardize_dtype
from keras_core.backend.config import floatx
from keras_core.random.seed_generator import SeedGenerator
from keras_core.random.seed_generator import draw_seed
from keras_core.random.seed_generator import make_default_seed
def tf_draw_seed(seed):
# TF ops only accept int32/64 seeds but our base seed is uint32.
return tf.cast(draw_seed(seed), dtype="int32")
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
seed = tf_draw_seed(seed)
return tf.random.stateless_normal(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed
)
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
seed = tf_draw_seed(seed)
return tf.random.stateless_uniform(
shape=shape,
minval=tf.cast(minval, dtype),
maxval=tf.cast(maxval, dtype),
dtype=dtype,
seed=seed,
)
def categorical(logits, num_samples, dtype="int64", seed=None):
seed = tf_draw_seed(seed)
output = tf.random.stateless_categorical(logits, num_samples, seed=seed)
return tf.cast(output, dtype)
def randint(shape, minval, maxval, dtype="int32", seed=None):
    intermediate_dtype = dtype
    if standardize_dtype(dtype) not in ["int32", "int64"]:
        intermediate_dtype = "int64"
seed = tf_draw_seed(seed)
output = tf.random.stateless_uniform(
shape=shape,
minval=minval,
maxval=maxval,
        dtype=intermediate_dtype,
seed=seed,
)
return tf.cast(output, dtype)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
seed = tf_draw_seed(seed)
return tf.random.stateless_truncated_normal(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed
)
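# Hedged illustration (uses only the TensorFlow dependency this module
# already imports): stateless ops are pure functions of their seed, so the
# same int32 seed tensor reproduces the same draw. This determinism is what
# `tf_draw_seed` preserves while casting away the uint32 base seed.
def _stateless_rng_demo():
    seed = tf.constant([0, 42], dtype="int32")
    a = tf.random.stateless_uniform((3,), seed=seed)
    b = tf.random.stateless_uniform((3,), seed=seed)
    assert bool(tf.reduce_all(a == b))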
def _get_concrete_noise_shape(inputs, noise_shape):
if noise_shape is None:
return tf.shape(inputs)
concrete_inputs_shape = tf.shape(inputs)
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def dropout(inputs, rate, noise_shape=None, seed=None):
seed = tf_draw_seed(seed)
noise_shape = _get_concrete_noise_shape(inputs, noise_shape)
return tf.nn.experimental.stateless_dropout(
inputs,
rate=rate,
noise_shape=noise_shape,
seed=seed,
)
def shuffle(x, axis=0, seed=None):
seed = tf_draw_seed(seed)
if axis == 0:
return tf.random.experimental.stateless_shuffle(x, seed=seed)
x = tfnp.swapaxes(x, axis1=0, axis2=axis)
x = tf.random.experimental.stateless_shuffle(x, seed=seed)
x = tfnp.swapaxes(x, axis1=0, axis2=axis)
return x
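# Hedged illustration of the swapaxes trick above, in plain NumPy: moving the
# target axis to the front, permuting along axis 0, and moving it back is
# equivalent to permuting along the original axis.
def _axis_shuffle_demo():
    import numpy as np

    rng = np.random.RandomState(0)
    x = np.arange(12).reshape(3, 4)
    perm = rng.permutation(x.shape[1])
    swapped = np.swapaxes(np.swapaxes(x, 0, 1)[perm], 0, 1)
    assert np.array_equal(swapped, x[:, perm])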
| keras-core/keras_core/backend/tensorflow/random.py/0 | {
"file_path": "keras-core/keras_core/backend/tensorflow/random.py",
"repo_id": "keras-core",
"token_count": 1269
} | 30 |
"""IMDB sentiment classification dataset."""
import json
import numpy as np
from keras_core.api_export import keras_core_export
from keras_core.utils.file_utils import get_file
from keras_core.utils.python_utils import remove_long_seq
@keras_core_export("keras_core.datasets.imdb.load_data")
def load_data(
path="imdb.npz",
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3,
**kwargs,
):
"""Loads the [IMDB dataset](https://ai.stanford.edu/~amaas/data/sentiment/).
    This is a dataset of 25,000 movie reviews from IMDB, labeled by sentiment
(positive/negative). Reviews have been preprocessed, and each review is
encoded as a list of word indexes (integers).
For convenience, words are indexed by overall frequency in the dataset,
so that for instance the integer "3" encodes the 3rd most frequent word in
the data. This allows for quick filtering operations such as:
"only consider the top 10,000 most
common words, but eliminate the top 20 most common words".
As a convention, "0" does not stand for a specific word, but instead is used
to encode the pad token.
Args:
path: where to cache the data (relative to `~/.keras/dataset`).
num_words: integer or None. Words are
ranked by how often they occur (in the training set) and only
the `num_words` most frequent words are kept. Any less frequent word
will appear as `oov_char` value in the sequence data. If None,
all words are kept. Defaults to `None`.
skip_top: skip the top N most frequently occurring words
(which may not be informative). These words will appear as
`oov_char` value in the dataset. When 0, no words are
skipped. Defaults to `0`.
maxlen: int or None. Maximum sequence length.
            Any longer sequence will be truncated. `None` means no truncation.
Defaults to `None`.
seed: int. Seed for reproducible data shuffling.
start_char: int. The start of a sequence will be marked with this
character. 0 is usually the padding character. Defaults to `1`.
oov_char: int. The out-of-vocabulary character.
Words that were cut out because of the `num_words` or
`skip_top` limits will be replaced with this character.
index_from: int. Index actual words with this index and higher.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
**`x_train`, `x_test`**: lists of sequences, which are lists of indexes
    (integers). If the `num_words` argument was specified, the maximum
possible index value is `num_words - 1`. If the `maxlen` argument was
specified, the largest possible sequence length is `maxlen`.
**`y_train`, `y_test`**: lists of integer labels (1 or 0).
**Note**: The 'out of vocabulary' character is only used for
words that were present in the training set but are not included
    because they do not make the `num_words` cut here.
Words that were not seen in the training set but are in the test set
have simply been skipped.
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
fname=path,
origin=origin_folder + "imdb.npz",
file_hash=( # noqa: E501
"69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f"
),
)
with np.load(path, allow_pickle=True) as f:
x_train, labels_train = f["x_train"], f["y_train"]
x_test, labels_test = f["x_test"], f["y_test"]
rng = np.random.RandomState(seed)
indices = np.arange(len(x_train))
rng.shuffle(indices)
x_train = x_train[indices]
labels_train = labels_train[indices]
indices = np.arange(len(x_test))
rng.shuffle(indices)
x_test = x_test[indices]
labels_test = labels_test[indices]
if start_char is not None:
x_train = [[start_char] + [w + index_from for w in x] for x in x_train]
x_test = [[start_char] + [w + index_from for w in x] for x in x_test]
elif index_from:
x_train = [[w + index_from for w in x] for x in x_train]
x_test = [[w + index_from for w in x] for x in x_test]
if maxlen:
x_train, labels_train = remove_long_seq(maxlen, x_train, labels_train)
x_test, labels_test = remove_long_seq(maxlen, x_test, labels_test)
if not x_train or not x_test:
raise ValueError(
"After filtering for sequences shorter than maxlen="
f"{str(maxlen)}, no sequence was kept. Increase maxlen."
)
xs = x_train + x_test
labels = np.concatenate([labels_train, labels_test])
if not num_words:
num_words = max(max(x) for x in xs)
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [
[w if (skip_top <= w < num_words) else oov_char for w in x]
for x in xs
]
else:
xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
idx = len(x_train)
x_train, y_train = np.array(xs[:idx], dtype="object"), labels[:idx]
x_test, y_test = np.array(xs[idx:], dtype="object"), labels[idx:]
return (x_train, y_train), (x_test, y_test)
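# Hedged illustration of the remapping applied above, in plain Python: words
# outside the [skip_top, num_words) window collapse to `oov_char` when it is
# set, and are dropped from the sequence entirely when it is None.
def _oov_remap_demo():
    seq, skip_top, num_words, oov_char = [1, 5, 9, 50], 2, 10, 2
    remapped = [w if (skip_top <= w < num_words) else oov_char for w in seq]
    assert remapped == [2, 5, 9, 2]
    dropped = [w for w in seq if skip_top <= w < num_words]
    assert dropped == [5, 9]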
@keras_core_export("keras_core.datasets.imdb.get_word_index")
def get_word_index(path="imdb_word_index.json"):
"""Retrieves a dict mapping words to their index in the IMDB dataset.
Args:
path: where to cache the data (relative to `~/.keras/dataset`).
Returns:
The word index dictionary. Keys are word strings, values are their
index.
Example:
```python
# Use the default parameters to keras.datasets.imdb.load_data
start_char = 1
oov_char = 2
index_from = 3
# Retrieve the training sequences.
(x_train, _), _ = keras.datasets.imdb.load_data(
start_char=start_char, oov_char=oov_char, index_from=index_from
)
# Retrieve the word index file mapping words to indices
word_index = keras.datasets.imdb.get_word_index()
# Reverse the word index to obtain a dict mapping indices to words
# And add `index_from` to indices to sync with `x_train`
inverted_word_index = dict(
(i + index_from, word) for (word, i) in word_index.items()
)
# Update `inverted_word_index` to include `start_char` and `oov_char`
inverted_word_index[start_char] = "[START]"
inverted_word_index[oov_char] = "[OOV]"
# Decode the first sequence in the dataset
decoded_sequence = " ".join(inverted_word_index[i] for i in x_train[0])
```
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
fname=path,
origin=origin_folder + "imdb_word_index.json",
file_hash="bfafd718b763782e994055a2d397834f",
)
with open(path) as f:
return json.load(f)
| keras-core/keras_core/datasets/imdb.py/0 | {
"file_path": "keras-core/keras_core/datasets/imdb.py",
"repo_id": "keras-core",
"token_count": 2886
} | 31 |
from keras_core.layers.activations.elu import ELU
from keras_core.layers.activations.leaky_relu import LeakyReLU
from keras_core.layers.activations.prelu import PReLU
from keras_core.layers.activations.relu import ReLU
from keras_core.layers.activations.softmax import Softmax
| keras-core/keras_core/layers/activations/__init__.py/0 | {
"file_path": "keras-core/keras_core/layers/activations/__init__.py",
"repo_id": "keras-core",
"token_count": 96
} | 32 |
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.Attention")
class Attention(Layer):
"""Dot-product attention layer, a.k.a. Luong-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
    3. An optional `key` tensor of shape `(batch_size, Tv, dim)`. If none is
        supplied, `value` will be used as the `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
score_mode: Function to use to compute attention scores, one of
`{"dot", "concat"}`. `"dot"` refers to the dot product between the
query and key vectors. `"concat"` refers to the hyperbolic tangent
of the concatenation of the `query` and `key` vectors.
Call Args:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
        return_attention_scores: bool, if `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=False,
score_mode="dot",
dropout=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.use_scale = use_scale
self.score_mode = score_mode
        self.dropout = dropout
        if self.dropout > 0:
            # Seed generator consumed by the dropout in `_apply_scores`.
            self.seed_generator = backend.random.SeedGenerator()
if self.score_mode not in ["dot", "concat"]:
raise ValueError(
"Invalid value for argument score_mode. "
"Expected one of {'dot', 'concat'}. "
f"Received: score_mode={score_mode}"
)
def build(self, input_shape):
self._validate_inputs(input_shape)
self.scale = None
self.concat_score_weight = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=(),
initializer="ones",
dtype=self.dtype,
trainable=True,
)
if self.score_mode == "concat":
self.concat_score_weight = self.add_weight(
name="concat_score_weight",
shape=(),
initializer="ones",
dtype=self.dtype,
trainable=True,
)
self.built = True
def _calculate_scores(self, query, key):
"""Calculates attention scores as a query-key dot product.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
if self.score_mode == "dot":
scores = ops.matmul(query, ops.transpose(key, axes=[0, 2, 1]))
if self.scale is not None:
scores *= self.scale
elif self.score_mode == "concat":
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
if self.scale is not None:
scores = self.concat_score_weight * ops.sum(
ops.tanh(self.scale * (q_reshaped + k_reshaped)), axis=-1
)
else:
scores = self.concat_score_weight * ops.sum(
ops.tanh(q_reshaped + k_reshaped), axis=-1
)
return scores
def _apply_scores(self, scores, value, scores_mask=None, training=False):
"""Applies attention scores to the given value tensor.
To use this method in your attention layer, follow the steps:
* Use `query` tensor of shape `(batch_size, Tq)` and `key` tensor of
shape `(batch_size, Tv)` to calculate the attention `scores`.
* Pass `scores` and `value` tensors to this method. The method applies
`scores_mask`, calculates
`attention_distribution = softmax(scores)`, then returns
            `matmul(attention_distribution, value)`.
* Apply `query_mask` and return the result.
Args:
scores: Scores float tensor of shape `(batch_size, Tq, Tv)`.
value: Value tensor of shape `(batch_size, Tv, dim)`.
scores_mask: A boolean mask tensor of shape `(batch_size, 1, Tv)`
or `(batch_size, Tq, Tv)`. If given, scores at positions where
`scores_mask==False` do not contribute to the result. It must
contain at least one `True` value in each line along the last
dimension.
training: Python boolean indicating whether the layer should behave
in training mode (adding dropout) or in inference mode
(no dropout).
Returns:
Tensor of shape `(batch_size, Tq, dim)`.
Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
if scores_mask is not None:
padding_mask = ops.logical_not(scores_mask)
# Bias so padding positions do not contribute to attention
# distribution. Note 65504. is the max float16 value.
max_value = 65504.0 if scores.dtype == "float16" else 1.0e9
scores -= max_value * ops.cast(padding_mask, dtype=scores.dtype)
weights = ops.softmax(scores, axis=-1)
        if training and self.dropout > 0:
            weights = backend.random.dropout(
                weights,
                self.dropout,
                seed=self.seed_generator,
            )
return ops.matmul(weights, value), weights
def _calculate_score_mask(self, scores, v_mask, use_causal_mask):
if v_mask is not None:
# Mask of shape [batch_size, 1, Tv].
v_mask = ops.expand_dims(v_mask, axis=-2)
if not use_causal_mask:
return v_mask
# Creates a lower triangular mask, so position i cannot attend to
# positions j>i. This prevents the flow of information from the
# future into the past.
score_shape = ops.shape(scores)
# causal_mask_shape = [1, Tq, Tv].
mask_shape = (1, score_shape[-2], score_shape[-1])
ones_mask = ops.ones(shape=mask_shape, dtype="int32")
row_index = ops.cumsum(ones_mask, axis=-2)
col_index = ops.cumsum(ones_mask, axis=-1)
causal_mask = ops.greater_equal(row_index, col_index)
        if v_mask is None:
            return causal_mask
        return ops.logical_and(v_mask, causal_mask)
def call(
self,
inputs,
mask=None,
training=False,
return_attention_scores=False,
use_causal_mask=False,
):
self._validate_inputs(inputs=inputs, mask=mask)
q = inputs[0]
v = inputs[1]
k = inputs[2] if len(inputs) > 2 else v
q_mask = mask[0] if mask else None
v_mask = mask[1] if mask else None
scores = self._calculate_scores(query=q, key=k)
scores_mask = self._calculate_score_mask(
scores, v_mask, use_causal_mask
)
result, attention_scores = self._apply_scores(
scores=scores, value=v, scores_mask=scores_mask, training=training
)
if q_mask is not None:
# Mask of shape [batch_size, Tq, 1].
q_mask = ops.expand_dims(q_mask, axis=-1)
result *= ops.cast(q_mask, dtype=result.dtype)
if return_attention_scores:
return result, attention_scores
return result
def compute_mask(self, inputs, mask=None):
self._validate_inputs(inputs=inputs, mask=mask)
if mask is None or mask[0] is None:
return None
return ops.convert_to_tensor(mask[0])
def compute_output_shape(self, input_shape):
return input_shape[0]
def _validate_inputs(self, inputs, mask=None):
"""Validates arguments of the call method."""
class_name = self.__class__.__name__
if not isinstance(inputs, list):
raise ValueError(
f"{class_name} layer must be called on a list of inputs, "
"namely [query, value] or [query, value, key]. "
f"Received: inputs={inputs}."
)
if len(inputs) < 2 or len(inputs) > 3:
raise ValueError(
f"{class_name} layer accepts inputs list of length 2 or 3, "
"namely [query, value] or [query, value, key]. "
f"Received length: {len(inputs)}."
)
if mask is not None:
if not isinstance(mask, list):
raise ValueError(
f"{class_name} layer mask must be a list, "
f"namely [query_mask, value_mask]. Received: mask={mask}."
)
if len(mask) < 2 or len(mask) > 3:
raise ValueError(
f"{class_name} layer accepts mask list of length 2 or 3. "
f"Received: inputs={inputs}, mask={mask}."
)
def get_config(self):
base_config = super().get_config()
config = {
"use_scale": self.use_scale,
"score_mode": self.score_mode,
"dropout": self.dropout,
}
return {**base_config, **config}
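# Hedged NumPy-only walkthrough (independent of the layer above) of the three
# documented steps: scores = Q K^T, a softmax over Tv, then a weighted sum of
# V. Shapes follow the docstring with Tq=2, Tv=3, dim=4.
def _dot_attention_demo():
    import numpy as np

    rng = np.random.RandomState(0)
    q = rng.normal(size=(1, 2, 4))  # (batch_size, Tq, dim)
    v = rng.normal(size=(1, 3, 4))  # (batch_size, Tv, dim)
    scores = np.einsum("btd,bsd->bts", q, v)  # (batch_size, Tq, Tv)
    scores -= scores.max(axis=-1, keepdims=True)
    weights = np.exp(scores)
    weights /= weights.sum(axis=-1, keepdims=True)
    out = np.einsum("bts,bsd->btd", weights, v)  # (batch_size, Tq, dim)
    assert out.shape == (1, 2, 4)
    assert np.allclose(weights.sum(axis=-1), 1.0)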
| keras-core/keras_core/layers/attention/attention.py/0 | {
"file_path": "keras-core/keras_core/layers/attention/attention.py",
"repo_id": "keras-core",
"token_count": 5381
} | 33 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import backend
from keras_core import layers
from keras_core import testing
from keras_core.backend.common.backend_utils import (
_convert_conv_tranpose_padding_args_from_keras_to_torch,
)
from keras_core.backend.common.backend_utils import (
compute_conv_transpose_output_shape,
)
from keras_core.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_jax,
)
class ConvTransposeBasicTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 2,
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (2, 8, 4),
"output_shape": (2, 16, 5),
},
{
"filters": 6,
"kernel_size": 2,
"strides": 3,
"padding": "same",
"output_padding": 2,
"data_format": "channels_last",
"dilation_rate": (1,),
"input_shape": (2, 8, 4),
"output_shape": (2, 23, 6),
},
{
"filters": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (2, 8, 4),
"output_shape": (2, 16, 6),
},
)
@pytest.mark.requires_trainable_backend
def test_conv1d_transpose_basic(
self,
filters,
kernel_size,
strides,
padding,
output_padding,
data_format,
dilation_rate,
input_shape,
output_shape,
):
self.run_layer_test(
layers.Conv1DTranspose,
init_kwargs={
"filters": filters,
"kernel_size": kernel_size,
"strides": strides,
"padding": padding,
"output_padding": output_padding,
"data_format": data_format,
"dilation_rate": dilation_rate,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 2,
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (2, 8, 8, 4),
"output_shape": (2, 16, 16, 5),
},
{
"filters": 6,
"kernel_size": 2,
"strides": 3,
"padding": "same",
"output_padding": 2,
"data_format": "channels_last",
"dilation_rate": (1, 1),
"input_shape": (2, 8, 8, 4),
"output_shape": (2, 23, 23, 6),
},
{
"filters": 6,
"kernel_size": (2, 3),
"strides": (2, 1),
"padding": "valid",
"output_padding": None,
"data_format": "channels_first",
"dilation_rate": (1, 1),
"input_shape": (2, 4, 8, 8),
"output_shape": (2, 6, 16, 10),
},
{
"filters": 2,
"kernel_size": (7, 7),
"strides": (16, 16),
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": (1, 1),
"input_shape": (1, 14, 14, 2),
"output_shape": (1, 224, 224, 2),
},
)
@pytest.mark.requires_trainable_backend
def test_conv2d_transpose_basic(
self,
filters,
kernel_size,
strides,
padding,
output_padding,
data_format,
dilation_rate,
input_shape,
output_shape,
):
if (
data_format == "channels_first"
and backend.backend() == "tensorflow"
):
pytest.skip("channels_first unsupported on CPU with TF")
self.run_layer_test(
layers.Conv2DTranspose,
init_kwargs={
"filters": filters,
"kernel_size": kernel_size,
"strides": strides,
"padding": padding,
"output_padding": output_padding,
"data_format": data_format,
"dilation_rate": dilation_rate,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 2,
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (2, 8, 8, 8, 4),
"output_shape": (2, 16, 16, 16, 5),
},
{
"filters": 6,
"kernel_size": 2,
"strides": 3,
"padding": "same",
"output_padding": 2,
"data_format": "channels_last",
"dilation_rate": (1, 1, 1),
"input_shape": (2, 8, 8, 8, 4),
"output_shape": (2, 23, 23, 23, 6),
},
{
"filters": 6,
"kernel_size": (2, 2, 3),
"strides": (2, 1, 2),
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": (1, 1, 1),
"input_shape": (2, 8, 8, 8, 4),
"output_shape": (2, 16, 9, 17, 6),
},
)
@pytest.mark.requires_trainable_backend
def test_conv3d_transpose_basic(
self,
filters,
kernel_size,
strides,
padding,
output_padding,
data_format,
dilation_rate,
input_shape,
output_shape,
):
self.run_layer_test(
layers.Conv3DTranspose,
init_kwargs={
"filters": filters,
"kernel_size": kernel_size,
"strides": strides,
"padding": padding,
"output_padding": output_padding,
"data_format": data_format,
"dilation_rate": dilation_rate,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
def test_bad_init_args(self):
# `filters` is not positive.
with self.assertRaises(ValueError):
layers.Conv1DTranspose(filters=0, kernel_size=1)
# `kernel_size` has 0.
with self.assertRaises(ValueError):
layers.Conv2DTranspose(filters=2, kernel_size=(1, 0))
# `strides` has 0.
with self.assertRaises(ValueError):
layers.Conv2DTranspose(
filters=2, kernel_size=(2, 2), strides=(1, 0)
)
# `dilation_rate > 1` while `strides > 1`.
with self.assertRaises(ValueError):
layers.Conv2DTranspose(
filters=2, kernel_size=(2, 2), strides=2, dilation_rate=(2, 1)
)
class ConvTransposeCorrectnessTest(testing.TestCase, parameterized.TestCase):
def np_conv1d_transpose(
self,
x,
kernel_weights,
bias_weights,
strides,
padding,
output_padding,
data_format,
dilation_rate,
):
if data_format == "channels_first":
x = x.transpose((0, 2, 1))
if isinstance(strides, (tuple, list)):
h_stride = strides[0]
else:
h_stride = strides
if isinstance(dilation_rate, (tuple, list)):
h_dilation = dilation_rate[0]
else:
h_dilation = dilation_rate
h_kernel, ch_out, ch_in = kernel_weights.shape
n_batch, h_x, _ = x.shape
# Get output shape and padding
_, h_out, _ = compute_conv_transpose_output_shape(
x.shape,
kernel_weights.shape,
ch_out,
strides,
padding,
output_padding,
data_format,
dilation_rate,
)
jax_padding = compute_conv_transpose_padding_args_for_jax(
input_shape=x.shape,
kernel_shape=kernel_weights.shape,
strides=strides,
padding=padding,
output_padding=output_padding,
dilation_rate=dilation_rate,
)
h_pad_side1 = h_kernel - 1 - jax_padding[0][0]
if h_dilation > 1:
# Increase kernel size
new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
            new_kernel_size_tuple = (new_h_kernel,)
            new_kernel_weights = np.zeros(
                (*new_kernel_size_tuple, ch_out, ch_in),
dtype=kernel_weights.dtype,
)
new_kernel_weights[::h_dilation] = kernel_weights
kernel_weights = new_kernel_weights
h_kernel = kernel_weights.shape[0]
# Compute output
output = np.zeros([n_batch, h_out + h_kernel, ch_out])
for nb in range(n_batch):
for h_x_idx in range(h_x):
h_out_idx = h_x_idx * h_stride # Index in output
output[nb, h_out_idx : h_out_idx + h_kernel, :] += np.sum(
kernel_weights[:, :, :] * x[nb, h_x_idx, :], axis=-1
)
output = output + bias_weights
# Cut padding results from output
output = output[:, h_pad_side1 : h_out + h_pad_side1]
if data_format == "channels_first":
output = output.transpose((0, 2, 1))
return output
def np_conv2d_transpose(
self,
x,
kernel_weights,
bias_weights,
strides,
padding,
output_padding,
data_format,
dilation_rate,
):
if data_format == "channels_first":
x = x.transpose((0, 2, 3, 1))
if isinstance(strides, (tuple, list)):
h_stride, w_stride = strides
else:
h_stride = strides
w_stride = strides
if isinstance(dilation_rate, (tuple, list)):
h_dilation, w_dilation = dilation_rate
else:
h_dilation = dilation_rate
w_dilation = dilation_rate
h_kernel, w_kernel, ch_out, ch_in = kernel_weights.shape
n_batch, h_x, w_x, _ = x.shape
# Get output shape and padding
_, h_out, w_out, _ = compute_conv_transpose_output_shape(
x.shape,
kernel_weights.shape,
ch_out,
strides,
padding,
output_padding,
data_format,
dilation_rate,
)
jax_padding = compute_conv_transpose_padding_args_for_jax(
input_shape=x.shape,
kernel_shape=kernel_weights.shape,
strides=strides,
padding=padding,
output_padding=output_padding,
dilation_rate=dilation_rate,
)
h_pad_side1 = h_kernel - 1 - jax_padding[0][0]
w_pad_side1 = w_kernel - 1 - jax_padding[1][0]
if h_dilation > 1 or w_dilation > 1:
# Increase kernel size
new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
new_w_kernel = w_kernel + (w_dilation - 1) * (w_kernel - 1)
            new_kernel_size_tuple = (new_h_kernel, new_w_kernel)
            new_kernel_weights = np.zeros(
                (*new_kernel_size_tuple, ch_out, ch_in),
dtype=kernel_weights.dtype,
)
new_kernel_weights[::h_dilation, ::w_dilation] = kernel_weights
kernel_weights = new_kernel_weights
h_kernel, w_kernel = kernel_weights.shape[:2]
# Compute output
output = np.zeros([n_batch, h_out + h_kernel, w_out + w_kernel, ch_out])
for nb in range(n_batch):
for h_x_idx in range(h_x):
h_out_idx = h_x_idx * h_stride # Index in output
for w_x_idx in range(w_x):
w_out_idx = w_x_idx * w_stride
output[
nb,
h_out_idx : h_out_idx + h_kernel,
w_out_idx : w_out_idx + w_kernel,
:,
] += np.sum(
kernel_weights[:, :, :, :] * x[nb, h_x_idx, w_x_idx, :],
axis=-1,
)
output = output + bias_weights
# Cut padding results from output
output = output[
:,
h_pad_side1 : h_out + h_pad_side1,
w_pad_side1 : w_out + w_pad_side1,
]
if data_format == "channels_first":
output = output.transpose((0, 3, 1, 2))
return output
def np_conv3d_transpose(
self,
x,
kernel_weights,
bias_weights,
strides,
padding,
output_padding,
data_format,
dilation_rate,
):
if data_format == "channels_first":
x = x.transpose((0, 2, 3, 4, 1))
if isinstance(strides, (tuple, list)):
h_stride, w_stride, d_stride = strides
else:
h_stride = strides
w_stride = strides
d_stride = strides
if isinstance(dilation_rate, (tuple, list)):
h_dilation, w_dilation, d_dilation = dilation_rate
else:
h_dilation = dilation_rate
w_dilation = dilation_rate
d_dilation = dilation_rate
h_kernel, w_kernel, d_kernel, ch_out, ch_in = kernel_weights.shape
n_batch, h_x, w_x, d_x, _ = x.shape
# Get output shape and padding
_, h_out, w_out, d_out, _ = compute_conv_transpose_output_shape(
x.shape,
kernel_weights.shape,
ch_out,
strides,
padding,
output_padding,
data_format,
dilation_rate,
)
jax_padding = compute_conv_transpose_padding_args_for_jax(
input_shape=x.shape,
kernel_shape=kernel_weights.shape,
strides=strides,
padding=padding,
output_padding=output_padding,
dilation_rate=dilation_rate,
)
h_pad_side1 = h_kernel - 1 - jax_padding[0][0]
w_pad_side1 = w_kernel - 1 - jax_padding[1][0]
d_pad_side1 = d_kernel - 1 - jax_padding[2][0]
if h_dilation > 1 or w_dilation > 1 or d_dilation > 1:
# Increase kernel size
new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
new_w_kernel = w_kernel + (w_dilation - 1) * (w_kernel - 1)
new_d_kernel = d_kernel + (d_dilation - 1) * (d_kernel - 1)
            new_kernel_size_tuple = (new_h_kernel, new_w_kernel, new_d_kernel)
            new_kernel_weights = np.zeros(
                (*new_kernel_size_tuple, ch_out, ch_in),
dtype=kernel_weights.dtype,
)
new_kernel_weights[
::h_dilation, ::w_dilation, ::d_dilation
] = kernel_weights
kernel_weights = new_kernel_weights
h_kernel, w_kernel, d_kernel = kernel_weights.shape[:3]
# Compute output
output = np.zeros(
[
n_batch,
h_out + h_kernel,
w_out + w_kernel,
d_out + d_kernel,
ch_out,
]
)
for nb in range(n_batch):
for h_x_idx in range(h_x):
h_out_idx = h_x_idx * h_stride # Index in output
for w_x_idx in range(w_x):
w_out_idx = w_x_idx * w_stride
for d_x_idx in range(d_x):
d_out_idx = d_x_idx * d_stride
output[
nb,
h_out_idx : h_out_idx + h_kernel,
w_out_idx : w_out_idx + w_kernel,
d_out_idx : d_out_idx + d_kernel,
:,
] += np.sum(
kernel_weights[:, :, :, :, :]
* x[nb, h_x_idx, w_x_idx, d_x_idx, :],
axis=-1,
)
output = output + bias_weights
# Cut padding results from output
output = output[
:,
h_pad_side1 : h_out + h_pad_side1,
w_pad_side1 : w_out + w_pad_side1,
d_pad_side1 : d_out + d_pad_side1,
]
if data_format == "channels_first":
output = output.transpose((0, 4, 1, 2, 3))
return output
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 2,
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 3,
"padding": "same",
"output_padding": 2,
"data_format": "channels_last",
"dilation_rate": (1,),
},
{
"filters": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": 1,
},
)
def test_conv1d_transpose(
self,
filters,
kernel_size,
strides,
padding,
output_padding,
data_format,
dilation_rate,
):
layer = layers.Conv1DTranspose(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
output_padding=output_padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(filters,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = self.np_conv1d_transpose(
inputs,
kernel_weights,
bias_weights,
strides,
padding,
output_padding,
data_format,
dilation_rate,
)
self.assertAllClose(outputs, expected, atol=1e-5)
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 2,
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"filters": 6,
"kernel_size": 7,
"strides": 16,
"padding": "same",
"output_padding": 2,
"data_format": "channels_last",
"dilation_rate": (1, 1),
},
{
"filters": 6,
"kernel_size": (2, 3),
"strides": (2, 1),
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": (1, 1),
},
{
"filters": 2,
"kernel_size": (7, 7),
"strides": (16, 16),
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": (1, 1),
},
)
def test_conv2d_transpose(
self,
filters,
kernel_size,
strides,
padding,
output_padding,
data_format,
dilation_rate,
):
layer = layers.Conv2DTranspose(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
output_padding=output_padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 14, 14, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(filters,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = self.np_conv2d_transpose(
inputs,
kernel_weights,
bias_weights,
strides,
padding,
output_padding,
data_format,
dilation_rate,
)
self.assertAllClose(outputs, expected, atol=1e-5)
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 2,
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 3,
"padding": "same",
"output_padding": 2,
"data_format": "channels_last",
"dilation_rate": (1, 1, 1),
},
{
"filters": 6,
"kernel_size": (2, 2, 3),
"strides": (2, 1, 2),
"padding": "valid",
"output_padding": None,
"data_format": "channels_last",
"dilation_rate": (1, 1, 1),
},
)
def test_conv3d_transpose(
self,
filters,
kernel_size,
strides,
padding,
output_padding,
data_format,
dilation_rate,
):
layer = layers.Conv3DTranspose(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
output_padding=output_padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 8, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(filters,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = self.np_conv3d_transpose(
inputs,
kernel_weights,
bias_weights,
strides,
padding,
output_padding,
data_format,
dilation_rate,
)
self.assertAllClose(outputs, expected, atol=1e-5)
@parameterized.product(
kernel_size=list(range(1, 5)),
strides=list(range(1, 5)),
padding=["same", "valid"],
output_padding=[None] + list(range(1, 5)),
)
def test_conv1d_transpose_consistency(
self, kernel_size, strides, padding, output_padding
):
"""Test conv transpose, on an 1D array of size 3, against several
convolution parameters. In particular, tests if Torch inconsistencies
are raised.
"""
# output_padding cannot be greater than strides
if isinstance(output_padding, int) and output_padding >= strides:
pytest.skip(
"`output_padding` greater than `strides` is not supported"
)
input = np.ones(shape=(1, 3, 1))
kernel_weights = np.arange(1, kernel_size + 1).reshape(
(kernel_size, 1, 1)
)
        # Expected result
expected_res = self.np_conv1d_transpose(
x=input,
kernel_weights=kernel_weights,
bias_weights=np.zeros(shape=(1,)),
strides=strides,
padding=padding,
output_padding=output_padding,
data_format="channels_last",
dilation_rate=1,
)
# keras-core layer
kc_layer = layers.Conv1DTranspose(
filters=1,
kernel_size=kernel_size,
strides=strides,
padding=padding,
output_padding=output_padding,
dilation_rate=1,
)
kc_layer.build(input_shape=(1, 3, 1))
kc_layer.kernel.assign(kernel_weights)
# Special cases for Torch
if backend.backend() == "torch":
            # The following sets of arguments lead to Torch output padding
            # greater than strides, which Torch does not support.
            # An error is raised.
if (kernel_size, strides, padding, output_padding) in [
(2, 1, "same", None),
(4, 1, "same", None),
]:
with pytest.raises(ValueError):
kc_res = kc_layer(input)
return
# When both torch_padding and torch_output_padding are greater
# than 0, Torch outputs are inconsistent with the ones from
# Tensorflow. A warning is raised, and we expect the results to be
# different.
(
torch_padding,
torch_output_padding,
) = _convert_conv_tranpose_padding_args_from_keras_to_torch(
kernel_size=kernel_size,
stride=strides,
dilation_rate=1,
padding=padding,
output_padding=output_padding,
)
if torch_padding > 0 and torch_output_padding > 0:
with pytest.raises(AssertionError):
kc_res = kc_layer(input)
self.assertAllClose(expected_res, kc_res, atol=1e-5)
return
# Compare results
kc_res = kc_layer(input)
self.assertAllClose(expected_res, kc_res, atol=1e-5)
@parameterized.product(
kernel_size=list(range(1, 5)),
strides=list(range(1, 5)),
padding=["same", "valid"],
output_padding=[None] + list(range(1, 5)),
)
def test_shape_inference_static_unknown_shape(
self, kernel_size, strides, padding, output_padding
):
x = layers.Input(shape=(None, None, 3))
x = layers.Conv2DTranspose(
filters=2,
kernel_size=kernel_size,
strides=strides,
padding=padding,
output_padding=output_padding,
dilation_rate=1,
)(x)
self.assertEqual(x.shape, (None, None, None, 2))
| keras-core/keras_core/layers/convolutional/conv_transpose_test.py/0 | {
"file_path": "keras-core/keras_core/layers/convolutional/conv_transpose_test.py",
"repo_id": "keras-core",
"token_count": 15257
} | 34 |
import warnings
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
from keras_core.ops.node import Node
@keras_core_export("keras_core.layers.InputLayer")
class InputLayer(Layer):
def __init__(
self,
shape=None,
batch_size=None,
dtype=None,
sparse=None,
batch_shape=None,
input_tensor=None,
name=None,
**kwargs,
):
# TODO: support for ragged.
super().__init__(name=name)
if "input_shape" in kwargs:
warnings.warn(
"Argument `input_shape` is deprecated. Use `shape` instead."
)
shape = kwargs.pop("input_shape")
if shape is not None and batch_shape is not None:
raise ValueError(
"You cannot pass both `shape` and `batch_shape` at the "
"same time."
)
if batch_size is not None and batch_shape is not None:
raise ValueError(
"You cannot pass both `batch_size` and `batch_shape` at the "
"same time."
)
if shape is None and batch_shape is None:
raise ValueError("You must pass a `shape` argument.")
if shape is not None:
shape = backend.standardize_shape(shape)
batch_shape = (batch_size,) + shape
self.batch_shape = tuple(batch_shape)
self._dtype = backend.standardize_dtype(dtype)
self.sparse = bool(sparse)
if self.sparse and not backend.SUPPORTS_SPARSE_TENSORS:
raise ValueError(
"`sparse=True` is not supported with backend: "
f"{backend.backend()}"
)
if input_tensor is not None:
if not isinstance(input_tensor, backend.KerasTensor):
raise ValueError(
"Argument `input_tensor` must be a KerasTensor. "
f"Received invalid type: input_tensor={input_tensor} "
f"(of type {type(input_tensor)})"
)
else:
input_tensor = backend.KerasTensor(
shape=batch_shape, dtype=dtype, sparse=sparse, name=name
)
self._input_tensor = input_tensor
Node(operation=self, call_args=(), call_kwargs={}, outputs=input_tensor)
self.built = True
def call(self):
return
@property
def dtype(self):
return self._dtype
def get_config(self):
return {
"batch_shape": self.batch_shape,
"dtype": self.dtype,
"sparse": self.sparse,
"name": self.name,
}
@keras_core_export(["keras_core.layers.Input", "keras_core.Input"])
def Input(
shape=None,
batch_size=None,
dtype=None,
sparse=None,
batch_shape=None,
name=None,
tensor=None,
):
"""Used to instantiate a Keras tensor.
A Keras tensor is a symbolic tensor-like object, which we augment with
certain attributes that allow us to build a Keras model just by knowing the
inputs and outputs of the model.
For instance, if `a`, `b` and `c` are Keras tensors,
it becomes possible to do:
`model = Model(input=[a, b], output=c)`
Args:
shape: A shape tuple (tuple of integers or `None` objects),
not including the batch size.
For instance, `shape=(32,)` indicates that the expected input
will be batches of 32-dimensional vectors. Elements of this tuple
can be `None`; `None` elements represent dimensions where the shape
is not known and may vary (e.g. sequence length).
batch_size: Optional static batch size (integer).
dtype: The data type expected by the input, as a string
(e.g. `"float32"`, `"int32"`...)
sparse: A boolean specifying whether the expected input will be sparse
tensors. Note that, if `sparse` is `False`, sparse tensors can still
be passed into the input - they will be densified with a default
value of 0. This feature is only supported with the TensorFlow
backend. Defaults to `False`.
name: Optional name string for the layer.
Should be unique in a model (do not reuse the same name twice).
It will be autogenerated if it isn't provided.
tensor: Optional existing tensor to wrap into the `Input` layer.
If set, the layer will use this tensor rather
than creating a new placeholder tensor.
Returns:
A Keras tensor.
Example:
```python
# This is a logistic regression in Keras
x = Input(shape=(32,))
y = Dense(16, activation='softmax')(x)
model = Model(x, y)
```
"""
layer = InputLayer(
shape=shape,
batch_size=batch_size,
dtype=dtype,
sparse=sparse,
batch_shape=batch_shape,
name=name,
input_tensor=tensor,
)
return layer.output
| keras-core/keras_core/layers/core/input_layer.py/0 | {
"file_path": "keras-core/keras_core/layers/core/input_layer.py",
"repo_id": "keras-core",
"token_count": 2266
} | 35 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.merging.base_merge import Merge
from keras_core.utils.numerical_utils import normalize
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
`batch_dot` is used to compute dot product of `x` and `y` when
`x` and `y` are data in batch, i.e. in a shape of `(batch_size, :)`.
    `batch_dot` results in a tensor or variable with fewer dimensions
than the input. If the number of dimensions is reduced to 1,
we use `expand_dims` to make sure that ndim is at least 2.
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
If `axes` is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in `x`'s shape and `y`'s shape:
* `x.shape[0]` : 100 : append to output shape
    * `x.shape[1]` : 20 : do not append to output shape, dimension 1 of
        `x` has been summed over. (`axes[0]` = 1)
    * `y.shape[0]` : 100 : do not append to output shape, always ignore
        first dimension of `y`
    * `y.shape[1]` : 30 : append to output shape
    * `y.shape[2]` : 20 : do not append to output shape, dimension 2 of
        `y` has been summed over. (`axes[1]` = 2)
    The resulting `output_shape` is `(100, 30)`.
Examples:
>>> x_batch = np.ones(shape=(32, 20, 1))
>>> y_batch = np.ones(shape=(32, 30, 20))
>>> xy_batch_dot = batch_dot(x_batch, y_batch, axes=(1, 2))
Args:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: Tuple or list of integers with target dimensions, or single
integer. The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]`
should be equal.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape (less the
batch dimension and the dimension that was summed over). If the final
rank is 1, we reshape it to `(batch_size, 1)`.
"""
x_shape = x.shape
y_shape = y.shape
x_ndim = len(x_shape)
y_ndim = len(y_shape)
if x_ndim < 2 or y_ndim < 2:
raise ValueError(
f"Cannot do batch_dot on inputs "
f"with rank < 2. "
f"Received inputs with shapes "
f"{x_shape} and {y_shape}."
)
x_batch_size = x_shape[0]
y_batch_size = y_shape[0]
if x_batch_size is not None and y_batch_size is not None:
if x_batch_size != y_batch_size:
raise ValueError(
f"Cannot do batch_dot on inputs "
f"with different batch sizes. "
f"Received inputs with shapes "
f"{x_shape} and {y_shape}."
)
if isinstance(axes, int):
axes = [axes, axes]
if axes is None:
if y_ndim == 2:
axes = [x_ndim - 1, y_ndim - 1]
else:
axes = [x_ndim - 1, y_ndim - 2]
if any(isinstance(a, (list, tuple)) for a in axes):
raise ValueError(
f"Multiple target dimensions are not supported. "
f"Expected: None, int, (int, int), "
f"Provided: {axes} "
)
# if tuple, convert to list.
axes = list(axes)
# convert negative indices.
if axes[0] < 0:
axes[0] += x_ndim
if axes[1] < 0:
axes[1] += y_ndim
# sanity checks
if 0 in axes:
raise ValueError(
"Cannot perform batch_dot over axis 0. "
"If your inputs are not batched, "
"add a dummy batch dimension to your "
"inputs using keras_core.ops.expand_dims(x, 0)"
)
a0, a1 = axes
d1 = x_shape[a0]
d2 = y_shape[a1]
if d1 is not None and d2 is not None and d1 != d2:
raise ValueError(
f"Cannot do batch_dot on inputs with shapes "
f"{x_shape} and {y_shape} with axes={axes}. "
f"x.shape[{axes[0]}] != y.shape[{axes[1]}] ({d1} != {d2})."
)
# backup ndims. Need them later.
orig_x_ndim = x_ndim
orig_y_ndim = y_ndim
# if rank is 2, expand to 3.
if x_ndim == 2:
x = ops.expand_dims(x, 1)
a0 += 1
x_ndim += 1
if y_ndim == 2:
y = ops.expand_dims(y, 2)
y_ndim += 1
# bring x's dimension to be reduced to last axis.
if a0 != x_ndim - 1:
pattern = list(range(x_ndim))
for i in range(a0, x_ndim - 1):
pattern[i] = pattern[i + 1]
pattern[-1] = a0
x = ops.transpose(x, pattern)
# bring y's dimension to be reduced to axis 1.
if a1 != 1:
pattern = list(range(y_ndim))
for i in range(a1, 1, -1):
pattern[i] = pattern[i - 1]
pattern[1] = a1
y = ops.transpose(y, pattern)
# normalize both inputs to rank 3.
if x_ndim > 3:
# squash middle dimensions of x.
x_shape = list(ops.shape(x))
x_mid_dims = x_shape[1:-1]
x_squashed_shape = ops.stack([x_shape[0], -1, x_shape[-1]])
x = ops.reshape(x, x_squashed_shape)
x_squashed = True
else:
x_squashed = False
if y_ndim > 3:
# squash trailing dimensions of y.
y_shape = list(ops.shape(y))
y_trail_dims = y_shape[2:]
y_squashed_shape = ops.stack([y_shape[0], y_shape[1], -1])
y = ops.reshape(y, y_squashed_shape)
y_squashed = True
else:
y_squashed = False
result = ops.matmul(x, y)
# if inputs were squashed, we have to reshape the matmul output.
output_shape = ops.shape(result)
do_reshape = False
if x_squashed:
output_shape = ops.concat(
[output_shape[:1], x_mid_dims, output_shape[-1:]], 0
)
do_reshape = True
if y_squashed:
output_shape = ops.concat([output_shape[:-1], y_trail_dims], 0)
do_reshape = True
if do_reshape:
result = ops.reshape(result, output_shape)
# if the inputs were originally rank 2, we remove the added 1 dim.
if orig_x_ndim == 2:
result = ops.squeeze(result, 1)
elif orig_y_ndim == 2:
result = ops.squeeze(result, -1)
return result
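# Hedged check of the docstring's shape-inference example, using NumPy's
# einsum as an independent reference (assumes a working keras_core backend,
# since `batch_dot` dispatches through `ops`).
def _batch_dot_demo():
    import numpy as np

    x = np.ones((100, 20))
    y = np.ones((100, 30, 20))
    out = batch_dot(x, y, axes=(1, 2))
    assert tuple(out.shape) == (100, 30)
    ref = np.einsum("bi,bji->bj", x, y)  # sums the shared size-20 axis
    assert np.allclose(np.array(out), ref)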
@keras_core_export("keras_core.layers.Dot")
class Dot(Merge):
"""Computes element-wise dot product of two tensors.
It takes a list of inputs of size 2, and the axes
corresponding to each input along with the dot product
is to be performed.
Let's say `x` and `y` are the two input tensors with shapes
    `(2, 3, 5)` and `(2, 10, 3)`. The batch dimension should be
    of the same size for both inputs, and `axes` should correspond
    to the dimensions that have the same size in the corresponding
    inputs. E.g. with `axes=(1, 2)`, the dot product of `x` and `y`
    will result in a tensor with shape `(2, 5, 10)`.
Examples:
>>> x = np.arange(10).reshape(1, 5, 2)
>>> y = np.arange(10, 20).reshape(1, 2, 5)
>>> keras_core.layers.Dot(axes=(1, 2))([x, y])
Usage in a Keras model:
>>> x1 = keras_core.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = keras_core.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> y = keras_core.layers.Dot(axes=1)([x1, x2])
Args:
axes: Integer or tuple of integers, axis or axes along which to
take the dot product. If a tuple, should be two integers
corresponding to the desired axis from the first input and the
desired axis from the second input, respectively. Note that the
size of the two selected axes must match.
normalize: Whether to L2-normalize samples along the dot product axis
before taking the dot product. If set to `True`, then
the output of the dot product is the cosine proximity
between the two samples.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the dot product of the samples from the inputs.
"""
def __init__(self, axes, normalize=False, **kwargs):
super().__init__(**kwargs)
if not isinstance(axes, int):
if not isinstance(axes, (list, tuple)):
raise TypeError(
f"Invalid type for argument `axes`: it should be "
f"a list or an int. Received: axes={axes}"
)
if len(axes) != 2:
raise ValueError(
f"Invalid format for argument `axes`: it should contain "
f"two elements. Received: axes={axes}"
)
if not isinstance(axes[0], int) or not isinstance(axes[1], int):
raise ValueError(
f"Invalid format for argument `axes`: list elements should "
f"be integers. Received: axes={axes}"
)
self.axes = axes
self.normalize = normalize
self.supports_masking = True
self._reshape_required = False
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape[0], tuple) or len(input_shape) != 2:
raise ValueError(
f"A `Dot` layer should be called on a list of 2 inputs. "
f"Received: input_shape={input_shape}"
)
shape1 = input_shape[0]
shape2 = input_shape[1]
if shape1 is None or shape2 is None:
return
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
if shape1[axes[0]] != shape2[axes[1]]:
raise ValueError(
f"Incompatible input shapes: "
f"axis values {shape1[axes[0]]} (at axis {axes[0]}) != "
f"{shape2[axes[1]]} (at axis {axes[1]}). "
f"Full input shapes: {shape1}, {shape2}"
)
self.built = True
def _merge_function(self, inputs):
if len(inputs) != 2:
raise ValueError(
f"A `Dot` layer should be called on exactly 2 inputs. "
f"Received: inputs={inputs}"
)
x1 = inputs[0]
x2 = inputs[1]
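        # Resolve possibly negative axes against each input's own rank
        # before handing them to batch_dot.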
if isinstance(self.axes, int):
if self.axes < 0:
axes = [
self.axes % len(x1.shape),
self.axes % len(x2.shape),
]
else:
axes = [self.axes] * 2
else:
axes = []
for i in range(len(self.axes)):
if self.axes[i] < 0:
axes.append(self.axes[i] % len(inputs[i].shape))
else:
axes.append(self.axes[i])
if self.normalize:
x1 = normalize(x1, axis=axes[0])
x2 = normalize(x2, axis=axes[1])
output = batch_dot(x1, x2, axes)
return output
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, (tuple, list)) or len(input_shape) != 2:
raise ValueError(
f"A `Dot` layer should be called on a list of 2 inputs. "
f"Received: input_shape={input_shape}"
)
shape1 = list(input_shape[0])
shape2 = list(input_shape[1])
if isinstance(self.axes, int):
if self.axes < 0:
axes = [self.axes % len(shape1), self.axes % len(shape2)]
else:
axes = [self.axes] * 2
else:
axes = self.axes
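        # Drop the contracted axis from each shape and the batch dim from
        # shape2; their concatenation is the batch_dot output shape.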
shape1.pop(axes[0])
shape2.pop(axes[1])
shape2.pop(0)
output_shape = shape1 + shape2
if len(output_shape) == 1:
output_shape += [1]
return tuple(output_shape)
def compute_mask(self, inputs, mask=None):
return None
def get_config(self):
config = {
"axes": self.axes,
"normalize": self.normalize,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_core_export("keras_core.layers.dot")
def dot(inputs, axes=-1, **kwargs):
"""Functional interface to the `Dot` layer.
Args:
        inputs: A list of input tensors (exactly 2).
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the dot product of the samples from the inputs.
"""
return Dot(axes=axes, **kwargs)(inputs)
| keras-core/keras_core/layers/merging/dot.py/0 | {
"file_path": "keras-core/keras_core/layers/merging/dot.py",
"repo_id": "keras-core",
"token_count": 6185
} | 36 |
import numpy as np
import pytest
from keras_core import backend
from keras_core import layers
from keras_core import testing
def squared_l2_norm(x):
x = backend.convert_to_numpy(x)
return np.sum(x**2)
class UnitNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_un_basics(self):
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": -1},
input_shape=(2, 3),
expected_output_shape=(2, 3),
supports_masking=True,
)
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": (1, 2)},
input_shape=(1, 3, 3),
expected_output_shape=(1, 3, 3),
supports_masking=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
(
"Invalid value for `axis` argument: expected an int or a "
"list/tuple of ints."
),
):
layers.UnitNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.UnitNormalization(axis=-1)
inputs = np.random.normal(size=(2, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :]), 1.0)
layer = layers.UnitNormalization(axis=(1, 2))
inputs = np.random.normal(size=(2, 3, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, :]), 1.0)
layer = layers.UnitNormalization(axis=1)
inputs = np.random.normal(size=(2, 3, 2))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[0, :, 1]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 1]), 1.0)
| keras-core/keras_core/layers/normalization/unit_normalization_test.py/0 | {
"file_path": "keras-core/keras_core/layers/normalization/unit_normalization_test.py",
"repo_id": "keras-core",
"token_count": 1011
} | 37 |
from keras_core.api_export import keras_core_export
from keras_core.layers.pooling.base_pooling import BasePooling
@keras_core_export(
["keras_core.layers.MaxPooling1D", "keras_core.layers.MaxPool1D"]
)
class MaxPooling1D(BasePooling):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over a
spatial window of size `pool_size`. The window is shifted by `strides`.
The resulting output when using the `"valid"` padding option has a shape of:
    `output_shape = (input_shape - pool_size + 1) / strides`.
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape / strides`
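    For example (illustrative): with `input_shape=5`, `pool_size=2` and
    `strides=1`, `"valid"` padding gives `output_shape = (5 - 2 + 1) / 1 = 4`,
    while `"same"` padding gives `output_shape = 5 / 1 = 5`.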
Args:
pool_size: int, size of the max pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras_core.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="valid")
>>> max_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras_core.layers.MaxPooling1D(pool_size=2,
... strides=2, padding="valid")
>>> max_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> max_pool_1d = keras_core.layers.MaxPooling1D(pool_size=2,
... strides=1, padding="same")
>>> max_pool_1d(x)
"""
def __init__(
self,
pool_size=2,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
| keras-core/keras_core/layers/pooling/max_pooling1d.py/0 | {
"file_path": "keras-core/keras_core/layers/pooling/max_pooling1d.py",
"repo_id": "keras-core",
"token_count": 1459
} | 38 |
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_core import backend
from keras_core import layers
from keras_core import models
from keras_core import testing
from keras_core.saving import load_model
class ArrayLike:
def __init__(self, values):
self.values = values
def __array__(self):
return np.array(self.values)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="Broken with NumPy backend."
)
class HashingTest(testing.TestCase, parameterized.TestCase):
def test_config(self):
layer = layers.Hashing(
num_bins=8,
output_mode="int",
)
self.run_class_serialization_test(layer)
def test_correctness(self):
layer = layers.Hashing(num_bins=3)
inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
output = layer(inp)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[1], [0], [1], [1], [2]]))
layer = layers.Hashing(num_bins=3, mask_value="")
inp = [["A"], ["B"], [""], ["C"], ["D"]]
output = layer(inp)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[1], [1], [0], [2], [2]]))
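        # Passing a salt switches the hash function from FarmHash64 to
        # SipHash64, so outputs differ per salt but remain deterministic
        # for a fixed salt.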
layer = layers.Hashing(num_bins=3, salt=[133, 137])
inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
output = layer(inp)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[1], [2], [1], [0], [2]]))
layer = layers.Hashing(num_bins=3, salt=133)
inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
output = layer(inp)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[0], [0], [2], [1], [0]]))
def test_tf_data_compatibility(self):
layer = layers.Hashing(num_bins=3)
inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
ds = tf.data.Dataset.from_tensor_slices(inp).batch(5).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([[1], [0], [1], [1], [2]]))
@parameterized.named_parameters(
("list", list),
("tuple", tuple),
("numpy", np.array),
("array_like", ArrayLike),
)
def test_tensor_like_inputs(self, data_fn):
input_data = data_fn([0, 1, 2, 3, 4])
expected_output = [1, 0, 1, 0, 2]
layer = layers.Hashing(num_bins=3)
output_data = layer(input_data)
self.assertAllEqual(output_data, expected_output)
def test_hash_single_bin(self):
layer = layers.Hashing(num_bins=1)
inp = np.asarray([["A"], ["B"], ["C"], ["D"], ["E"]])
output = layer(inp)
self.assertAllClose([[0], [0], [0], [0], [0]], output)
def test_hash_dense_input_farmhash(self):
layer = layers.Hashing(num_bins=2)
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[0], [0], [1], [0], [0]], output)
def test_hash_dense_input_mask_value_farmhash(self):
empty_mask_layer = layers.Hashing(num_bins=3, mask_value="")
omar_mask_layer = layers.Hashing(num_bins=3, mask_value="omar")
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
empty_mask_output = empty_mask_layer(inp)
omar_mask_output = omar_mask_layer(inp)
# Outputs should be one more than test_hash_dense_input_farmhash (the
# zeroth bin is now reserved for masks).
self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)
# 'omar' should map to 0.
self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)
def test_hash_dense_list_input_farmhash(self):
layer = layers.Hashing(num_bins=2)
inp = [["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[0], [0], [1], [0], [0]], output)
inp = ["omar", "stringer", "marlo", "wire", "skywalker"]
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([0, 0, 1, 0, 0], output)
def test_hash_dense_int_input_farmhash(self):
layer = layers.Hashing(num_bins=3)
inp = np.asarray([[0], [1], [2], [3], [4]])
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[1], [0], [1], [0], [2]], output)
def test_hash_dense_input_siphash(self):
layer = layers.Hashing(num_bins=2, salt=[133, 137])
inp = np.asarray(
[["omar"], ["stringer"], ["marlo"], ["wire"], ["skywalker"]]
)
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
# Note the result is different from FarmHash.
self.assertAllClose([[0], [1], [0], [1], [0]], output)
layer_2 = layers.Hashing(num_bins=2, salt=[211, 137])
output_2 = layer_2(inp)
# Note the result is different from (133, 137).
self.assertAllClose([[1], [0], [1], [0], [1]], output_2)
def test_hash_dense_int_input_siphash(self):
layer = layers.Hashing(num_bins=3, salt=[133, 137])
inp = np.asarray([[0], [1], [2], [3], [4]])
output = layer(inp)
# Assert equal for hashed output that should be true on all platforms.
self.assertAllClose([[1], [1], [2], [0], [1]], output)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_input_farmhash(self):
layer = layers.Hashing(num_bins=2)
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([0, 0, 1, 0, 0], output.values)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_input_mask_value_farmhash(self):
empty_mask_layer = layers.Hashing(num_bins=3, mask_value="")
omar_mask_layer = layers.Hashing(num_bins=3, mask_value="omar")
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
empty_mask_output = empty_mask_layer(inp)
omar_mask_output = omar_mask_layer(inp)
self.assertAllClose(indices, omar_mask_output.indices)
self.assertAllClose(indices, empty_mask_output.indices)
# Outputs should be one more than test_hash_sparse_input_farmhash (the
# zeroth bin is now reserved for masks).
self.assertAllClose([1, 1, 2, 1, 1], empty_mask_output.values)
# 'omar' should map to 0.
self.assertAllClose([0, 1, 2, 1, 1], omar_mask_output.values)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_int_input_farmhash(self):
layer = layers.Hashing(num_bins=3)
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2]
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([1, 0, 1, 0, 2], output.values)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_input_siphash(self):
layer = layers.Hashing(num_bins=2, salt=[133, 137])
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices,
values=["omar", "stringer", "marlo", "wire", "skywalker"],
dense_shape=[3, 2],
)
output = layer(inp)
self.assertAllClose(output.indices, indices)
# The result should be same with test_hash_dense_input_siphash.
self.assertAllClose([0, 1, 0, 1, 0], output.values)
layer_2 = layers.Hashing(num_bins=2, salt=[211, 137])
output = layer_2(inp)
# The result should be same with test_hash_dense_input_siphash.
self.assertAllClose([1, 0, 1, 0, 1], output.values)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses tf.SparseTensor."
)
def test_hash_sparse_int_input_siphash(self):
layer = layers.Hashing(num_bins=3, salt=[133, 137])
indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
inp = tf.SparseTensor(
indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2]
)
output = layer(inp)
self.assertAllClose(indices, output.indices)
self.assertAllClose([1, 1, 2, 0, 1], output.values)
def test_invalid_inputs(self):
with self.assertRaisesRegex(ValueError, "cannot be `None`"):
_ = layers.Hashing(num_bins=None)
with self.assertRaisesRegex(ValueError, "cannot be `None`"):
_ = layers.Hashing(num_bins=-1)
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = layers.Hashing(num_bins=2, salt="string")
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = layers.Hashing(num_bins=2, salt=[1])
with self.assertRaisesRegex(
ValueError, "can only be a tuple of size 2"
):
_ = layers.Hashing(num_bins=1, salt=[133, 137, 177])
def test_one_hot_output(self):
input_array = np.array([0, 1, 2, 3, 4])
expected_output = [
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
expected_output_shape = [None, 3]
inputs = layers.Input(shape=(1,), dtype="int32")
layer = layers.Hashing(num_bins=3, output_mode="one_hot")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape)
model = models.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllEqual(expected_output, output_data)
def test_multi_hot_output(self):
input_array = np.array([[0, 1, 2, 3, 4]])
expected_output = [[1.0, 1.0, 1.0]]
expected_output_shape = [None, 3]
inputs = layers.Input(shape=(None,), dtype="int32")
layer = layers.Hashing(num_bins=3, output_mode="multi_hot")
outputs = layer(inputs)
self.assertAllEqual(expected_output_shape, outputs.shape)
model = models.Model(inputs, outputs)
output_data = model(input_array)
self.assertAllEqual(expected_output, output_data)
@parameterized.named_parameters(
(
"1d_input",
[0, 1, 2, 3, 4],
[2.0, 2.0, 1.0],
[3],
),
(
"2d_input",
[[0, 1, 2, 3, 4]],
[[2.0, 2.0, 1.0]],
[None, 3],
),
)
def test_count_output(self, input_value, expected_output, output_shape):
input_array = np.array(input_value)
if input_array.ndim == 1:
symbolic_sample_shape = ()
elif input_array.ndim == 2:
symbolic_sample_shape = (None,)
inputs = layers.Input(shape=symbolic_sample_shape, dtype="int32")
layer = layers.Hashing(num_bins=3, output_mode="count")
outputs = layer(inputs)
self.assertAllEqual(output_shape, outputs.shape)
output_data = layer(input_array)
self.assertAllEqual(expected_output, output_data)
@parameterized.named_parameters(
("int32", "int32"),
("int64", "int64"),
)
def test_int_output_dtype(self, dtype):
input_data = layers.Input(batch_size=16, shape=(4,), dtype="string")
layer = layers.Hashing(num_bins=3, output_mode="int", dtype=dtype)
output = layer(input_data)
self.assertEqual(output.dtype, dtype)
@parameterized.named_parameters(
("float32", "float32"),
("float64", "float64"),
)
def test_one_hot_output_dtype(self, dtype):
input_data = layers.Input(batch_size=16, shape=(1,), dtype="string")
layer = layers.Hashing(num_bins=3, output_mode="one_hot", dtype=dtype)
output = layer(input_data)
self.assertEqual(output.dtype, dtype)
def test_config_with_custom_name(self):
layer = layers.Hashing(num_bins=2, name="hashing")
config = layer.get_config()
layer_1 = layers.Hashing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Uses string dtype."
)
def test_saving(self):
input_data = np.array(
["omar", "stringer", "marlo", "wire", "skywalker"]
)
inputs = layers.Input(shape=(), dtype="string")
outputs = layers.Hashing(num_bins=100)(inputs)
model = models.Model(inputs=inputs, outputs=outputs)
original_output_data = model(input_data)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "keras_model.keras")
model.save(output_path)
loaded_model = load_model(output_path)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_data = loaded_model(input_data)
self.assertAllClose(new_output_data, original_output_data)
@parameterized.named_parameters(
(
"list_input",
[1, 2, 3],
[1, 1, 1],
),
(
"list_input_2d",
[[1], [2], [3]],
[[1], [1], [1]],
),
(
"list_input_2d_multiple",
[[1, 2], [2, 3], [3, 4]],
[[1, 1], [1, 1], [1, 1]],
),
(
"list_input_3d",
[[[1], [2]], [[2], [3]], [[3], [4]]],
[[[1], [1]], [[1], [1]], [[1], [1]]],
),
)
def test_hash_list_input(self, input_data, expected):
layer = layers.Hashing(num_bins=2)
out_data = layer(input_data)
self.assertAllEqual(
expected, backend.convert_to_numpy(out_data).tolist()
)
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_string_input_farmhash(self):
# layer = layers.Hashing(num_bins=2)
# inp_data = tf.ragged.constant(
# [
# ["omar", "stringer", "marlo", "wire"],
# ["marlo", "skywalker", "wire"],
# ],
# dtype="string",
# )
# out_data = layer(inp_data)
# # Same hashed output as test_hash_sparse_input_farmhash
# expected_output = [[0, 0, 1, 0], [1, 0, 0]]
# self.assertAllEqual(expected_output, out_data)
# inp_t = layers.Input(shape=(None,), ragged=True, dtype="string")
# out_t = layer(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_input_mask_value(self):
# empty_mask_layer = layers.Hashing(num_bins=3, mask_value="")
# omar_mask_layer = layers.Hashing(num_bins=3, mask_value="omar")
# inp_data = tf.ragged.constant(
# [
# ["omar", "stringer", "marlo", "wire"],
# ["marlo", "skywalker", "wire"],
# ],
# dtype="string",
# )
# empty_mask_output = empty_mask_layer(inp_data)
# omar_mask_output = omar_mask_layer(inp_data)
# # Outputs should be one more than test_hash_ragged_string_input_farmhash
# # (the zeroth bin is now reserved for masks).
# expected_output = [[1, 1, 2, 1], [2, 1, 1]]
# self.assertAllClose(expected_output[0], empty_mask_output[1])
# self.assertAllClose(expected_output[1], empty_mask_output[2])
# # 'omar' should map to 0.
# expected_output = [[0, 1, 2, 1], [2, 1, 1]]
# self.assertAllClose(expected_output[0], omar_mask_output[0])
# self.assertAllClose(expected_output[1], omar_mask_output[1])
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_int_input_farmhash(self):
# layer = layers.Hashing(num_bins=3)
# inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype="int64")
# out_data = layer(inp_data)
# # Same hashed output as test_hash_sparse_input_farmhash
# expected_output = [[1, 0, 0, 2], [1, 0, 1]]
# self.assertAllEqual(expected_output[0], out_data[0])
# self.assertAllEqual(expected_output[1], out_data[1])
# inp_t = layers.Input(shape=(None,), ragged=True, dtype="int64")
# out_t = layer(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_string_input_siphash(self):
# layer = layers.Hashing(num_bins=2, salt=[133, 137])
# inp_data = tf.ragged.constant(
# [
# ["omar", "stringer", "marlo", "wire"],
# ["marlo", "skywalker", "wire"],
# ],
# dtype="string",
# )
# out_data = layer(inp_data)
# # Same hashed output as test_hash_dense_input_siphash
# expected_output = [[0, 1, 0, 1], [0, 0, 1]]
# self.assertAllEqual(expected_output, out_data)
# inp_t = layers.Input(shape=(None,), ragged=True, dtype="string")
# out_t = layer(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
# layer_2 = layers.Hashing(num_bins=2, salt=[211, 137])
# out_data = layer_2(inp_data)
# expected_output = [[1, 0, 1, 0], [1, 1, 0]]
# self.assertAllEqual(expected_output, out_data)
# out_t = layer_2(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
# TODO: support tf.RaggedTensor.
# def test_hash_ragged_int_input_siphash(self):
# layer = layers.Hashing(num_bins=3, salt=[133, 137])
# inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype="int64")
# out_data = layer(inp_data)
# # Same hashed output as test_hash_sparse_input_farmhash
# expected_output = [[1, 1, 0, 1], [2, 1, 1]]
# self.assertAllEqual(expected_output, out_data)
# inp_t = layers.Input(shape=(None,), ragged=True, dtype="int64")
# out_t = layer(inp_t)
# model = models.Model(inputs=inp_t, outputs=out_t)
# self.assertAllClose(out_data, model.predict(inp_data))
| keras-core/keras_core/layers/preprocessing/hashing_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/hashing_test.py",
"repo_id": "keras-core",
"token_count": 9152
} | 39 |
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras_core import backend
from keras_core import layers
from keras_core import testing
class RandomRotationTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("random_rotate_neg4", -0.4),
("random_rotate_neg2", -0.2),
("random_rotate_4", 0.4),
("random_rotate_2", 0.2),
("random_rotate_tuple", (-0.2, 0.4)),
)
def test_random_rotation_shapes(self, factor):
self.run_layer_test(
layers.RandomRotation,
init_kwargs={
"factor": factor,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
supports_masking=False,
run_training_check=False,
)
def test_random_rotation_correctness(self):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape((1, 5, 5, 1))
self.assertAllClose(
backend.convert_to_tensor(expected_output), actual_output, atol=1e-5
)
def test_training_false(self):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image, training=False)
self.assertAllClose(actual_output, input_image)
def test_tf_data_compatibility(self):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
layer = layers.RandomRotation(factor=(0.5, 0.5))
ds = tf_data.Dataset.from_tensor_slices(input_image).map(layer)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape((5, 5, 1))
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(expected_output, output)
| keras-core/keras_core/layers/preprocessing/random_rotation_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/random_rotation_test.py",
"repo_id": "keras-core",
"token_count": 1235
} | 40 |
import numpy as np
import pytest
from keras_core import layers
from keras_core.testing import test_case
class ActivityRegularizationTest(test_case.TestCase):
def test_correctness(self):
layer = layers.ActivityRegularization(l1=0.2, l2=0.3)
layer(2 * np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 4 * 0.3 + 2 * 0.2)
@pytest.mark.requires_trainable_backend
def test_activity_regularization_basics(self):
self.run_layer_test(
layers.ActivityRegularization,
{"l1": 0.1, "l2": 0.2},
input_shape=(2, 3),
input_dtype="float32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=True,
)
| keras-core/keras_core/layers/regularization/activity_regularization_test.py/0 | {
"file_path": "keras-core/keras_core/layers/regularization/activity_regularization_test.py",
"repo_id": "keras-core",
"token_count": 438
} | 41 |
import math
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.Flatten")
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
Note: If inputs are shaped `(batch,)` without a feature axis, then
flattening adds an extra channel dimension and output shape is `(batch, 1)`.
Args:
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
When unspecified, uses `image_data_format` value found in your Keras
        config file at `~/.keras/keras.json` (if it exists). Defaults to
`"channels_last"`.
Example:
>>> x = keras_core.Input(shape=(10, 64))
>>> y = keras_core.layers.Flatten()(x)
>>> y.shape
(None, 640)
"""
def __init__(self, data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(min_ndim=1)
self._channels_first = self.data_format == "channels_first"
def call(self, inputs):
input_shape = inputs.shape
rank = len(input_shape)
if self._channels_first and rank > 1:
# Switch to channels-last format.
inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1))
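            # This makes the flattened feature order match what the same
            # data would produce under the channels_last layout.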
output_shape = tuple(
dim if dim is not None else -1
for dim in self.compute_output_shape(input_shape)
)
return ops.reshape(inputs, output_shape)
def compute_output_shape(self, input_shape):
non_batch_dims = input_shape[1:]
if len(non_batch_dims) == 0:
flattened_dim = 1
elif None in non_batch_dims:
flattened_dim = None
else:
flattened_dim = math.prod(non_batch_dims)
return (input_shape[0], flattened_dim)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def get_config(self):
config = {"data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
| keras-core/keras_core/layers/reshaping/flatten.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/flatten.py",
"repo_id": "keras-core",
"token_count": 1137
} | 42 |
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
from keras_core.utils import argument_validation
@keras_core_export("keras_core.layers.ZeroPadding2D")
class ZeroPadding2D(Layer):
"""Zero-padding layer for 2D input (e.g. picture).
This layer can add rows and columns of zeros at the top, bottom, left and
right side of an image tensor.
Examples:
>>> input_shape = (1, 1, 2, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> x
[[[[0 1]
[2 3]]]]
>>> y = keras_core.layers.ZeroPadding2D(padding=1)(x)
>>> y
[[[[0 0]
[0 0]
[0 0]
[0 0]]
[[0 0]
[0 1]
[2 3]
[0 0]]
[[0 0]
[0 0]
[0 0]
[0 0]]]]
Args:
padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding is applied to height and width.
- If tuple of 2 ints: interpreted as two different symmetric padding
values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints: interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`.
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
When unspecified, uses `image_data_format` value found in your Keras
        config file at `~/.keras/keras.json` (if it exists). Defaults to
`"channels_last"`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, height, width, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, height, width)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, padded_height, padded_width, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, padded_height, padded_width)`
"""
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, "__len__"):
if len(padding) != 2:
raise ValueError(
"`padding` should have two elements. "
f"Received: padding={padding}."
)
height_padding = argument_validation.standardize_tuple(
padding[0], 2, "1st entry of padding", allow_zero=True
)
width_padding = argument_validation.standardize_tuple(
padding[1], 2, "2nd entry of padding", allow_zero=True
)
self.padding = (height_padding, width_padding)
else:
raise ValueError(
"`padding` should be either an int, a tuple of 2 ints "
"(symmetric_height_crop, symmetric_width_crop), "
"or a tuple of 2 tuples of 2 ints "
"((top_crop, bottom_crop), (left_crop, right_crop)). "
f"Received: padding={padding}."
)
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
spatial_dims_offset = 2 if self.data_format == "channels_first" else 1
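        # Spatial dims follow (batch, channels) under channels_first, hence
        # the offset of 2; under channels_last they directly follow batch.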
for index in range(0, 2):
if output_shape[index + spatial_dims_offset] is not None:
output_shape[index + spatial_dims_offset] += (
self.padding[index][0] + self.padding[index][1]
)
return tuple(output_shape)
def call(self, inputs):
if self.data_format == "channels_first":
all_dims_padding = ((0, 0), (0, 0), *self.padding)
else:
all_dims_padding = ((0, 0), *self.padding, (0, 0))
return ops.pad(inputs, all_dims_padding)
def get_config(self):
config = {"padding": self.padding, "data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
| keras-core/keras_core/layers/reshaping/zero_padding2d.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/zero_padding2d.py",
"repo_id": "keras-core",
"token_count": 2161
} | 43 |
import pytest
from keras_core import backend
from keras_core import layers
from keras_core import ops
from keras_core import testing
from keras_core.layers.rnn.dropout_rnn_cell import DropoutRNNCell
class RNNCellWithDropout(layers.Layer, DropoutRNNCell):
def __init__(
self, units, dropout=0.5, recurrent_dropout=0.5, seed=None, **kwargs
):
super().__init__(**kwargs)
self.seed = seed
self.seed_generator = backend.random.SeedGenerator(seed)
self.units = units
self.state_size = units
self.dropout = dropout
self.recurrent_dropout = recurrent_dropout
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="ones",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="ones",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states, training=False):
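        # DropoutRNNCell caches the masks fetched below, so the same
        # dropout pattern is reused across all timesteps of a given batch.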
if training:
dp_mask = self.get_dropout_mask(inputs)
inputs *= dp_mask
prev_output = states[0]
h = ops.matmul(inputs, self.kernel)
if training:
rdp_mask = self.get_recurrent_dropout_mask(prev_output)
prev_output *= rdp_mask
output = h + ops.matmul(prev_output, self.recurrent_kernel)
return output, [output]
class DropoutRNNCellTest(testing.TestCase):
def test_seed_tracking(self):
cell = RNNCellWithDropout(3, seed=1337)
self.assertEqual(len(cell.non_trainable_variables), 1)
layer = layers.RNN(cell)
self.assertEqual(len(layer.non_trainable_variables), 1)
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.RNN,
init_kwargs={"cell": RNNCellWithDropout(5, seed=1337)},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_non_trainable_variables=1,
supports_masking=True,
)
| keras-core/keras_core/layers/rnn/dropout_rnn_cell_test.py/0 | {
"file_path": "keras-core/keras_core/layers/rnn/dropout_rnn_cell_test.py",
"repo_id": "keras-core",
"token_count": 1065
} | 44 |
from keras_core.api_export import keras_core_export
@keras_core_export("keras_core._legacy.losses.Reduction")
class Reduction:
AUTO = "auto"
NONE = "none"
SUM = "sum"
SUM_OVER_BATCH_SIZE = "sum_over_batch_size"
@classmethod
def all(cls):
return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE)
@classmethod
def validate(cls, key):
if key not in cls.all():
raise ValueError(
f'Invalid Reduction Key: {key}. Expected keys are "{cls.all()}"'
)
| keras-core/keras_core/legacy/losses.py/0 | {
"file_path": "keras-core/keras_core/legacy/losses.py",
"repo_id": "keras-core",
"token_count": 256
} | 45 |
import warnings
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.losses.loss import Loss
from keras_core.losses.loss import squeeze_to_same_rank
from keras_core.saving import serialization_lib
from keras_core.utils.numerical_utils import normalize
class LossFunctionWrapper(Loss):
def __init__(
self, fn, reduction="sum_over_batch_size", name=None, **kwargs
):
super().__init__(reduction=reduction, name=name)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true, y_pred):
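        # Align the ranks of y_true and y_pred (e.g. by squeezing a
        # trailing singleton dim) so they broadcast cleanly in self.fn.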
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
return self.fn(y_true, y_pred, **self._fn_kwargs)
def get_config(self):
base_config = super().get_config()
config = {"fn": serialization_lib.serialize_keras_object(self.fn)}
config.update(serialization_lib.serialize_keras_object(self._fn_kwargs))
return {**base_config, **config}
@classmethod
def from_config(cls, config):
if "fn" in config:
config = serialization_lib.deserialize_keras_object(config)
return cls(**config)
@keras_core_export("keras_core.losses.MeanSquaredError")
class MeanSquaredError(LossFunctionWrapper):
"""Computes the mean of squares of errors between labels and predictions.
Formula:
```python
loss = mean(square(y_true - y_pred))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(
self, reduction="sum_over_batch_size", name="mean_squared_error"
):
super().__init__(mean_squared_error, reduction=reduction, name=name)
def get_config(self):
return Loss.get_config(self)
@keras_core_export("keras_core.losses.MeanAbsoluteError")
class MeanAbsoluteError(LossFunctionWrapper):
"""Computes the mean of absolute difference between labels and predictions.
Formula:
```python
loss = mean(abs(y_true - y_pred))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(
self, reduction="sum_over_batch_size", name="mean_absolute_error"
):
super().__init__(mean_absolute_error, reduction=reduction, name=name)
def get_config(self):
return Loss.get_config(self)
@keras_core_export("keras_core.losses.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(LossFunctionWrapper):
"""Computes the mean absolute percentage error between `y_true` & `y_pred`.
Formula:
```python
loss = 100 * mean(abs((y_true - y_pred) / y_true))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="mean_absolute_percentage_error",
):
super().__init__(
mean_absolute_percentage_error, reduction=reduction, name=name
)
def get_config(self):
return Loss.get_config(self)
@keras_core_export("keras_core.losses.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(LossFunctionWrapper):
"""Computes the mean squared logarithmic error between `y_true` & `y_pred`.
Formula:
```python
loss = mean(square(log(y_true + 1) - log(y_pred + 1)))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="mean_squared_logarithmic_error",
):
super().__init__(
mean_squared_logarithmic_error, reduction=reduction, name=name
)
def get_config(self):
return Loss.get_config(self)
@keras_core_export("keras_core.losses.CosineSimilarity")
class CosineSimilarity(LossFunctionWrapper):
"""Computes the cosine similarity between `y_true` & `y_pred`.
Note that it is a number between -1 and 1. When it is a negative number
between -1 and 0, 0 indicates orthogonality and values closer to -1
indicate greater similarity. This makes it usable as a loss function in a
setting where you try to maximize the proximity between predictions and
targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity
will be 0 regardless of the proximity between predictions and targets.
Formula:
```python
loss = -sum(l2_norm(y_true) * l2_norm(y_pred))
```
Args:
axis: The axis along which the cosine similarity is computed
(the features axis). Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(
self,
axis=-1,
reduction="sum_over_batch_size",
name="cosine_similarity",
):
super().__init__(
cosine_similarity, reduction=reduction, name=name, axis=axis
)
@keras_core_export("keras_core.losses.Huber")
class Huber(LossFunctionWrapper):
"""Computes the Huber loss between `y_true` & `y_pred`.
Formula:
```python
for x in error:
if abs(x) <= delta:
loss.append(0.5 * x^2)
elif abs(x) > delta:
loss.append(delta * abs(x) - 0.5 * delta^2)
loss = mean(loss, axis=-1)
```
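    For example (illustrative): with `delta=1.0`, an error of `0.5` falls in
    the quadratic branch and contributes `0.5 * 0.5**2 = 0.125`, while an
    error of `3.0` falls in the linear branch and contributes
    `1.0 * 3.0 - 0.5 * 1.0**2 = 2.5`.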
See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss).
Args:
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
reduction: Type of reduction to apply to loss. Options are `"sum"`,
`"sum_over_batch_size"` or `None`. Defaults to
`"sum_over_batch_size"`.
name: Optional name for the instance.
"""
def __init__(
self,
delta=1.0,
reduction="sum_over_batch_size",
name="huber_loss",
):
super().__init__(huber, name=name, reduction=reduction, delta=delta)
@keras_core_export("keras_core.losses.LogCosh")
class LogCosh(LossFunctionWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
Formula:
```python
error = y_pred - y_true
    logcosh = mean(log((exp(error) + exp(-error))/2), axis=-1)
    ```
    where `error` is `y_pred - y_true`.
Args:
reduction: Type of reduction to apply to loss. Options are `"sum"`,
`"sum_over_batch_size"` or `None`. Defaults to
`"sum_over_batch_size"`.
name: Optional name for the instance.
"""
def __init__(self, reduction="sum_over_batch_size", name="log_cosh"):
super().__init__(log_cosh, name=name, reduction=reduction)
@keras_core_export("keras_core.losses.Hinge")
class Hinge(LossFunctionWrapper):
"""Computes the hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = maximum(1 - y_true * y_pred, 0)
```
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(self, reduction="sum_over_batch_size", name="hinge"):
super().__init__(hinge, reduction=reduction, name=name)
def get_config(self):
return Loss.get_config(self)
@keras_core_export("keras_core.losses.SquaredHinge")
class SquaredHinge(LossFunctionWrapper):
"""Computes the squared hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = square(maximum(1 - y_true * y_pred, 0))
```
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(self, reduction="sum_over_batch_size", name="squared_hinge"):
super().__init__(squared_hinge, reduction=reduction, name=name)
def get_config(self):
return Loss.get_config(self)
@keras_core_export("keras_core.losses.CategoricalHinge")
class CategoricalHinge(LossFunctionWrapper):
"""Computes the categorical hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = maximum(neg - pos + 1, 0)
```
where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(
self, reduction="sum_over_batch_size", name="categorical_hinge"
):
super().__init__(categorical_hinge, reduction=reduction, name=name)
def get_config(self):
return Loss.get_config(self)
@keras_core_export("keras_core.losses.KLDivergence")
class KLDivergence(LossFunctionWrapper):
"""Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.
Formula:
```python
loss = y_true * log(y_true / y_pred)
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(self, reduction="sum_over_batch_size", name="kl_divergence"):
super().__init__(kl_divergence, reduction=reduction, name=name)
def get_config(self):
return Loss.get_config(self)
@keras_core_export("keras_core.losses.Poisson")
class Poisson(LossFunctionWrapper):
"""Computes the Poisson loss between `y_true` & `y_pred`.
Formula:
```python
loss = y_pred - y_true * log(y_pred)
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
"""
def __init__(self, reduction="sum_over_batch_size", name="poisson"):
super().__init__(poisson, reduction=reduction, name=name)
def get_config(self):
return Loss.get_config(self)
@keras_core_export("keras_core.losses.BinaryCrossentropy")
class BinaryCrossentropy(LossFunctionWrapper):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss for binary (0 or 1) classification applications.
The loss function requires the following inputs:
- `y_true` (true label): This is either 0 or 1.
- `y_pred` (predicted value): This is the model's prediction, i.e, a single
floating-point value which either represents a
[logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
when `from_logits=True`) or a probability (i.e, value in [0., 1.] when
`from_logits=False`).
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
assume that `y_pred` is probabilities (i.e., values in [0, 1]).
label_smoothing: Float in range [0, 1]. When 0, no smoothing occurs.
When > 0, we compute the loss between the predicted labels
and a smoothed version of the true labels, where the smoothing
squeezes the labels towards 0.5. Larger values of
`label_smoothing` correspond to heavier smoothing.
axis: The axis along which to compute crossentropy (the features axis).
Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
Examples:
**Recommended Usage:** (set `from_logits=True`)
With `compile()` API:
```python
model.compile(
loss=keras_core.losses.BinaryCrossentropy(from_logits=True),
...
)
```
As a standalone function:
>>> # Example 1: (batch_size = 1, number of samples = 4)
>>> y_true = [0, 1, 0, 0]
>>> y_pred = [-18.6, 0.51, 2.94, -12.8]
>>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred)
0.865
>>> # Example 2: (batch_size = 2, number of samples = 4)
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
    >>> # Using the default 'sum_over_batch_size' reduction type.
>>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred)
0.865
>>> # Using 'sample_weight' attribute
>>> bce(y_true, y_pred, sample_weight=[0.8, 0.2])
0.243
    >>> # Using 'sum' reduction type.
>>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True,
... reduction="sum")
>>> bce(y_true, y_pred)
1.730
>>> # Using 'none' reduction type.
>>> bce = keras_core.losses.BinaryCrossentropy(from_logits=True,
... reduction=None)
>>> bce(y_true, y_pred)
array([0.235, 1.496], dtype=float32)
**Default Usage:** (set `from_logits=False`)
>>> # Make the following updates to the above "Recommended Usage" section
>>> # 1. Set `from_logits=False`
    >>> keras_core.losses.BinaryCrossentropy() # OR ...(from_logits=False)
>>> # 2. Update `y_pred` to use probabilities instead of logits
>>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
"""
def __init__(
self,
from_logits=False,
label_smoothing=0.0,
axis=-1,
reduction="sum_over_batch_size",
name="binary_crossentropy",
):
super().__init__(
binary_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis,
)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
def get_config(self):
return {
"name": self.name,
"reduction": self.reduction,
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
}
@keras_core_export("keras_core.losses.BinaryFocalCrossentropy")
class BinaryFocalCrossentropy(LossFunctionWrapper):
"""Computes focal cross-entropy loss between true labels and predictions.
Binary cross-entropy loss is often used for binary (0 or 1) classification
tasks. The loss function requires the following inputs:
- `y_true` (true label): This is either 0 or 1.
- `y_pred` (predicted value): This is the model's prediction, i.e, a single
floating-point value which either represents a
[logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
when `from_logits=True`) or a probability (i.e, value in `[0., 1.]` when
`from_logits=False`).
According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
helps to apply a "focal factor" to down-weight easy examples and focus more
    on hard examples. By default, the focal factor is computed as follows:
`focal_factor = (1 - output) ** gamma` for class 1
`focal_factor = output ** gamma` for class 0
where `gamma` is a focusing parameter. When `gamma=0`, this function is
equivalent to the binary crossentropy loss.
Args:
apply_class_balancing: A bool, whether to apply weight balancing on the
binary classes 0 and 1.
alpha: A weight balancing factor for class 1, default is `0.25` as
mentioned in reference [Lin et al., 2018](
https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is
`1.0 - alpha`.
gamma: A focusing parameter used to compute the focal factor, default is
`2.0` as mentioned in the reference
[Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
assume that `y_pred` are probabilities (i.e., values in `[0, 1]`).
label_smoothing: Float in `[0, 1]`. When `0`, no smoothing occurs.
When > `0`, we compute the loss between the predicted labels
and a smoothed version of the true labels, where the smoothing
squeezes the labels towards `0.5`.
Larger values of `label_smoothing` correspond to heavier smoothing.
axis: The axis along which to compute crossentropy (the features axis).
Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
Examples:
With the `compile()` API:
```python
model.compile(
loss=keras_core.losses.BinaryFocalCrossentropy(
gamma=2.0, from_logits=True),
...
)
```
As a standalone function:
>>> # Example 1: (batch_size = 1, number of samples = 4)
>>> y_true = [0, 1, 0, 0]
>>> y_pred = [-18.6, 0.51, 2.94, -12.8]
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... gamma=2, from_logits=True)
>>> loss(y_true, y_pred)
0.691
>>> # Apply class weight
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=2, from_logits=True)
>>> loss(y_true, y_pred)
0.51
>>> # Example 2: (batch_size = 2, number of samples = 4)
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
    >>> # Using the default 'sum_over_batch_size' reduction type.
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... gamma=3, from_logits=True)
>>> loss(y_true, y_pred)
0.647
>>> # Apply class weight
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=3, from_logits=True)
>>> loss(y_true, y_pred)
0.482
>>> # Using 'sample_weight' attribute with focal effect
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... gamma=3, from_logits=True)
>>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
0.133
>>> # Apply class weight
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=3, from_logits=True)
>>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
0.097
    >>> # Using 'sum' reduction type.
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... gamma=4, from_logits=True,
... reduction="sum")
>>> loss(y_true, y_pred)
1.222
>>> # Apply class weight
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=4, from_logits=True,
... reduction="sum")
>>> loss(y_true, y_pred)
0.914
>>> # Using 'none' reduction type.
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... gamma=5, from_logits=True,
... reduction=None)
>>> loss(y_true, y_pred)
    array([0.0017, 1.1561], dtype=float32)
>>> # Apply class weight
>>> loss = keras_core.losses.BinaryFocalCrossentropy(
... apply_class_balancing=True, gamma=5, from_logits=True,
... reduction=None)
>>> loss(y_true, y_pred)
    array([0.0004, 0.8670], dtype=float32)
"""
def __init__(
self,
apply_class_balancing=False,
alpha=0.25,
gamma=2.0,
from_logits=False,
label_smoothing=0.0,
axis=-1,
reduction="sum_over_batch_size",
name="binary_focal_crossentropy",
):
super().__init__(
binary_focal_crossentropy,
apply_class_balancing=apply_class_balancing,
alpha=alpha,
gamma=gamma,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis,
)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
self.apply_class_balancing = apply_class_balancing
self.alpha = alpha
self.gamma = gamma
def get_config(self):
return {
"name": self.name,
"reduction": self.reduction,
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
"apply_class_balancing": self.apply_class_balancing,
"alpha": self.alpha,
"gamma": self.gamma,
}
@keras_core_export("keras_core.losses.CategoricalCrossentropy")
class CategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label
classes. We expect labels to be provided in a `one_hot` representation. If
you want to provide labels as integers, please use
`SparseCategoricalCrossentropy` loss. There should be `num_classes` floating
point values per feature, i.e., the shapes of both `y_pred` and `y_true` are
`[batch_size, num_classes]`.
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
meaning the confidence on label values is relaxed. For example, if
`0.1`, use `0.1 / num_classes` for non-target labels and
`0.9 + 0.1 / num_classes` for target labels.
axis: The axis along which to compute crossentropy (the features
axis). Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
Examples:
Standalone usage:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> # Using the default 'sum_over_batch_size' reduction type.
>>> cce = keras_core.losses.CategoricalCrossentropy()
>>> cce(y_true, y_pred)
1.177
>>> # Calling with 'sample_weight'.
>>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
0.814
>>> # Using 'sum' reduction type.
>>> cce = keras_core.losses.CategoricalCrossentropy(
... reduction="sum")
>>> cce(y_true, y_pred)
2.354
>>> # Using 'none' reduction type.
>>> cce = keras_core.losses.CategoricalCrossentropy(
... reduction=None)
>>> cce(y_true, y_pred)
array([0.0513, 2.303], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
loss=keras_core.losses.CategoricalCrossentropy())
```
"""
def __init__(
self,
from_logits=False,
label_smoothing=0.0,
axis=-1,
reduction="sum_over_batch_size",
name="categorical_crossentropy",
):
super().__init__(
categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis,
)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
def get_config(self):
return {
"name": self.name,
"reduction": self.reduction,
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
}
@keras_core_export("keras_core.losses.CategoricalFocalCrossentropy")
class CategoricalFocalCrossentropy(LossFunctionWrapper):
"""Computes the alpha balanced focal crossentropy loss.
Use this crossentropy loss function when there are two or more label
classes and if you want to handle class imbalance without using
`class_weights`. We expect labels to be provided in a `one_hot`
representation.
According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
helps to apply a focal factor to down-weight easy examples and focus more on
hard examples. The general formula for the focal loss (FL)
is as follows:
`FL(p_t) = -(1 - p_t) ** gamma * log(p_t)`
where `p_t` is defined as follows:
`p_t = output if y_true == 1, else 1 - output`
`(1 - p_t) ** gamma` is the `modulating_factor`, where `gamma` is a focusing
parameter. When `gamma` = 0, there is no focal effect on the cross entropy.
`gamma` reduces the importance given to simple examples in a smooth manner.
The authors use the alpha-balanced variant of focal loss (FL) in the paper:
`FL(p_t) = -alpha * (1 - p_t) ** gamma * log(p_t)`
where `alpha` is the weight factor for the classes. If `alpha` = 1, the
loss won't be able to handle class imbalance properly as all
classes will have the same weight. This can be a constant or a list of
constants. If alpha is a list, it must have the same length as the number
of classes.
The formula above can be generalized to:
`FL(p_t) = alpha * (1 - p_t) ** gamma * CrossEntropy(y_true, y_pred)`
where the minus sign comes from `CrossEntropy(y_true, y_pred)` (CE).
Extending this to the multi-class case is straightforward:
`FL(p_t) = alpha * (1 - p_t) ** gamma * CategoricalCE(y_true, y_pred)`
In the snippet below, there are `num_classes` floating point values per
example. The shapes of both `y_pred` and `y_true` are
`(batch_size, num_classes)`.
Args:
alpha: A weight balancing factor for all classes, default is `0.25` as
mentioned in the reference. It can be a list of floats or a scalar.
In the multi-class case, alpha may be set by inverse class
frequency by using `compute_class_weight` from `sklearn.utils`.
gamma: A focusing parameter, default is `2.0` as mentioned in the
reference. It helps to gradually reduce the importance given to
simple (easy) examples in a smooth manner.
from_logits: Whether `output` is expected to be a logits tensor. By
default, we consider that `output` encodes a probability
distribution.
label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
meaning the confidence on label values is relaxed. For example, if
`0.1`, use `0.1 / num_classes` for non-target labels and
`0.9 + 0.1 / num_classes` for target labels.
axis: The axis along which to compute crossentropy (the features
axis). Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
Examples:
Standalone usage:
>>> y_true = [[0., 1., 0.], [0., 0., 1.]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> # Using the default 'sum_over_batch_size' reduction type.
>>> cce = keras_core.losses.CategoricalFocalCrossentropy()
>>> cce(y_true, y_pred)
0.23315276
>>> # Calling with 'sample_weight'.
>>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
0.1632
>>> # Using 'sum' reduction type.
>>> cce = keras_core.losses.CategoricalFocalCrossentropy(
... reduction="sum")
>>> cce(y_true, y_pred)
0.46631
>>> # Using 'none' reduction type.
>>> cce = keras_core.losses.CategoricalFocalCrossentropy(
... reduction=None)
>>> cce(y_true, y_pred)
array([3.2058331e-05, 4.6627346e-01], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='adam',
loss=keras_core.losses.CategoricalFocalCrossentropy())
```
"""
def __init__(
self,
alpha=0.25,
gamma=2.0,
from_logits=False,
label_smoothing=0.0,
axis=-1,
reduction="sum_over_batch_size",
name="categorical_focal_crossentropy",
):
"""Initializes `CategoricalFocalCrossentropy` instance."""
super().__init__(
categorical_focal_crossentropy,
alpha=alpha,
gamma=gamma,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis,
)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
self.alpha = alpha
self.gamma = gamma
def get_config(self):
return {
"name": self.name,
"reduction": self.reduction,
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
"alpha": self.alpha,
"gamma": self.gamma,
}
@keras_core_export("keras_core.losses.SparseCategoricalCrossentropy")
class SparseCategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label
classes. We expect labels to be provided as integers. If you want to
provide labels using a `one-hot` representation, please use
`CategoricalCrossentropy` loss. There should be `num_classes` floating point
values per feature for `y_pred` and a single floating point value per
feature for `y_true`.
In the snippet below, there is a single floating point value per example for
`y_true` and `num_classes` floating point values per example for
`y_pred`. The shape of `y_true` is `[batch_size]` and the shape of `y_pred`
is `[batch_size, num_classes]`.
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`.
Supported options are `"sum"`, `"sum_over_batch_size"` or `None`.
name: Optional name for the loss instance.
Examples:
>>> y_true = [1, 2]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> # Using the default 'sum_over_batch_size' reduction type.
>>> scce = keras_core.losses.SparseCategoricalCrossentropy()
>>> scce(y_true, y_pred)
1.177
>>> # Calling with 'sample_weight'.
>>> scce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
0.814
>>> # Using 'sum' reduction type.
>>> scce = keras_core.losses.SparseCategoricalCrossentropy(
... reduction="sum")
>>> scce(y_true, y_pred)
2.354
>>> # Using 'none' reduction type.
>>> scce = keras_core.losses.SparseCategoricalCrossentropy(
... reduction=None)
>>> scce(y_true, y_pred)
array([0.0513, 2.303], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
loss=keras_core.losses.SparseCategoricalCrossentropy())
```
"""
def __init__(
self,
from_logits=False,
ignore_class=None,
reduction="sum_over_batch_size",
name="sparse_categorical_crossentropy",
):
super().__init__(
sparse_categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
ignore_class=ignore_class,
)
self.from_logits = from_logits
self.ignore_class = ignore_class
def get_config(self):
return {
"name": self.name,
"reduction": self.reduction,
"from_logits": self.from_logits,
"ignore_class": self.ignore_class,
}
def convert_binary_labels_to_hinge(y_true):
"""Converts binary labels into -1/1 for hinge loss/metric calculation."""
are_zeros = ops.equal(y_true, 0)
are_ones = ops.equal(y_true, 1)
is_binary = ops.all((ops.logical_or(are_zeros, are_ones)))
def _convert_binary_labels():
# Convert the binary labels to -1 or 1.
return 2.0 * y_true - 1.0
def _return_labels_unconverted():
# Returns the labels unchanged if they are non-binary
return y_true
updated_y_true = ops.cond(
is_binary, _convert_binary_labels, _return_labels_unconverted
)
return updated_y_true
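# Illustrative sketch (not part of the library): the helper above remaps
# binary 0/1 labels to -1/1 and leaves labels that are already signed (or
# otherwise non-binary) untouched. Assuming a NumPy-like backend:
#
#   convert_binary_labels_to_hinge(ops.convert_to_tensor([0.0, 1.0, 1.0]))
#   # -> [-1.,  1.,  1.]   (binary labels remapped via 2 * y - 1)
#   convert_binary_labels_to_hinge(ops.convert_to_tensor([-1.0, 1.0, 1.0]))
#   # -> [-1.,  1.,  1.]   (already-signed labels returned unchanged)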
@keras_core_export(
[
"keras_core.metrics.hinge",
"keras_core.losses.hinge",
]
)
def hinge(y_true, y_pred):
"""Computes the hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)
```
Args:
y_true: The ground truth values. `y_true` values are expected to be -1
or 1. If binary (0 or 1) labels are provided they will be converted
to -1 or 1 with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras_core.losses.hinge(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, dtype=y_pred.dtype)
y_true = ops.convert_to_tensor(y_true)
y_true = convert_binary_labels_to_hinge(y_true)
return ops.mean(ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1)
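# Worked example for `hinge` (hand-picked values, computed by hand): with
# y_true = [-1., 1.] and y_pred = [-0.5, 0.3], the element-wise terms are
# max(1 - (-1)*(-0.5), 0) = 0.5 and max(1 - 1*0.3, 0) = 0.7, so the loss is
# mean([0.5, 0.7]) = 0.6.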
@keras_core_export(
[
"keras_core.metrics.squared_hinge",
"keras_core.losses.squared_hinge",
]
)
def squared_hinge(y_true, y_pred):
"""Computes the squared hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)
```
Args:
y_true: The ground truth values. `y_true` values are expected to be -1
or 1. If binary (0 or 1) labels are provided, we will convert them
to -1 or 1 with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Squared hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras_core.losses.squared_hinge(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
y_true = convert_binary_labels_to_hinge(y_true)
return ops.mean(
ops.square(ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1
)
@keras_core_export(
[
"keras_core.metrics.categorical_hinge",
"keras_core.losses.categorical_hinge",
]
)
def categorical_hinge(y_true, y_pred):
"""Computes the categorical hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = maximum(neg - pos + 1, 0)
```
where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`
Args:
y_true: The ground truth values. `y_true` values are expected to be
either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor) with
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Categorical hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.randint(0, 3, size=(2,))
>>> y_true = np.eye(np.max(y_true) + 1)[y_true]
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras_core.losses.categorical_hinge(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
pos = ops.sum(y_true * y_pred, axis=-1)
neg = ops.max((1.0 - y_true) * y_pred, axis=-1)
zero = ops.cast(0.0, y_pred.dtype)
return ops.maximum(neg - pos + 1.0, zero)
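# Worked example for `categorical_hinge` (hand-picked values): with one-hot
# y_true = [0, 1, 0] and y_pred = [0.3, 0.6, 0.1], pos = sum(y_true * y_pred)
# = 0.6 and neg = max((1 - y_true) * y_pred) = max(0.3, 0.0, 0.1) = 0.3, so
# the loss is max(0.3 - 0.6 + 1, 0) = 0.7.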
@keras_core_export(
[
"keras_core.metrics.mean_squared_error",
"keras_core.losses.mean_squared_error",
# Legacy aliases
"keras_core._legacy.losses.mse",
"keras_core._legacy.losses.MSE",
"keras_core._legacy.metrics.mse",
"keras_core._legacy.metrics.MSE",
]
)
def mean_squared_error(y_true, y_pred):
"""Computes the mean squared error between labels and predictions.
Formula:
```python
loss = mean(square(y_true - y_pred), axis=-1)
```
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras_core.losses.mean_squared_error(y_true, y_pred)
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared error values with shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
return ops.mean(ops.square(y_true - y_pred), axis=-1)
@keras_core_export(
[
"keras_core.metrics.mean_absolute_error",
"keras_core.losses.mean_absolute_error",
# Legacy aliases
"keras_core._legacy.losses.MAE",
"keras_core._legacy.losses.mae",
"keras_core._legacy.metrics.MAE",
"keras_core._legacy.metrics.mae",
]
)
def mean_absolute_error(y_true, y_pred):
"""Computes the mean absolute error between labels and predictions.
```python
loss = mean(abs(y_true - y_pred), axis=-1)
```
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute error values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras_core.losses.mean_absolute_error(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
return ops.mean(ops.abs(y_true - y_pred), axis=-1)
@keras_core_export(
[
"keras_core.metrics.mean_absolute_percentage_error",
"keras_core.losses.mean_absolute_percentage_error",
# Legacy aliases
"keras_core._legacy.losses.mape",
"keras_core._legacy.losses.MAPE",
"keras_core._legacy.metrics.mape",
"keras_core._legacy.metrics.MAPE",
]
)
def mean_absolute_percentage_error(y_true, y_pred):
"""Computes the mean absolute percentage error between `y_true` & `y_pred`.
Formula:
```python
loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)
```
Division by zero is prevented by dividing by `maximum(abs(y_true), epsilon)`
where `epsilon = keras_core.backend.epsilon()`
(default to `1e-7`).
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute percentage error values with shape = `[batch_size, d0, ..
dN-1]`.
Example:
>>> y_true = np.random.random(size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras_core.losses.mean_absolute_percentage_error(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
epsilon = ops.convert_to_tensor(backend.epsilon())
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
diff = ops.abs((y_true - y_pred) / ops.maximum(ops.abs(y_true), epsilon))
return 100.0 * ops.mean(diff, axis=-1)
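# Zero-target note (illustrative): if y_true contains a 0, the denominator is
# clamped to epsilon (~1e-7 by default), so the corresponding term becomes
# |0 - y_pred| / 1e-7 -- very large, but finite -- instead of a division by
# zero.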
@keras_core_export(
[
"keras_core.metrics.mean_squared_logarithmic_error",
"keras_core.losses.mean_squared_logarithmic_error",
# Legacy aliases
"keras_core._legacy.losses.msle",
"keras_core._legacy.losses.MSLE",
"keras_core._legacy.metrics.msle",
"keras_core._legacy.metrics.MSLE",
]
)
def mean_squared_logarithmic_error(y_true, y_pred):
"""Computes the mean squared logarithmic error between `y_true` & `y_pred`.
Formula:
```python
loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)
```
Note that `y_pred` and `y_true` cannot be less than or equal to 0. Negative
values and 0 values will be replaced with `keras_core.backend.epsilon()`
(default to `1e-7`).
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared logarithmic error values with shape = `[batch_size, d0, ..
dN-1]`.
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras_core.losses.mean_squared_logarithmic_error(y_true, y_pred)
"""
epsilon = ops.convert_to_tensor(backend.epsilon())
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
first_log = ops.log(ops.maximum(y_pred, epsilon) + 1.0)
second_log = ops.log(ops.maximum(y_true, epsilon) + 1.0)
return ops.mean(ops.square(first_log - second_log), axis=-1)
@keras_core_export("keras_core.losses.cosine_similarity")
def cosine_similarity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Formula:
```python
loss = -sum(l2_norm(y_true) * l2_norm(y_pred))
```
Note that the result is a number between -1 and 1: 0 indicates
orthogonality, and values closer to -1 indicate greater similarity. This
makes it usable as a loss function in a
setting where you try to maximize the proximity between predictions and
targets. If either `y_true` or `y_pred` is a zero vector, cosine
similarity will be 0 regardless of the proximity between predictions
and targets.
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
axis: Axis along which to determine similarity. Defaults to `-1`.
Returns:
Cosine similarity tensor.
Example:
>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
>>> loss = keras_core.losses.cosine_similarity(y_true, y_pred, axis=-1)
[-0., -0.99999994, 0.99999994]
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
y_pred = normalize(y_pred, axis=axis)
y_true = normalize(y_true, axis=axis)
return -ops.sum(y_true * y_pred, axis=axis)
@keras_core_export(["keras_core.losses.huber", "keras_core.metrics.huber"])
def huber(y_true, y_pred, delta=1.0):
"""Computes Huber loss value.
Formula:
```python
for x in error:
if abs(x) <= delta:
loss.append(0.5 * x^2)
elif abs(x) > delta:
loss.append(delta * abs(x) - 0.5 * delta^2)
loss = mean(loss, axis=-1)
```
See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss).
Example:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> loss = keras_core.losses.huber(y_true, y_pred)
0.155
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
delta: A float, the point where the Huber loss function changes from a
quadratic to linear. Defaults to `1.0`.
Returns:
Tensor with one scalar loss entry per sample.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
delta = ops.convert_to_tensor(delta)
error = ops.subtract(y_pred, y_true)
abs_error = ops.abs(error)
half = ops.convert_to_tensor(0.5, dtype=abs_error.dtype)
return ops.mean(
ops.where(
abs_error <= delta,
half * ops.square(error),
delta * abs_error - half * ops.square(delta),
),
axis=-1,
)
@keras_core_export(
[
"keras_core.losses.log_cosh",
"keras_core.metrics.log_cosh",
# Legacy aliases
"keras_core._legacy.losses.logcosh",
"keras_core._legacy.metrics.logcosh",
]
)
def log_cosh(y_true, y_pred):
"""Logarithm of the hyperbolic cosine of the prediction error.
Formula:
```python
loss = mean(log(cosh(y_pred - y_true)), axis=-1)
```
Note that `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small
`x` and to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works
mostly like the mean squared error, but will not be so strongly affected by
the occasional wildly incorrect prediction.
Example:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [0., 0.]]
>>> loss = keras_core.losses.log_cosh(y_true, y_pred)
0.108
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Logcosh error values with shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
log2 = ops.convert_to_tensor(ops.log(2.0), dtype=y_pred.dtype)
def _logcosh(x):
return x + ops.softplus(x * -2.0) - log2
return ops.mean(_logcosh(y_pred - y_true), axis=-1)
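# Numerical-stability note (illustrative, not from the source): _logcosh uses
# the identity log(cosh(x)) = x + softplus(-2x) - log(2). For large |x| this
# stays finite while a naive log(cosh(x)) overflows; e.g. in float32,
# cosh(100.) ~ 1.3e43 exceeds the float32 maximum (~3.4e38), whereas
# 100. + softplus(-200.) - log(2.) ~ 99.307.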
@keras_core_export(
[
"keras_core.metrics.kl_divergence",
"keras_core.losses.kl_divergence",
# Legacy aliases
"keras_core._legacy.losses.KLD",
"keras_core._legacy.losses.kld",
"keras_core._legacy.losses.kullback_leibler_divergence",
"keras_core._legacy.metrics.KLD",
"keras_core._legacy.metrics.kld",
"keras_core._legacy.metrics.kullback_leibler_divergence",
]
)
def kl_divergence(y_true, y_pred):
"""Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.
Formula:
```python
loss = y_true * log(y_true / y_pred)
```
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
Returns:
KL Divergence loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float32)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras_core.losses.kl_divergence(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = ops.clip(y_true, 1e-7, 1)
>>> y_pred = ops.clip(y_pred, 1e-7, 1)
>>> assert np.array_equal(
... loss, np.sum(y_true * np.log(y_true / y_pred), axis=-1))
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, y_pred.dtype)
y_true = ops.clip(y_true, backend.epsilon(), 1)
y_pred = ops.clip(y_pred, backend.epsilon(), 1)
return ops.sum(y_true * ops.log(y_true / y_pred), axis=-1)
@keras_core_export(
[
"keras_core.metrics.poisson",
"keras_core.losses.poisson",
]
)
def poisson(y_true, y_pred):
"""Computes the Poisson loss between y_true and y_pred.
Formula:
```python
loss = y_pred - y_true * log(y_pred)
```
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Poisson loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras_core.losses.poisson(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_pred = y_pred + 1e-7
>>> assert np.allclose(
... loss, np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
... atol=1e-5)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
epsilon = ops.convert_to_tensor(backend.epsilon())
return ops.mean(y_pred - y_true * ops.log(y_pred + epsilon), axis=-1)
@keras_core_export(
[
"keras_core.metrics.categorical_crossentropy",
"keras_core.losses.categorical_crossentropy",
]
)
def categorical_crossentropy(
y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1
):
"""Computes the categorical crossentropy loss.
Args:
y_true: Tensor of one-hot true targets.
y_pred: Tensor of predicted targets.
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
example, if `0.1`, use `0.1 / num_classes` for non-target labels
and `0.9 + 0.1 / num_classes` for target labels.
axis: Defaults to `-1`. The dimension along which the entropy is
computed.
Returns:
Categorical crossentropy loss value.
Example:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = keras_core.losses.categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss
array([0.0513, 2.303], dtype=float32)
"""
if isinstance(axis, bool):
raise ValueError(
"`axis` must be of type `int`. "
f"Received: axis={axis} of type {type(axis)}"
)
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if y_pred.shape[-1] == 1:
warnings.warn(
"In loss categorical_crossentropy, expected "
"y_pred.shape to be (batch_size, num_classes) "
f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. "
"Consider using 'binary_crossentropy' if you only have 2 classes.",
SyntaxWarning,
stacklevel=2,
)
if label_smoothing:
num_classes = ops.cast(ops.shape(y_true)[-1], y_pred.dtype)
y_true = y_true * (1.0 - label_smoothing) + (
label_smoothing / num_classes
)
return ops.categorical_crossentropy(
y_true, y_pred, from_logits=from_logits, axis=axis
)
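# Label-smoothing sketch (hand computation, not library output): with
# label_smoothing=0.1 and num_classes=3, a one-hot target [0, 1, 0] becomes
# [0, 1, 0] * (1 - 0.1) + 0.1 / 3 = [0.0333, 0.9333, 0.0333], matching the
# "0.1 / num_classes" / "0.9 + 0.1 / num_classes" description in the
# docstring.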
@keras_core_export(
[
"keras_core.metrics.categorical_focal_crossentropy",
"keras_core.losses.categorical_focal_crossentropy",
]
)
def categorical_focal_crossentropy(
y_true,
y_pred,
alpha=0.25,
gamma=2.0,
from_logits=False,
label_smoothing=0.0,
axis=-1,
):
"""Computes the categorical focal crossentropy loss.
Args:
y_true: Tensor of one-hot true targets.
y_pred: Tensor of predicted targets.
alpha: A weight balancing factor for all classes, default is `0.25` as
mentioned in the reference. It can be a list of floats or a scalar.
In the multi-class case, alpha may be set by inverse class
frequency by using `compute_class_weight` from `sklearn.utils`.
gamma: A focusing parameter, default is `2.0` as mentioned in the
reference. It helps to gradually reduce the importance given to
simple examples in a smooth manner. When `gamma` = 0, there is
no focal effect on the categorical crossentropy.
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability
distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
example, if `0.1`, use `0.1 / num_classes` for non-target labels
and `0.9 + 0.1 / num_classes` for target labels.
axis: Defaults to `-1`. The dimension along which the entropy is
computed.
Returns:
Categorical focal crossentropy loss value.
Example:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.9, 0.05], [0.1, 0.85, 0.05]]
>>> loss = keras_core.losses.categorical_focal_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss
array([2.63401289e-04, 6.75912094e-01], dtype=float32)
"""
if isinstance(axis, bool):
raise ValueError(
"`axis` must be of type `int`. "
f"Received: axis={axis} of type {type(axis)}"
)
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if y_pred.shape[-1] == 1:
warnings.warn(
"In loss categorical_focal_crossentropy, expected "
"y_pred.shape to be (batch_size, num_classes) "
f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. "
"Consider using 'binary_crossentropy' if you only have 2 classes.",
SyntaxWarning,
stacklevel=2,
)
if label_smoothing:
num_classes = ops.cast(ops.shape(y_true)[-1], y_pred.dtype)
y_true = y_true * (1.0 - label_smoothing) + (
label_smoothing / num_classes
)
if from_logits:
y_pred = ops.softmax(y_pred, axis=axis)
# Adjust the predictions so that the probability of
# each class for every sample adds up to 1
# This is needed to ensure that the cross entropy is
# computed correctly.
output = y_pred / ops.sum(y_pred, axis=axis, keepdims=True)
output = ops.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
# Calculate cross entropy
cce = -y_true * ops.log(output)
# Calculate factors
modulating_factor = ops.power(1.0 - output, gamma)
weighting_factor = ops.multiply(modulating_factor, alpha)
# Apply weighting factor
focal_cce = ops.multiply(weighting_factor, cce)
focal_cce = ops.sum(focal_cce, axis=axis)
return focal_cce
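# Worked trace for the docstring example (hand computation, rounded): for the
# first sample, y_true = [0, 1, 0] and y_pred = [0.05, 0.9, 0.05], only the
# true class contributes: cce = -log(0.9) ~ 0.1054, modulating_factor =
# (1 - 0.9) ** 2 = 0.01, and with the default alpha = 0.25 the focal loss is
# 0.25 * 0.01 * 0.1054 ~ 2.63e-04, matching the reported 2.63401289e-04.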
@keras_core_export(
[
"keras_core.metrics.sparse_categorical_crossentropy",
"keras_core.losses.sparse_categorical_crossentropy",
]
)
def sparse_categorical_crossentropy(
y_true, y_pred, from_logits=False, ignore_class=None, axis=-1
):
"""Computes the sparse categorical crossentropy loss.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
ignore_class: Optional integer. The ID of a class to be ignored during
loss computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
axis: Defaults to `-1`. The dimension along which the entropy is
computed.
Returns:
Sparse categorical crossentropy loss value.
Examples:
>>> y_true = [1, 2]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = keras_core.losses.sparse_categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss
array([0.0513, 2.303], dtype=float32)
"""
if ignore_class is not None:
res_shape = ops.shape(y_pred)[:-1]
valid_mask = ops.not_equal(y_true, ops.cast(ignore_class, y_pred.dtype))
y_true = y_true * ops.cast(valid_mask, y_true.dtype)
y_pred = y_pred * ops.cast(
ops.expand_dims(valid_mask, -1), y_pred.dtype
)
res = ops.sparse_categorical_crossentropy(
y_true,
y_pred,
from_logits=from_logits,
axis=axis,
)
if ignore_class is not None:
valid_mask = ops.reshape(valid_mask, res_shape)
res = ops.where(valid_mask, res, 0.0)
try:
res._keras_mask = valid_mask
except AttributeError:
pass
return res
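# `ignore_class` sketch (illustrative): with ignore_class=-1 and
# y_true = [[0, -1]], the second position is treated as "void": valid_mask is
# False there, its loss entry is zeroed by the ops.where above, and the mask
# is attached as `res._keras_mask` when the backend tensor permits setting
# attributes.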
@keras_core_export(
[
"keras_core.metrics.binary_crossentropy",
"keras_core.losses.binary_crossentropy",
]
)
def binary_crossentropy(
y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1
):
"""Computes the binary crossentropy loss.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels by
squeezing them towards 0.5, that is,
using `1. - 0.5 * label_smoothing` for the target class
and `0.5 * label_smoothing` for the non-target class.
axis: The axis along which the mean is computed. Defaults to `-1`.
Returns:
Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> loss = keras_core.losses.binary_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss
array([0.916 , 0.714], dtype=float32)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if label_smoothing:
y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
return ops.mean(
ops.binary_crossentropy(y_true, y_pred, from_logits=from_logits),
axis=axis,
)
@keras_core_export(
[
"keras_core.metrics.binary_focal_crossentropy",
"keras_core.losses.binary_focal_crossentropy",
]
)
def binary_focal_crossentropy(
y_true,
y_pred,
apply_class_balancing=False,
alpha=0.25,
gamma=2.0,
from_logits=False,
label_smoothing=0.0,
axis=-1,
):
"""Computes the binary focal crossentropy loss.
According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
helps to apply a focal factor to down-weight easy examples and focus more on
hard examples. By default, the focal tensor is computed as follows:
`focal_factor = (1 - output) ** gamma` for class 1
`focal_factor = output ** gamma` for class 0
where `gamma` is a focusing parameter. When `gamma` = 0, there is no focal
effect on the binary crossentropy loss.
If `apply_class_balancing == True`, this function also takes into account a
weight balancing factor for the binary classes 0 and 1 as follows:
`weight = alpha` for class 1 (`target == 1`)
`weight = 1 - alpha` for class 0
where `alpha` is a float in the range of `[0, 1]`.
Args:
y_true: Ground truth values, of shape `(batch_size, d0, .. dN)`.
y_pred: The predicted values, of shape `(batch_size, d0, .. dN)`.
apply_class_balancing: A bool, whether to apply weight balancing on the
binary classes 0 and 1.
alpha: A weight balancing factor for class 1, default is `0.25` as
mentioned in the reference. The weight for class 0 is `1.0 - alpha`.
gamma: A focusing parameter, default is `2.0` as mentioned in the
reference.
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels by
squeezing them towards 0.5, that is,
using `1. - 0.5 * label_smoothing` for the target class
and `0.5 * label_smoothing` for the non-target class.
axis: The axis along which the mean is computed. Defaults to `-1`.
Returns:
Binary focal crossentropy loss value
with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> loss = keras_core.losses.binary_focal_crossentropy(
... y_true, y_pred, gamma=2)
>>> assert loss.shape == (2,)
>>> loss
array([0.330, 0.206], dtype=float32)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if label_smoothing:
y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
if from_logits:
y_pred = ops.sigmoid(y_pred)
bce = ops.binary_crossentropy(
target=y_true,
output=y_pred,
from_logits=False,
)
# Calculate focal factor
p_t = y_true * y_pred + (1 - y_true) * (1 - y_pred)
focal_factor = ops.power(1.0 - p_t, gamma)
focal_bce = focal_factor * bce
if apply_class_balancing:
weight = y_true * alpha + (1 - y_true) * (1 - alpha)
focal_bce = weight * focal_bce
return ops.mean(focal_bce, axis=axis)
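# Worked trace for the docstring example (hand computation, rounded): in the
# first sample, y_true = [0, 1] and y_pred = [0.6, 0.4], so both elements
# have p_t = 0.4, bce = -log(0.4) ~ 0.916 and focal_factor = (1 - 0.4) ** 2
# = 0.36, giving mean(0.36 * 0.916) ~ 0.330. The second sample averages
# 0.16 * 0.511 and 0.36 * 0.916 to ~ 0.206, matching array([0.330, 0.206]).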
| keras-core/keras_core/losses/losses.py/0 | {
"file_path": "keras-core/keras_core/losses/losses.py",
"repo_id": "keras-core",
"token_count": 27384
} | 46 |
from keras_core.api_export import keras_core_export
from keras_core.losses.losses import binary_crossentropy
from keras_core.losses.losses import categorical_crossentropy
from keras_core.losses.losses import kl_divergence
from keras_core.losses.losses import poisson
from keras_core.losses.losses import sparse_categorical_crossentropy
from keras_core.metrics import reduction_metrics
@keras_core_export("keras_core.metrics.KLDivergence")
class KLDivergence(reduction_metrics.MeanMetricWrapper):
"""Computes Kullback-Leibler divergence metric between `y_true` and
`y_pred`.
Formula:
```python
metric = y_true * log(y_true / y_pred)
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras_core.metrics.KLDivergence()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
0.45814306
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
0.9162892
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.KLDivergence()])
```
"""
def __init__(self, name="kl_divergence", dtype=None):
super().__init__(fn=kl_divergence, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_core_export("keras_core.metrics.Poisson")
class Poisson(reduction_metrics.MeanMetricWrapper):
"""Computes the Poisson metric between `y_true` and `y_pred`.
Formula:
```python
metric = y_pred - y_true * log(y_pred)
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
Standalone usage:
>>> m = keras_core.metrics.Poisson()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result()
0.49999997
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result()
0.99999994
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.Poisson()])
```
"""
def __init__(self, name="poisson", dtype=None):
super().__init__(fn=poisson, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_core_export("keras_core.metrics.BinaryCrossentropy")
class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are only two
label classes (0 and 1).
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected
to be a logits tensor. By default, we consider
that output encodes a probability distribution.
label_smoothing: (Optional) Float in `[0, 1]`.
When > 0, label values are smoothed,
meaning the confidence on label values is relaxed.
e.g. `label_smoothing=0.2` means that we will use
a value of 0.1 for label "0" and 0.9 for label "1".
Examples:
Standalone usage:
>>> m = keras_core.metrics.BinaryCrossentropy()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
0.81492424
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
0.9162905
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.BinaryCrossentropy()])
```
"""
def __init__(
self,
name="binary_crossentropy",
dtype=None,
from_logits=False,
label_smoothing=0,
):
super().__init__(
binary_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing,
)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
def get_config(self):
return {
"name": self.name,
"dtype": self.dtype,
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
}
@keras_core_export("keras_core.metrics.CategoricalCrossentropy")
class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are multiple
label classes (2 or more). It assumes that labels are one-hot encoded,
e.g., when labels values are `[2, 0, 1]`, then
`y_true` is `[[0, 0, 1], [1, 0, 0], [0, 1, 0]]`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected to be
a logits tensor. By default, we consider that output
encodes a probability distribution.
label_smoothing: (Optional) Float in `[0, 1]`.
When > 0, label values are smoothed, meaning the confidence
on label values is relaxed. e.g. `label_smoothing=0.2` means
that we will use a value of 0.1 for label
"0" and 0.9 for label "1".
axis: (Optional) Defaults to `-1`.
The dimension along which entropy is computed.
Examples:
Standalone usage:
>>> # EPSILON = 1e-7, y = y_true, y' = y_pred
>>> # y' = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
>>> # y' = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(y'), axis = -1)
>>> # = -((log 0.95), (log 0.1))
>>> # = [0.051, 2.302]
>>> # Reduced xent = (0.051 + 2.302) / 2
>>> m = keras_core.metrics.CategoricalCrossentropy()
>>> m.update_state([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result()
1.1769392
>>> m.reset_state()
>>> m.update_state([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=np.array([0.3, 0.7]))
>>> m.result()
1.6271976
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.CategoricalCrossentropy()])
```
"""
def __init__(
self,
name="categorical_crossentropy",
dtype=None,
from_logits=False,
label_smoothing=0,
axis=-1,
):
super().__init__(
categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis,
)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
def get_config(self):
return {
"name": self.name,
"dtype": self.dtype,
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
}
@keras_core_export("keras_core.metrics.SparseCategoricalCrossentropy")
class SparseCategoricalCrossentropy(reduction_metrics.MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
Use this crossentropy metric when there are two or more label classes.
It expects labels to be provided as integers. If you want to provide labels
that are one-hot encoded, please use the `CategoricalCrossentropy`
metric instead.
There should be `num_classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected
to be a logits tensor. By default, we consider that output
encodes a probability distribution.
axis: (Optional) Defaults to `-1`.
The dimension along which entropy is computed.
Examples:
Standalone usage:
>>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
>>> # logits = log(y_pred)
>>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
>>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(softmax), 1)
>>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
>>> # [-2.3026, -0.2231, -2.3026]]
>>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
>>> # xent = [0.0513, 2.3026]
>>> # Reduced xent = (0.0513 + 2.3026) / 2
>>> m = keras_core.metrics.SparseCategoricalCrossentropy()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result()
1.1769392
>>> m.reset_state()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=np.array([0.3, 0.7]))
>>> m.result()
1.6271976
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.SparseCategoricalCrossentropy()])
```
"""
def __init__(
self,
name="sparse_categorical_crossentropy",
dtype=None,
from_logits=False,
axis=-1,
):
super().__init__(
sparse_categorical_crossentropy,
name=name,
dtype=dtype,
from_logits=from_logits,
axis=axis,
)
self.from_logits = from_logits
self.axis = axis
def get_config(self):
return {
"name": self.name,
"dtype": self.dtype,
"from_logits": self.from_logits,
"axis": self.axis,
}
| keras-core/keras_core/metrics/probabilistic_metrics.py/0 | {
"file_path": "keras-core/keras_core/metrics/probabilistic_metrics.py",
"repo_id": "keras-core",
"token_count": 4873
} | 47 |
import numpy as np
import pytest
from keras_core import backend
from keras_core import layers
from keras_core import testing
from keras_core.layers.core.input_layer import Input
from keras_core.models.functional import Functional
from keras_core.models.sequential import Sequential
@pytest.mark.requires_trainable_backend
class SequentialTest(testing.TestCase):
def test_basic_flow_with_input(self):
model = Sequential(name="seq")
model.add(Input(shape=(2,), batch_size=3))
model.add(layers.Dense(4))
model.add(layers.Dense(5))
model.summary()
self.assertEqual(len(model.layers), 2)
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 4)
# Test eager call
x = np.random.random((3, 2))
y = model(x)
self.assertEqual(type(model._functional), Functional)
self.assertEqual(y.shape, (3, 5))
# Test symbolic call
x = backend.KerasTensor((3, 2))
y = model(x)
self.assertEqual(y.shape, (3, 5))
# Test `layers` constructor arg
model = Sequential(
layers=[
Input(shape=(2,), batch_size=3),
layers.Dense(4),
layers.Dense(5),
]
)
self.assertEqual(len(model.layers), 2)
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 4)
x = np.random.random((3, 2))
y = model(x)
self.assertEqual(y.shape, (3, 5))
# Test pop
model.pop()
self.assertEqual(len(model.layers), 1)
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 2)
x = np.random.random((3, 2))
y = model(x)
self.assertEqual(y.shape, (3, 4))
def test_legacy_flow_with_input_shape(self):
model = Sequential(name="seq")
model.add(layers.Dense(4, input_shape=(2,)))
model.add(layers.Dense(5))
self.assertEqual(len(model.layers), 2)
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 4)
self.assertEqual(type(model._functional), Functional)
# Input_dim works too
model = Sequential(name="seq")
model.add(layers.Dense(4, input_dim=2))
model.add(layers.Dense(5))
self.assertEqual(len(model.layers), 2)
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 4)
self.assertEqual(type(model._functional), Functional)
# Subsequent input_shapes are ignored
model = Sequential(name="seq")
model.add(layers.Dense(4, input_shape=(2,)))
model.add(layers.Dense(5, input_shape=(3, 4)))
self.assertEqual(len(model.layers), 2)
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 4)
self.assertEqual(type(model._functional), Functional)
def test_basic_flow_deferred(self):
model = Sequential(name="seq")
model.add(layers.Dense(4))
model.add(layers.Dense(5))
model.summary()
self.assertEqual(len(model.layers), 2)
# Test eager call
x = np.random.random((3, 2))
y = model(x)
self.assertTrue(model.built)
model.summary()
self.assertEqual(type(model._functional), Functional)
self.assertEqual(y.shape, (3, 5))
# Test symbolic call
x = backend.KerasTensor((3, 2))
y = model(x)
self.assertEqual(y.shape, (3, 5))
# Test `layers` constructor arg
model = Sequential(
layers=[
layers.Dense(4),
layers.Dense(5),
]
)
x = np.random.random((3, 2))
y = model(x)
self.assertEqual(y.shape, (3, 5))
# Test pop
model.pop()
self.assertEqual(len(model.layers), 1)
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 2)
x = np.random.random((3, 2))
y = model(x)
self.assertEqual(y.shape, (3, 4))
def test_dict_inputs(self):
class DictLayer(layers.Layer):
def call(self, inputs):
assert isinstance(inputs, dict)
return inputs
model = Sequential([DictLayer()])
x = {"a": np.random.random((3, 2)), "b": np.random.random((3, 2))}
y = model(x)
self.assertEqual(type(y), dict)
model.summary()
def test_list_inputs(self):
class ListLayer(layers.Layer):
def call(self, inputs):
assert isinstance(inputs, list)
return inputs
model = Sequential([ListLayer()])
x = [np.random.random((3, 2)), np.random.random((3, 2))]
y = model(x)
self.assertEqual(type(y), list)
model.summary()
def test_errors(self):
# Trying to pass 2 Inputs
model = Sequential()
model.add(Input(shape=(2,), batch_size=3))
with self.assertRaisesRegex(ValueError, "already been configured"):
model.add(Input(shape=(2,), batch_size=3))
with self.assertRaisesRegex(ValueError, "already been configured"):
model.add(layers.InputLayer(shape=(2,), batch_size=3))
# Same name 2x
model = Sequential()
model.add(layers.Dense(2, name="dense"))
with self.assertRaisesRegex(ValueError, "should have unique names"):
model.add(layers.Dense(2, name="dense"))
# No layers
model = Sequential()
x = np.random.random((3, 2))
with self.assertRaisesRegex(ValueError, "no layers"):
model(x)
# Build conflict
model = Sequential()
model.add(Input(shape=(2,), batch_size=3))
model.add(layers.Dense(2))
with self.assertRaisesRegex(ValueError, "already been configured"):
model.build((3, 4))
# But this works
model.build((3, 2))
def test_shape_inference_failure(self):
class DynamicLayer(layers.Layer):
def call(self, inputs):
return inputs + 1.0
def compute_output_spec(self, *args, **kwargs):
raise NotImplementedError
model = Sequential([DynamicLayer()])
x = np.random.random((3, 2))
y = model(x)
self.assertAllClose(y, x + 1)
model.summary()
def test_serialization(self):
# Unbuilt deferred
model = Sequential(name="seq")
model.add(layers.Dense(4))
model.add(layers.Dense(5))
revived = self.run_class_serialization_test(model)
self.assertLen(revived.layers, 2)
# Built deferred
model.build((2, 3))
revived = self.run_class_serialization_test(model)
self.assertLen(revived.layers, 2)
# Regular
model = Sequential(name="seq")
model.add(Input(shape=(2,), batch_size=3))
model.add(layers.Dense(4))
model.add(layers.Dense(5))
model.add(layers.Dense(6))
revived = self.run_class_serialization_test(model)
self.assertLen(revived.layers, 3)
# Weird
class DictLayer(layers.Layer):
def call(self, inputs):
assert isinstance(inputs, dict)
return inputs
model = Sequential([DictLayer()])
revived = self.run_class_serialization_test(
model, custom_objects={"DictLayer": DictLayer}
)
self.assertLen(revived.layers, 1)
def test_functional_properties(self):
model = Sequential(name="seq")
inputs = Input(shape=(2,))
model.add(inputs)
model.add(layers.Dense(4))
self.assertEqual(model.inputs, [inputs])
self.assertEqual(model.outputs, [model.layers[-1].output])
self.assertEqual(model.input_shape, (None, 2))
self.assertEqual(model.output_shape, (None, 4))
def test_bad_layer(self):
model = Sequential(name="seq")
with self.assertRaisesRegex(ValueError, "Only instances of"):
model.add({})
| keras-core/keras_core/models/sequential_test.py/0 | {
"file_path": "keras-core/keras_core/models/sequential_test.py",
"repo_id": "keras-core",
"token_count": 3854
} | 48 |
"""
MANIFEST:
abs
absolute
add
all
amax
amin
append
arange
arccos
arccosh
arcsin
arcsinh
arctan
arctan2
arctanh
argmax
argmin
argsort
array
average
bincount
broadcast_to
ceil
clip
concatenate
conj
conjugate
copy
cos
cosh
count_nonzero
cross
cumprod
cumsum
diag
diagonal
diff
digitize
divide
dot
dtype
einsum
empty
equal
exp
expand_dims
expm1
eye
flip
floor
full
full_like
greater
greater_equal
hstack
identity
imag
interp
isclose
isfinite
isinf
isnan
less
less_equal
linspace
log
log10
log1p
log2
logaddexp
logical_and
logical_not
logical_or
logspace
matmul
max
maximum
mean
median
meshgrid
mgrid
min
minimum
mod
moveaxis
multiply
nan_to_num
ndim
nonzero
not_equal
ones
ones_like
outer
pad
percentile
power
prod
ravel
real
reciprocal
repeat
reshape
roll
round
sign
sin
sinh
size
sort
split
sqrt
square
squeeze
stack
std
subtract
sum
swapaxes
take
take_along_axis
tan
tanh
tensordot
tile
trace
transpose
tri
tril
triu
true_divide
vdot
vstack
where
zeros
zeros_like
"""
import re
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.backend import KerasTensor
from keras_core.backend import any_symbolic_tensors
from keras_core.ops import operation_utils
from keras_core.ops.operation import Operation
from keras_core.ops.operation_utils import reduce_shape
def broadcast_shapes(shape1, shape2):
"""Broadcast input shapes to a unified shape.
Input shapes are converted to lists internally for mutability.
Args:
shape1: A tuple or list of integers.
shape2: A tuple or list of integers.
Returns:
output_shape (list of integers or `None`): The broadcasted shape.
Example:
>>> broadcast_shapes((5, 3), (1, 3))
[5, 3]
"""
shape1 = list(shape1)
shape2 = list(shape2)
origin_shape1 = shape1
origin_shape2 = shape2
if len(shape1) > len(shape2):
shape2 = [1] * (len(shape1) - len(shape2)) + shape2
if len(shape1) < len(shape2):
shape1 = [1] * (len(shape2) - len(shape1)) + shape1
output_shape = list(shape1)
for i in range(len(shape1)):
if shape1[i] == 1:
output_shape[i] = shape2[i]
elif shape1[i] is None:
output_shape[i] = None if shape2[i] == 1 else shape2[i]
else:
if shape2[i] == 1 or shape2[i] is None or shape2[i] == shape1[i]:
output_shape[i] = shape1[i]
else:
raise ValueError(
"Cannot broadcast shape, the failure dim has value "
f"{shape1[i]}, which cannot be broadcasted to {shape2[i]}. "
f"Input shapes are: {origin_shape1} and {origin_shape2}."
)
return output_shape
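# Dynamic-dimension sketch (derived from the branches above, not from the
# docstring): a None dim adopts the other shape's concrete size, while a
# concrete 1 adopts None from the other side.
#
#   broadcast_shapes((None, 3), (5, 3))   # -> [5, 3]
#   broadcast_shapes((1, 3), (None, 3))   # -> [None, 3]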
def shape_equal(shape1, shape2, axis=None, allow_none=True):
"""Check if two shapes are equal.
Args:
shape1: A list or tuple of integers for first shape to be compared.
shape2: A list or tuple of integers for second shape to be compared.
axis: An integer, list, or tuple of integers (optional):
Axes to ignore during comparison. Defaults to `None`.
allow_none (bool, optional): If `True`, allows `None` in a shape
to match any value in the corresponding position of the other shape.
Defaults to `True`.
Returns:
bool: `True` if shapes are considered equal based on the criteria,
`False` otherwise.
Examples:
>>> shape_equal((32, 64, 128), (32, 64, 128))
True
>>> shape_equal((32, 64, 128), (32, 64, 127))
False
>>> shape_equal((32, 64, None), (32, 64, 128), allow_none=True)
True
>>> shape_equal((32, 64, None), (32, 64, 128), allow_none=False)
False
>>> shape_equal((32, 64, 128), (32, 63, 128), axis=1)
True
>>> shape_equal((32, 64, 128), (32, 63, 127), axis=(1, 2))
True
>>> shape_equal((32, 64, 128), (32, 63, 127), axis=[1,2])
True
>>> shape_equal((32, 64), (32, 64, 128))
False
"""
if len(shape1) != len(shape2):
return False
shape1 = list(shape1)
shape2 = list(shape2)
if axis is not None:
if isinstance(axis, int):
axis = [axis]
for ax in axis:
shape1[ax] = -1
shape2[ax] = -1
if allow_none:
for i in range(len(shape1)):
if shape1[i] is None:
shape1[i] = shape2[i]
if shape2[i] is None:
shape2[i] = shape1[i]
return shape1 == shape2
class Absolute(Operation):
def call(self, x):
return backend.numpy.absolute(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.absolute", "keras_core.ops.numpy.absolute"])
def absolute(x):
"""Compute the absolute value element-wise.
`keras_core.ops.abs` is a shorthand for this function.
Args:
x: Input tensor.
Returns:
An array containing the absolute value of each element in `x`.
Example:
>>> x = keras_core.ops.convert_to_tensor([-1.2, 1.2])
>>> keras_core.ops.absolute(x)
array([1.2, 1.2], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Absolute().symbolic_call(x)
return backend.numpy.absolute(x)
class Abs(Absolute):
pass
@keras_core_export(["keras_core.ops.abs", "keras_core.ops.numpy.abs"])
def abs(x):
"""Shorthand for `keras_core.ops.absolute`."""
return absolute(x)
class Add(Operation):
def call(self, x1, x2):
return backend.numpy.add(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
x1_sparse = getattr(x1, "sparse", True)
x2_sparse = getattr(x2, "sparse", True)
output_sparse = x1_sparse and x2_sparse
return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse)
@keras_core_export(["keras_core.ops.add", "keras_core.ops.numpy.add"])
def add(x1, x2):
"""Add arguments element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
The tensor containing the element-wise sum of `x1` and `x2`.
Examples:
>>> x1 = keras_core.ops.convert_to_tensor([1, 4])
>>> x2 = keras_core.ops.convert_to_tensor([5, 6])
>>> keras_core.ops.add(x1, x2)
array([6, 10], dtype=int32)
`keras_core.ops.add` also broadcasts shapes:
>>> x1 = keras_core.ops.convert_to_tensor(
... [[5, 4],
... [5, 6]]
... )
>>> x2 = keras_core.ops.convert_to_tensor([5, 6])
>>> keras_core.ops.add(x1, x2)
array([[10 10]
[10 12]], shape=(2, 2), dtype=int32)
"""
if any_symbolic_tensors((x1, x2)):
return Add().symbolic_call(x1, x2)
return backend.numpy.add(x1, x2)
class All(Operation):
def __init__(self, axis=None, keepdims=False):
super().__init__()
if isinstance(axis, int):
self.axis = [axis]
else:
self.axis = axis
self.keepdims = keepdims
def call(self, x):
return backend.numpy.all(
x,
axis=self.axis,
keepdims=self.keepdims,
)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(
x.shape,
axis=self.axis,
keepdims=self.keepdims,
),
dtype="bool",
)
@keras_core_export(["keras_core.ops.all", "keras_core.ops.numpy.all"])
def all(x, axis=None, keepdims=False):
"""Test whether all array elements along a given axis evaluate to `True`.
Args:
x: Input tensor.
axis: An integer or tuple of integers that represent the axis along
which a logical AND reduction is performed. The default
(`axis=None`) is to perform a logical AND over all the dimensions
of the input array. `axis` may be negative, in which case it counts
from the last to the first axis.
keepdims: If `True`, axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
broadcast correctly against the input array. Defaults to `False`.
Returns:
The tensor containing the logical AND reduction over the `axis`.
Examples:
>>> x = keras_core.ops.convert_to_tensor([True, False])
>>> keras_core.ops.all(x)
array(False, shape=(), dtype=bool)
>>> x = keras_core.ops.convert_to_tensor([[True, False], [True, True]])
>>> keras_core.ops.all(x, axis=0)
array([ True False], shape=(2,), dtype=bool)
With `keepdims=True`, the reduced axes are kept as dimensions of size one.
>>> x = keras_core.ops.convert_to_tensor([[True, False], [True, True]])
>>> keras_core.ops.all(x, keepdims=True)
array([[False]], shape=(1, 1), dtype=bool)
"""
if any_symbolic_tensors((x,)):
return All(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.all(x, axis=axis, keepdims=keepdims)
class Any(Operation):
def __init__(self, axis=None, keepdims=False):
super().__init__()
if isinstance(axis, int):
self.axis = [axis]
else:
self.axis = axis
self.keepdims = keepdims
def call(self, x):
return backend.numpy.any(
x,
axis=self.axis,
keepdims=self.keepdims,
)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(
x.shape,
axis=self.axis,
keepdims=self.keepdims,
),
dtype="bool",
)
@keras_core_export(["keras_core.ops.any", "keras_core.ops.numpy.any"])
def any(x, axis=None, keepdims=False):
"""Test whether any array element along a given axis evaluates to `True`.
Args:
x: Input tensor.
axis: An integer or tuple of integers that represent the axis along
which a logical OR reduction is performed. The default
(`axis=None`) is to perform a logical OR over all the dimensions
of the input array. `axis` may be negative, in which case it counts
from the last to the first axis.
keepdims: If `True`, axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
broadcast correctly against the input array. Defaults to `False`.
Returns:
The tensor containing the logical OR reduction over the `axis`.
Examples:
>>> x = keras_core.ops.convert_to_tensor([True, False])
>>> keras_core.ops.any(x)
array(True, shape=(), dtype=bool)
>>> x = keras_core.ops.convert_to_tensor([[True, False], [True, True]])
>>> keras_core.ops.any(x, axis=0)
array([ True True], shape=(2,), dtype=bool)
With `keepdims=True`, the reduced axes are kept as dimensions of size one.
>>> x = keras_core.ops.convert_to_tensor([[True, False], [True, True]])
>>> keras_core.ops.any(x, keepdims=True)
array([[ True]], shape=(1, 1), dtype=bool)
"""
if any_symbolic_tensors((x,)):
return Any(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.any(x, axis=axis, keepdims=keepdims)
class Amax(Operation):
def __init__(self, axis=None, keepdims=False):
super().__init__()
if isinstance(axis, int):
axis = [axis]
self.axis = axis
self.keepdims = keepdims
def call(self, x):
return backend.numpy.amax(
x,
axis=self.axis,
keepdims=self.keepdims,
)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
dtype=x.dtype,
)
@keras_core_export(["keras_core.ops.amax", "keras_core.ops.numpy.amax"])
def amax(x, axis=None, keepdims=False):
"""Returns the maximum of an array or maximum value along an axis.
Args:
x: Input tensor.
axis: Axis along which to compute the maximum.
By default (`axis=None`), find the maximum value in all the
dimensions of the input array.
keepdims: If `True`, axes which are reduced are left in the result as
dimensions that are broadcast to the size of the original
input tensor. Defaults to `False`.
Returns:
An array with the maximum value. If `axis=None`, the result is a scalar
value representing the maximum element in the entire array. If `axis` is
given, the result is an array with the maximum values along
the specified axis.
Examples:
>>> x = keras_core.ops.convert_to_tensor([[1, 3, 5], [2, 3, 6]])
>>> keras_core.ops.amax(x)
array(6, dtype=int32)
>>> x = keras_core.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]])
>>> keras_core.ops.amax(x, axis=0)
array([1, 6, 8], dtype=int32)
>>> x = keras_core.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]])
>>> keras_core.ops.amax(x, axis=1, keepdims=True)
array([[8], [5]], dtype=int32)
"""
if any_symbolic_tensors((x,)):
return Amax(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.amax(x, axis=axis, keepdims=keepdims)
class Amin(Operation):
def __init__(self, axis=None, keepdims=False):
super().__init__()
if isinstance(axis, int):
axis = [axis]
self.axis = axis
self.keepdims = keepdims
def call(self, x):
return backend.numpy.amin(x, axis=self.axis, keepdims=self.keepdims)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
dtype=x.dtype,
)
@keras_core_export(["keras_core.ops.amin", "keras_core.ops.numpy.amin"])
def amin(x, axis=None, keepdims=False):
"""Returns the minimum of an array or minimum value along an axis.
Args:
x: Input tensor.
axis: Axis along which to compute the minimum.
By default (`axis=None`), find the minimum value in all the
dimensions of the input array.
keepdims: If `True`, axes which are reduced are left in the result as
dimensions that are broadcast to the size of the original
input tensor. Defaults to `False`.
Returns:
An array with the minimum value. If `axis=None`, the result is a scalar
value representing the minimum element in the entire array. If `axis` is
given, the result is an array with the minimum values along
the specified axis.
Examples:
>>> x = keras_core.ops.convert_to_tensor([1, 3, 5, 2, 3, 6])
>>> keras_core.ops.amin(x)
array(1, dtype=int32)
>>> x = keras_core.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]])
>>> keras_core.ops.amin(x, axis=0)
array([1, 5, 3], dtype=int32)
>>> x = keras_core.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]])
>>> keras_core.ops.amin(x, axis=1, keepdims=True)
array([[1], [3]], dtype=int32)
"""
if any_symbolic_tensors((x,)):
return Amin(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.amin(x, axis=axis, keepdims=keepdims)
class Append(Operation):
def __init__(self, axis=None):
super().__init__()
self.axis = axis
def call(self, x1, x2):
return backend.numpy.append(x1, x2, axis=self.axis)
def compute_output_spec(self, x1, x2):
x1_shape = x1.shape
x2_shape = x2.shape
if self.axis is None:
if None in x1_shape or None in x2_shape:
output_shape = [None]
else:
output_shape = [int(np.prod(x1_shape) + np.prod(x2_shape))]
return KerasTensor(output_shape, dtype=x1.dtype)
if not shape_equal(x1_shape, x2_shape, [self.axis]):
raise ValueError(
"`append` requires inputs to have the same shape except the "
f"`axis={self.axis}`, but received shape {x1_shape} and "
f"{x2_shape}."
)
output_shape = list(x1_shape)
output_shape[self.axis] = x1_shape[self.axis] + x2_shape[self.axis]
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.append", "keras_core.ops.numpy.append"])
def append(
x1,
x2,
axis=None,
):
"""Append tensor `x2` to the end of tensor `x1`.
Args:
x1: First input tensor.
x2: Second input tensor.
axis: Axis along which tensor `x2` is appended to tensor `x1`.
If `None`, both tensors are flattened before use.
Returns:
A tensor with the values of `x2` appended to `x1`.
Examples:
>>> x1 = keras_core.ops.convert_to_tensor([1, 2, 3])
>>> x2 = keras_core.ops.convert_to_tensor([[4, 5, 6], [7, 8, 9]])
>>> keras_core.ops.append(x1, x2)
array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)
When `axis` is specified, `x1` and `x2` must have compatible shapes.
>>> x1 = keras_core.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]])
>>> x2 = keras_core.ops.convert_to_tensor([[7, 8, 9]])
>>> keras_core.ops.append(x1, x2, axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=int32)
>>> x3 = keras_core.ops.convert_to_tensor([7, 8, 9])
>>> keras_core.ops.append(x1, x3, axis=0)
Traceback (most recent call last):
...
TypeError: Cannot concatenate arrays with different numbers of
dimensions: got (2, 3), (3,).
"""
if any_symbolic_tensors((x1, x2)):
return Append(axis=axis).symbolic_call(x1, x2)
return backend.numpy.append(x1, x2, axis=axis)
class Arange(Operation):
def call(self, start, stop=None, step=1, dtype=None):
return backend.numpy.arange(start, stop, step=step, dtype=dtype)
def compute_output_spec(self, start, stop=None, step=1, dtype=None):
if stop is None:
start, stop = 0, start
output_shape = [np.ceil((stop - start) / step).astype(int)]
return KerasTensor(output_shape, dtype=dtype)
@keras_core_export(["keras_core.ops.arange", "keras_core.ops.numpy.arange"])
def arange(start, stop=None, step=1, dtype=None):
"""Return evenly spaced values within a given interval.
`arange` can be called with a varying number of positional arguments:
* `arange(stop)`: Values are generated within the half-open interval
`[0, stop)` (in other words, the interval including start but excluding
stop).
* `arange(start, stop)`: Values are generated within the half-open interval
`[start, stop)`.
* `arange(start, stop, step)`: Values are generated within the half-open
interval `[start, stop)`, with spacing between values given by step.
Args:
start: Integer or real, representing the start of the interval. The
interval includes this value.
stop: Integer or real, representing the end of the interval. The
interval does not include this value, except in some cases where
`step` is not an integer and floating point round-off affects the
length of `out`. Defaults to `None`.
step: Integer or real, representing the spacing between values. For any
output `out`, this is the distance between two adjacent values,
`out[i+1] - out[i]`. The default step size is 1. If `step` is
specified as a positional argument, `start` must also be given.
dtype: The type of the output array. If `dtype` is not given, infer the
data type from the other input arguments.
Returns:
Tensor of evenly spaced values.
For floating point arguments, the length of the result is
`ceil((stop - start)/step)`. Because of floating point overflow, this
rule may result in the last element of out being greater than stop.
Examples:
>>> keras_core.ops.arange(3)
array([0, 1, 2], dtype=int32)
>>> keras_core.ops.arange(3.0)
array([0., 1., 2.], dtype=float32)
>>> keras_core.ops.arange(3, 7)
array([3, 4, 5, 6], dtype=int32)
>>> keras_core.ops.arange(3, 7, 2)
array([3, 5], dtype=int32)
"""
return backend.numpy.arange(start, stop, step=step, dtype=dtype)
class Arccos(Operation):
def call(self, x):
return backend.numpy.arccos(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.arccos", "keras_core.ops.numpy.arccos"])
def arccos(x):
"""Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if `y = cos(x)`, then `x = arccos(y)`.
Args:
x: Input tensor.
Returns:
Tensor of the angle of the ray intersecting the unit circle at the given
x-coordinate in radians `[0, pi]`.
Example:
>>> x = keras_core.ops.convert_to_tensor([1, -1])
>>> keras_core.ops.arccos(x)
array([0.0, 3.1415927], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Arccos().symbolic_call(x)
return backend.numpy.arccos(x)
class Arccosh(Operation):
def call(self, x):
return backend.numpy.arccosh(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.arccosh", "keras_core.ops.numpy.arccosh"])
def arccosh(x):
"""Inverse hyperbolic cosine, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
Example:
>>> x = keras_core.ops.convert_to_tensor([10, 100])
>>> keras_core.ops.arccosh(x)
array([2.993223, 5.298292], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Arccosh().symbolic_call(x)
return backend.numpy.arccosh(x)
class Arcsin(Operation):
def call(self, x):
return backend.numpy.arcsin(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.arcsin", "keras_core.ops.numpy.arcsin"])
def arcsin(x):
"""Inverse sine, element-wise.
Args:
x: Input tensor.
Returns:
Tensor of the inverse sine of each element in `x`, in radians and in
the closed interval `[-pi/2, pi/2]`.
Example:
>>> x = keras_core.ops.convert_to_tensor([1, -1, 0])
>>> keras_core.ops.arcsin(x)
array([ 1.5707964, -1.5707964, 0.], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Arcsin().symbolic_call(x)
return backend.numpy.arcsin(x)
class Arcsinh(Operation):
def call(self, x):
return backend.numpy.arcsinh(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.arcsinh", "keras_core.ops.numpy.arcsinh"])
def arcsinh(x):
"""Inverse hyperbolic sine, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
Example:
>>> x = keras_core.ops.convert_to_tensor([1, -1, 0])
>>> keras_core.ops.arcsinh(x)
array([0.88137364, -0.88137364, 0.0], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Arcsinh().symbolic_call(x)
return backend.numpy.arcsinh(x)
class Arctan(Operation):
def call(self, x):
return backend.numpy.arctan(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.arctan", "keras_core.ops.numpy.arctan"])
def arctan(x):
"""Trigonometric inverse tangent, element-wise.
Args:
x: Input tensor.
Returns:
Tensor of the inverse tangent of each element in `x`, in the interval
`[-pi/2, pi/2]`.
Example:
>>> x = keras_core.ops.convert_to_tensor([0, 1])
>>> keras_core.ops.arctan(x)
array([0., 0.7853982], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Arctan().symbolic_call(x)
return backend.numpy.arctan(x)
class Arctan2(Operation):
def call(self, x1, x2):
return backend.numpy.arctan2(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
outputs_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(outputs_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.arctan2", "keras_core.ops.numpy.arctan2"])
def arctan2(x1, x2):
"""Element-wise arc tangent of `x1/x2` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that `arctan2(x1, x2)` is the
signed angle in radians between the ray ending at the origin and passing
through the point `(1, 0)`, and the ray ending at the origin and passing
through the point `(x2, x1)`. (Note the role reversal: the "y-coordinate"
is the first function parameter, the "x-coordinate" is the second.) By IEEE
convention, this function is defined for `x2 = +/-0` and for either or both
of `x1` and `x2` `= +/-inf`.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Tensor of angles in radians, in the range `[-pi, pi]`.
Examples:
Consider four points in different quadrants:
>>> x = keras_core.ops.convert_to_tensor([-1, +1, +1, -1])
>>> y = keras_core.ops.convert_to_tensor([-1, -1, +1, +1])
>>> keras_core.ops.arctan2(y, x) * 180 / numpy.pi
array([-135., -45., 45., 135.], dtype=float32)
Note the order of the parameters. `arctan2` is also defined when `x2=0` and
at several other points, obtaining values in the range `[-pi, pi]`:
>>> keras_core.ops.arctan2(
... keras_core.ops.array([1., -1.]),
... keras_core.ops.array([0., 0.]),
... )
array([ 1.5707964, -1.5707964], dtype=float32)
>>> keras_core.ops.arctan2(
... keras_core.ops.array([0., 0., numpy.inf]),
... keras_core.ops.array([+0., -0., numpy.inf]),
... )
array([0., 3.1415925, 0.7853982], dtype=float32)
"""
if any_symbolic_tensors((x1, x2)):
return Arctan2().symbolic_call(x1, x2)
return backend.numpy.arctan2(x1, x2)
class Arctanh(Operation):
def call(self, x):
return backend.numpy.arctanh(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.arctanh", "keras_core.ops.numpy.arctanh"])
def arctanh(x):
"""Inverse hyperbolic tangent, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
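Example:
Illustrative doctest; the printed values are rounded float32 results
and the exact `array(...)` formatting may vary by backend.
>>> x = keras_core.ops.convert_to_tensor([0.0, 0.5])
>>> keras_core.ops.arctanh(x)
array([0., 0.5493061], dtype=float32)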
"""
if any_symbolic_tensors((x,)):
return Arctanh().symbolic_call(x)
return backend.numpy.arctanh(x)
class Argmax(Operation):
def __init__(self, axis=None):
super().__init__()
self.axis = axis
def call(self, x):
return backend.numpy.argmax(x, axis=self.axis)
def compute_output_spec(self, x):
if self.axis is None:
return KerasTensor([], dtype="int32")
return KerasTensor(
reduce_shape(x.shape, axis=[self.axis]), dtype="int32"
)
@keras_core_export(["keras_core.ops.argmax", "keras_core.ops.numpy.argmax"])
def argmax(x, axis=None):
"""Returns the indices of the maximum values along an axis.
Args:
x: Input tensor.
axis: By default, the index is into the flattened tensor, otherwise
along the specified axis.
Returns:
Tensor of indices. It has the same shape as `x`, with the dimension
along `axis` removed.
Example:
>>> x = keras_core.ops.arange(6).reshape(2, 3) + 10
>>> x
array([[10, 11, 12],
[13, 14, 15]], dtype=int32)
>>> keras_core.ops.argmax(x)
array(5, dtype=int32)
>>> keras_core.ops.argmax(x, axis=0)
array([1, 1, 1], dtype=int32)
>>> keras_core.ops.argmax(x, axis=1)
array([2, 2], dtype=int32)
"""
if any_symbolic_tensors((x,)):
return Argmax(axis=axis).symbolic_call(x)
return backend.numpy.argmax(x, axis=axis)
class Argmin(Operation):
def __init__(self, axis=None):
super().__init__()
self.axis = axis
def call(self, x):
return backend.numpy.argmin(x, axis=self.axis)
def compute_output_spec(self, x):
if self.axis is None:
return KerasTensor([], dtype="int32")
return KerasTensor(
reduce_shape(x.shape, axis=[self.axis]), dtype="int32"
)
@keras_core_export(["keras_core.ops.argmin", "keras_core.ops.numpy.argmin"])
def argmin(x, axis=None):
"""Returns the indices of the minium values along an axis.
Args:
x: Input tensor.
axis: By default, the index is into the flattened tensor, otherwise
along the specified axis.
Returns:
Tensor of indices. It has the same shape as `x`, with the dimension
along `axis` removed.
Example:
>>> x = keras_core.ops.arange(6).reshape(2, 3) + 10
>>> x
array([[10, 11, 12],
[13, 14, 15]], dtype=int32)
>>> keras_core.ops.argmin(x)
array(0, dtype=int32)
>>> keras_core.ops.argmin(x, axis=0)
array([0, 0, 0], dtype=int32)
>>> keras_core.ops.argmin(x, axis=1)
array([0, 0], dtype=int32)
"""
if any_symbolic_tensors((x,)):
return Argmin(axis=axis).symbolic_call(x)
return backend.numpy.argmin(x, axis=axis)
class Argsort(Operation):
def __init__(self, axis=-1):
super().__init__()
self.axis = axis
def call(self, x):
return backend.numpy.argsort(x, axis=self.axis)
def compute_output_spec(self, x):
if self.axis is None:
return KerasTensor([int(np.prod(x.shape))], dtype="int32")
return KerasTensor(x.shape, dtype="int32")
@keras_core_export(["keras_core.ops.argsort", "keras_core.ops.numpy.argsort"])
def argsort(x, axis=-1):
"""Returns the indices that would sort a tensor.
Args:
x: Input tensor.
axis: Axis along which to sort. Defaults to `-1` (the last axis). If
`None`, the flattened tensor is used.
Returns:
Tensor of indices that sort `x` along the specified `axis`.
Examples:
One dimensional array:
>>> x = keras_core.ops.array([3, 1, 2])
>>> keras_core.ops.argsort(x)
array([1, 2, 0], dtype=int32)
Two-dimensional array:
>>> x = keras_core.ops.array([[0, 3], [3, 2], [4, 5]])
>>> x
array([[0, 3],
[3, 2],
[4, 5]], dtype=int32)
>>> keras_core.ops.argsort(x, axis=0)
array([[0, 1],
[1, 0],
[2, 2]], dtype=int32)
>>> keras_core.ops.argsort(x, axis=1)
array([[0, 1],
[1, 0],
[0, 1]], dtype=int32)
"""
if any_symbolic_tensors((x,)):
return Argsort(axis=axis).symbolic_call(x)
return backend.numpy.argsort(x, axis=axis)
class Array(Operation):
def call(self, x, dtype=None):
return backend.numpy.array(x, dtype=dtype)
def compute_output_spec(self, x, dtype=None):
return KerasTensor(x.shape, dtype=dtype)
@keras_core_export(["keras_core.ops.array", "keras_core.ops.numpy.array"])
def array(x, dtype=None):
"""Create a tensor.
Args:
x: Input tensor.
dtype: The desired data-type for the tensor.
Returns:
A tensor.
Examples:
>>> keras_core.ops.array([1, 2, 3])
array([1, 2, 3], dtype=int32)
>>> keras_core.ops.array([1, 2, 3], dtype="float32")
array([1., 2., 3.], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Array().symbolic_call(x, dtype=dtype)
return backend.numpy.array(x, dtype=dtype)
class Average(Operation):
def __init__(self, axis=None):
super().__init__()
# np.average() does not support axis as tuple as declared by the
# docstring, it only supports int or None.
self.axis = axis
def call(self, x, weights=None):
return backend.numpy.average(x, weights=weights, axis=self.axis)
def compute_output_spec(self, x, weights=None):
if weights is not None:
shape_match = shape_equal(x.shape, weights.shape, allow_none=True)
if self.axis is not None:
shape_match_on_axis = shape_equal(
[x.shape[self.axis]], weights.shape, allow_none=True
)
if self.axis is None:
if weights is None or shape_match:
return KerasTensor(
[],
dtype=x.dtype,
)
else:
raise ValueError(
"`weights` must have the same shape as `x` when "
f"`axis=None`, but received `weights.shape={weights.shape}`"
f" and `x.shape={x.shape}`."
)
if weights is None or shape_match_on_axis or shape_match:
return KerasTensor(
reduce_shape(x.shape, axis=[self.axis]),
dtype=x.dtype,
)
else:
# `weights` can either be a 1D array of length `x.shape[axis]` or
# of the same shape as `x`.
raise ValueError(
"`weights` must have the same size as `x` at "
f"`axis={self.axis}` but received "
f"`weights.shape={weights.shape}` while x.shape at "
f"`{self.axis}` is `{x.shape[self.axis]}`."
)
@keras_core_export(["keras_core.ops.average", "keras_core.ops.numpy.average"])
def average(x, axis=None, weights=None):
"""Compute the weighted average along the specified axis.
Args:
x: Input tensor.
axis: Integer along which to average `x`. The default, `axis=None`,
will average over all of the elements of the input tensor. If axis
is negative it counts from the last to the first axis.
weights: Tensor of weights associated with the values in `x`. Each
value in `x` contributes to the average according to its
associated weight. The weights array can either be 1-D (in which
case its length must be the size of `x` along the given axis) or of
the same shape as `x`. If `weights=None` (default), then all data
in `x` are assumed to have a weight equal to one.
The 1-D calculation is: `avg = sum(a * weights) / sum(weights)`.
The only constraint on weights is that `sum(weights)` must not be 0.
Returns:
Return the average along the specified axis.
Examples:
>>> data = keras_core.ops.arange(1, 5)
>>> data
array([1, 2, 3, 4], dtype=int32)
>>> keras_core.ops.average(data)
array(2.5, dtype=float32)
>>> keras_core.ops.average(
... keras_core.ops.arange(1, 11),
... weights=keras_core.ops.arange(10, 0, -1)
... )
array(4., dtype=float32)
>>> data = keras_core.ops.arange(6).reshape((3, 2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]], dtype=int32)
>>> keras_core.ops.average(
... data,
... axis=1,
... weights=keras_core.ops.array([1./4, 3./4])
... )
array([0.75, 2.75, 4.75], dtype=float32)
>>> keras_core.ops.average(
... data,
... weights=keras_core.ops.array([1./4, 3./4])
... )
Traceback (most recent call last):
...
ValueError: Axis must be specified when shapes of a and weights differ.
"""
if any_symbolic_tensors((x,)):
return Average(axis=axis).symbolic_call(x, weights=weights)
return backend.numpy.average(x, weights=weights, axis=axis)
class Bincount(Operation):
def __init__(self, weights=None, minlength=0):
super().__init__()
self.weights = weights
self.minlength = minlength
def call(self, x):
return backend.numpy.bincount(
x, weights=self.weights, minlength=self.minlength
)
def compute_output_spec(self, x):
out_shape = backend.numpy.amax(x) + 1
return KerasTensor(out_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.bincount", "keras_core.ops.numpy.bincount"])
def bincount(x, weights=None, minlength=0):
"""Count the number of occurrences of each value in a tensor of integers.
Args:
x: Input tensor.
It must be of dimension 1, and it must only contain non-negative
integer(s).
weights: Weight tensor.
It must have the same length as `x`. The default value is `None`.
If specified, `x` is weighted by it, i.e. if `n = x[i]`,
`out[n] += weights[i]` instead of the default behavior `out[n] += 1`.
minlength: An integer.
The default value is 0. If specified, there will be at least
this number of bins in the output tensor. If greater than
`max(x) + 1`, each value of the output at an index higher than
`max(x)` is set to 0.
Returns:
1D tensor where each element gives the number of occurrence(s) of its
index value in `x`. Its length is the maximum between `max(x) + 1` and
`minlength`.
Examples:
>>> x = keras_core.ops.array([1, 2, 2, 3], dtype="uint8")
>>> keras_core.ops.bincount(x)
array([0, 1, 2, 1], dtype=int32)
>>> weights = x / 2
>>> weights
array([0.5, 1., 1., 1.5], dtype=float64)
>>> keras_core.ops.bincount(x, weights=weights)
array([0., 0.5, 2., 1.5], dtype=float64)
>>> minlength = (keras_core.ops.max(x).numpy() + 1) + 2 # 6
>>> keras_core.ops.bincount(x, minlength=minlength)
array([0, 1, 2, 1, 0, 0], dtype=int32)
"""
if any_symbolic_tensors((x,)):
return Bincount(weights=weights, minlength=minlength).symbolic_call(x)
return backend.numpy.bincount(x, weights=weights, minlength=minlength)
class BroadcastTo(Operation):
def __init__(self, shape):
super().__init__()
self.shape = shape
def call(self, x):
return backend.numpy.broadcast_to(x, self.shape)
def compute_output_spec(self, x):
# Catch broadcasting errors for clear error messages.
broadcast_shapes(x.shape, self.shape)
return KerasTensor(self.shape, dtype=x.dtype)
@keras_core_export(
[
"keras_core.ops.broadcast_to",
"keras_core.ops.numpy.broadcast_to",
]
)
def broadcast_to(x, shape):
"""Broadcast a tensor to a new shape.
Args:
x: The tensor to broadcast.
shape: The shape of the desired tensor. A single integer `i` is
interpreted as `(i,)`.
Returns:
A tensor with the desired shape.
Examples:
>>> x = keras_core.ops.array([1, 2, 3])
>>> keras_core.ops.broadcast_to(x, (3, 3))
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
if any_symbolic_tensors((x,)):
return BroadcastTo(shape=shape).symbolic_call(x)
return backend.numpy.broadcast_to(x, shape)
class Ceil(Operation):
def call(self, x):
return backend.numpy.ceil(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.ceil", "keras_core.ops.numpy.ceil"])
def ceil(x):
"""Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`.
Args:
x: Input tensor.
Returns:
The ceiling of each element in `x`.
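Example:
Illustrative doctest (output formatting may vary by backend):
>>> x = keras_core.ops.convert_to_tensor([-1.5, 1.2, 2.0])
>>> keras_core.ops.ceil(x)
array([-1., 2., 2.], dtype=float32)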
"""
if any_symbolic_tensors((x,)):
return Ceil().symbolic_call(x)
return backend.numpy.ceil(x)
class Clip(Operation):
def __init__(self, x_min, x_max):
super().__init__()
self.x_min = x_min
self.x_max = x_max
def call(self, x):
return backend.numpy.clip(x, self.x_min, self.x_max)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.clip", "keras_core.ops.numpy.clip"])
def clip(x, x_min, x_max):
"""Clip (limit) the values in a tensor.
Given an interval, values outside the interval are clipped to the
interval edges. For example, if an interval of `[0, 1]` is specified,
values smaller than 0 become 0, and values larger than 1 become 1.
Args:
x: Input tensor.
x_min: Minimum value.
x_max: Maximum value.
Returns:
The clipped tensor.
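Example:
A minimal sketch; the integer dtype shown assumes the backend's
default int32.
>>> x = keras_core.ops.convert_to_tensor([-2, 0, 2, 5])
>>> keras_core.ops.clip(x, -1, 3)
array([-1, 0, 2, 3], dtype=int32)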
"""
if any_symbolic_tensors((x,)):
return Clip(x_min, x_max).symbolic_call(x)
return backend.numpy.clip(x, x_min, x_max)
class Concatenate(Operation):
def __init__(self, axis=0):
super().__init__()
if axis is None:
raise ValueError("`axis` cannot be None for `concatenate`.")
self.axis = axis
def call(self, xs):
return backend.numpy.concatenate(xs, axis=self.axis)
def compute_output_spec(self, xs):
first_shape = xs[0].shape
total_size_on_axis = 0
all_sparse = True
for x in xs:
if not shape_equal(
x.shape, first_shape, axis=[self.axis], allow_none=True
):
raise ValueError(
"Every value in `xs` must have the same shape except on "
f"the `axis` dim. But found element of shape {x.shape}, "
f"which is different from the first element's "
f"shape {first_shape}."
)
if total_size_on_axis is None or x.shape[self.axis] is None:
total_size_on_axis = None
else:
total_size_on_axis += x.shape[self.axis]
if not x.sparse:
all_sparse = False
output_shape = list(first_shape)
output_shape[self.axis] = total_size_on_axis
return KerasTensor(output_shape, dtype=x.dtype, sparse=all_sparse)
@keras_core_export(
[
"keras_core.ops.concatenate",
"keras_core.ops.numpy.concatenate",
]
)
def concatenate(xs, axis=0):
"""Join a sequence of tensors along an existing axis.
Args:
xs: The sequence of tensors to concatenate.
axis: The axis along which the tensors will be joined. Defaults to `0`.
Returns:
The concatenated tensor.
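Example:
Illustrative doctest (dtype and formatting may vary by backend):
>>> x1 = keras_core.ops.convert_to_tensor([1, 2])
>>> x2 = keras_core.ops.convert_to_tensor([3, 4])
>>> keras_core.ops.concatenate([x1, x2])
array([1, 2, 3, 4], dtype=int32)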
"""
if any_symbolic_tensors(xs):
return Concatenate(axis=axis).symbolic_call(xs)
return backend.numpy.concatenate(xs, axis=axis)
class Conjugate(Operation):
def call(self, x):
return backend.numpy.conjugate(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(
["keras_core.ops.conjugate", "keras_core.ops.numpy.conjugate"]
)
def conjugate(x):
"""Returns the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the sign
of its imaginary part.
`keras_core.ops.conj` is a shorthand for this function.
Args:
x: Input tensor.
Returns:
The complex conjugate of each element in `x`.
"""
if any_symbolic_tensors((x,)):
return Conjugate().symbolic_call(x)
return backend.numpy.conjugate(x)
class Conj(Conjugate):
pass
@keras_core_export(["keras_core.ops.conj", "keras_core.ops.numpy.conj"])
def conj(x):
"""Shorthand for `keras_core.ops.conjugate`."""
return conjugate(x)
class Copy(Operation):
def call(self, x):
return backend.numpy.copy(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.copy", "keras_core.ops.numpy.copy"])
def copy(x):
"""Returns a copy of `x`.
Args:
x: Input tensor.
Returns:
A copy of `x`.
"""
if any_symbolic_tensors((x,)):
return Copy().symbolic_call(x)
return backend.numpy.copy(x)
class Cos(Operation):
def call(self, x):
return backend.numpy.cos(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.cos", "keras_core.ops.numpy.cos"])
def cos(x):
"""Cosine, element-wise.
Args:
x: Input tensor.
Returns:
The corresponding cosine values.
"""
if any_symbolic_tensors((x,)):
return Cos().symbolic_call(x)
return backend.numpy.cos(x)
class Cosh(Operation):
def call(self, x):
return backend.numpy.cosh(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.cosh", "keras_core.ops.numpy.cosh"])
def cosh(x):
"""Hyperbolic cosine, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
"""
if any_symbolic_tensors((x,)):
return Cosh().symbolic_call(x)
return backend.numpy.cosh(x)
class CountNonzero(Operation):
def __init__(self, axis=None):
super().__init__()
if isinstance(axis, int):
self.axis = (axis,)
else:
self.axis = axis
def call(self, x):
return backend.numpy.count_nonzero(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis),
dtype="int32",
)
@keras_core_export(
[
"keras_core.ops.count_nonzero",
"keras_core.ops.numpy.count_nonzero",
]
)
def count_nonzero(x, axis=None):
"""Counts the number of non-zero values in `x` along the given `axis`.
If no axis is specified then all non-zeros in the tensor are counted.
Args:
x: Input tensor.
axis: Axis or tuple of axes along which to count the number of
non-zeros. Defaults to `None`.
Returns:
int or tensor of ints.
Examples:
>>> x = keras_core.ops.array([[0, 1, 7, 0], [3, 0, 2, 19]])
>>> keras_core.ops.count_nonzero(x)
5
>>> keras_core.ops.count_nonzero(x, axis=0)
array([1, 1, 2, 1], dtype=int64)
>>> keras_core.ops.count_nonzero(x, axis=1)
array([2, 3], dtype=int64)
"""
if any_symbolic_tensors((x,)):
return CountNonzero(axis=axis).symbolic_call(x)
return backend.numpy.count_nonzero(x, axis=axis)
class Cross(Operation):
def __init__(self, axisa=-1, axisb=-1, axisc=-1, axis=None):
super().__init__()
if axis is not None:
self.axisa = axis
self.axisb = axis
self.axisc = axis
else:
self.axisa = axisa
self.axisb = axisb
self.axisc = axisc
def call(self, x1, x2):
return backend.numpy.cross(x1, x2, self.axisa, self.axisb, self.axisc)
def compute_output_spec(self, x1, x2):
x1_shape = list(x1.shape)
x2_shape = list(x2.shape)
x1_value_size = x1_shape[self.axisa]
x2_value_size = x2_shape[self.axisb]
del x1_shape[self.axisa]
del x2_shape[self.axisb]
output_shape = broadcast_shapes(x1_shape, x2_shape)
if x1_value_size is not None and x1_value_size not in (2, 3):
raise ValueError(
"`x1`'s dim on `axis={axisa}` must be either 2 or 3, but "
f"received: {x1_value_size}"
)
if x2_value_size is not None and x2_value_size not in (2, 3):
raise ValueError(
"`x2`'s dim on `axis={axisb}` must be either 2 or 3, but "
f"received: {x2_value_size}"
)
if x1_value_size == 3 or x2_value_size == 3:
value_size = [3]
else:
value_size = []
output_shape = (
output_shape[: self.axisc] + value_size + output_shape[self.axisc :]
)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.cross", "keras_core.ops.numpy.cross"])
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""Returns the cross product of two (arrays of) vectors.
The cross product of `x1` and `x2` in R^3 is a vector
perpendicular to both `x1` and `x2`. If `x1` and `x2` are arrays of
vectors, the vectors are defined by the last axis of `x1` and `x2`
by default, and these axes can have dimensions 2 or 3.
Where the dimension of either `x1` or `x2` is 2, the third component of
the input vector is assumed to be zero and the cross product calculated
accordingly.
In cases where both input vectors have dimension 2, the z-component of
the cross product is returned.
Args:
x1: Components of the first vector(s).
x2: Components of the second vector(s).
axisa: Axis of `x1` that defines the vector(s). Defaults to `-1`.
axisb: Axis of `x2` that defines the vector(s). Defaults to `-1`.
axisc: Axis of the result containing the cross product vector(s).
Ignored if both input vectors have dimension 2, as the return is
scalar. By default, the last axis.
axis: If defined, the axis of `x1`, `x2` and the result that
defines the vector(s) and cross product(s). Overrides `axisa`,
`axisb` and `axisc`.
Note:
Torch backend does not support two dimensional vectors, or the
arguments `axisa`, `axisb` and `axisc`. Use `axis` instead.
Returns:
Vector cross product(s).
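Example:
Illustrative doctest for the 3-D case (formatting may vary by
backend): the cross product of the x and y unit vectors is the
z unit vector.
>>> x1 = keras_core.ops.convert_to_tensor([1., 0., 0.])
>>> x2 = keras_core.ops.convert_to_tensor([0., 1., 0.])
>>> keras_core.ops.cross(x1, x2)
array([0., 0., 1.], dtype=float32)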
"""
if any_symbolic_tensors((x1, x2)):
return Cross(
axisa=axisa, axisb=axisb, axisc=axisc, axis=axis
).symbolic_call(x1, x2)
return backend.numpy.cross(
x1,
x2,
axisa=axisa,
axisb=axisb,
axisc=axisc,
axis=axis,
)
class Cumprod(Operation):
def __init__(self, axis=None):
super().__init__()
self.axis = axis
def call(self, x):
return backend.numpy.cumprod(x, axis=self.axis)
def compute_output_spec(self, x):
if self.axis is None:
if None in x.shape:
output_shape = (None,)
else:
output_shape = (int(np.prod(x.shape)),)
return KerasTensor(output_shape, dtype="int32")
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.cumprod", "keras_core.ops.numpy.cumprod"])
def cumprod(x, axis=None):
"""Return the cumulative product of elements along a given axis.
Args:
x: Input tensor.
axis: Axis along which the cumulative product is computed.
By default the input is flattened.
Returns:
Output tensor.
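Example:
Illustrative doctest (dtype shown assumes a default int32 backend):
>>> x = keras_core.ops.convert_to_tensor([1, 2, 3, 4])
>>> keras_core.ops.cumprod(x)
array([1, 2, 6, 24], dtype=int32)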
"""
if any_symbolic_tensors((x,)):
return Cumprod(axis=axis).symbolic_call(x)
return backend.numpy.cumprod(x, axis=axis)
class Cumsum(Operation):
def __init__(self, axis=None):
super().__init__()
self.axis = axis
def call(self, x):
return backend.numpy.cumsum(x, axis=self.axis)
def compute_output_spec(self, x):
if self.axis is None:
if None in x.shape:
output_shape = (None,)
else:
output_shape = (int(np.prod(x.shape)),)
return KerasTensor(output_shape, dtype="int32")
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.cumsum", "keras_core.ops.numpy.cumsum"])
def cumsum(x, axis=None):
"""Returns the cumulative sum of elements along a given axis.
Args:
x: Input tensor.
axis: Axis along which the cumulative sum is computed.
By default the input is flattened.
Returns:
Output tensor.
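Example:
Illustrative doctest (dtype shown assumes a default int32 backend):
>>> x = keras_core.ops.convert_to_tensor([1, 2, 3, 4])
>>> keras_core.ops.cumsum(x)
array([1, 3, 6, 10], dtype=int32)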
"""
if any_symbolic_tensors((x,)):
return Cumsum(axis=axis).symbolic_call(x)
return backend.numpy.cumsum(x, axis=axis)
class Diag(Operation):
def __init__(self, k=0):
super().__init__()
self.k = k
def call(self, x):
return backend.numpy.diag(x, k=self.k)
def compute_output_spec(self, x):
x_shape = x.shape
if len(x_shape) == 1:
if x_shape[0] is None:
output_shape = [None, None]
else:
output_shape = [
x_shape[0] + int(np.abs(self.k)),
x_shape[0] + int(np.abs(self.k)),
]
elif len(x_shape) == 2:
if None in x_shape:
output_shape = [None]
else:
shorter_side = np.minimum(x_shape[0], x_shape[1])
if self.k > 0:
remaining = x_shape[1] - self.k
else:
remaining = x_shape[0] + self.k
output_shape = [
int(np.maximum(0, np.minimum(remaining, shorter_side)))
]
else:
raise ValueError(
f"`x` must be 1-D or 2-D, but received shape {x.shape}."
)
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.diag", "keras_core.ops.numpy.diag"])
def diag(x, k=0):
"""Extract a diagonal or construct a diagonal array.
Args:
x: Input tensor. If `x` is 2-D, returns the k-th diagonal of `x`.
If `x` is 1-D, return a 2-D tensor with `x` on the k-th diagonal.
k: The diagonal to consider. Defaults to `0`. Use `k > 0` for diagonals
above the main diagonal, and `k < 0` for diagonals below
the main diagonal.
Returns:
The extracted diagonal or constructed diagonal tensor.
Examples:
>>> from keras_core import ops
>>> x = ops.arange(9).reshape((3, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> ops.diag(x)
array([0, 4, 8])
>>> ops.diag(x, k=1)
array([1, 5])
>>> ops.diag(x, k=-1)
array([3, 7])
>>> ops.diag(ops.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
if any_symbolic_tensors((x,)):
return Diag(k=k).symbolic_call(x)
return backend.numpy.diag(x, k=k)
class Diagonal(Operation):
def __init__(self, offset=0, axis1=0, axis2=1):
super().__init__()
self.offset = offset
self.axis1 = axis1
self.axis2 = axis2
def call(self, x):
return backend.numpy.diagonal(
x,
offset=self.offset,
axis1=self.axis1,
axis2=self.axis2,
)
def compute_output_spec(self, x):
x_shape = list(x.shape)
if len(x_shape) < 2:
raise ValueError(
"`diagonal` requires an array of at least two dimensions, but "
"`x` is of shape {x.shape}."
)
shape_2d = [x_shape[self.axis1], x_shape[self.axis2]]
x_shape[self.axis1] = -1
x_shape[self.axis2] = -1
output_shape = list(filter((-1).__ne__, x_shape))
if None in shape_2d:
diag_shape = [None]
else:
shorter_side = np.minimum(shape_2d[0], shape_2d[1])
if self.offset > 0:
remaining = shape_2d[1] - self.offset
else:
remaining = shape_2d[0] + self.offset
diag_shape = [
int(np.maximum(0, np.minimum(remaining, shorter_side)))
]
output_shape = output_shape + diag_shape
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.diagonal", "keras_core.ops.numpy.diagonal"])
def diagonal(x, offset=0, axis1=0, axis2=1):
"""Return specified diagonals.
If `x` is 2-D, returns the diagonal of `x` with the given offset, i.e., the
collection of elements of the form `x[i, i+offset]`.
If `x` has more than two dimensions, the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal
is returned.
The shape of the resulting array can be determined by removing `axis1`
and `axis2` and appending an index to the right equal to the size of
the resulting diagonals.
Args:
x: Input tensor.
offset: Offset of the diagonal from the main diagonal.
Can be positive or negative. Defaults to `0` (the main diagonal).
axis1: Axis to be used as the first axis of the 2-D sub-arrays.
Defaults to `0` (the first axis).
axis2: Axis to be used as the second axis of the 2-D sub-arrays.
Defaults to `1` (second axis).
Returns:
Tensor of diagonals.
Examples:
>>> from keras_core import ops
>>> x = ops.arange(4).reshape((2, 2))
>>> x
array([[0, 1],
[2, 3]])
>>> x.diagonal()
array([0, 3])
>>> x.diagonal(1)
array([1])
>>> x = ops.arange(8).reshape((2, 2, 2))
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> x.diagonal(0, 0, 1)
array([[0, 6],
[1, 7]])
"""
if any_symbolic_tensors((x,)):
return Diagonal(
offset=offset,
axis1=axis1,
axis2=axis2,
).symbolic_call(x)
return backend.numpy.diagonal(
x,
offset=offset,
axis1=axis1,
axis2=axis2,
)
class Digitize(Operation):
def call(self, x, bins):
return backend.numpy.digitize(x, bins)
def compute_output_spec(self, x, bins):
bins_shape = bins.shape
if len(bins_shape) > 1:
raise ValueError(
f"`bins` must be a 1D array. Received: bins={bins} "
f"with shape bins.shape={bins_shape}"
)
return KerasTensor(x.shape, dtype="int32")
@keras_core_export(["keras_core.ops.digitize", "keras_core.ops.numpy.digitize"])
def digitize(x, bins):
"""Returns the indices of the bins to which each value in `x` belongs.
Args:
x: Input array to be binned.
bins: Array of bins. It has to be one-dimensional and monotonically
increasing.
Returns:
Output array of indices, of same shape as `x`.
Example:
>>> x = np.array([0.0, 1.0, 3.0, 1.6])
>>> bins = np.array([0.0, 3.0, 4.5, 7.0])
>>> keras_core.ops.digitize(x, bins)
array([1, 1, 2, 1])
"""
if any_symbolic_tensors((x, bins)):
return Digitize().symbolic_call(x, bins)
return backend.numpy.digitize(x, bins)
class Dot(Operation):
def call(self, x1, x2):
return backend.numpy.dot(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = list(getattr(x1, "shape", []))
x2_shape = list(getattr(x2, "shape", []))
if x1_shape == [] or x2_shape == []:
return multiply(x1, x2)
if len(x1_shape) == 1 and len(x2_shape) == 1:
return KerasTensor([], dtype=x1.dtype)
if len(x2_shape) == 1:
if x1_shape[-1] != x2_shape[0]:
raise ValueError(
"Shape must match on the last axis of `x1` and `x2` when "
"`x1` is N-d array while `x2` is 1-D, but receive shape "
f"`x1.shape={x1.shape}` and x2.shape=`{x2.shape}`."
)
return KerasTensor(x1_shape[:-1], dtype=x1.dtype)
if (
x1_shape[-1] is None
or x2_shape[-2] is None
or x1_shape[-1] == x2_shape[-2]
):
del x1_shape[-1]
del x2_shape[-2]
return KerasTensor(x1_shape + x2_shape, dtype=x1.dtype)
raise ValueError(
"Shape must match on the last axis of `x1` and second last "
"axis of `x2` when `x1` is N-d array while `x2` is M-D, but "
f"received `x1.shape={x1.shape}` and x2.shape=`{x2.shape}`."
)
@keras_core_export(["keras_core.ops.dot", "keras_core.ops.numpy.dot"])
def dot(x1, x2):
"""Dot product of two tensors.
- If both `x1` and `x2` are 1-D tensors, it is inner product of vectors
(without complex conjugation).
- If both `x1` and `x2` are 2-D tensors, it is matrix multiplication.
- If either `x1` or `x2` is 0-D (scalar), it is equivalent to `x1 * x2`.
- If `x1` is an N-D tensor and `x2` is a 1-D tensor, it is a sum product
over the last axis of `x1` and `x2`.
- If `x1` is an N-D tensor and `x2` is an M-D tensor (where `M>=2`),
it is a sum product over the last axis of `x1` and the second-to-last
axis of `x2`: `dot(x1, x2)[i,j,k,m] = sum(x1[i,j,:] * x2[k,:,m])`.
Args:
x1: First argument.
x2: Second argument.
Note:
Torch backend does not accept 0-D tensors as arguments.
Returns:
Dot product of `x1` and `x2`.
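Example:
Illustrative doctest of the N-D times 1-D case, a sum product over
the last axis (formatting may vary by backend):
>>> x1 = keras_core.ops.convert_to_tensor([[1, 2], [3, 4]])
>>> x2 = keras_core.ops.convert_to_tensor([5, 6])
>>> keras_core.ops.dot(x1, x2)
array([17, 39], dtype=int32)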
"""
if any_symbolic_tensors((x1, x2)):
return Dot().symbolic_call(x1, x2)
return backend.numpy.dot(x1, x2)
class Einsum(Operation):
def __init__(self, subscripts):
super().__init__()
self.subscripts = subscripts
def call(self, *operands):
return backend.numpy.einsum(self.subscripts, *operands)
def compute_output_spec(self, *operands):
"""Compute the output shape of `einsum`.
The shape computation follows the steps below:
1. Find all letters in the input specs (left part of "->"), and
break them into two categories: letters appearing more than once
go to `reduced_dims`, otherwise go to `kept_dims`.
2. Adjust `reduced_dims` and `kept_dims` based on the output spec
(right part of "->"). The rule is if the letter appears in the
output spec, then move it to `kept_dims`, otherwise move it to
`reduced_dims`.
3. Compute the target output shape. If no output spec is set, then
the target output shape will be "...{kept_dims}", e.g., "...ijk",
else it will be the same as the output spec. "..." is a wildcard
that can map to a sub-shape of arbitrary length.
4. For each operand in `operands`, map the shape specified in the input
spec to the output target, e.g., if the operand is of shape [2,3,4],
the input spec is "i..." and the output target is "i...jk", then 2
goes to index 0. For dims not represented by any letter, insert into the
wildcard part. For each letter in output target not appearing in
input spec, the dim will be 1 for broadcasting. After step 4, each
operand should have a target shape containing only number and
`None`.
5. Broadcast all shapes computed from 4, and the result is the output
shape.
Let's take an example to illustrate the steps above. Let's define:
```python
x = KerasTensor([None, 3, 4])
y = KerasTensor([2, 4, 3])
z = knp.einsum("...ij, kji->...k", x, y)
```
1. `reduced_dims` is {"i", "j"}, `kept_dims` is {"k"}.
2. `reduced_dims` is still {"i", "j"}, and `kept_dims` is {"k"}.
3. Output target is "...k".
4. For `x`, the input spec is "...ij", and the output target is "...k".
"i" and "j" do not appear in the output target, so no replacement
happens, and [None] goes to wildcard. Afterwards, "k" is replaced
by 1, so we get shape [None, 1]. Applying the same logic to `y`, we
get shape [2].
5. Broadcast [None, 1] and [2], and we get [None, 2], which is the
output shape.
"""
split_subscripts = self.subscripts.split("->")
if len(split_subscripts) > 2:
raise ValueError(
"At most one '->' is supported in `einsum` subscripts, but "
f"received {self.subscripts}."
)
if len(split_subscripts) == 2:
subscripts = split_subscripts[0]
output_spec = split_subscripts[1]
else:
subscripts = self.subscripts
output_spec = None
input_specs = subscripts.split(",")
if len(input_specs) != len(operands):
raise ValueError(
f"Number of operands ({len(operands)}) does not match the "
f"number of input specs ({len(input_specs)}) in `einsum`, "
f"received subscripts={self.subscripts}."
)
reduced_dims = set()
kept_dims = set()
for s in subscripts:
if not s.isalpha():
continue
if s not in reduced_dims and s not in kept_dims:
kept_dims.add(s)
elif s in kept_dims:
kept_dims.remove(s)
reduced_dims.add(s)
if output_spec is not None:
# The output spec changes the rule of kept_dims and reduced_dims.
# In short, dims appearing in the output spec will be kept, and
# dims not appearing in the output spec will be reduced.
kept_dims_copy = kept_dims.copy()
reduced_dims_copy = reduced_dims.copy()
for dim in kept_dims:
if dim not in output_spec:
kept_dims_copy.remove(dim)
reduced_dims_copy.add(dim)
for dim in reduced_dims:
if dim in output_spec:
reduced_dims_copy.remove(dim)
kept_dims_copy.add(dim)
kept_dims = kept_dims_copy
reduced_dims = reduced_dims_copy
reduced_dims = sorted(reduced_dims)
kept_dims = sorted(kept_dims)
if output_spec is None:
target_broadcast_spec = "..." + "".join(kept_dims)
else:
target_broadcast_spec = output_spec
expanded_operands_shapes = []
for x, spec in zip(operands, input_specs):
x_shape = getattr(x, "shape", [])
x_shape = [-1 if size is None else size for size in x_shape]
split_spec = spec.split("...")
expanded_shape = target_broadcast_spec
if len(split_spec) == 1:
# In this case, the input spec is just a string of letters,
# e.g., "ijk".
if len(x_shape) != len(split_spec[0]):
raise ValueError(
"Number of dimensions in the subscript does not "
"match the number of dimensions in the operand, "
f"received subscript `{spec}` and operand of shape "
f"{x_shape}."
)
for size, s in zip(x_shape, split_spec[0]):
# Replace the letter with the right shape.
expanded_shape = expanded_shape.replace(s, str(size) + " ")
expanded_shape = expanded_shape.replace("...", "")
else:
# In this case, the input spec has "...", e.g., "i...j", "i...",
# or "...j".
for i in range(len(split_spec[0])):
expanded_shape = expanded_shape.replace(
split_spec[0][i], str(x_shape[i]) + " "
)
for i in range(len(split_spec[1])):
expanded_shape = expanded_shape.replace(
split_spec[1][-i - 1], str(x_shape[-i - 1]) + " "
)
# Shape matched by "..." will be inserted to the position of
# "...".
wildcard_shape_start_index = len(split_spec[0])
wildcard_shape_end_index = (
len(x_shape)
if len(split_spec[1]) == 0
else -len(split_spec[1])
)
wildcard_shape = x_shape[
wildcard_shape_start_index:wildcard_shape_end_index
]
wildcard_shape_str = (
" ".join([str(size) for size in wildcard_shape]) + " "
)
expanded_shape = expanded_shape.replace(
"...", wildcard_shape_str
)
# Replace all letters not yet handled with "1" for broadcasting.
expanded_shape = re.sub("[a-z]", "1 ", expanded_shape)
expanded_shape = expanded_shape.split()
expanded_shape = [
None if size == "-1" else int(size) for size in expanded_shape
]
expanded_operands_shapes.append(expanded_shape)
output_shape = expanded_operands_shapes[0]
for shape in expanded_operands_shapes[1:]:
output_shape = broadcast_shapes(output_shape, shape)
dtype = None
for x in operands:
if hasattr(x, "dtype"):
dtype = x.dtype
break
return KerasTensor(output_shape, dtype=dtype)
@keras_core_export(["keras_core.ops.einsum", "keras_core.ops.numpy.einsum"])
def einsum(subscripts, *operands):
"""Evaluates the Einstein summation convention on the operands.
Args:
subscripts: Specifies the subscripts for summation as comma separated
list of subscript labels. An implicit (classical Einstein
summation) calculation is performed unless the explicit indicator
`->` is included as well as subscript labels of the precise
output form.
operands: The operands to compute the Einstein sum of.
Returns:
The calculation based on the Einstein summation convention.
Example:
>>> from keras_core import ops
>>> a = ops.arange(25).reshape(5, 5)
>>> b = ops.arange(5)
>>> c = ops.arange(6).reshape(2, 3)
Trace of a matrix:
>>> ops.einsum("ii", a)
60
>>> ops.einsum(a, [0, 0])
60
>>> ops.trace(a)
60
Extract the diagonal:
>>> ops.einsum("ii -> i", a)
array([ 0, 6, 12, 18, 24])
>>> ops.einsum(a, [0, 0], [0])
array([ 0, 6, 12, 18, 24])
>>> ops.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis:
>>> ops.einsum("ij -> i", a)
array([ 10, 35, 60, 85, 110])
>>> ops.einsum(a, [0, 1], [0])
array([ 10, 35, 60, 85, 110])
>>> ops.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher-dimensional tensors, summing over a single axis can be done
with an ellipsis:
>>> ops.einsum("...j -> ...", a)
array([ 10, 35, 60, 85, 110])
>>> ops.einsum(a, [..., 1], [...])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose or reorder any number of axes:
>>> ops.einsum("ji", c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> ops.einsum("ij -> ji", c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> ops.einsum(c, [1, 0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> ops.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Matrix vector multiplication:
>>> ops.einsum("ij, j", a, b)
array([ 30, 80, 130, 180, 230])
>>> ops.einsum(a, [0, 1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> ops.einsum("...j, j", a, b)
array([ 30, 80, 130, 180, 230])
"""
if any_symbolic_tensors(operands):
return Einsum(subscripts).symbolic_call(*operands)
return backend.numpy.einsum(subscripts, *operands)
class Empty(Operation):
def call(self, shape, dtype="float32"):
return backend.numpy.empty(shape, dtype=dtype)
def compute_output_spec(self, shape, dtype="float32"):
return KerasTensor(shape, dtype=dtype)
@keras_core_export(["keras_core.ops.empty", "keras_core.ops.numpy.empty"])
def empty(shape, dtype="float32"):
"""Return a tensor of given shape and type filled with uninitialized data.
Args:
shape: Shape of the empty tensor.
dtype: Desired data type of the empty tensor.
Returns:
The empty tensor.
"""
return backend.numpy.empty(shape, dtype=dtype)
class Equal(Operation):
def call(self, x1, x2):
return backend.numpy.equal(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.equal", "keras_core.ops.numpy.equal"])
def equal(x1, x2):
"""Returns `(x1 == x2)` element-wise.
Args:
x1: Tensor to compare.
x2: Tensor to compare.
Returns:
Output tensor, element-wise comparison of `x1` and `x2`.
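Example:
Illustrative doctest; the boolean output formatting follows the other
doctests in this module and may vary by backend.
>>> x1 = keras_core.ops.convert_to_tensor([1, 2, 3])
>>> x2 = keras_core.ops.convert_to_tensor([1, 5, 3])
>>> keras_core.ops.equal(x1, x2)
array([ True False  True], shape=(3,), dtype=bool)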
"""
if any_symbolic_tensors((x1, x2)):
return Equal().symbolic_call(x1, x2)
return backend.numpy.equal(x1, x2)
class Exp(Operation):
def call(self, x):
return backend.numpy.exp(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.exp", "keras_core.ops.numpy.exp"])
def exp(x):
"""Calculate the exponential of all elements in the input tensor.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise exponential of `x`.
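Example:
Illustrative doctest (rounded float32 values; formatting may vary
by backend):
>>> x = keras_core.ops.convert_to_tensor([0., 1.])
>>> keras_core.ops.exp(x)
array([1., 2.7182817], dtype=float32)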
"""
if any_symbolic_tensors((x,)):
return Exp().symbolic_call(x)
return backend.numpy.exp(x)
class ExpandDims(Operation):
def __init__(self, axis):
super().__init__()
if isinstance(axis, list):
raise ValueError(
"The `axis` argument to `expand_dims` should be an integer, "
f"but received a list: {axis}."
)
self.axis = axis
def call(self, x):
return backend.numpy.expand_dims(x, self.axis)
def compute_output_spec(self, x):
x_shape = list(x.shape)
if self.axis < 0:
axis = len(x.shape) + 1 + self.axis
else:
axis = self.axis
output_shape = x_shape[:axis] + [1] + x_shape[axis:]
return KerasTensor(output_shape, dtype=x.dtype, sparse=x.sparse)
@keras_core_export(
[
"keras_core.ops.expand_dims",
"keras_core.ops.numpy.expand_dims",
]
)
def expand_dims(x, axis):
"""Expand the shape of a tensor.
Insert a new axis at the `axis` position in the expanded tensor shape.
Args:
x: Input tensor.
axis: Position in the expanded axes where the new axis
(or axes) is placed.
Returns:
Output tensor with the number of dimensions increased.
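Example:
Illustrative doctest; a tensor of shape `(2,)` becomes `(1, 2)` with
`axis=0` (formatting may vary by backend).
>>> x = keras_core.ops.convert_to_tensor([1, 2])
>>> keras_core.ops.expand_dims(x, 0)
array([[1, 2]], dtype=int32)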
"""
if any_symbolic_tensors((x,)):
return ExpandDims(axis=axis).symbolic_call(x)
return backend.numpy.expand_dims(x, axis)
class Expm1(Operation):
def call(self, x):
return backend.numpy.expm1(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.expm1", "keras_core.ops.numpy.expm1"])
def expm1(x):
"""Calculate `exp(x) - 1` for all elements in the tensor.
Args:
x: Input values.
Returns:
Output tensor, element-wise exponential minus one.
"""
if any_symbolic_tensors((x,)):
return Expm1().symbolic_call(x)
return backend.numpy.expm1(x)
class Flip(Operation):
def __init__(self, axis=None):
super().__init__()
self.axis = axis
def call(self, x):
return backend.numpy.flip(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.flip", "keras_core.ops.numpy.flip"])
def flip(x, axis=None):
"""Reverse the order of elements in the tensor along the given axis.
The shape of the tensor is preserved, but the elements are reordered.
Args:
x: Input tensor.
axis: Axis or axes along which to flip the tensor. The default,
`axis=None`, will flip over all of the axes of the input tensor.
Returns:
Output tensor with entries of `axis` reversed.
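Example:
Illustrative doctest (formatting may vary by backend):
>>> x = keras_core.ops.convert_to_tensor([[1, 2], [3, 4]])
>>> keras_core.ops.flip(x)
array([[4, 3],
       [2, 1]], dtype=int32)
>>> keras_core.ops.flip(x, axis=0)
array([[3, 4],
       [1, 2]], dtype=int32)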
"""
if any_symbolic_tensors((x,)):
return Flip(axis=axis).symbolic_call(x)
return backend.numpy.flip(x, axis=axis)
class Floor(Operation):
def call(self, x):
return backend.numpy.floor(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.floor", "keras_core.ops.numpy.floor"])
def floor(x):
"""Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that `i <= x`.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise floor of `x`.
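Example:
Illustrative doctest (formatting may vary by backend):
>>> x = keras_core.ops.convert_to_tensor([-1.5, 1.2, 2.0])
>>> keras_core.ops.floor(x)
array([-2., 1., 2.], dtype=float32)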
"""
if any_symbolic_tensors((x,)):
return Floor().symbolic_call(x)
return backend.numpy.floor(x)
class Full(Operation):
def call(self, shape, fill_value, dtype=None):
return backend.numpy.full(shape, fill_value, dtype=dtype)
def compute_output_spec(self, shape, fill_value, dtype=None):
return KerasTensor(shape, dtype=dtype)
@keras_core_export(["keras_core.ops.full", "keras_core.ops.numpy.full"])
def full(shape, fill_value, dtype=None):
"""Return a new tensor of given shape and type, filled with `fill_value`.
Args:
shape: Shape of the new tensor.
fill_value: Fill value.
dtype: Desired data type of the tensor.
Returns:
Output tensor.
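Example:
Illustrative doctest; `dtype` is passed explicitly because the
inferred default may differ by backend.
>>> keras_core.ops.full((2, 2), 7, dtype="int32")
array([[7, 7],
       [7, 7]], dtype=int32)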
"""
return backend.numpy.full(shape, fill_value, dtype=dtype)
class FullLike(Operation):
def call(self, x, fill_value, dtype=None):
return backend.numpy.full_like(x, fill_value, dtype=dtype)
def compute_output_spec(self, x, fill_value, dtype=None):
return KerasTensor(x.shape, dtype=dtype)
@keras_core_export(
["keras_core.ops.full_like", "keras_core.ops.numpy.full_like"]
)
def full_like(x, fill_value, dtype=None):
"""Return a full tensor with the same shape and type as the given tensor.
Args:
x: Input tensor.
fill_value: Fill value.
dtype: Overrides data type of the result.
Returns:
Tensor of `fill_value` with the same shape and type as `x`.
"""
if any_symbolic_tensors((x,)):
return FullLike().symbolic_call(x, fill_value, dtype=dtype)
return backend.numpy.full_like(x, fill_value, dtype=dtype)
class GetItem(Operation):
def call(self, x, key):
if isinstance(key, list):
key = tuple(key)
return x[key]
def compute_output_spec(self, x, key):
remaining_shape = list(x.shape)
new_shape = []
if isinstance(key, int):
remaining_key = [key]
elif isinstance(key, tuple):
remaining_key = list(key)
else:
raise ValueError(
f"Unsupported key type for array slice. Recieved: `{key}`"
)
num_ellipses = remaining_key.count(Ellipsis)
if num_ellipses > 1:
raise ValueError(
f"Slice should only have one ellipsis. Recieved: `{key}`"
)
elif num_ellipses == 0:
# Add an implicit final ellipsis.
remaining_key.append(Ellipsis)
# Consume slice key element by element.
while True:
if not remaining_key:
break
subkey = remaining_key.pop(0)
# Check for `newaxis` and `Ellipsis`.
if subkey == Ellipsis:
                # Keep as many slices as remain in our key, omitting `newaxis`.
needed = len(remaining_key) - remaining_key.count(np.newaxis)
consumed = len(remaining_shape) - needed
new_shape += remaining_shape[:consumed]
remaining_shape = remaining_shape[consumed:]
continue
# All frameworks follow numpy for newaxis. `np.newaxis == None`.
if subkey == np.newaxis:
new_shape.append(1)
continue
# At this point, we need to consume a new axis from the shape.
if not remaining_shape:
raise ValueError(
f"Array has shape {x.shape} but slice "
f"has to many indices. Recieved: `{key}`"
)
length = remaining_shape.pop(0)
if isinstance(subkey, int):
if length is not None:
index = subkey if subkey >= 0 else subkey + length
if index < 0 or index >= length:
raise ValueError(
f"Array has shape {x.shape} but out-of-bounds "
f"index {key} was requested."
)
elif isinstance(subkey, slice):
if length is not None:
# python3 friendly way to compute a slice length.
new_length = len(range(*subkey.indices(length)))
new_shape.append(new_length)
else:
new_shape.append(length)
else:
raise ValueError(
f"Unsupported key type for array slice. Recieved: `{key}`"
)
return KerasTensor(tuple(new_shape), dtype=x.dtype)
@keras_core_export(["keras_core.ops.get_item", "keras_core.ops.numpy.get_item"])
def get_item(x, key):
"""Return `x[key]`."""
if any_symbolic_tensors((x,)):
return GetItem().symbolic_call(x, key)
return x[key]
class Greater(Operation):
def call(self, x1, x2):
return backend.numpy.greater(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.greater", "keras_core.ops.numpy.greater"])
def greater(x1, x2):
"""Return the truth value of `x1 > x2` element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise comparison of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Greater().symbolic_call(x1, x2)
return backend.numpy.greater(x1, x2)
class GreaterEqual(Operation):
def call(self, x1, x2):
return backend.numpy.greater_equal(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
[
"keras_core.ops.greater_equal",
"keras_core.ops.numpy.greater_equal",
]
)
def greater_equal(x1, x2):
"""Return the truth value of `x1 >= x2` element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise comparison of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return GreaterEqual().symbolic_call(x1, x2)
return backend.numpy.greater_equal(x1, x2)
class Hstack(Operation):
def call(self, xs):
return backend.numpy.hstack(xs)
def compute_output_spec(self, xs):
first_shape = xs[0].shape
total_size_on_axis = 0
for x in xs:
if not shape_equal(x.shape, first_shape, axis=[1], allow_none=True):
raise ValueError(
"Every value in `xs` must have the same shape except on "
f"the `axis` dim. But found element of shape {x.shape}, "
f"which is different from the first element's "
f"shape {first_shape}."
)
if total_size_on_axis is None or x.shape[1] is None:
total_size_on_axis = None
else:
total_size_on_axis += x.shape[1]
output_shape = list(first_shape)
output_shape[1] = total_size_on_axis
return KerasTensor(output_shape)
@keras_core_export(["keras_core.ops.hstack", "keras_core.ops.numpy.hstack"])
def hstack(xs):
"""Stack tensors in sequence horizontally (column wise).
This is equivalent to concatenation along the first axis for 1-D tensors,
and along the second axis for all other tensors.
Args:
xs: Sequence of tensors.
Returns:
The tensor formed by stacking the given tensors.
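    Example:
    >>> from keras_core import ops
    >>> x = ops.array([[1], [2]])
    >>> y = ops.array([[3], [4]])
    >>> z = ops.hstack([x, y])  # -> [[1, 3], [2, 4]]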
"""
if any_symbolic_tensors((xs,)):
return Hstack().symbolic_call(xs)
return backend.numpy.hstack(xs)
class Identity(Operation):
def call(self, n, dtype="float32"):
return backend.numpy.identity(n, dtype=dtype)
def compute_output_spec(self, n, dtype="float32"):
return KerasTensor([n, n], dtype=dtype)
@keras_core_export(["keras_core.ops.identity", "keras_core.ops.numpy.identity"])
def identity(n, dtype="float32"):
"""Return the identity tensor.
The identity tensor is a square tensor with ones on the main diagonal and
zeros elsewhere.
Args:
n: Number of rows (and columns) in the `n x n` output tensor.
dtype: Data type of the output tensor.
Returns:
The identity tensor.
"""
return backend.numpy.identity(n, dtype=dtype)
class Imag(Operation):
def call(self, x):
return backend.numpy.imag(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.imag", "keras_core.ops.numpy.imag"])
def imag(x):
"""Return the imaginary part of the complex argument.
Args:
x: Input tensor.
Returns:
The imaginary component of the complex argument.
"""
if any_symbolic_tensors((x,)):
return Imag().symbolic_call(x)
return backend.numpy.imag(x)
class Isclose(Operation):
def call(self, x1, x2):
return backend.numpy.isclose(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.isclose", "keras_core.ops.numpy.isclose"])
def isclose(x1, x2):
"""Return whether two tensors are element-wise almost equal.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output boolean tensor.
"""
if any_symbolic_tensors((x1, x2)):
return Isclose().symbolic_call(x1, x2)
return backend.numpy.isclose(x1, x2)
class Isfinite(Operation):
def call(self, x):
return backend.numpy.isfinite(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype="bool")
@keras_core_export(["keras_core.ops.isfinite", "keras_core.ops.numpy.isfinite"])
def isfinite(x):
"""Return whether a tensor is finite, element-wise.
Real values are finite when they are not NaN, not positive infinity, and
not negative infinity. Complex values are finite when both their real
and imaginary parts are finite.
Args:
x: Input tensor.
Returns:
Output boolean tensor.
"""
if any_symbolic_tensors((x,)):
return Isfinite().symbolic_call(x)
return backend.numpy.isfinite(x)
class Isinf(Operation):
def call(self, x):
return backend.numpy.isinf(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype="bool")
@keras_core_export(["keras_core.ops.isinf", "keras_core.ops.numpy.isinf"])
def isinf(x):
"""Test element-wise for positive or negative infinity.
Args:
x: Input tensor.
Returns:
Output boolean tensor.
"""
if any_symbolic_tensors((x,)):
return Isinf().symbolic_call(x)
return backend.numpy.isinf(x)
class Isnan(Operation):
def call(self, x):
return backend.numpy.isnan(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype="bool")
@keras_core_export(["keras_core.ops.isnan", "keras_core.ops.numpy.isnan"])
def isnan(x):
"""Test element-wise for NaN and return result as a boolean tensor.
Args:
x: Input tensor.
Returns:
Output boolean tensor.
"""
if any_symbolic_tensors((x,)):
return Isnan().symbolic_call(x)
return backend.numpy.isnan(x)
class Less(Operation):
def call(self, x1, x2):
return backend.numpy.less(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.less", "keras_core.ops.numpy.less"])
def less(x1, x2):
"""Return the truth value of `x1 < x2` element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise comparison of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Less().symbolic_call(x1, x2)
return backend.numpy.less(x1, x2)
class LessEqual(Operation):
def call(self, x1, x2):
return backend.numpy.less_equal(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
[
"keras_core.ops.less_equal",
"keras_core.ops.numpy.less_equal",
]
)
def less_equal(x1, x2):
"""Return the truth value of `x1 <= x2` element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise comparison of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return LessEqual().symbolic_call(x1, x2)
return backend.numpy.less_equal(x1, x2)
class Linspace(Operation):
def __init__(
self, num=50, endpoint=True, retstep=False, dtype=float, axis=0
):
super().__init__()
self.num = num
self.endpoint = endpoint
self.retstep = retstep
self.dtype = dtype
self.axis = axis
def call(self, start, stop):
return backend.numpy.linspace(
start,
stop,
num=self.num,
endpoint=self.endpoint,
retstep=self.retstep,
dtype=self.dtype,
axis=self.axis,
)
def compute_output_spec(self, start, stop):
start_shape = getattr(start, "shape", [])
stop_shape = getattr(stop, "shape", [])
output_shape = broadcast_shapes(start_shape, stop_shape)
if self.axis == -1:
output_shape = output_shape + [self.num]
elif self.axis >= 0:
output_shape = (
output_shape[: self.axis]
+ [self.num]
+ output_shape[self.axis :]
)
else:
output_shape = (
output_shape[: self.axis + 1]
+ [self.num]
+ output_shape[self.axis + 1 :]
)
dtype = self.dtype if self.dtype is not None else start.dtype
if self.retstep:
return (KerasTensor(output_shape, dtype=dtype), None)
return KerasTensor(output_shape, dtype=dtype)
@keras_core_export(["keras_core.ops.linspace", "keras_core.ops.numpy.linspace"])
def linspace(
start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
"""Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the interval
`[start, stop]`.
The endpoint of the interval can optionally be excluded.
Args:
start: The starting value of the sequence.
stop: The end value of the sequence, unless `endpoint` is set to
`False`. In that case, the sequence consists of all but the last
of `num + 1` evenly spaced samples, so that `stop` is excluded.
Note that the step size changes when `endpoint` is `False`.
num: Number of samples to generate. Defaults to `50`. Must be
non-negative.
endpoint: If `True`, `stop` is the last sample. Otherwise, it is
            not included. Defaults to `True`.
retstep: If `True`, return `(samples, step)`, where `step` is the
spacing between samples.
dtype: The type of the output tensor.
axis: The axis in the result to store the samples. Relevant only if
start or stop are array-like. Defaults to `0`.
Note:
Torch backend does not support `axis` argument.
Returns:
A tensor of evenly spaced numbers.
If `retstep` is `True`, returns `(samples, step)`
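    Example:
    >>> from keras_core import ops
    >>> x = ops.linspace(0, 1, num=5)
    >>> # -> [0., 0.25, 0.5, 0.75, 1.]
    >>> y = ops.linspace(0, 1, num=5, endpoint=False)
    >>> # -> [0., 0.2, 0.4, 0.6, 0.8]; the step changes to 1/5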
"""
if any_symbolic_tensors((start, stop)):
return Linspace(num, endpoint, retstep, dtype, axis)(start, stop)
return backend.numpy.linspace(
start,
stop,
num=num,
endpoint=endpoint,
retstep=retstep,
dtype=dtype,
axis=axis,
)
class Log(Operation):
def call(self, x):
return backend.numpy.log(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.log", "keras_core.ops.numpy.log"])
def log(x):
"""Natural logarithm, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise natural logarithm of `x`.
"""
if any_symbolic_tensors((x,)):
return Log().symbolic_call(x)
return backend.numpy.log(x)
class Log10(Operation):
def call(self, x):
return backend.numpy.log10(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.log10", "keras_core.ops.numpy.log10"])
def log10(x):
"""Return the base 10 logarithm of the input tensor, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise base 10 logarithm of `x`.
"""
if any_symbolic_tensors((x,)):
return Log10().symbolic_call(x)
return backend.numpy.log10(x)
class Log1p(Operation):
def call(self, x):
return backend.numpy.log1p(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.log1p", "keras_core.ops.numpy.log1p"])
def log1p(x):
"""Returns the natural logarithm of one plus the `x`, element-wise.
Calculates `log(1 + x)`.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise natural logarithm of `1 + x`.
"""
if any_symbolic_tensors((x,)):
return Log1p().symbolic_call(x)
return backend.numpy.log1p(x)
class Log2(Operation):
def call(self, x):
return backend.numpy.log2(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.log2", "keras_core.ops.numpy.log2"])
def log2(x):
"""Base-2 logarithm of `x`, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise base-2 logarithm of `x`.
"""
if any_symbolic_tensors((x,)):
return Log2().symbolic_call(x)
return backend.numpy.log2(x)
class Logaddexp(Operation):
def call(self, x1, x2):
return backend.numpy.logaddexp(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
["keras_core.ops.logaddexp", "keras_core.ops.numpy.logaddexp"]
)
def logaddexp(x1, x2):
"""Logarithm of the sum of exponentiations of the inputs.
Calculates `log(exp(x1) + exp(x2))`.
Args:
x1: Input tensor.
x2: Input tensor.
Returns:
Output tensor, element-wise logarithm of the sum of exponentiations
of the inputs.
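    Example:
    >>> from keras_core import ops
    >>> # A naive `log(exp(1000.) + exp(1000.))` would overflow to `inf`;
    >>> # `logaddexp` evaluates the same quantity stably.
    >>> x = ops.logaddexp(1000.0, 1000.0)  # -> 1000.0 + log(2), ~1000.693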
"""
if any_symbolic_tensors((x1, x2)):
return Logaddexp().symbolic_call(x1, x2)
return backend.numpy.logaddexp(x1, x2)
class LogicalAnd(Operation):
def call(self, x1, x2):
return backend.numpy.logical_and(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
[
"keras_core.ops.logical_and",
"keras_core.ops.numpy.logical_and",
]
)
def logical_and(x1, x2):
"""Computes the element-wise logical AND of the given input tensors.
Zeros are treated as `False` and non-zeros are treated as `True`.
Args:
x1: Input tensor.
x2: Input tensor.
Returns:
Output tensor, element-wise logical AND of the inputs.
"""
if any_symbolic_tensors((x1, x2)):
return LogicalAnd().symbolic_call(x1, x2)
return backend.numpy.logical_and(x1, x2)
class LogicalNot(Operation):
def call(self, x):
return backend.numpy.logical_not(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(
[
"keras_core.ops.logical_not",
"keras_core.ops.numpy.logical_not",
]
)
def logical_not(x):
"""Computes the element-wise NOT of the given input tensor.
Zeros are treated as `False` and non-zeros are treated as `True`.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise logical NOT of the input.
"""
if any_symbolic_tensors((x,)):
return LogicalNot().symbolic_call(x)
return backend.numpy.logical_not(x)
class LogicalOr(Operation):
def call(self, x1, x2):
return backend.numpy.logical_or(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
[
"keras_core.ops.logical_or",
"keras_core.ops.numpy.logical_or",
]
)
def logical_or(x1, x2):
"""Computes the element-wise logical OR of the given input tensors.
Zeros are treated as `False` and non-zeros are treated as `True`.
Args:
x1: Input tensor.
x2: Input tensor.
Returns:
Output tensor, element-wise logical OR of the inputs.
"""
if any_symbolic_tensors((x1, x2)):
return LogicalOr().symbolic_call(x1, x2)
return backend.numpy.logical_or(x1, x2)
class Logspace(Operation):
def __init__(self, num=50, endpoint=True, base=10, dtype=float, axis=0):
super().__init__()
self.num = num
self.endpoint = endpoint
self.base = base
self.dtype = dtype
self.axis = axis
def call(self, start, stop):
return backend.numpy.logspace(
start,
stop,
num=self.num,
endpoint=self.endpoint,
base=self.base,
dtype=self.dtype,
axis=self.axis,
)
def compute_output_spec(self, start, stop):
start_shape = getattr(start, "shape", [])
stop_shape = getattr(stop, "shape", [])
output_shape = broadcast_shapes(start_shape, stop_shape)
if self.axis == -1:
output_shape = output_shape + [self.num]
elif self.axis >= 0:
output_shape = (
output_shape[: self.axis]
+ [self.num]
+ output_shape[self.axis :]
)
else:
output_shape = (
output_shape[: self.axis + 1]
+ [self.num]
+ output_shape[self.axis + 1 :]
)
dtype = self.dtype if self.dtype is not None else start.dtype
return KerasTensor(output_shape, dtype=dtype)
@keras_core_export(["keras_core.ops.logspace", "keras_core.ops.numpy.logspace"])
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
"""Returns numbers spaced evenly on a log scale.
In linear space, the sequence starts at `base ** start` and ends with
`base ** stop` (see `endpoint` below).
Args:
start: The starting value of the sequence.
stop: The final value of the sequence, unless `endpoint` is `False`.
In that case, `num + 1` values are spaced over the interval in
log-space, of which all but the last (a sequence of length `num`)
are returned.
num: Number of samples to generate. Defaults to `50`.
endpoint: If `True`, `stop` is the last sample. Otherwise, it is not
            included. Defaults to `True`.
base: The base of the log space. Defaults to `10`.
dtype: The type of the output tensor.
axis: The axis in the result to store the samples. Relevant only
if start or stop are array-like.
Note:
Torch backend does not support `axis` argument.
Returns:
A tensor of evenly spaced samples on a log scale.
"""
if any_symbolic_tensors((start, stop)):
return Logspace(num, endpoint, base, dtype, axis)(start, stop)
return backend.numpy.logspace(
start,
stop,
num=num,
endpoint=endpoint,
base=base,
dtype=dtype,
axis=axis,
)
class Matmul(Operation):
def call(self, x1, x2):
return backend.numpy.matmul(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
if len(x1_shape) == 1:
x1_shape = (1, x1_shape[0])
if len(x2_shape) == 1:
x2_shape = (x2_shape[0], 1)
if (
x1_shape[-1] is not None
and x2_shape[-2] is not None
and x1_shape[-1] != x2_shape[-2]
):
raise ValueError(
"Inner dimensions (`x1.shape[-1]` and `x2.shape[-2]`) must be "
f"equal, but received `x1.shape={x1.shape}` and "
f"`x2.shape={x2.shape}`."
)
leading_shape = broadcast_shapes(x1_shape[:-2], x2_shape[:-2])
last_2_dims_shape = [x1_shape[-2], x2_shape[-1]]
output_shape = leading_shape + last_2_dims_shape
if len(x1.shape) == 1:
del output_shape[-2]
if len(x2.shape) == 1:
del output_shape[-1]
x1_sparse = getattr(x1, "sparse", True)
x2_sparse = getattr(x2, "sparse", True)
output_sparse = x1_sparse and x2_sparse
return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse)
@keras_core_export(["keras_core.ops.matmul", "keras_core.ops.numpy.matmul"])
def matmul(x1, x2):
"""Matrix product of two tensors.
- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If either tensor is N-D, N > 2, it is treated as a stack of matrices
residing in the last two indexes and broadcast accordingly.
- If the first tensor is 1-D, it is promoted to a matrix by prepending
a 1 to its dimensions. After matrix multiplication the prepended
1 is removed.
- If the second tensor is 1-D, it is promoted to a matrix by appending a 1
to its dimensions. After matrix multiplication the appended 1 is removed.
Args:
x1: First tensor.
x2: Second tensor.
Returns:
Output tensor, matrix product of the inputs.
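    Example:
    >>> from keras_core import ops
    >>> a = ops.array([[1, 2], [3, 4]])
    >>> b = ops.array([[5, 6], [7, 8]])
    >>> c = ops.matmul(a, b)  # -> [[19, 22], [43, 50]]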
"""
if any_symbolic_tensors((x1, x2)):
return Matmul().symbolic_call(x1, x2)
# The below conversion works around an outstanding JAX bug.
x1 = backend.convert_to_tensor(x1)
x2 = backend.convert_to_tensor(x2)
return backend.numpy.matmul(x1, x2)
class Max(Operation):
def __init__(self, axis=None, keepdims=False, initial=None):
super().__init__()
if isinstance(axis, int):
self.axis = [axis]
else:
self.axis = axis
self.keepdims = keepdims
self.initial = initial
def call(self, x):
return backend.numpy.max(
x, axis=self.axis, keepdims=self.keepdims, initial=self.initial
)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
dtype=x.dtype,
)
@keras_core_export(["keras_core.ops.max", "keras_core.ops.numpy.max"])
def max(x, axis=None, keepdims=False, initial=None):
"""Return the maximum of a tensor or maximum along an axis.
Args:
x: Input tensor.
axis: Axis or axes along which to operate. By default, flattened input
is used.
keepdims: If this is set to `True`, the axes which are reduced are left
            in the result as dimensions with size one. Defaults to `False`.
        initial: The minimum value of an output element. Defaults to `None`.
Returns:
Maximum of `x`.
"""
if any_symbolic_tensors((x,)):
return Max(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(
x
)
return backend.numpy.max(x, axis=axis, keepdims=keepdims, initial=initial)
class Maximum(Operation):
def call(self, x1, x2):
return backend.numpy.maximum(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
x1_sparse = getattr(x1, "sparse", True)
x2_sparse = getattr(x2, "sparse", True)
output_sparse = x1_sparse and x2_sparse
return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse)
@keras_core_export(["keras_core.ops.maximum", "keras_core.ops.numpy.maximum"])
def maximum(x1, x2):
"""Element-wise maximum of `x1` and `x2`.
Args:
x1: First tensor.
x2: Second tensor.
Returns:
Output tensor, element-wise maximum of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Maximum().symbolic_call(x1, x2)
return backend.numpy.maximum(x1, x2)
class Meshgrid(Operation):
def __init__(self, indexing="xy"):
super().__init__()
if indexing not in ("xy", "ij"):
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij', "
"but received {index}."
)
self.indexing = indexing
def call(self, *x):
return backend.numpy.meshgrid(*x, indexing=self.indexing)
def compute_output_spec(self, *x):
output_shape = []
for xi in x:
if len(xi.shape) == 0:
size = 1
else:
if None in xi.shape:
size = None
else:
size = int(np.prod(xi.shape))
output_shape.append(size)
if self.indexing == "ij":
return [KerasTensor(output_shape) for _ in range(len(x))]
tmp = output_shape[0]
output_shape[0] = output_shape[1]
output_shape[1] = tmp
return [KerasTensor(output_shape) for _ in range(len(x))]
@keras_core_export(["keras_core.ops.meshgrid", "keras_core.ops.numpy.meshgrid"])
def meshgrid(*x, indexing="xy"):
"""Creates grids of coordinates from coordinate vectors.
Given `N` 1-D tensors `T0, T1, ..., TN-1` as inputs with corresponding
    lengths `S0, S1, ..., SN-1`, this creates `N` N-dimensional tensors
    `G0, G1, ..., GN-1`, each with shape `(S0, ..., SN-1)`, where the output
`Gi` is constructed by expanding `Ti` to the result shape.
Args:
x: 1-D tensors representing the coordinates of a grid.
indexing: Cartesian (`"xy"`, default) or matrix (`"ij"`) indexing
of output.
Returns:
Sequence of N tensors.
Example:
>>> from keras_core import ops
>>> x = ops.array([1, 2, 3])
>>> y = ops.array([4, 5, 6])
>>> grid_x, grid_y = ops.meshgrid(x, y, indexing="ij")
>>> grid_x
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> grid_y
array([[4, 5, 6],
[4, 5, 6],
[4, 5, 6]])
"""
if any_symbolic_tensors(x):
return Meshgrid(indexing=indexing).symbolic_call(*x)
return backend.numpy.meshgrid(*x, indexing=indexing)
class Min(Operation):
    def __init__(self, axis=None, keepdims=False, initial=None):
        super().__init__()
        if isinstance(axis, int):
self.axis = [axis]
else:
self.axis = axis
self.keepdims = keepdims
self.initial = initial
def call(self, x):
return backend.numpy.min(
x, axis=self.axis, keepdims=self.keepdims, initial=self.initial
)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
dtype=x.dtype,
)
@keras_core_export(["keras_core.ops.min", "keras_core.ops.numpy.min"])
def min(x, axis=None, keepdims=False, initial=None):
"""Return the minimum of a tensor or minimum along an axis.
Args:
x: Input tensor.
axis: Axis or axes along which to operate. By default, flattened input
is used.
keepdims: If this is set to `True`, the axes which are reduced are left
            in the result as dimensions with size one. Defaults to `False`.
        initial: The maximum value of an output element. Defaults to `None`.
Returns:
Minimum of `x`.
"""
if any_symbolic_tensors((x,)):
return Min(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(
x
)
return backend.numpy.min(x, axis=axis, keepdims=keepdims, initial=initial)
class Minimum(Operation):
def call(self, x1, x2):
return backend.numpy.minimum(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
x1_sparse = getattr(x1, "sparse", True)
x2_sparse = getattr(x2, "sparse", True)
output_sparse = x1_sparse and x2_sparse
return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse)
@keras_core_export(["keras_core.ops.minimum", "keras_core.ops.numpy.minimum"])
def minimum(x1, x2):
"""Element-wise minimum of `x1` and `x2`.
Args:
x1: First tensor.
x2: Second tensor.
Returns:
Output tensor, element-wise minimum of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Minimum().symbolic_call(x1, x2)
return backend.numpy.minimum(x1, x2)
class Mod(Operation):
def call(self, x1, x2):
return backend.numpy.mod(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.mod", "keras_core.ops.numpy.mod"])
def mod(x1, x2):
"""Returns the element-wise remainder of division.
Args:
x1: First tensor.
x2: Second tensor.
Returns:
Output tensor, element-wise remainder of division.
"""
if any_symbolic_tensors((x1, x2)):
return Mod().symbolic_call(x1, x2)
return backend.numpy.mod(x1, x2)
class Moveaxis(Operation):
def __init__(self, source, destination):
super().__init__()
if isinstance(source, int):
self.source = [source]
else:
self.source = source
if isinstance(destination, int):
self.destination = [destination]
else:
self.destination = destination
if len(self.source) != len(self.destination):
raise ValueError(
"`source` and `destination` arguments must have the same "
f"number of elements, but received `source={source}` and "
f"`destination={destination}`."
)
def call(self, x):
return backend.numpy.moveaxis(x, self.source, self.destination)
def compute_output_spec(self, x):
x_shape = list(x.shape)
output_shape = [-1 for _ in range(len(x.shape))]
for sc, dst in zip(self.source, self.destination):
output_shape[dst] = x_shape[sc]
x_shape[sc] = -1
i, j = 0, 0
while i < len(output_shape):
while i < len(output_shape) and output_shape[i] != -1:
# Find the first dim unset.
i += 1
while j < len(output_shape) and x_shape[j] == -1:
# Find the first dim not being passed.
j += 1
if i == len(output_shape):
break
output_shape[i] = x_shape[j]
i += 1
j += 1
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.moveaxis", "keras_core.ops.numpy.moveaxis"])
def moveaxis(x, source, destination):
"""Move axes of a tensor to new positions.
Other axes remain in their original order.
Args:
x: Tensor whose axes should be reordered.
source: Original positions of the axes to move. These must be unique.
        destination: Destination positions for each of the original axes.
These must also be unique.
Returns:
Tensor with moved axes.
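    Example:
    >>> from keras_core import ops
    >>> x = ops.zeros((3, 4, 5))
    >>> a = ops.moveaxis(x, 0, -1)  # shape (4, 5, 3)
    >>> b = ops.moveaxis(x, [0, 1], [1, 0])  # shape (4, 3, 5)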
"""
if any_symbolic_tensors((x,)):
return Moveaxis(source, destination).symbolic_call(x)
return backend.numpy.moveaxis(x, source=source, destination=destination)
class NanToNum(Operation):
def call(self, x):
return backend.numpy.nan_to_num(x)
@keras_core_export(
[
"keras_core.ops.nan_to_num",
"keras_core.ops.numpy.nan_to_num",
]
)
def nan_to_num(x):
"""Replace NaN with zero and infinity with large finite numbers.
Args:
x: Input data.
Returns:
`x`, with non-finite values replaced.
"""
return backend.numpy.nan_to_num(x)
class Ndim(Operation):
def call(self, x):
return backend.numpy.ndim(
x,
)
def compute_output_spec(self, x):
return KerasTensor([len(x.shape)])
@keras_core_export(["keras_core.ops.ndim", "keras_core.ops.numpy.ndim"])
def ndim(x):
"""Return the number of dimensions of a tensor.
Args:
x: Input tensor.
Returns:
The number of dimensions in `x`.
"""
if any_symbolic_tensors((x,)):
return Ndim().symbolic_call(x)
return backend.numpy.ndim(x)
class Nonzero(Operation):
def call(self, x):
return backend.numpy.nonzero(x)
@keras_core_export(["keras_core.ops.nonzero", "keras_core.ops.numpy.nonzero"])
def nonzero(x):
"""Return the indices of the elements that are non-zero.
Args:
x: Input tensor.
Returns:
Indices of elements that are non-zero.
"""
return backend.numpy.nonzero(x)
class NotEqual(Operation):
def call(self, x1, x2):
return backend.numpy.not_equal(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
["keras_core.ops.not_equal", "keras_core.ops.numpy.not_equal"]
)
def not_equal(x1, x2):
"""Return `(x1 != x2)` element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
        Output tensor, element-wise comparison of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return NotEqual().symbolic_call(x1, x2)
return backend.numpy.not_equal(x1, x2)
class OnesLike(Operation):
def call(self, x, dtype=None):
return backend.numpy.ones_like(x, dtype=dtype)
def compute_output_spec(self, x, dtype=None):
if dtype is None:
dtype = x.dtype
return KerasTensor(x.shape, dtype=dtype)
@keras_core_export(
["keras_core.ops.ones_like", "keras_core.ops.numpy.ones_like"]
)
def ones_like(x, dtype=None):
"""Return a tensor of ones with the same shape and type of `x`.
Args:
x: Input tensor.
dtype: Overrides the data type of the result.
Returns:
A tensor of ones with the same shape and type as `x`.
"""
if any_symbolic_tensors((x,)):
return OnesLike().symbolic_call(x, dtype=dtype)
return backend.numpy.ones_like(x, dtype=dtype)
class ZerosLike(Operation):
def call(self, x, dtype=None):
return backend.numpy.zeros_like(x, dtype=dtype)
def compute_output_spec(self, x, dtype=None):
if dtype is None:
dtype = x.dtype
return KerasTensor(x.shape, dtype=dtype)
@keras_core_export(
[
"keras_core.ops.zeros_like",
"keras_core.ops.numpy.zeros_like",
]
)
def zeros_like(x, dtype=None):
"""Return a tensor of zeros with the same shape and type as `x`.
Args:
x: Input tensor.
dtype: Overrides the data type of the result.
Returns:
A tensor of zeros with the same shape and type as `x`.
"""
if any_symbolic_tensors((x,)):
return ZerosLike().symbolic_call(x, dtype=dtype)
return backend.numpy.zeros_like(x, dtype=dtype)
class Outer(Operation):
def call(self, x1, x2):
return backend.numpy.outer(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [1])
x2_shape = getattr(x2, "shape", [1])
if None in x1_shape:
x1_flatten_shape = None
else:
x1_flatten_shape = int(np.prod(x1_shape))
if None in x2_shape:
x2_flatten_shape = None
else:
x2_flatten_shape = int(np.prod(x2_shape))
output_shape = [x1_flatten_shape, x2_flatten_shape]
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.outer", "keras_core.ops.numpy.outer"])
def outer(x1, x2):
"""Compute the outer product of two vectors.
Given two vectors `x1` and `x2`, the outer product is:
```
out[i, j] = x1[i] * x2[j]
```
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Outer product of `x1` and `x2`.
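    Example:
    >>> from keras_core import ops
    >>> x1 = ops.array([1, 2, 3])
    >>> x2 = ops.array([4, 5])
    >>> # Result has shape (3, 2): out[i, j] = x1[i] * x2[j].
    >>> y = ops.outer(x1, x2)  # -> [[4, 5], [8, 10], [12, 15]]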
"""
if any_symbolic_tensors((x1, x2)):
return Outer().symbolic_call(x1, x2)
return backend.numpy.outer(x1, x2)
class Pad(Operation):
def __init__(self, pad_width, mode="constant"):
super().__init__()
self.pad_width = self._process_pad_width(pad_width)
self.mode = mode
def _process_pad_width(self, pad_width):
if isinstance(pad_width, int):
return ((pad_width, pad_width),)
if isinstance(pad_width, (tuple, list)) and isinstance(
pad_width[0], int
):
return (pad_width,)
first_len = len(pad_width[0])
for i, pw in enumerate(pad_width):
if len(pw) != first_len:
raise ValueError(
"`pad_width` should be a list of tuples of length 2 or "
f"1, but received {pad_width}."
)
if len(pw) == 1:
pad_width[i] = (pw[0], pw[0])
return pad_width
def call(self, x):
return backend.numpy.pad(x, pad_width=self.pad_width, mode=self.mode)
def compute_output_spec(self, x):
output_shape = list(x.shape)
if len(self.pad_width) == 1:
pad_width = [self.pad_width[0] for _ in range(len(output_shape))]
elif len(self.pad_width) == len(output_shape):
pad_width = self.pad_width
else:
raise ValueError(
"`pad_width` must have the same length as `x.shape`, but "
f"received {len(self.pad_width)} and {len(x.shape)}."
)
        for i in range(len(output_shape)):
            # Dimensions that are `None` (unknown) stay unknown.
            if output_shape[i] is not None:
                output_shape[i] += pad_width[i][0] + pad_width[i][1]
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.pad", "keras_core.ops.numpy.pad"])
def pad(x, pad_width, mode="constant"):
"""Pad a tensor.
Args:
x: Tensor to pad.
pad_width: Number of values padded to the edges of each axis.
`((before_1, after_1), ...(before_N, after_N))` unique pad
widths for each axis.
`((before, after),)` yields same before and after pad for
each axis.
`(pad,)` or `int` is a shortcut for `before = after = pad`
width for all axes.
mode: One of `"constant"`, `"edge"`, `"linear_ramp"`,
`"maximum"`, `"mean"`, `"median"`, `"minimum"`,
`"reflect"`, `"symmetric"`, `"wrap"`, `"empty"`,
`"circular"`. Defaults to`"constant"`.
Note:
Torch backend only supports modes `"constant"`, `"reflect"`,
`"symmetric"` and `"circular"`.
Only Torch backend supports `"circular"` mode.
Note:
Tensorflow backend only supports modes `"constant"`, `"reflect"`
and `"symmetric"`.
Returns:
Padded tensor.
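    Example:
    >>> from keras_core import ops
    >>> x = ops.array([[1, 2], [3, 4]])
    >>> # One row before the first axis, one column after the second axis,
    >>> # filled with the default constant value 0:
    >>> y = ops.pad(x, ((1, 0), (0, 1)))
    >>> # -> [[0, 0, 0], [1, 2, 0], [3, 4, 0]]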
"""
if any_symbolic_tensors((x,)):
return Pad(pad_width, mode=mode).symbolic_call(x)
return backend.numpy.pad(x, pad_width, mode=mode)
class Prod(Operation):
def __init__(self, axis=None, keepdims=False, dtype=None):
super().__init__()
if isinstance(axis, int):
self.axis = [axis]
else:
self.axis = axis
self.keepdims = keepdims
self.dtype = dtype
def call(self, x):
return backend.numpy.prod(
x,
axis=self.axis,
keepdims=self.keepdims,
dtype=self.dtype,
)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
dtype=self.dtype,
)
@keras_core_export(["keras_core.ops.prod", "keras_core.ops.numpy.prod"])
def prod(x, axis=None, keepdims=False, dtype=None):
"""Return the product of tensor elements over a given axis.
Args:
x: Input tensor.
axis: Axis or axes along which a product is performed. The default,
`axis=None`, will compute the product of all elements
in the input tensor.
        keepdims: If this is set to `True`, the axes which are reduced
            are left in the result as dimensions with size one.
dtype: Data type of the returned tensor.
Returns:
Product of elements of `x` over the given axis or axes.
"""
if any_symbolic_tensors((x,)):
return Prod(axis=axis, keepdims=keepdims, dtype=dtype).symbolic_call(x)
return backend.numpy.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
class Ravel(Operation):
def call(self, x):
return backend.numpy.ravel(x)
def compute_output_spec(self, x):
if None in x.shape:
output_shape = [
None,
]
else:
output_shape = [int(np.prod(x.shape))]
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.ravel", "keras_core.ops.numpy.ravel"])
def ravel(x):
"""Return a contiguous flattened tensor.
A 1-D tensor, containing the elements of the input, is returned.
Args:
x: Input tensor.
Returns:
Output tensor.
"""
if any_symbolic_tensors((x,)):
return Ravel().symbolic_call(x)
return backend.numpy.ravel(x)
class Real(Operation):
def call(self, x):
return backend.numpy.real(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape)
@keras_core_export(["keras_core.ops.real", "keras_core.ops.numpy.real"])
def real(x):
"""Return the real part of the complex argument.
Args:
x: Input tensor.
Returns:
The real component of the complex argument.
"""
if any_symbolic_tensors((x,)):
return Real().symbolic_call(x)
return backend.numpy.real(x)
class Reciprocal(Operation):
def call(self, x):
return backend.numpy.reciprocal(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape)
@keras_core_export(
[
"keras_core.ops.reciprocal",
"keras_core.ops.numpy.reciprocal",
]
)
def reciprocal(x):
"""Return the reciprocal of the argument, element-wise.
Calculates `1/x`.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise reciprocal of `x`.
"""
if any_symbolic_tensors((x,)):
return Reciprocal().symbolic_call(x)
return backend.numpy.reciprocal(x)
class Repeat(Operation):
def __init__(self, repeats, axis=None):
super().__init__()
self.axis = axis
self.repeats = repeats
def call(self, x):
return backend.numpy.repeat(x, self.repeats, axis=self.axis)
def compute_output_spec(self, x):
x_shape = list(x.shape)
if self.axis is None:
if None in x_shape:
return KerasTensor([None], dtype=x.dtype)
x_flatten_size = int(np.prod(x_shape))
if isinstance(self.repeats, int):
output_shape = [x_flatten_size * self.repeats]
else:
output_shape = [int(np.sum(self.repeats))]
return KerasTensor(output_shape, dtype=x.dtype)
size_on_ax = x_shape[self.axis]
output_shape = x_shape
if isinstance(self.repeats, int):
if size_on_ax is None:
output_shape[self.axis] = None
else:
output_shape[self.axis] = size_on_ax * self.repeats
else:
output_shape[self.axis] = int(np.sum(self.repeats))
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.repeat", "keras_core.ops.numpy.repeat"])
def repeat(x, repeats, axis=None):
"""Repeat each element of a tensor after themselves.
Args:
x: Input tensor.
repeats: The number of repetitions for each element.
axis: The axis along which to repeat values. By default, use
the flattened input array, and return a flat output array.
Returns:
Output tensor.
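    Example:
    >>> from keras_core import ops
    >>> x = ops.array([[1, 2], [3, 4]])
    >>> a = ops.repeat(x, 2)  # flattened -> [1, 1, 2, 2, 3, 3, 4, 4]
    >>> b = ops.repeat(x, 2, axis=0)  # -> [[1, 2], [1, 2], [3, 4], [3, 4]]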
"""
if any_symbolic_tensors((x,)):
return Repeat(repeats, axis=axis).symbolic_call(x)
return backend.numpy.repeat(x, repeats, axis=axis)
class Reshape(Operation):
def __init__(self, new_shape):
super().__init__()
self.new_shape = new_shape
def call(self, x):
return backend.numpy.reshape(x, self.new_shape)
def compute_output_spec(self, x):
output_shape = operation_utils.compute_reshape_output_shape(
x.shape, self.new_shape, "new_shape"
)
return KerasTensor(output_shape, dtype=x.dtype, sparse=x.sparse)
@keras_core_export(["keras_core.ops.reshape", "keras_core.ops.numpy.reshape"])
def reshape(x, new_shape):
"""Gives a new shape to a tensor without changing its data.
Args:
x: Input tensor.
new_shape: The new shape should be compatible with the original shape.
One shape dimension can be -1 in which case the value is
inferred from the length of the array and remaining dimensions.
Returns:
The reshaped tensor.
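    Example:
    >>> from keras_core import ops
    >>> x = ops.arange(6)
    >>> y = ops.reshape(x, (2, 3))  # -> [[0, 1, 2], [3, 4, 5]]
    >>> z = ops.reshape(x, (3, -1))  # the -1 is inferred as 2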
"""
if any_symbolic_tensors((x,)):
return Reshape(new_shape).symbolic_call(x)
return backend.numpy.reshape(x, new_shape)
class Roll(Operation):
def __init__(self, shift, axis=None):
super().__init__()
self.shift = shift
self.axis = axis
def call(self, x):
return backend.numpy.roll(x, self.shift, self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.roll", "keras_core.ops.numpy.roll"])
def roll(x, shift, axis=None):
"""Roll tensor elements along a given axis.
Elements that roll beyond the last position are re-introduced at the first.
Args:
x: Input tensor.
shift: The number of places by which elements are shifted.
axis: The axis along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns:
Output tensor.
"""
if any_symbolic_tensors((x,)):
return Roll(shift, axis=axis).symbolic_call(x)
return backend.numpy.roll(x, shift, axis=axis)
class Round(Operation):
def __init__(self, decimals=0):
super().__init__()
self.decimals = decimals
def call(self, x):
return backend.numpy.round(x, self.decimals)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.round", "keras_core.ops.numpy.round"])
def round(x, decimals=0):
"""Evenly round to the given number of decimals.
Args:
x: Input tensor.
decimals: Number of decimal places to round to. Defaults to `0`.
Returns:
Output tensor.
"""
if any_symbolic_tensors((x,)):
return Round(decimals).symbolic_call(x)
return backend.numpy.round(x, decimals)
class Sign(Operation):
def call(self, x):
return backend.numpy.sign(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype="int32")
@keras_core_export(["keras_core.ops.sign", "keras_core.ops.numpy.sign"])
def sign(x):
"""Returns a tensor with the signs of the elements of `x`.
Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
"""
if any_symbolic_tensors((x,)):
return Sign().symbolic_call(x)
return backend.numpy.sign(x)
class Sin(Operation):
def call(self, x):
return backend.numpy.sin(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape)
@keras_core_export(["keras_core.ops.sin", "keras_core.ops.numpy.sin"])
def sin(x):
"""Trigonomeric sine, element-wise.
Arguments:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
"""
if any_symbolic_tensors((x,)):
return Sin().symbolic_call(x)
return backend.numpy.sin(x)
class Sinh(Operation):
def call(self, x):
return backend.numpy.sinh(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.sinh", "keras_core.ops.numpy.sinh"])
def sinh(x):
"""Hyperbolic sine, element-wise.
    Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
"""
if any_symbolic_tensors((x,)):
return Sinh().symbolic_call(x)
return backend.numpy.sinh(x)
class Size(Operation):
def call(self, x):
return backend.numpy.size(x)
def compute_output_spec(self, x):
return KerasTensor([], dtype="int32")
@keras_core_export(["keras_core.ops.size", "keras_core.ops.numpy.size"])
def size(x):
"""Return the number of elements in a tensor.
Args:
x: Input tensor.
Returns:
Number of elements in `x`.
"""
if any_symbolic_tensors((x,)):
return Size().symbolic_call(x)
return backend.numpy.size(x)
class Sort(Operation):
def __init__(self, axis=-1):
super().__init__()
self.axis = axis
def call(self, x):
return backend.numpy.sort(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, x.dtype)
@keras_core_export(["keras_core.ops.sort", "keras_core.ops.numpy.sort"])
def sort(x, axis=-1):
"""Sorts the elements of `x` along a given axis in ascending order.
Args:
x: Input tensor.
axis: Axis along which to sort. If `None`, the tensor is flattened
before sorting. Defaults to `-1`; the last axis.
Returns:
Sorted tensor.
"""
if any_symbolic_tensors((x,)):
return Sort(axis=axis).symbolic_call(x)
return backend.numpy.sort(x, axis=axis)
class Split(Operation):
def __init__(self, indices_or_sections, axis=0):
super().__init__()
if not isinstance(indices_or_sections, int):
indices_or_sections = tuple(indices_or_sections)
self.indices_or_sections = indices_or_sections
self.axis = axis
def call(self, x):
return backend.numpy.split(x, self.indices_or_sections, axis=self.axis)
def compute_output_spec(self, x):
x_shape = list(x.shape)
x_size_on_axis = x_shape[self.axis]
if isinstance(self.indices_or_sections, int):
if x_size_on_axis is None:
x_shape[self.axis] = None
return [
KerasTensor(x_shape, dtype=x.dtype)
for _ in range(self.indices_or_sections)
]
if np.mod(x_size_on_axis, self.indices_or_sections) != 0:
raise ValueError(
"`x` size on given `axis` must be dividible by "
"`indices_or_sections` when `indices_or_sections` is an "
f"int. But received {x_size_on_axis} and "
f"{self.indices_or_sections}."
)
size = x_size_on_axis // self.indices_or_sections
x_shape[self.axis] = size
return [
KerasTensor(x_shape, dtype=x.dtype)
for _ in range(self.indices_or_sections)
]
indices_or_sections = (0, *self.indices_or_sections, x_size_on_axis)
output_size = np.diff(indices_or_sections)
outputs = []
for i in range(len(output_size)):
output_shape = list(x_shape)
output_shape[self.axis] = int(output_size[i])
outputs.append(KerasTensor(output_shape, dtype=x.dtype))
return outputs
@keras_core_export(["keras_core.ops.split", "keras_core.ops.numpy.split"])
def split(x, indices_or_sections, axis=0):
"""Split a tensor into chunks.
Args:
x: Input tensor.
        indices_or_sections: If an integer, N, the tensor will be split
            into N equal sections along `axis`. If a 1-D sequence of sorted
            integers, the entries indicate the indices at which the tensor
            will be split along `axis`.
axis: Axis along which to split. Defaults to `0`.
Note:
A split does not have to result in equal division when using
Torch backend.
Returns:
A list of tensors.
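    Example:
    >>> from keras_core import ops
    >>> x = ops.arange(6)
    >>> a, b, c = ops.split(x, 3)  # three tensors of shape (2,)
    >>> first, rest = ops.split(x, [2])  # shapes (2,) and (4,)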
"""
if any_symbolic_tensors((x,)):
return Split(indices_or_sections, axis=axis).symbolic_call(x)
return backend.numpy.split(x, indices_or_sections, axis=axis)
class Stack(Operation):
def __init__(self, axis=0):
super().__init__()
self.axis = axis
def call(self, xs):
return backend.numpy.stack(xs, axis=self.axis)
def compute_output_spec(self, xs):
first_shape = xs[0].shape
for x in xs:
if not shape_equal(x.shape, first_shape, axis=[], allow_none=True):
raise ValueError(
"Every value in `xs` must have the same shape. But found "
f"element of shape {x.shape}, which is different from the "
f"first element's shape {first_shape}."
)
size_on_axis = len(xs)
output_shape = list(first_shape)
if self.axis == -1:
output_shape = output_shape + [size_on_axis]
elif self.axis >= 0:
output_shape.insert(self.axis, size_on_axis)
else:
output_shape.insert(self.axis + 1, size_on_axis)
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.stack", "keras_core.ops.numpy.stack"])
def stack(x, axis=0):
"""Join a sequence of tensors along a new axis.
The `axis` parameter specifies the index of the new axis in the
dimensions of the result.
Args:
x: A sequence of tensors.
axis: Axis along which to stack. Defaults to `0`.
Returns:
The stacked tensor.
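    Example:
    >>> from keras_core import ops
    >>> a = ops.array([1, 2])
    >>> b = ops.array([3, 4])
    >>> x = ops.stack([a, b], axis=0)  # -> [[1, 2], [3, 4]]
    >>> y = ops.stack([a, b], axis=1)  # -> [[1, 3], [2, 4]]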
"""
if any_symbolic_tensors((x,)):
return Stack(axis=axis).symbolic_call(x)
return backend.numpy.stack(x, axis=axis)
class Std(Operation):
def __init__(self, axis=None, keepdims=False):
super().__init__()
if isinstance(axis, int):
self.axis = [axis]
else:
self.axis = axis
self.keepdims = keepdims
def call(self, x):
return backend.numpy.std(x, axis=self.axis, keepdims=self.keepdims)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
)
@keras_core_export(["keras_core.ops.std", "keras_core.ops.numpy.std"])
def std(x, axis=None, keepdims=False):
"""Compute the standard deviation along the specified axis.
Args:
x: Input tensor.
axis: Axis along which to compute standard deviation.
Default is to compute the standard deviation of the
flattened tensor.
keepdims: If this is set to `True`, the axes which are reduced are left
in the result as dimensions with size one.
Returns:
Output tensor containing the standard deviation values.
"""
if any_symbolic_tensors((x,)):
return Std(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.std(x, axis=axis, keepdims=keepdims)
class Swapaxes(Operation):
def __init__(self, axis1, axis2):
super().__init__()
self.axis1 = axis1
self.axis2 = axis2
def call(self, x):
return backend.numpy.swapaxes(x, self.axis1, self.axis2)
def compute_output_spec(self, x):
x_shape = list(x.shape)
tmp = x_shape[self.axis1]
x_shape[self.axis1] = x_shape[self.axis2]
x_shape[self.axis2] = tmp
return KerasTensor(x_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.swapaxes", "keras_core.ops.numpy.swapaxes"])
def swapaxes(x, axis1, axis2):
"""Interchange two axes of a tensor.
Args:
x: Input tensor.
axis1: First axis.
axis2: Second axis.
Returns:
A tensor with the axes swapped.
"""
if any_symbolic_tensors((x,)):
return Swapaxes(axis1, axis2).symbolic_call(x)
return backend.numpy.swapaxes(x, axis1=axis1, axis2=axis2)
class Take(Operation):
def __init__(self, axis=None):
super().__init__()
self.axis = axis
def call(self, x, indices):
return backend.numpy.take(x, indices, axis=self.axis)
def compute_output_spec(self, x, indices):
x_shape = list(x.shape)
if isinstance(indices, KerasTensor):
indices_shape = list(indices.shape)
else:
indices_shape = list(getattr(np.array(indices), "shape", []))
if self.axis is None:
return KerasTensor(indices_shape, dtype=x.dtype)
# make sure axis is non-negative
axis = len(x_shape) + self.axis if self.axis < 0 else self.axis
output_shape = x_shape[:axis] + indices_shape + x_shape[axis + 1 :]
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.take", "keras_core.ops.numpy.take"])
def take(x, indices, axis=None):
"""Take elements from a tensor along an axis.
Args:
x: Source tensor.
indices: The indices of the values to extract.
axis: The axis over which to select values. By default, the
flattened input tensor is used.
Returns:
The corresponding tensor of values.
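    Example:
    >>> from keras_core import ops
    >>> x = ops.array([[10, 20], [30, 40]])
    >>> # Gather columns 0, 1, 1; the result has shape (2, 3).
    >>> y = ops.take(x, ops.array([0, 1, 1]), axis=1)
    >>> # -> [[10, 20, 20], [30, 40, 40]]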
"""
if any_symbolic_tensors((x, indices)):
return Take(axis=axis).symbolic_call(x, indices)
return backend.numpy.take(x, indices, axis=axis)
class TakeAlongAxis(Operation):
def __init__(self, axis=None):
super().__init__()
self.axis = axis
def call(self, x, indices):
return backend.numpy.take_along_axis(x, indices, axis=self.axis)
def compute_output_spec(self, x, indices):
x_shape = list(x.shape)
indices_shape = list(indices.shape)
        if self.axis is None:
            x_shape = [None] if None in x_shape else [int(np.prod(x_shape))]
        if len(x_shape) != len(indices_shape):
            raise ValueError(
                "`x` and `indices` must have the same number of dimensions, "
                f"but received shapes {x_shape} and {indices_shape}."
            )
        # With `axis=None`, `x` was flattened above, so the (1-D) indices
        # apply along axis 0; using `self.axis` directly would fail here.
        axis = 0 if self.axis is None else self.axis
        del x_shape[axis]
        del indices_shape[axis]
        output_shape = broadcast_shapes(x_shape, indices_shape)
        size_on_axis = indices.shape[axis]
        if axis == -1:
            output_shape = output_shape + [size_on_axis]
        elif axis >= 0:
            output_shape.insert(axis, size_on_axis)
        else:
            output_shape.insert(axis + 1, size_on_axis)
        return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(
[
"keras_core.ops.take_along_axis",
"keras_core.ops.numpy.take_along_axis",
]
)
def take_along_axis(x, indices, axis=None):
"""Select values from `x` at the 1-D `indices` along the given axis.
Args:
x: Source tensor.
indices: The indices of the values to extract.
axis: The axis over which to select values. By default, the flattened
input tensor is used.
Returns:
The corresponding tensor of values.
"""
if any_symbolic_tensors((x, indices)):
return TakeAlongAxis(axis=axis).symbolic_call(x, indices)
return backend.numpy.take_along_axis(x, indices, axis=axis)
class Tan(Operation):
def call(self, x):
return backend.numpy.tan(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape)
@keras_core_export(["keras_core.ops.tan", "keras_core.ops.numpy.tan"])
def tan(x):
"""Compute tangent, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
"""
if any_symbolic_tensors((x,)):
return Tan().symbolic_call(x)
return backend.numpy.tan(x)
class Tanh(Operation):
def call(self, x):
return backend.numpy.tanh(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.tanh", "keras_core.ops.numpy.tanh"])
def tanh(x):
"""Hyperbolic tangent, element-wise.
    Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
"""
if any_symbolic_tensors((x,)):
return Tanh().symbolic_call(x)
return backend.numpy.tanh(x)
class Tensordot(Operation):
def __init__(self, axes=2):
super().__init__()
self.axes = axes
def call(self, x1, x2):
return backend.numpy.tensordot(x1, x2, axes=self.axes)
def compute_output_spec(self, x1, x2):
x1_shape = list(getattr(x1, "shape", []))
x2_shape = list(getattr(x2, "shape", []))
if not isinstance(self.axes, int):
x1_select_shape = [x1_shape[ax] for ax in self.axes[0]]
x2_select_shape = [x2_shape[ax] for ax in self.axes[1]]
if not shape_equal(
x1_select_shape, x2_select_shape, allow_none=True
):
raise ValueError(
"Shape mismatch on `x1[axes[0]]` and `x2[axes[1]]`, "
f"received {x1_select_shape} and {x2_select_shape}."
)
for ax in self.axes[0]:
x1_shape[ax] = -1
for ax in self.axes[1]:
x2_shape[ax] = -1
x1_shape = list(filter((-1).__ne__, x1_shape))
x2_shape = list(filter((-1).__ne__, x2_shape))
output_shape = x1_shape + x2_shape
return KerasTensor(output_shape, dtype=x1.dtype)
if self.axes <= 0:
output_shape = x1_shape + x2_shape
else:
output_shape = x1_shape[: -self.axes] + x2_shape[self.axes :]
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
["keras_core.ops.tensordot", "keras_core.ops.numpy.tensordot"]
)
def tensordot(x1, x2, axes=2):
"""Compute the tensor dot product along specified axes.
Args:
x1: First tensor.
x2: Second tensor.
axes: - If an integer, N, sum over the last N axes of `x1` and the
first N axes of `x2` in order. The sizes of the corresponding
axes must match.
- Or, a list of axes to be summed over, first sequence applying
to `x1`, second to `x2`. Both sequences must be of the
same length.
Returns:
The tensor dot product of the inputs.
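    Example:
    >>> from keras_core import ops
    >>> x1 = ops.zeros((3, 4, 5))
    >>> x2 = ops.zeros((4, 5, 2))
    >>> # Sums over the last two axes of `x1` and the first two of `x2`:
    >>> y = ops.tensordot(x1, x2, axes=2)  # shape (3, 2)
    >>> # The equivalent explicit form:
    >>> z = ops.tensordot(x1, x2, axes=[[1, 2], [0, 1]])  # shape (3, 2)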
"""
if any_symbolic_tensors((x1, x2)):
return Tensordot(axes=axes).symbolic_call(x1, x2)
return backend.numpy.tensordot(x1, x2, axes=axes)
class Tile(Operation):
def __init__(self, repeats):
super().__init__()
self.repeats = repeats
def call(self, x):
return backend.numpy.tile(x, self.repeats)
def compute_output_spec(self, x):
x_shape = list(x.shape)
repeats = self.repeats
if len(x_shape) > len(repeats):
repeats = [1] * (len(x_shape) - len(repeats)) + repeats
else:
x_shape = [1] * (len(repeats) - len(x_shape)) + x_shape
output_shape = []
for x_size, repeat in zip(x_shape, repeats):
if x_size is None:
output_shape.append(None)
else:
output_shape.append(x_size * repeat)
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.tile", "keras_core.ops.numpy.tile"])
def tile(x, repeats):
"""Repeat `x` the number of times given by `repeats`.
If `repeats` has length `d`, the result will have dimension of
`max(d, x.ndim)`.
If `x.ndim < d`, `x` is promoted to be d-dimensional by prepending
new axes.
If `x.ndim > d`, `repeats` is promoted to `x.ndim` by prepending 1's to it.
Args:
x: Input tensor.
repeats: The number of repetitions of `x` along each axis.
Returns:
The tiled output tensor.
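    Example:
    >>> from keras_core import ops
    >>> x = ops.array([1, 2])
    >>> # `x` is promoted to 2-D, then tiled to shape (2, 6).
    >>> y = ops.tile(x, (2, 3))  # each row is [1, 2, 1, 2, 1, 2]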
"""
if any_symbolic_tensors((x,)):
return Tile(
repeats,
).symbolic_call(x)
return backend.numpy.tile(x, repeats)
class Trace(Operation):
def __init__(self, offset=0, axis1=0, axis2=1):
super().__init__()
self.offset = offset
self.axis1 = axis1
self.axis2 = axis2
def call(self, x):
return backend.numpy.trace(
x, offset=self.offset, axis1=self.axis1, axis2=self.axis2
)
def compute_output_spec(self, x):
x_shape = list(x.shape)
x_shape[self.axis1] = -1
x_shape[self.axis2] = -1
output_shape = list(filter((-1).__ne__, x_shape))
return KerasTensor(output_shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.trace", "keras_core.ops.numpy.trace"])
def trace(x, offset=0, axis1=0, axis2=1):
"""Return the sum along diagonals of the tensor.
If `x` is 2-D, the sum along its diagonal with the given offset is
returned, i.e., the sum of elements `x[i, i+offset]` for all `i`.
    If `x` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-arrays whose traces are
returned.
The shape of the resulting tensor is the same as that of `x` with `axis1`
and `axis2` removed.
Args:
x: Input tensor.
offset: Offset of the diagonal from the main diagonal. Can be
both positive and negative. Defaults to `0`.
axis1: Axis to be used as the first axis of the 2-D sub-arrays.
            Defaults to `0` (first axis).
axis2: Axis to be used as the second axis of the 2-D sub-arrays.
Defaults to `1` (second axis).
Returns:
If `x` is 2-D, the sum of the diagonal is returned. If `x` has
larger dimensions, then a tensor of sums along diagonals is
returned.
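    Example:
    An illustrative sketch of the `offset` argument (scalar results noted
    in comments; exact reprs depend on the backend):
    ```python
    from keras_core import ops
    x = ops.array([[1, 2], [3, 4]])
    ops.trace(x)            # 1 + 4 = 5
    ops.trace(x, offset=1)  # sum of x[0, 1] = 2
    ```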
"""
if any_symbolic_tensors((x,)):
return Trace(offset, axis1, axis2).symbolic_call(x)
return backend.numpy.trace(x, offset=offset, axis1=axis1, axis2=axis2)
class Tri(Operation):
def call(self, N, M=None, k=0, dtype="float32"):
return backend.numpy.tri(N, M=M, k=k, dtype=dtype)
def compute_output_spec(self, N, M=None, k=0, dtype="float32"):
if M is None:
M = N
return KerasTensor((N, M), dtype=dtype)
@keras_core_export(["keras_core.ops.tri", "keras_core.ops.numpy.tri"])
def tri(N, M=None, k=0, dtype="float32"):
"""Return a tensor with ones at and below a diagonal and zeros elsewhere.
Args:
N: Number of rows in the tensor.
M: Number of columns in the tensor.
k: The sub-diagonal at and below which the array is filled.
`k = 0` is the main diagonal, while `k < 0` is below it, and
`k > 0` is above. The default is 0.
dtype: Data type of the returned tensor. The default is "float32".
Returns:
Tensor with its lower triangle filled with ones and zeros elsewhere.
`T[i, j] == 1` for `j <= i + k`, 0 otherwise.
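    Example:
    A small sketch of the `k` argument (values noted as plain lists for
    brevity):
    ```python
    from keras_core import ops
    ops.tri(3)        # [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
    ops.tri(3, k=-1)  # [[0, 0, 0], [1, 0, 0], [1, 1, 0]]
    ```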
"""
return backend.numpy.tri(N, M=M, k=k, dtype=dtype)
class Tril(Operation):
def __init__(self, k=0):
super().__init__()
self.k = k
def call(self, x):
return backend.numpy.tril(x, k=self.k)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.tril", "keras_core.ops.numpy.tril"])
def tril(x, k=0):
"""Return lower triangle of a tensor.
For tensors with `ndim` exceeding 2, `tril` will apply to the
final two axes.
Args:
x: Input tensor.
        k: Diagonal above which to zero elements. Defaults to `0`, the
            main diagonal. `k < 0` is below it, and `k > 0` is above it.
Returns:
Lower triangle of `x`, of same shape and data type as `x`.
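    Example:
    A minimal sketch (values noted as plain lists for brevity):
    ```python
    from keras_core import ops
    x = ops.ones((3, 3))
    ops.tril(x)        # [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
    ops.tril(x, k=-1)  # [[0, 0, 0], [1, 0, 0], [1, 1, 0]]
    ```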
"""
if any_symbolic_tensors((x,)):
return Tril(k=k).symbolic_call(x)
return backend.numpy.tril(x, k=k)
class Triu(Operation):
def __init__(self, k=0):
super().__init__()
self.k = k
def call(self, x):
return backend.numpy.triu(x, k=self.k)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.triu", "keras_core.ops.numpy.triu"])
def triu(x, k=0):
"""Return upper triangle of a tensor.
For tensors with `ndim` exceeding 2, `triu` will apply to the
final two axes.
Args:
x: Input tensor.
        k: Diagonal below which to zero elements. Defaults to `0`, the
            main diagonal. `k < 0` is below it, and `k > 0` is above it.
Returns:
Upper triangle of `x`, of same shape and data type as `x`.
"""
if any_symbolic_tensors((x,)):
return Triu(k=k).symbolic_call(x)
return backend.numpy.triu(x, k=k)
class Vdot(Operation):
def call(self, x1, x2):
return backend.numpy.vdot(x1, x2)
def compute_output_spec(self, x1, x2):
return KerasTensor([], dtype=x1.dtype)
@keras_core_export(["keras_core.ops.vdot", "keras_core.ops.numpy.vdot"])
def vdot(x1, x2):
"""Return the dot product of two vectors.
If the first argument is complex, the complex conjugate of the first
argument is used for the calculation of the dot product.
Multidimensional tensors are flattened before the dot product is taken.
Args:
x1: First input tensor. If complex, its complex conjugate is taken
before calculation of the dot product.
x2: Second input tensor.
Returns:
Output tensor.
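    Example:
    A quick sketch of the flattening behavior (results noted in comments):
    ```python
    from keras_core import ops
    ops.vdot(ops.array([1.0, 2.0]), ops.array([3.0, 4.0]))  # 1*3 + 2*4 = 11.0
    # Higher-rank inputs are flattened before the dot product:
    ops.vdot(ops.ones((2, 2)), ops.ones((2, 2)))  # 4.0
    ```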
"""
if any_symbolic_tensors((x1, x2)):
return Vdot().symbolic_call(x1, x2)
return backend.numpy.vdot(x1, x2)
class Vstack(Operation):
def call(self, xs):
return backend.numpy.vstack(xs)
def compute_output_spec(self, xs):
first_shape = xs[0].shape
total_size_on_axis = 0
for x in xs:
if not shape_equal(x.shape, first_shape, axis=[0], allow_none=True):
raise ValueError(
"Every value in `xs` must have the same shape except on "
f"the `axis` dim. But found element of shape {x.shape}, "
f"which is different from the first element's "
f"shape {first_shape}."
)
if total_size_on_axis is None or x.shape[0] is None:
total_size_on_axis = None
else:
total_size_on_axis += x.shape[0]
output_shape = list(first_shape)
output_shape[0] = total_size_on_axis
return KerasTensor(output_shape)
@keras_core_export(["keras_core.ops.vstack", "keras_core.ops.numpy.vstack"])
def vstack(xs):
"""Stack tensors in sequence vertically (row wise).
Args:
xs: Sequence of tensors.
Returns:
Tensor formed by stacking the given tensors.
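    Example:
    A minimal sketch; 1-D inputs are treated as rows:
    ```python
    from keras_core import ops
    a = ops.array([1, 2, 3])
    b = ops.array([4, 5, 6])
    ops.vstack([a, b])  # shape (2, 3): [[1, 2, 3], [4, 5, 6]]
    ```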
"""
if any_symbolic_tensors((xs,)):
return Vstack().symbolic_call(xs)
return backend.numpy.vstack(xs)
class Where(Operation):
def call(self, condition, x1=None, x2=None):
return backend.numpy.where(condition, x1, x2)
def compute_output_spec(self, condition, x1, x2):
condition_shape = getattr(condition, "shape", [])
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(condition_shape, x1_shape)
output_shape = broadcast_shapes(output_shape, x2_shape)
output_dtype = getattr(x1, "dtype", "int")
return KerasTensor(output_shape, dtype=output_dtype)
@keras_core_export(["keras_core.ops.where", "keras_core.ops.numpy.where"])
def where(condition, x1=None, x2=None):
"""Return elements chosen from `x1` or `x2` depending on `condition`.
Args:
condition: Where `True`, yield `x1`, otherwise yield `x2`.
x1: Values from which to choose when `condition` is `True`.
x2: Values from which to choose when `condition` is `False`.
Returns:
A tensor with elements from `x1` where `condition` is `True`, and
elements from `x2` where `condition` is `False`.
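    Example:
    An illustrative sketch (result noted in a comment; standard
    broadcasting applies to `condition`, `x1` and `x2`):
    ```python
    from keras_core import ops
    x = ops.array([1, 2, 3, 4])
    ops.where(x > 2, x, 0)  # [0, 0, 3, 4]
    ```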
"""
if (x1 is None and x2 is not None) or (x1 is not None and x2 is None):
        raise ValueError(
            "`x1` and `x2` must either both be `None` "
            "or both be non-`None`."
)
if any_symbolic_tensors((condition, x1, x2)):
return Where().symbolic_call(condition, x1, x2)
return backend.numpy.where(condition, x1, x2)
class Subtract(Operation):
def call(self, x1, x2):
return backend.numpy.subtract(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
x1_sparse = getattr(x1, "sparse", True)
x2_sparse = getattr(x2, "sparse", True)
output_sparse = x1_sparse and x2_sparse
return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse)
@keras_core_export(["keras_core.ops.subtract", "keras_core.ops.numpy.subtract"])
def subtract(x1, x2):
"""Subtract arguments element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise difference of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Subtract().symbolic_call(x1, x2)
return backend.numpy.subtract(x1, x2)
class Multiply(Operation):
def call(self, x1, x2):
return backend.numpy.multiply(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
x1_sparse = getattr(x1, "sparse", True)
x2_sparse = getattr(x2, "sparse", True)
output_sparse = x1_sparse or x2_sparse
return KerasTensor(output_shape, dtype=x1.dtype, sparse=output_sparse)
@keras_core_export(["keras_core.ops.multiply", "keras_core.ops.numpy.multiply"])
def multiply(x1, x2):
"""Multiply arguments element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise product of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Multiply().symbolic_call(x1, x2)
return backend.numpy.multiply(x1, x2)
class Divide(Operation):
def call(self, x1, x2):
return backend.numpy.divide(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.divide", "keras_core.ops.numpy.divide"])
def divide(x1, x2):
"""Divide arguments element-wise.
`keras_core.ops.true_divide` is an alias for this function.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, the quotient `x1/x2`, element-wise.
"""
if any_symbolic_tensors((x1, x2)):
return Divide().symbolic_call(x1, x2)
return backend.numpy.divide(x1, x2)
class TrueDivide(Operation):
def call(self, x1, x2):
return backend.numpy.true_divide(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
[
"keras_core.ops.true_divide",
"keras_core.ops.numpy.true_divide",
]
)
def true_divide(x1, x2):
"""Alias for `keras_core.ops.divide`."""
if any_symbolic_tensors((x1, x2)):
return TrueDivide().symbolic_call(x1, x2)
return backend.numpy.true_divide(x1, x2)
class Power(Operation):
def call(self, x1, x2):
return backend.numpy.power(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(["keras_core.ops.power", "keras_core.ops.numpy.power"])
def power(x1, x2):
"""First tensor elements raised to powers from second tensor, element-wise.
Args:
x1: The bases.
x2: The exponents.
Returns:
Output tensor, the bases in `x1` raised to the exponents in `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Power().symbolic_call(x1, x2)
return backend.numpy.power(x1, x2)
class Negative(Operation):
def call(self, x):
return backend.numpy.negative(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.negative", "keras_core.ops.numpy.negative"])
def negative(x):
"""Numerical negative, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor, `y = -x`.
"""
if any_symbolic_tensors((x,)):
return Negative().symbolic_call(x)
return backend.numpy.negative(x)
class Square(Operation):
def call(self, x):
return backend.numpy.square(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.square", "keras_core.ops.numpy.square"])
def square(x):
"""Return the element-wise square of the input.
Args:
x: Input tensor.
Returns:
Output tensor, the square of `x`.
"""
if any_symbolic_tensors((x,)):
return Square().symbolic_call(x)
return backend.numpy.square(x)
class Sqrt(Operation):
def call(self, x):
x = backend.convert_to_tensor(x)
return backend.numpy.sqrt(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_core_export(["keras_core.ops.sqrt", "keras_core.ops.numpy.sqrt"])
def sqrt(x):
"""Return the non-negative square root of a tensor, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor, the non-negative square root of `x`.
"""
if any_symbolic_tensors((x,)):
return Sqrt().symbolic_call(x)
x = backend.convert_to_tensor(x)
return backend.numpy.sqrt(x)
class Squeeze(Operation):
def __init__(self, axis=None):
super().__init__()
self.axis = axis
def call(self, x):
return backend.numpy.squeeze(x, axis=self.axis)
def compute_output_spec(self, x):
input_shape = list(x.shape)
if self.axis is None:
output_shape = list(filter((1).__ne__, input_shape))
return KerasTensor(output_shape, dtype=x.dtype, sparse=x.sparse)
else:
if input_shape[self.axis] != 1:
raise ValueError(
f"Cannot squeeze axis {self.axis}, because the dimension "
"is not 1."
)
del input_shape[self.axis]
return KerasTensor(input_shape, dtype=x.dtype, sparse=x.sparse)
@keras_core_export(["keras_core.ops.squeeze", "keras_core.ops.numpy.squeeze"])
def squeeze(x, axis=None):
"""Remove axes of length one from `x`.
Args:
x: Input tensor.
axis: Select a subset of the entries of length one in the shape.
Returns:
The input tensor with all or a subset of the dimensions of
length 1 removed.
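    Example:
    A short sketch of full and axis-selective squeezing (shapes noted in
    comments):
    ```python
    from keras_core import ops
    x = ops.ones((1, 3, 1, 2))
    ops.squeeze(x)          # shape (3, 2)
    ops.squeeze(x, axis=0)  # shape (3, 1, 2)
    ```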
"""
if any_symbolic_tensors((x,)):
return Squeeze(axis=axis).symbolic_call(x)
return backend.numpy.squeeze(x, axis=axis)
class Transpose(Operation):
def __init__(self, axes=None):
super().__init__()
self.axes = axes
def call(self, x):
return backend.numpy.transpose(x, axes=self.axes)
def compute_output_spec(self, x):
x_shape = x.shape
if self.axes is None:
return KerasTensor(x_shape[::-1], dtype=x.dtype, sparse=x.sparse)
if len(self.axes) != len(x_shape):
            raise ValueError(
                "`axes` must be a list of the same length as the input "
                f"shape, expected {len(x_shape)}, but received "
                f"{len(self.axes)}."
)
output_shape = []
for ax in self.axes:
output_shape.append(x_shape[ax])
return KerasTensor(output_shape, dtype=x.dtype, sparse=x.sparse)
@keras_core_export(
["keras_core.ops.transpose", "keras_core.ops.numpy.transpose"]
)
def transpose(x, axes=None):
"""Returns a tensor with `axes` transposed.
Args:
x: Input tensor.
axes: Sequence of integers. Permutation of the dimensions of `x`.
            By default, the order of the axes is reversed.
Returns:
`x` with its axes permuted.
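    Example:
    A minimal sketch (shapes noted in comments):
    ```python
    from keras_core import ops
    x = ops.ones((2, 3, 4))
    ops.transpose(x)                  # axes reversed -> shape (4, 3, 2)
    ops.transpose(x, axes=(0, 2, 1))  # shape (2, 4, 3)
    ```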
"""
if any_symbolic_tensors((x,)):
return Transpose(axes=axes).symbolic_call(x)
return backend.numpy.transpose(x, axes=axes)
class Mean(Operation):
def __init__(self, axis=None, keepdims=False):
super().__init__()
if isinstance(axis, int):
axis = [axis]
self.axis = axis
self.keepdims = keepdims
def call(self, x):
return backend.numpy.mean(x, axis=self.axis, keepdims=self.keepdims)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
dtype=x.dtype,
)
@keras_core_export(["keras_core.ops.mean", "keras_core.ops.numpy.mean"])
def mean(x, axis=None, keepdims=False):
"""Compute the arithmetic mean along the specified axes.
Args:
x: Input tensor.
axis: Axis or axes along which the means are computed. The default
is to compute the mean of the flattened tensor.
keepdims: If this is set to `True`, the axes which are reduced are left
in the result as dimensions with size one.
Returns:
Output tensor containing the mean values.
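    Example:
    An illustrative sketch of `axis` and `keepdims` (results noted in
    comments):
    ```python
    from keras_core import ops
    x = ops.array([[1.0, 2.0], [3.0, 4.0]])
    ops.mean(x)                         # 2.5, the mean of all elements
    ops.mean(x, axis=0)                 # [2.0, 3.0], per-column means
    ops.mean(x, axis=1, keepdims=True)  # shape (2, 1): [[1.5], [3.5]]
    ```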
"""
if any_symbolic_tensors((x,)):
return Mean(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.mean(x, axis=axis, keepdims=keepdims)
class Var(Operation):
def __init__(self, axis=None, keepdims=False):
super().__init__()
if isinstance(axis, int):
axis = [axis]
self.axis = axis
self.keepdims = keepdims
def call(self, x):
return backend.numpy.var(x, axis=self.axis, keepdims=self.keepdims)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
dtype=x.dtype,
)
@keras_core_export(["keras_core.ops.var", "keras_core.ops.numpy.var"])
def var(x, axis=None, keepdims=False):
"""Compute the variance along the specified axes.
Args:
x: Input tensor.
axis: Axis or axes along which the variance is computed. The default
is to compute the variance of the flattened tensor.
keepdims: If this is set to `True`, the axes which are reduced are left
in the result as dimensions with size one.
Returns:
Output tensor containing the variance.
"""
if any_symbolic_tensors((x,)):
return Var(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.var(x, axis=axis, keepdims=keepdims)
class Sum(Operation):
def __init__(self, axis=None, keepdims=False):
super().__init__()
if isinstance(axis, int):
axis = [axis]
self.axis = axis
self.keepdims = keepdims
def call(self, x):
return backend.numpy.sum(x, axis=self.axis, keepdims=self.keepdims)
def compute_output_spec(self, x):
return KerasTensor(
reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
dtype=x.dtype,
)
@keras_core_export(["keras_core.ops.sum", "keras_core.ops.numpy.sum"])
def sum(x, axis=None, keepdims=False):
"""Sum of a tensor over the given axes.
Args:
x: Input tensor.
axis: Axis or axes along which the sum is computed. The default is to
compute the sum of the flattened tensor.
keepdims: If this is set to `True`, the axes which are reduced are left
in the result as dimensions with size one.
Returns:
Output tensor containing the sum.
"""
if any_symbolic_tensors((x,)):
return Sum(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.sum(x, axis=axis, keepdims=keepdims)
class Zeros(Operation):
def call(self, shape, dtype="float32"):
return backend.numpy.zeros(shape, dtype=dtype)
def compute_output_spec(self, shape, dtype="float32"):
return KerasTensor(shape, dtype=dtype)
@keras_core_export(["keras_core.ops.zeros", "keras_core.ops.numpy.zeros"])
def zeros(shape, dtype="float32"):
"""Return a new tensor of given shape and type, filled with zeros.
Args:
shape: Shape of the new tensor.
dtype: Desired data type of the tensor.
Returns:
Tensor of zeros with the given shape and dtype.
"""
return backend.numpy.zeros(shape, dtype=dtype)
class Ones(Operation):
def call(self, shape, dtype="float32"):
return backend.numpy.ones(shape, dtype=dtype)
def compute_output_spec(self, shape, dtype="float32"):
return KerasTensor(shape, dtype=dtype)
@keras_core_export(["keras_core.ops.ones", "keras_core.ops.numpy.ones"])
def ones(shape, dtype="float32"):
"""Return a new tensor of given shape and type, filled with ones.
Args:
shape: Shape of the new tensor.
dtype: Desired data type of the tensor.
Returns:
Tensor of ones with the given shape and dtype.
"""
return backend.numpy.ones(shape, dtype=dtype)
class Eye(Operation):
def call(self, N, M=None, k=0, dtype="float32"):
return backend.numpy.eye(N, M=M, k=k, dtype=dtype)
def compute_output_spec(self, N, M=None, k=0, dtype="float32"):
if M is None:
M = N
return KerasTensor((N, M), dtype=dtype)
@keras_core_export(["keras_core.ops.eye", "keras_core.ops.numpy.eye"])
def eye(N, M=None, k=0, dtype="float32"):
"""Return a 2-D tensor with ones on the diagonal and zeros elsewhere.
Args:
N: Number of rows in the output.
M: Number of columns in the output. If `None`, defaults to `N`.
k: Index of the diagonal: 0 (the default) refers to the main
diagonal, a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype: Data type of the returned tensor.
Returns:
Tensor with ones on the k-th diagonal and zeros elsewhere.
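    Example:
    A small sketch of the `k` argument (values noted as plain lists):
    ```python
    from keras_core import ops
    ops.eye(3)       # the 3x3 identity matrix
    ops.eye(3, k=1)  # [[0, 1, 0], [0, 0, 1], [0, 0, 0]]
    ```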
"""
return backend.numpy.eye(N, M=M, k=k, dtype=dtype)
class FloorDivide(Operation):
def call(self, x1, x2):
return backend.numpy.floor_divide(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
["keras_core.ops.floor_divide", "keras_core.ops.numpy.floor_divide"]
)
def floor_divide(x1, x2):
"""Returns the largest integer smaller or equal to the division of inputs.
Args:
x1: Numerator.
x2: Denominator.
Returns:
        Output tensor, `y = floor(x1/x2)`.
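    Example:
    A minimal sketch; note that negative quotients are floored, not
    truncated toward zero:
    ```python
    from keras_core import ops
    ops.floor_divide(7, 2)   # 3
    ops.floor_divide(-7, 2)  # -4, since floor(-3.5) == -4
    ```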
"""
if any_symbolic_tensors((x1, x2)):
return FloorDivide().symbolic_call(x1, x2)
return backend.numpy.floor_divide(x1, x2)
class LogicalXor(Operation):
def call(self, x1, x2):
return backend.numpy.logical_xor(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype=x1.dtype)
@keras_core_export(
["keras_core.ops.logical_xor", "keras_core.ops.numpy.logical_xor"]
)
def logical_xor(x1, x2):
"""Compute the truth value of `x1 XOR x2`, element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output boolean tensor.
"""
if any_symbolic_tensors((x1, x2)):
return LogicalXor().symbolic_call(x1, x2)
return backend.numpy.logical_xor(x1, x2)
| keras-core/keras_core/ops/numpy.py/0 | {
"file_path": "keras-core/keras_core/ops/numpy.py",
"repo_id": "keras-core",
"token_count": 79220
} | 49 |
import numpy as np
import pytest
import keras_core
from keras_core import backend
from keras_core import ops
from keras_core import testing
from keras_core.optimizers.adam import Adam
class AdamTest(testing.TestCase):
def test_config(self):
optimizer = Adam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
amsgrad=True,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adam(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adam(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adam(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adam(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adam(amsgrad=True)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.999], [0.9982], [0.9974], [0.9965], [0.9955]], (1, 10)
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adam(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adam(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
@pytest.mark.requires_trainable_backend
def test_ema(self):
# TODO: test correctness
model = keras_core.Sequential([keras_core.layers.Dense(10)])
model.compile(optimizer=Adam(use_ema=True), loss="mse")
x = keras_core.ops.zeros((1, 5))
y = keras_core.ops.zeros((1, 10))
model.fit(x, y)
| keras-core/keras_core/optimizers/adam_test.py/0 | {
"file_path": "keras-core/keras_core/optimizers/adam_test.py",
"repo_id": "keras-core",
"token_count": 1543
} | 50 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.optimizers import optimizer
@keras_core_export(["keras_core.optimizers.RMSprop"])
class RMSprop(optimizer.Optimizer):
"""Optimizer that implements the RMSprop algorithm.
The gist of RMSprop is to:
- Maintain a moving (discounted) average of the square of gradients
- Divide the gradient by the root of this average
This implementation of RMSprop uses plain momentum, not Nesterov momentum.
The centered version additionally maintains a moving average of the
gradients, and uses that average to estimate the variance.
Args:
learning_rate: A float, a
`keras_core.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
rho: float, defaults to 0.9. Discounting factor for the old gradients.
        momentum: float, defaults to 0.0. If not 0.0, the optimizer tracks the
            momentum value, with a decay rate equal to `1 - momentum`.
        epsilon: A small constant for numerical stability. Defaults to
            1e-7.
centered: Boolean. If `True`, gradients are normalized by the estimated
variance of the gradient; if False, by the uncentered second moment.
Setting this to `True` may help with training, but is slightly more
expensive in terms of computation and memory. Defaults to `False`.
{{base_optimizer_keyword_args}}
Usage:
>>> opt = keras_core.optimizers.RMSprop(learning_rate=0.1)
>>> var1 = keras_core.backend.Variable(10.0)
>>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1
>>> opt.minimize(loss, [var1])
>>> var1
9.683772
Reference:
- [Hinton, 2012](
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.9,
momentum=0.0,
epsilon=1e-7,
centered=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=100,
name="rmsprop",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
name=name,
**kwargs,
)
self.rho = rho
self.momentum = momentum
self.epsilon = epsilon
self.centered = centered
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._velocities = []
for var in var_list:
self._velocities.append(
self.add_variable_from_reference(var, "velocity")
)
self._momentums = []
if self.momentum > 0:
for var in var_list:
self._momentums.append(
self.add_variable_from_reference(var, "momentum")
)
self._average_gradients = []
if self.centered:
for var in var_list:
self._average_gradients.append(
self.add_variable_from_reference(var, "average_gradient")
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
velocity = self._velocities[self._get_variable_index(variable)]
momentum = None
if self.momentum > 0:
momentum = self._momentums[self._get_variable_index(variable)]
average_grad = None
if self.centered:
average_grad = self._average_gradients[
self._get_variable_index(variable)
]
rho = self.rho
velocity.assign(rho * velocity + (1 - rho) * ops.square(gradient))
if self.centered:
average_grad.assign(rho * average_grad + (1 - rho) * gradient)
denominator = velocity - ops.square(average_grad) + self.epsilon
else:
denominator = velocity + self.epsilon
increment = lr * gradient / ops.sqrt(denominator)
if self.momentum > 0:
momentum.assign(self.momentum * momentum + increment)
variable.assign(variable - momentum)
else:
variable.assign(variable - increment)
def get_config(self):
config = super().get_config()
config.update(
{
"rho": self.rho,
"momentum": self.momentum,
"epsilon": self.epsilon,
"centered": self.centered,
}
)
return config
RMSprop.__doc__ = RMSprop.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| keras-core/keras_core/optimizers/rmsprop.py/0 | {
"file_path": "keras-core/keras_core/optimizers/rmsprop.py",
"repo_id": "keras-core",
"token_count": 2485
} | 51 |
import keras_core
from keras_core import testing
from keras_core.saving import object_registration
from keras_core.saving import serialization_lib
class TestObjectRegistration(testing.TestCase):
def test_custom_object_scope(self):
def custom_fn():
pass
class CustomClass:
pass
def check_get_in_thread():
with object_registration.custom_object_scope(
{"CustomClass": CustomClass, "custom_fn": custom_fn}
):
actual_custom_fn = keras_core.activations.get("custom_fn")
self.assertEqual(actual_custom_fn, custom_fn)
actual_custom_class = keras_core.regularizers.get("CustomClass")
self.assertEqual(actual_custom_class.__class__, CustomClass)
with object_registration.custom_object_scope(
{"CustomClass": CustomClass, "custom_fn": custom_fn}
):
actual_custom_fn = keras_core.activations.get("custom_fn")
self.assertEqual(actual_custom_fn, custom_fn)
actual_custom_class = keras_core.regularizers.get("CustomClass")
self.assertEqual(actual_custom_class.__class__, CustomClass)
checked_thread = self.checkedThread(check_get_in_thread)
checked_thread.start()
checked_thread.join()
def test_serialize_custom_class_with_default_name(self):
@object_registration.register_keras_serializable()
class TestClass:
def __init__(self, value):
self._value = value
def get_config(self):
return {"value": self._value}
@classmethod
def from_config(cls, config):
return cls(**config)
serialized_name = "Custom>TestClass"
inst = TestClass(value=10)
class_name = object_registration.GLOBAL_CUSTOM_NAMES[TestClass]
self.assertEqual(serialized_name, class_name)
config = serialization_lib.serialize_keras_object(inst)
self.assertEqual("TestClass", config["class_name"])
new_inst = serialization_lib.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, TestClass)
self.assertEqual(10, new_inst._value)
def test_serialize_custom_class_with_custom_name(self):
@object_registration.register_keras_serializable(
"TestPackage", "CustomName"
)
class OtherTestClass:
def __init__(self, val):
self._val = val
def get_config(self):
return {"val": self._val}
@classmethod
def from_config(cls, config):
return cls(**config)
serialized_name = "TestPackage>CustomName"
inst = OtherTestClass(val=5)
class_name = object_registration.GLOBAL_CUSTOM_NAMES[OtherTestClass]
self.assertEqual(serialized_name, class_name)
fn_class_name = object_registration.get_registered_name(OtherTestClass)
self.assertEqual(fn_class_name, class_name)
cls = object_registration.get_registered_object(fn_class_name)
self.assertEqual(OtherTestClass, cls)
config = keras_core.saving.serialize_keras_object(inst)
self.assertEqual("OtherTestClass", config["class_name"])
new_inst = keras_core.saving.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, OtherTestClass)
self.assertEqual(5, new_inst._val)
def test_serialize_custom_function(self):
@object_registration.register_keras_serializable()
def my_fn():
return 42
serialized_name = "Custom>my_fn"
class_name = object_registration.GLOBAL_CUSTOM_NAMES[my_fn]
self.assertEqual(serialized_name, class_name)
fn_class_name = object_registration.get_registered_name(my_fn)
self.assertEqual(fn_class_name, class_name)
config = keras_core.saving.serialize_keras_object(my_fn)
fn = keras_core.saving.deserialize_keras_object(config)
self.assertEqual(42, fn())
fn_2 = object_registration.get_registered_object(fn_class_name)
self.assertEqual(42, fn_2())
def test_serialize_custom_class_without_get_config_fails(self):
with self.assertRaisesRegex(
ValueError,
"Cannot register a class that does not have a get_config.*",
):
@object_registration.register_keras_serializable(
"TestPackage", "TestClass"
)
class TestClass:
def __init__(self, value):
self._value = value
| keras-core/keras_core/saving/object_registration_test.py/0 | {
"file_path": "keras-core/keras_core/saving/object_registration_test.py",
"repo_id": "keras-core",
"token_count": 2169
} | 52 |
from keras_core import backend
def is_in_jax_tracing_scope():
if backend.backend() == "jax":
x = backend.numpy.ones(())
if x.__class__.__name__ == "DynamicJaxprTracer":
return True
return False
| keras-core/keras_core/utils/jax_utils.py/0 | {
"file_path": "keras-core/keras_core/utils/jax_utils.py",
"repo_id": "keras-core",
"token_count": 103
} | 53 |
# Call For Contributions
Contributors looking for a task can look at the following list to find an item
to work on. Should you decide to contribute a component, please comment on the
corresponding GitHub issue that you will be working on the component. A team
member will then follow up by assigning the issue to you.
[There is a contributions welcome label available here](https://github.com/keras-team/keras-cv/issues?page=2&q=is%3Aissue+is%3Aopen+label%3Acontribution-welcome)
| keras-cv/CALL_FOR_CONTRIBUTIONS.md/0 | {
"file_path": "keras-cv/CALL_FOR_CONTRIBUTIONS.md",
"repo_id": "keras-cv",
"token_count": 129
} | 54 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.layers import Mosaic
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
IMAGES,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
LABELS,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldMosaic(BaseImageAugmentationLayer):
"""Mosaic implements the mosaic data augmentation technique.
Mosaic data augmentation first takes 4 images from the batch and makes a
    grid. After that, based on the offset, a crop is taken to form the mosaic
image. Labels are in the same ratio as the area of their images in the
output image. Bounding boxes are translated according to the position of
the 4 images.
Args:
offset: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `offset` is used to determine the offset
of the mosaic center from the top-left corner of the mosaic. If a
tuple is used, the x and y coordinates of the mosaic center are
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`. Defaults to
(0.25, 0.75).
bounding_box_format: a case-insensitive string (for example, "xyxy") to
be passed if bounding boxes are being augmented by this layer.
Each bounding box is defined by at least these 4 values. The inputs
may contain additional information such as classes and confidence
after these 4 values but these values will be ignored and returned
as is. For detailed information on the supported formats, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/). Defaults to None.
seed: integer, used to create a random seed.
References:
- [Yolov4 paper](https://arxiv.org/pdf/2004.10934).
- [Yolov5 implementation](https://github.com/ultralytics/yolov5).
- [YoloX implementation](https://github.com/Megvii-BaseDetection/YOLOX)
Sample usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
labels = tf.one_hot(labels,10)
labels = tf.cast(tf.squeeze(labels), tf.float32)
mosaic = keras_cv.layers.preprocessing.Mosaic()
output = mosaic({'images': images, 'labels': labels})
# output == {'images': updated_images, 'labels': updated_labels}
```
""" # noqa: E501
def __init__(
self, offset=(0.25, 0.75), bounding_box_format=None, seed=None, **kwargs
):
super().__init__(seed=seed, **kwargs)
self.offset = offset
self.bounding_box_format = bounding_box_format
self.center_sampler = preprocessing_utils.parse_factor(
offset, param_name="offset", seed=seed
)
self.seed = seed
def _batch_augment(self, inputs):
self._validate_inputs(inputs)
images = inputs.get("images", None)
labels = inputs.get("labels", None)
bounding_boxes = inputs.get("bounding_boxes", None)
batch_size = tf.shape(images)[0]
# pick 3 indices for every batch to create the mosaic output with.
permutation_order = tf.random.uniform(
(batch_size, 3),
minval=0,
maxval=batch_size,
dtype=tf.int32,
)
# concatenate the batches with permutation order to get all 4 images of
# the mosaic
permutation_order = tf.concat(
[tf.expand_dims(tf.range(batch_size), axis=-1), permutation_order],
axis=-1,
)
input_height, input_width, _ = images.shape[1:]
mosaic_centers_x = (
self.center_sampler(
tf.expand_dims(batch_size, axis=0), dtype=self.compute_dtype
)
* input_width
)
mosaic_centers_y = (
self.center_sampler(
shape=tf.expand_dims(batch_size, axis=0),
dtype=self.compute_dtype,
)
* input_height
)
mosaic_centers = tf.stack((mosaic_centers_x, mosaic_centers_y), axis=-1)
# return the mosaics
images = tf.vectorized_map(
lambda index: self._update_image(
images, permutation_order, mosaic_centers, index
),
tf.range(batch_size),
)
if labels is not None:
labels = tf.vectorized_map(
lambda index: self._update_label(
images, labels, permutation_order, mosaic_centers, index
),
tf.range(batch_size),
)
inputs["labels"] = labels
if bounding_boxes is not None:
# values to translate the boxes by in the mosaic image
translate_x = tf.stack(
[
mosaic_centers_x - input_width,
mosaic_centers_x,
mosaic_centers_x - input_width,
mosaic_centers_x,
],
axis=-1,
)
translate_y = tf.stack(
[
mosaic_centers_y - input_height,
mosaic_centers_y - input_height,
mosaic_centers_y,
mosaic_centers_y,
],
axis=-1,
)
bounding_boxes = bounding_box.to_dense(bounding_boxes)
bounding_boxes = tf.map_fn(
lambda index: self._update_bounding_box(
images,
bounding_boxes,
permutation_order,
translate_x,
translate_y,
index,
),
tf.range(batch_size),
fn_output_signature={
"boxes": tf.RaggedTensorSpec(
shape=[None, 4],
ragged_rank=1,
dtype=self.compute_dtype,
),
"classes": tf.RaggedTensorSpec(
shape=[None], dtype=self.compute_dtype
),
},
)
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
inputs["bounding_boxes"] = bounding_boxes
inputs["images"] = images
return inputs
def _augment(self, inputs):
raise ValueError(
"Mosaic received a single image to `call`. The layer relies on "
"combining multiple examples, and as such will not behave as "
"expected. Please call the layer with 4 or more samples."
)
def _update_image(self, images, permutation_order, mosaic_centers, index):
# forms mosaic for one image from the batch
input_height, input_width, _ = images.shape[1:]
mosaic_images = tf.gather(images, permutation_order[index])
top = tf.concat([mosaic_images[0], mosaic_images[1]], axis=1)
bottom = tf.concat([mosaic_images[2], mosaic_images[3]], axis=1)
output = tf.concat([top, bottom], axis=0)
# cropping coordinates for the mosaic
x1 = (input_width - mosaic_centers[index][0]) / (input_width * 2 - 1)
y1 = (input_height - mosaic_centers[index][1]) / (input_height * 2 - 1)
x2 = x1 + (input_width) / (input_width * 2 - 1)
y2 = y1 + (input_height) / (input_height * 2 - 1)
# helps avoid retracing caused by slicing, inspired by RRC
# implementation
output = tf.image.crop_and_resize(
tf.expand_dims(output, axis=0),
[[y1, x1, y2, x2]],
[0],
[input_height, input_width],
)
# tf.image.crop_and_resize will always output float32, so we need to
# recast tf.image.crop_and_resize outputs
# [num_boxes, crop_height, crop_width, depth] since num_boxes is always
# one we squeeze axis 0
output = tf.cast(output, self.compute_dtype)
output = tf.squeeze(output, axis=0)
return output
def _update_label(
self, images, labels, permutation_order, mosaic_centers, index
):
# updates labels for one output mosaic
input_height, input_width, _ = images.shape[1:]
labels_for_mosaic = tf.gather(labels, permutation_order[index])
center_x = mosaic_centers[index][0]
center_y = mosaic_centers[index][1]
area = input_height * input_width
# labels are in the same ratio as the area of the images
top_left_ratio = (center_x * center_y) / area
top_right_ratio = ((input_width - center_x) * center_y) / area
bottom_left_ratio = (center_x * (input_height - center_y)) / area
bottom_right_ratio = (
(input_width - center_x) * (input_height - center_y)
) / area
label = (
labels_for_mosaic[0] * top_left_ratio
+ labels_for_mosaic[1] * top_right_ratio
+ labels_for_mosaic[2] * bottom_left_ratio
+ labels_for_mosaic[3] * bottom_right_ratio
)
return label
def _update_bounding_box(
self,
images,
bounding_boxes,
permutation_order,
translate_x,
translate_y,
index,
):
# updates bounding_boxes for one output mosaic
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=images,
dtype=self.compute_dtype,
)
boxes, classes = bounding_boxes["boxes"], bounding_boxes["classes"]
classes_for_mosaic = tf.gather(classes, permutation_order[index])
boxes_for_mosaic = tf.gather(boxes, permutation_order[index])
# stacking translate values such that the shape is (4, 1, 4) or
# (num_images, broadcast dim, coordinates)
translate_values = tf.stack(
[
translate_x[index],
translate_y[index],
translate_x[index],
translate_y[index],
],
axis=-1,
)
translate_values = tf.expand_dims(translate_values, axis=1)
# translating boxes
boxes_for_mosaic = boxes_for_mosaic + translate_values
boxes_for_mosaic = tf.reshape(boxes_for_mosaic, [-1, 4])
        classes_for_mosaic = tf.reshape(classes_for_mosaic, [-1])
boxes_for_mosaic = {
"boxes": boxes_for_mosaic,
"classes": classes_for_mosaic,
}
boxes_for_mosaic = bounding_box.clip_to_image(
boxes_for_mosaic,
bounding_box_format="xyxy",
images=images[index],
)
boxes_for_mosaic = bounding_box.to_ragged(boxes_for_mosaic)
boxes_for_mosaic = bounding_box.convert_format(
boxes_for_mosaic,
source="xyxy",
target=self.bounding_box_format,
images=images[index],
dtype=self.compute_dtype,
)
return boxes_for_mosaic
def _validate_inputs(self, inputs):
images = inputs.get("images", None)
labels = inputs.get("labels", None)
bounding_boxes = inputs.get("bounding_boxes", None)
if images is None or (labels is None and bounding_boxes is None):
            raise ValueError(
                "Mosaic expects inputs in a dictionary with format "
                '{"images": images, "labels": labels} or '
                '{"images": images, "bounding_boxes": bounding_boxes}. '
                f"Got: inputs = {inputs}"
)
if labels is not None and not labels.dtype.is_floating:
raise ValueError(
f"Mosaic received labels with type {labels.dtype}. "
"Labels must be of type float."
)
if bounding_boxes is not None and self.bounding_box_format is None:
raise ValueError(
"Mosaic received bounding boxes but no bounding_box_format. "
"Please pass a bounding_box_format from the supported list."
)
def get_config(self):
config = {
"offset": self.offset,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class MosaicTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
image_shape = (1, 32, 32, 3)
fixed_offset = (0.5, 0.5)
fixed_seed = 2023
images = tf.random.uniform(shape=image_shape)
inputs = {
IMAGES: images,
LABELS: tf.one_hot(tf.zeros((1,), tf.int32), 10),
}
layer = Mosaic(offset=fixed_offset, seed=fixed_seed)
old_layer = OldMosaic(offset=fixed_offset, seed=fixed_seed)
output = layer(inputs)
old_output = old_layer(inputs)
self.assertNotAllClose(inputs[IMAGES], output[IMAGES])
self.assertAllClose(old_output[IMAGES], output[IMAGES])
self.assertAllClose(old_output[LABELS], output[LABELS])
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
num_classes = 10
results = {}
aug_candidates = [Mosaic, OldMosaic]
aug_args = {}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
inputs = {
IMAGES: x_train[:n_images],
LABELS: tf.one_hot(
tf.zeros((n_images,), tf.int32), num_classes
),
}
layer(inputs)
t0 = time.time()
r1 = layer(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
inputs = {
IMAGES: x_train[:n_images],
LABELS: tf.one_hot(
tf.zeros((n_images,), tf.int32), num_classes
),
}
# warmup
apply_aug(inputs)
t0 = time.time()
r1 = apply_aug(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.image.crop_and_resize on XLA
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_mosaic.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_mosaic.py",
"repo_id": "keras-cv",
"token_count": 7976
} | 55 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Usage: python configure.py
"""Configures local environment to prepare for building KerasCV from source."""
import logging
import os
import pathlib
import platform
import tensorflow as tf
from packaging.version import Version
_TFA_BAZELRC = ".bazelrc"
# Writes variables to bazelrc file
def write(line):
with open(_TFA_BAZELRC, "a") as f:
f.write(line + "\n")
def write_action_env(var_name, var):
write('build --action_env {}="{}"'.format(var_name, var))
def is_macos():
return platform.system() == "Darwin"
def is_windows():
return platform.system() == "Windows"
def is_linux():
return platform.system() == "Linux"
def is_raspi_arm():
return os.uname()[4] == "armv7l" or os.uname()[4] == "aarch64"
def is_linux_ppc64le():
return is_linux() and platform.machine() == "ppc64le"
def is_linux_x86_64():
return is_linux() and platform.machine() == "x86_64"
def is_linux_arm():
return is_linux() and platform.machine() == "arm"
def is_linux_aarch64():
return is_linux() and platform.machine() == "aarch64"
def is_linux_s390x():
return is_linux() and platform.machine() == "s390x"
def get_tf_header_dir():
import tensorflow as tf
tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]
if is_windows():
tf_header_dir = tf_header_dir.replace("\\", "/")
return tf_header_dir
def get_cpp_version():
cpp_version = "c++14"
if Version(tf.__version__) >= Version("2.10"):
cpp_version = "c++17"
return cpp_version
def get_tf_shared_lib_dir():
import tensorflow as tf
# OS Specific parsing
if is_windows():
tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
return tf_shared_lib_dir.replace("\\", "/")
elif is_raspi_arm():
return tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
else:
return tf.sysconfig.get_link_flags()[0][2:]
# Converts the linkflag namespec to the full shared library name
def get_shared_lib_name():
import tensorflow as tf
namespec = tf.sysconfig.get_link_flags()
if is_macos():
# MacOS
return "lib" + namespec[1][2:] + ".dylib"
elif is_windows():
# Windows
return "_pywrap_tensorflow_internal.lib"
elif is_raspi_arm():
# The below command for linux would return an empty list
return "_pywrap_tensorflow_internal.so"
else:
# Linux
return namespec[1][3:]
def create_build_configuration():
print()
print("Configuring KerasCV to be built from source...")
if os.path.isfile(_TFA_BAZELRC):
os.remove(_TFA_BAZELRC)
logging.disable(logging.WARNING)
write_action_env("TF_HEADER_DIR", get_tf_header_dir())
write_action_env("TF_SHARED_LIBRARY_DIR", get_tf_shared_lib_dir())
write_action_env("TF_SHARED_LIBRARY_NAME", get_shared_lib_name())
write_action_env("TF_CXX11_ABI_FLAG", tf.sysconfig.CXX11_ABI_FLAG)
# This should be replaced with a call to tf.sysconfig if it's added
write_action_env("TF_CPLUSPLUS_VER", get_cpp_version())
write("build --spawn_strategy=standalone")
write("build --strategy=Genrule=standalone")
write("build --experimental_repo_remote_exec")
write("build -c opt")
write(
"build --cxxopt="
+ '"-D_GLIBCXX_USE_CXX11_ABI="'
+ str(tf.sysconfig.CXX11_ABI_FLAG)
)
if is_windows():
write("build --config=windows")
write("build:windows --enable_runfiles")
write("build:windows --copt=/experimental:preprocessor")
write("build:windows --host_copt=/experimental:preprocessor")
write("build:windows --copt=/arch=AVX")
write("build:windows --cxxopt=/std:" + get_cpp_version())
write("build:windows --host_cxxopt=/std:" + get_cpp_version())
if is_macos() or is_linux():
if (
not is_linux_ppc64le()
and not is_linux_arm()
and not is_linux_aarch64()
):
write("build --copt=-mavx")
write("build --cxxopt=-std=" + get_cpp_version())
write("build --host_cxxopt=-std=" + get_cpp_version())
print("> Building only CPU ops")
print()
print("Build configurations successfully written to", _TFA_BAZELRC, ":\n")
print(pathlib.Path(_TFA_BAZELRC).read_text())
if __name__ == "__main__":
create_build_configuration()
| keras-cv/build_deps/configure.py/0 | {
"file_path": "keras-cv/build_deps/configure.py",
"repo_id": "keras-cv",
"token_count": 1987
} | 56 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import tensorflow as tf
from keras_cv.datasets.waymo import build_tensors_for_augmentation
from keras_cv.datasets.waymo import load
# "gs://waymo_open_dataset_v_1_0_0_individual_files/training"
TRAINING_RECORD_PATH = "./wod_records"
TRANSFORMED_RECORD_PATH = "./wod_transformed"
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def serialize_example(feature0, feature1):
"""
Creates a tf.train.Example message ready to be written to a file.
"""
# Create a dictionary mapping the feature name to the
# tf.train.Example-compatible data type.
feature = {
"point_clouds": _float_feature(tf.reshape(feature0, [-1]).numpy()),
"bounding_boxes": _float_feature(tf.reshape(feature1, [-1]).numpy()),
}
# Create a Features message using tf.train.Example.
example_proto = tf.train.Example(
features=tf.train.Features(feature=feature)
)
return example_proto.SerializeToString()
# Load the training dataset
filenames = os.listdir(TRAINING_RECORD_PATH)
for filename in filenames:
train_ds = load([os.path.join(TRAINING_RECORD_PATH, filename)])
train_ds = train_ds.map(
build_tensors_for_augmentation, num_parallel_calls=tf.data.AUTOTUNE
)
start = time.time()
step = 0
transformed_filename = os.path.join(TRANSFORMED_RECORD_PATH, filename)
with tf.io.TFRecordWriter(transformed_filename) as writer:
for examples in train_ds:
serialized_example = serialize_example(
examples["point_clouds"], examples["bounding_boxes"]
)
writer.write(serialized_example)
step += 1
print(f"Number of samples {step}")
print(f"Time elapsed: {time.time()-start} seconds")
| keras-cv/examples/training/object_detection_3d/waymo/serialize_records.py/0 | {
"file_path": "keras-cv/examples/training/object_detection_3d/waymo/serialize_records.py",
"repo_id": "keras-cv",
"token_count": 918
} | 57 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmarks for training KerasCV models against the MNIST dataset."""
import time
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from tensorflow import keras
from keras_cv import models
from keras_cv.models.classification import image_classifier
# isort: off
from tensorflow.python.platform.benchmark import (
ParameterizedBenchmark,
)
class ClassificationTrainingBenchmark(
tf.test.Benchmark, metaclass=ParameterizedBenchmark
):
"""Benchmarks for classification models using `tf.test.Benchmark`."""
_benchmark_parameters = [
("ResNet18V2Backbone", models.ResNet18V2Backbone),
]
def __init__(self):
super().__init__()
self.num_classes = 10
self.batch_size = 64
# x shape is (batch_size, 56, 56, 3)
# y shape is (batch_size, 10)
self.dataset = (
tfds.load("mnist", split="test")
.map(
lambda x: (
tf.image.grayscale_to_rgb(
tf.image.resize(x["image"], (56, 56))
),
tf.one_hot(x["label"], self.num_classes),
),
num_parallel_calls=tf.data.AUTOTUNE,
)
.batch(self.batch_size)
)
self.epochs = 1
def benchmark_classification_training_single_gpu(self, app):
self._run_benchmark(app, tf.distribute.OneDeviceStrategy("/gpu:0"))
def benchmark_classification_training_multi_gpu(self, app):
self._run_benchmark(app, tf.distribute.MirroredStrategy())
def _run_benchmark(self, app, strategy):
with strategy.scope():
t0 = time.time()
model = image_classifier.ImageClassifier(
backbone=app(),
num_classes=self.num_classes,
)
model.compile(
optimizer=keras.optimizers.SGD(learning_rate=0.1, momentum=0.9),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
compile_time = time.time() - t0
train_start_time = time.time()
training_results = model.fit(
self.dataset,
batch_size=self.batch_size,
epochs=self.epochs,
)
train_end_time = time.time()
training_time = train_end_time - train_start_time
total_time = train_end_time - t0
metrics = []
metrics.append({"name": "compile_time", "value": compile_time})
metrics.append(
{"name": "avg_epoch_time", "value": training_time / self.epochs}
)
metrics.append({"name": "epochs", "value": self.epochs})
metrics.append(
{
"name": "accuracy",
"value": training_results.history["accuracy"][0],
}
)
self.report_benchmark(wall_time=total_time, metrics=metrics)
if __name__ == "__main__":
tf.test.main()
| keras-cv/keras_cv/benchmarks/classification_training_benchmark_test.py/0 | {
"file_path": "keras-cv/keras_cv/benchmarks/classification_training_benchmark_test.py",
"repo_id": "keras-cv",
"token_count": 1581
} | 58 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.tests.test_case import TestCase
class BoundingBoxUtilTest(TestCase):
def test_clip_to_image_standard(self):
# Test xyxy format unbatched
height = 256
width = 256
bounding_boxes = {
"boxes": np.array([[200, 200, 400, 400], [100, 100, 300, 300]]),
"classes": np.array([0, 0]),
}
image = ops.ones(shape=(height, width, 3))
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="xyxy", images=image
)
boxes = bounding_boxes["boxes"]
self.assertAllGreaterEqual(boxes, 0)
        x1, y1, x2, y2 = ops.split(boxes, 4, axis=1)
self.assertAllLessEqual(ops.concatenate([x1, x2], axis=1), width)
self.assertAllLessEqual(ops.concatenate([y1, y2], axis=1), height)
# Test relative format batched
image = ops.ones(shape=(1, height, width, 3))
bounding_boxes = {
"boxes": np.array([[[0.2, -1, 1.2, 0.3], [0.4, 1.5, 0.2, 0.3]]]),
"classes": np.array([[0, 0]]),
}
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="rel_xyxy", images=image
)
self.assertAllLessEqual(bounding_boxes["boxes"], 1)
def test_clip_to_image_filters_fully_out_bounding_boxes(self):
# Test xyxy format unbatched
height = 256
width = 256
bounding_boxes = {
"boxes": np.array([[257, 257, 400, 400], [100, 100, 300, 300]]),
"classes": np.array([0, 0]),
}
image = ops.ones(shape=(height, width, 3))
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="xyxy", images=image
)
self.assertAllEqual(
bounding_boxes["boxes"],
np.array([[-1, -1, -1, -1], [100, 100, 256, 256]]),
        )
self.assertAllEqual(
bounding_boxes["classes"],
np.array([-1, 0]),
)
def test_clip_to_image_filters_fully_out_bounding_boxes_negative_area(self):
# Test xyxy format unbatched
height = 256
width = 256
bounding_boxes = {
"boxes": np.array([[110, 120, 100, 100], [100, 100, 300, 300]]),
"classes": np.array([0, 0]),
}
image = ops.ones(shape=(height, width, 3))
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="xyxy", images=image
)
        self.assertAllEqual(
            bounding_boxes["boxes"],
            np.array([[-1, -1, -1, -1], [100, 100, 256, 256]]),
        )
self.assertAllEqual(
bounding_boxes["classes"],
np.array([-1, 0]),
)
def test_clip_to_image_filters_nans(self):
# Test xyxy format unbatched
height = 256
width = 256
bounding_boxes = {
"boxes": np.array(
[[0, float("NaN"), 100, 100], [100, 100, 300, 300]]
),
"classes": np.array([0, 0]),
}
image = ops.ones(shape=(height, width, 3))
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, bounding_box_format="xyxy", images=image
)
        self.assertAllEqual(
            bounding_boxes["boxes"],
            np.array([[-1, -1, -1, -1], [100, 100, 256, 256]]),
        )
self.assertAllEqual(
bounding_boxes["classes"],
np.array([-1, 0]),
)
def test_is_relative_util(self):
self.assertTrue(bounding_box.is_relative("rel_xyxy"))
self.assertFalse(bounding_box.is_relative("xyxy"))
with self.assertRaises(ValueError):
_ = bounding_box.is_relative("bad_format")
def test_as_relative_util(self):
self.assertEqual(bounding_box.as_relative("yxyx"), "rel_yxyx")
self.assertEqual(bounding_box.as_relative("rel_xywh"), "rel_xywh")
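# A minimal usage sketch (not part of the original test file) of the
# `clip_to_image` behavior exercised above: boxes are clamped to the image
# bounds, and boxes that end up fully outside the image (or with
# non-positive area) are replaced by the -1 sentinel in both "boxes" and
# "classes".
def _clip_to_image_demo():
    image = ops.ones(shape=(256, 256, 3))
    boxes = {
        "boxes": np.array([[300, 300, 400, 400], [10, 10, 500, 500]]),
        "classes": np.array([0, 1]),
    }
    clipped = bounding_box.clip_to_image(
        boxes, bounding_box_format="xyxy", images=image
    )
    # First box is fully outside -> sentinel [-1, -1, -1, -1], class -1.
    # Second box is clamped to [10, 10, 256, 256]; its class is unchanged.
    return clipped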
| keras-cv/keras_cv/bounding_box/utils_test.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/utils_test.py",
"repo_id": "keras-cv",
"token_count": 2868
} | 59 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import layers
from keras_cv.api_export import keras_cv_export
def parse_imagenet_example(img_size, crop_to_aspect_ratio):
"""Function to parse a TFRecord example into an image and label"""
resizing = None
if img_size:
resizing = layers.Resizing(
width=img_size[0],
height=img_size[1],
crop_to_aspect_ratio=crop_to_aspect_ratio,
)
def apply(example):
# Read example
image_key = "image/encoded"
label_key = "image/class/label"
keys_to_features = {
image_key: tf.io.FixedLenFeature((), tf.string, ""),
label_key: tf.io.FixedLenFeature([], tf.int64, -1),
}
parsed = tf.io.parse_single_example(example, keys_to_features)
# Decode and resize image
image_bytes = tf.reshape(parsed[image_key], shape=[])
image = tf.io.decode_jpeg(image_bytes, channels=3)
if resizing:
image = resizing(image)
# Decode label
label = (
tf.cast(tf.reshape(parsed[label_key], shape=()), dtype=tf.int32) - 1
)
label = tf.one_hot(label, 1000)
return image, label
return apply
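# A minimal pipeline sketch (not part of the original file; the TFRecord path
# below is hypothetical) showing how the closure returned by
# `parse_imagenet_example` plugs into a `tf.data` pipeline:
def _parse_demo():
    parse_fn = parse_imagenet_example(
        img_size=(224, 224), crop_to_aspect_ratio=True
    )
    ds = tf.data.TFRecordDataset(["/tmp/imagenet/train-00000-of-01024"])
    # Each record becomes an (image, one_hot_label) pair; the label is
    # shifted from 1-indexed to 0-indexed before one-hot encoding.
    return ds.map(parse_fn, num_parallel_calls=tf.data.AUTOTUNE)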
@keras_cv_export(
"keras_cv.datasets.imagenet.load", package="keras_cv.datasets.imagenet"
)
def load(
split,
tfrecord_path,
batch_size=None,
shuffle=True,
shuffle_buffer=None,
reshuffle_each_iteration=False,
img_size=None,
crop_to_aspect_ratio=True,
):
"""Loads the ImageNet dataset from TFRecords
Usage:
```python
dataset, ds_info = keras_cv.datasets.imagenet.load(
split="train", tfrecord_path="gs://my-bucket/imagenet-tfrecords"
)
```
Args:
        split: the split to load. Should be one of "train" or "validation".
tfrecord_path: the path to your preprocessed ImageNet TFRecords.
See keras_cv/datasets/imagenet/README.md for preprocessing
instructions.
batch_size: how many instances to include in batches after loading.
Should only be specified if img_size is specified (so that images
can be resized to the same size before batching).
shuffle: whether to shuffle the dataset, defaults to True.
shuffle_buffer: the size of the buffer to use in shuffling.
reshuffle_each_iteration: whether to reshuffle the dataset on every
epoch, defaults to False.
        img_size: the size to resize the images to, defaults to None,
            indicating that images should not be resized.
        crop_to_aspect_ratio: whether to crop images to the target aspect
            ratio when resizing (passed to `keras.layers.Resizing`),
            defaults to True.
Returns:
        tf.data.Dataset containing ImageNet. Each entry is an
        `(image, label)` tuple, where `image` is a Tensor of shape
        [H, W, 3] (or [batch_size, H, W, 3] when batched) and `label` is a
        one-hot Tensor of shape [1000].
"""
if batch_size is not None and img_size is None:
raise ValueError(
"Batching can only be performed if images are resized."
)
num_splits = 1024 if split == "train" else 128
filenames = [
f"{tfrecord_path}/{split}-{i:05d}-of-{num_splits:05d}"
for i in range(0, num_splits)
]
dataset = tf.data.TFRecordDataset(
filenames=filenames, num_parallel_reads=tf.data.AUTOTUNE
)
dataset = dataset.map(
parse_imagenet_example(img_size, crop_to_aspect_ratio),
num_parallel_calls=tf.data.AUTOTUNE,
)
if shuffle:
if not batch_size and not shuffle_buffer:
raise ValueError(
"If `shuffle=True`, either a `batch_size` or `shuffle_buffer` "
"must be provided to `keras_cv.datasets.imagenet.load().`"
)
shuffle_buffer = shuffle_buffer or 8 * batch_size
dataset = dataset.shuffle(
shuffle_buffer, reshuffle_each_iteration=reshuffle_each_iteration
)
if batch_size is not None:
dataset = dataset.batch(batch_size)
return dataset.prefetch(tf.data.AUTOTUNE)
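# A short usage sketch (not part of the original file; the bucket path is
# hypothetical). Note the constraints enforced above: `batch_size` requires
# `img_size`, and `shuffle=True` requires either `batch_size` or an explicit
# `shuffle_buffer`.
def _load_demo():
    return load(
        split="validation",
        tfrecord_path="gs://my-bucket/imagenet-tfrecords",
        batch_size=128,
        img_size=(224, 224),
        shuffle=True,  # shuffle_buffer defaults to 8 * batch_size
    )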
| keras-cv/keras_cv/datasets/imagenet/load.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/imagenet/load.py",
"repo_id": "keras-cv",
"token_count": 1895
} | 60 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converter functions for working with keypoints formats."""
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
# Internal exception
class _RequiresImagesException(Exception):
pass
def _rel_xy_to_xy(keypoints, images=None):
if images is None:
raise _RequiresImagesException()
shape = tf.cast(tf.shape(images), keypoints.dtype)
h, w = shape[1], shape[2]
x, y, rest = tf.split(keypoints, [1, 1, keypoints.shape[-1] - 2], axis=-1)
return tf.concat([x * w, y * h, rest], axis=-1)
def _xy_to_rel_xy(keypoints, images=None):
if images is None:
raise _RequiresImagesException()
shape = tf.cast(tf.shape(images), keypoints.dtype)
h, w = shape[1], shape[2]
x, y, rest = tf.split(keypoints, [1, 1, keypoints.shape[-1] - 2], axis=-1)
return tf.concat([x / w, y / h, rest], axis=-1)
def _xy_noop(keypoints, images=None):
return keypoints
TO_XY_CONVERTERS = {
"xy": _xy_noop,
"rel_xy": _rel_xy_to_xy,
}
FROM_XY_CONVERTERS = {
"xy": _xy_noop,
"rel_xy": _xy_to_rel_xy,
}
@keras_cv_export(
"keras_cv.keypoint.convert_format", package="keras_cv.keypoint"
)
def convert_format(keypoints, source, target, images=None, dtype=None):
"""Converts keypoints from one format to another.
Supported formats are:
- `"xy"`, absolute pixel positions.
    - `"rel_xy"`, relative pixel positions.
    Formats are case-insensitive.
    Relative formats, abbreviated `rel`, make use of the shapes of the
    `images` passed. In these formats, the coordinates are specified as
    fractions of the host image's width and height.
`images` may be a ragged Tensor. Note that using a ragged Tensor
for images may cause a substantial performance loss, as each image
will need to be processed separately due to the mismatching image
shapes.
Usage:
```python
images, keypoints = load_my_dataset()
keypoints_in_rel = keras_cv.keypoint.convert_format(
keypoint,
source='xy',
target='rel_xy',
images=images,
)
```
Args:
keypoints: tf.Tensor or tf.RaggedTensor representing keypoints
in the format specified in the `source` parameter.
`keypoints` can optionally have extra dimensions stacked
on the final axis to store metadata. keypoints should
            have a rank between 2 and 4, with the shape
            `[num_keypoints, *]`, `[batch_size, num_keypoints, *]` or
            `[batch_size, num_groups, num_keypoints, *]`.
        source: One of `"xy"` or `"rel_xy"`. Used to specify the original
            format of the `keypoints` parameter.
        target: One of `"xy"` or `"rel_xy"`. Used to specify the
            destination format of the `keypoints` parameter.
        images: (Optional) a batch of images aligned with `keypoints` on
            the first axis. Should be rank 3 (`HWC` format) or 4
            (`BHWC` format). Used by the relative converters to compute
            pixel values from the image dimensions. Required when
            converting between relative and absolute formats.
        dtype: the data type to use when transforming the keypoints.
            Defaults to None, i.e. `keypoints` dtype.
"""
source = source.lower()
target = target.lower()
if source not in TO_XY_CONVERTERS:
raise ValueError(
f"convert_format() received an unsupported format for the argument "
f"`source`. `source` should be one of {TO_XY_CONVERTERS.keys()}. "
f"Got source={source}"
)
if target not in FROM_XY_CONVERTERS:
raise ValueError(
f"convert_format() received an unsupported format for the argument "
f"`target`. `target` should be one of {FROM_XY_CONVERTERS.keys()}. "
f"Got target={target}"
)
if dtype:
keypoints = tf.cast(keypoints, dtype)
if source == target:
return keypoints
keypoints, images, squeeze_axis = _format_inputs(keypoints, images)
try:
in_xy = TO_XY_CONVERTERS[source](keypoints, images=images)
result = FROM_XY_CONVERTERS[target](in_xy, images=images)
except _RequiresImagesException:
raise ValueError(
"convert_format() must receive `images` when transforming "
f"between relative and absolute formats. "
f"convert_format() received source=`{source}`, target=`{target}`, "
f"but images={images}"
)
return _format_outputs(result, squeeze_axis)
def _format_inputs(keypoints, images):
keypoints_rank = len(keypoints.shape)
if keypoints_rank > 4:
raise ValueError(
"Expected keypoints rank to be in [2, 4], got "
f"len(keypoints.shape)={keypoints_rank}."
)
keypoints_includes_batch = keypoints_rank > 2
keypoints_are_grouped = keypoints_rank == 4
if images is not None:
images_rank = len(images.shape)
if images_rank > 4 or images_rank < 3:
raise ValueError(
"Expected images rank to be 3 or 4, got "
f"len(images.shape)={images_rank}."
)
images_include_batch = images_rank == 4
if keypoints_includes_batch != images_include_batch:
raise ValueError(
"convert_format() expects both `keypoints` and `images` to be "
"batched or both unbatched. Received "
f"len(keypoints.shape)={keypoints_rank}, "
f"len(images.shape)={images_rank}. Expected either "
"len(keypoints.shape)=2 and len(images.shape)=3, or "
"len(keypoints.shape)>=3 and len(images.shape)=4."
)
if not images_include_batch:
images = tf.expand_dims(images, axis=0)
squeeze_axis = []
if not keypoints_includes_batch:
keypoints = tf.expand_dims(keypoints, axis=0)
squeeze_axis.append(0)
if not keypoints_are_grouped:
keypoints = tf.expand_dims(keypoints, axis=1)
squeeze_axis.append(1)
return keypoints, images, squeeze_axis
def _format_outputs(result, squeeze_axis):
if len(squeeze_axis) == 0:
return result
return tf.squeeze(result, axis=squeeze_axis)
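# A round-trip sketch (not part of the original file): converting keypoints
# to a relative format and back recovers the input, and the rank handling in
# `_format_inputs`/`_format_outputs` means unbatched inputs come back
# unbatched.
def _convert_format_demo():
    images = tf.ones((2, 100, 200, 3))  # batch of 100x200 images
    keypoints = tf.constant([[[10.0, 20.0]], [[30.0, 40.0]]])  # rank 3
    rel = convert_format(
        keypoints, source="xy", target="rel_xy", images=images
    )
    # rel == [[[10/200, 20/100]], [[30/200, 40/100]]] (x / width, y / height)
    back = convert_format(
        rel, source="rel_xy", target="xy", images=images
    )
    return rel, back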
| keras-cv/keras_cv/keypoint/converters.py/0 | {
"file_path": "keras-cv/keras_cv/keypoint/converters.py",
"repo_id": "keras-cv",
"token_count": 2871
} | 61 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.layers.AnchorGenerator")
class AnchorGenerator(keras.layers.Layer):
"""AnchorGenerator generates anchors for multiple feature maps.
AnchorGenerator takes multiple scales and generates anchor boxes based on
the anchor sizes, scales, aspect ratios, and strides provided. To invoke
AnchorGenerator, call it on the image that needs anchor boxes.
    `sizes` and `strides` must match structurally: they are matched pairwise
    per level. `scales` and `aspect_ratios` can either be a list, which is
    then used for all the sizes (i.e. levels), or a dictionary of the form
    `{'level_{number}': [parameters at scale...]}`
Args:
bounding_box_format: The format of bounding boxes to generate. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
sizes: A list of integers that represent the anchor sizes for each level,
or a dictionary of integer lists with each key representing a level.
For each anchor size, anchor height will be
`anchor_size / sqrt(aspect_ratio)`, and anchor width will be
`anchor_size * sqrt(aspect_ratio)`. This is repeated for each scale and
aspect ratio.
scales: A list of floats corresponding to multipliers that will be
multiplied by each `anchor_size` to generate a level.
aspect_ratios: A list of floats representing the ratio of anchor width to
height.
strides: iterable of ints that represent the anchor stride size between
center of anchors at each scale.
clip_boxes: whether to clip generated anchor boxes to the image
size, defaults to `False`.
Usage:
```python
strides = [8, 16, 32]
scales = [1, 1.2599210498948732, 1.5874010519681994]
sizes = [32.0, 64.0, 128.0]
aspect_ratios = [0.5, 1.0, 2.0]
image = np.random.uniform(size=(512, 512, 3))
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="rel_yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
clip_boxes=True,
)
anchors = anchor_generator(image)
print(anchors)
# > {0: ..., 1: ..., 2: ...}
```
Input shape: an image with shape `[H, W, C]`
Output: a dictionary with integer keys corresponding to each level of the
        feature pyramid. The anchors at each level have shape
`(H/strides[i] * W/strides[i] * len(scales) * len(aspect_ratios), 4)`.
""" # noqa: E501
def __init__(
self,
bounding_box_format,
sizes,
scales,
aspect_ratios,
strides,
clip_boxes=False,
**kwargs,
):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
# aspect_ratio is a single list that is the same across all levels.
sizes, strides = self._format_sizes_and_strides(sizes, strides)
aspect_ratios = self._match_param_structure_to_sizes(
aspect_ratios, sizes
)
scales = self._match_param_structure_to_sizes(scales, sizes)
self.anchor_generators = {}
for k in sizes.keys():
self.anchor_generators[k] = _SingleAnchorGenerator(
bounding_box_format,
sizes[k],
scales[k],
aspect_ratios[k],
strides[k],
clip_boxes,
dtype=self.compute_dtype,
)
self.built = True
@staticmethod
def _format_sizes_and_strides(sizes, strides):
result_sizes = AnchorGenerator._ensure_param_is_levels_dict(
sizes, "sizes"
)
result_strides = AnchorGenerator._ensure_param_is_levels_dict(
strides, "strides"
)
if sorted(result_strides.keys()) != sorted(result_sizes.keys()):
raise ValueError(
"Expected sizes and strides to be either lists of"
"the same length, or dictionaries with the same keys. Received "
f"sizes={sizes}, strides={strides}"
)
return result_sizes, result_strides
@staticmethod
def _ensure_param_is_levels_dict(param, param_name):
"""Takes a param and its name, converts lists to dictionaries of levels.
For example, the list [1, 2] is converted to {0: 1, 1: 2}.
Raises:
ValueError: when param is not a dict, list or tuple.
"""
if isinstance(param, dict):
return param
if not isinstance(param, (list, tuple)):
raise ValueError(
f"Expected {param_name} to be a dict, list or tuple, received "
f"{param_name}={param}"
)
result = {}
for i in range(len(param)):
result[i] = param[i]
return result
@staticmethod
def _match_param_structure_to_sizes(params, sizes):
"""broadcast the params to match sizes."""
if not isinstance(sizes, dict):
raise ValueError(
"the structure of `sizes` must be a dict, "
f"received sizes={sizes}"
)
return {key: params for key in sizes.keys()}
def __call__(self, image=None, image_shape=None):
if image is None and image_shape is None:
raise ValueError(
"AnchorGenerator() requires `images` or `image_shape`."
)
if image is not None:
if len(image.shape) != 3:
raise ValueError(
"Expected `image` to be a Tensor of rank 3. Got "
f"image.shape.rank={len(image.shape)}"
)
image_shape = tuple(image.shape)
results = {}
for key, generator in self.anchor_generators.items():
results[key] = bounding_box.convert_format(
generator(image_shape),
source="yxyx",
target=self.bounding_box_format,
image_shape=image_shape,
)
return results
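# A small verification sketch (not part of the original file) for the output
# contract documented above: level `i` contains
# H/strides[i] * W/strides[i] * len(scales) * len(aspect_ratios) anchors.
def _anchor_count_demo():
    import numpy as np

    generator = AnchorGenerator(
        bounding_box_format="xyxy",
        sizes=[32.0, 64.0],
        scales=[1.0, 2.0],
        aspect_ratios=[0.5, 1.0, 2.0],
        strides=[8, 16],
    )
    anchors = generator(image=np.ones((512, 512, 3)))
    # Level 0: (512 / 8) ** 2 * 2 * 3 = 24576 anchors, each a 4-vector.
    # Level 1: (512 / 16) ** 2 * 2 * 3 = 6144 anchors.
    return {level: a.shape for level, a in anchors.items()}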
# TODO(tanzheny): consider having customized anchor offset.
class _SingleAnchorGenerator:
"""Internal utility to generate anchors for a single feature map in `yxyx`
format.
Example:
```python
    anchor_gen = _SingleAnchorGenerator(
        "yxyx", sizes=32, scales=[1.0], aspect_ratios=[0.5, 1.0, 2.0],
        stride=16,
    )
anchors = anchor_gen([512, 512, 3])
```
Input shape: the size of the image, `[H, W, C]`
Output shape: the size of anchors,
`(H/stride * W/stride * len(scales) * len(aspect_ratios), 4)`.
Args:
        bounding_box_format: the format of the bounding boxes. Anchors are
            computed in `yxyx` format; conversion to other formats is handled
            by the calling `AnchorGenerator`.
        sizes: A single int representing the base anchor size. The anchor
            height will be `anchor_size / sqrt(aspect_ratio)`, and the anchor
            width will be `anchor_size * sqrt(aspect_ratio)`.
        scales: A list/tuple of positive floats representing scale
            multipliers applied to the base `anchor_size`.
aspect_ratios: a list/tuple of positive floats representing the ratio of
anchor width to anchor height.
stride: A single int represents the anchor stride size between center of
each anchor.
clip_boxes: Boolean to represent whether the anchor coordinates should be
clipped to the image size, defaults to `False`.
dtype: (Optional) The data type to use for the output anchors, defaults to
'float32'.
"""
def __init__(
self,
bounding_box_format,
sizes,
scales,
aspect_ratios,
stride,
clip_boxes=False,
dtype="float32",
):
self.sizes = sizes
self.scales = scales
self.aspect_ratios = aspect_ratios
self.stride = stride
self.clip_boxes = clip_boxes
self.dtype = dtype
def __call__(self, image_size):
image_height = image_size[0]
image_width = image_size[1]
aspect_ratios = ops.cast(self.aspect_ratios, "float32")
aspect_ratios_sqrt = ops.cast(ops.sqrt(aspect_ratios), dtype="float32")
anchor_size = ops.cast(self.sizes, "float32")
# [K]
anchor_heights = []
anchor_widths = []
for scale in self.scales:
anchor_size_t = anchor_size * scale
anchor_height = anchor_size_t / aspect_ratios_sqrt
anchor_width = anchor_size_t * aspect_ratios_sqrt
anchor_heights.append(anchor_height)
anchor_widths.append(anchor_width)
anchor_heights = ops.concatenate(anchor_heights, axis=0)
anchor_widths = ops.concatenate(anchor_widths, axis=0)
half_anchor_heights = ops.reshape(0.5 * anchor_heights, [1, 1, -1])
half_anchor_widths = ops.reshape(0.5 * anchor_widths, [1, 1, -1])
stride = self.stride
# make sure range of `cx` is within limit of `image_width` with
# `stride`, also for sizes where `image_width % stride != 0`.
# [W]
cx = ops.cast(
ops.arange(
0.5 * stride, math.ceil(image_width / stride) * stride, stride
),
"float32",
)
# make sure range of `cy` is within limit of `image_height` with
# `stride`, also for sizes where `image_height % stride != 0`.
# [H]
cy = ops.cast(
ops.arange(
0.5 * stride, math.ceil(image_height / stride) * stride, stride
),
"float32",
)
# [H, W]
cx_grid, cy_grid = ops.meshgrid(cx, cy)
# [H, W, 1]
cx_grid = ops.expand_dims(cx_grid, axis=-1)
cy_grid = ops.expand_dims(cy_grid, axis=-1)
y_min = ops.reshape(cy_grid - half_anchor_heights, (-1,))
y_max = ops.reshape(cy_grid + half_anchor_heights, (-1,))
x_min = ops.reshape(cx_grid - half_anchor_widths, (-1,))
x_max = ops.reshape(cx_grid + half_anchor_widths, (-1,))
# [H * W * K, 1]
y_min = ops.expand_dims(y_min, axis=-1)
y_max = ops.expand_dims(y_max, axis=-1)
x_min = ops.expand_dims(x_min, axis=-1)
x_max = ops.expand_dims(x_max, axis=-1)
if self.clip_boxes:
y_min = ops.maximum(ops.minimum(y_min, image_height), 0.0)
y_max = ops.maximum(ops.minimum(y_max, image_height), 0.0)
x_min = ops.maximum(ops.minimum(x_min, image_width), 0.0)
x_max = ops.maximum(ops.minimum(x_max, image_width), 0.0)
# [H * W * K, 4]
return ops.cast(
ops.concatenate([y_min, x_min, y_max, x_max], axis=-1), self.dtype
)
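# A plain-NumPy sketch (not part of the original file) of the per-anchor
# geometry used above: for base size s, scale c, and aspect ratio r, the
# anchor is h = s * c / sqrt(r) tall and w = s * c * sqrt(r) wide, so
# h * w == (s * c) ** 2 and w / h == r.
def _anchor_geometry_demo(size=32.0, scale=1.0, aspect_ratio=2.0):
    import numpy as np

    h = size * scale / np.sqrt(aspect_ratio)
    w = size * scale * np.sqrt(aspect_ratio)
    assert np.isclose(h * w, (size * scale) ** 2)
    assert np.isclose(w / h, aspect_ratio)
    return h, w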
| keras-cv/keras_cv/layers/object_detection/anchor_generator.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/anchor_generator.py",
"repo_id": "keras-cv",
"token_count": 5038
} | 62 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers.object_detection.rpn_label_encoder import _RpnLabelEncoder
from keras_cv.tests.test_case import TestCase
@pytest.mark.tf_keras_only
class RpnLabelEncoderTest(TestCase):
def test_rpn_label_encoder(self):
rpn_encoder = _RpnLabelEncoder(
anchor_format="xyxy",
ground_truth_box_format="xyxy",
positive_threshold=0.7,
negative_threshold=0.3,
positive_fraction=0.5,
samples_per_image=2,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
        # the 3rd box is a degenerate padding box: it generates 0 IoUs and
        # will not be sampled.
        gt_boxes = tf.constant(
            [[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]]
        )
gt_classes = tf.constant([2, 10, -1], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
box_targets, box_weights, cls_targets, cls_weights = rpn_encoder(
rois, gt_boxes, gt_classes
)
# all rois will be matched to the 2nd gt boxes, and encoded
expected_box_targets = (
tf.constant(
[
[0.5, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.5, -0.5, 0.0, 0.0],
[0.5, 0.5, 0.0, 0.0],
]
)
/ 0.1
)
self.assertAllClose(expected_box_targets, box_targets)
# only foreground and background classes
self.assertAllClose(tf.reduce_max(cls_targets), 1.0)
self.assertAllClose(tf.reduce_min(cls_targets), 0.0)
# all weights between 0 and 1
self.assertAllClose(tf.reduce_max(cls_weights), 1.0)
self.assertAllClose(tf.reduce_min(cls_weights), 0.0)
self.assertAllClose(tf.reduce_max(box_weights), 1.0)
self.assertAllClose(tf.reduce_min(box_weights), 0.0)
def test_rpn_label_encoder_multi_level(self):
rpn_encoder = _RpnLabelEncoder(
anchor_format="xyxy",
ground_truth_box_format="xyxy",
positive_threshold=0.7,
negative_threshold=0.3,
positive_fraction=0.5,
samples_per_image=2,
)
rois = {
2: tf.constant([[0, 0, 5, 5], [2.5, 2.5, 7.5, 7.5]]),
3: tf.constant([[5, 5, 10, 10], [7.5, 7.5, 12.5, 12.5]]),
}
        # the 3rd box is a degenerate padding box: it generates 0 IoUs and
        # will not be sampled.
        gt_boxes = tf.constant(
            [[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]]
        )
gt_classes = tf.constant([2, 10, -1], dtype=tf.float32)
gt_classes = gt_classes[..., tf.newaxis]
_, _, _, cls_weights = rpn_encoder(rois, gt_boxes, gt_classes)
# the 2nd level found 2 positive matches, the 3rd level found no match
expected_cls_weights = {
2: tf.constant([[0.0], [1.0]]),
3: tf.constant([[0.0], [1.0]]),
}
self.assertAllClose(expected_cls_weights[2], cls_weights[2])
self.assertAllClose(expected_cls_weights[3], cls_weights[3])
def test_rpn_label_encoder_batched(self):
rpn_encoder = _RpnLabelEncoder(
anchor_format="xyxy",
ground_truth_box_format="xyxy",
positive_threshold=0.7,
negative_threshold=0.3,
positive_fraction=0.5,
samples_per_image=2,
)
rois = tf.constant(
[
[0, 0, 5, 5],
[2.5, 2.5, 7.5, 7.5],
[5, 5, 10, 10],
[7.5, 7.5, 12.5, 12.5],
]
)
        # the 3rd box is a degenerate padding box: it generates 0 IoUs and
        # will not be sampled.
        gt_boxes = tf.constant(
            [[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]]
        )
gt_classes = tf.constant([2, 10, -1], dtype=tf.int32)
gt_classes = gt_classes[..., tf.newaxis]
rois = rois[tf.newaxis, ...]
gt_boxes = gt_boxes[tf.newaxis, ...]
gt_classes = gt_classes[tf.newaxis, ...]
box_targets, box_weights, cls_targets, cls_weights = rpn_encoder(
rois, gt_boxes, gt_classes
)
# all rois will be matched to the 2nd gt boxes, and encoded
expected_box_targets = (
tf.constant(
[
[0.5, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.5, -0.5, 0.0, 0.0],
[0.5, 0.5, 0.0, 0.0],
]
)
/ 0.1
)
expected_box_targets = expected_box_targets[tf.newaxis, ...]
self.assertAllClose(expected_box_targets, box_targets)
# only foreground and background classes
self.assertAllClose(tf.reduce_max(cls_targets), 1.0)
self.assertAllClose(tf.reduce_min(cls_targets), 0.0)
# all weights between 0 and 1
self.assertAllClose(tf.reduce_max(cls_weights), 1.0)
self.assertAllClose(tf.reduce_min(cls_weights), 0.0)
self.assertAllClose(tf.reduce_max(box_weights), 1.0)
self.assertAllClose(tf.reduce_min(box_weights), 0.0)
| keras-cv/keras_cv/layers/object_detection/rpn_label_encoder_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/rpn_label_encoder_test.py",
"repo_id": "keras-cv",
"token_count": 3068
} | 63 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.AutoContrast")
class AutoContrast(VectorizedBaseImageAugmentationLayer):
"""Performs the AutoContrast operation on an image.
Auto contrast stretches the values of an image across the entire available
`value_range`. This makes differences between pixels more obvious. An
example of this is if an image only has values `[0, 1]` out of the range
`[0, 255]`, auto contrast will change the `1` values to be `255`.
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
"""
def __init__(
self,
value_range,
**kwargs,
):
super().__init__(**kwargs)
self.value_range = value_range
def augment_images(self, images, transformations=None, **kwargs):
original_images = images
images = preprocessing.transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
low = tf.reduce_min(images, axis=(1, 2), keepdims=True)
high = tf.reduce_max(images, axis=(1, 2), keepdims=True)
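        # Linear stretch mapping [low, high] -> [0, 255]: pixels at `low` map
        # to 0 and pixels at `high` map to 255. Constant channels produce
        # NaNs here (high == low) and are restored from the original below.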
scale = 255.0 / (high - low)
offset = -low * scale
images = images * scale + offset
result = tf.clip_by_value(images, 0.0, 255.0)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
result = tf.where(tf.math.is_nan(result), original_images, result)
return result
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_targets(self, targets, transformations, **kwargs):
return targets
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
image, transformations=transformation, **kwargs
)
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
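# A tiny NumPy sketch (not part of the original file) of the stretch applied
# in `augment_images`: an image occupying only [0, 1] out of [0, 255] is
# rescaled so its minimum maps to 0 and its maximum maps to 255.
def _auto_contrast_demo():
    import numpy as np

    image = np.array([[0.0, 0.5], [1.0, 1.0]])
    low, high = image.min(), image.max()
    scale = 255.0 / (high - low)
    stretched = image * scale - low * scale
    # stretched == [[0.0, 127.5], [255.0, 255.0]]
    return stretched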
| keras-cv/keras_cv/layers/preprocessing/auto_contrast.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/auto_contrast.py",
"repo_id": "keras-cv",
"token_count": 1323
} | 64 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Some code in this file was inspired & adapted from `tensorflow_models`.
# Reference:
# https://github.com/tensorflow/models/blob/master/official/vision/ops/preprocess_ops.py
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.JitteredResize")
class JitteredResize(VectorizedBaseImageAugmentationLayer):
"""JitteredResize implements resize with scale distortion.
JitteredResize takes a three-step approach to size-distortion based image
augmentation. This technique is specifically tuned for object detection
pipelines. The layer takes an input of images and bounding boxes, both of
which may be ragged. It outputs a dense image tensor, ready to feed to a
model for training. As such this layer will commonly be the final step in an
augmentation pipeline.
The augmentation process is as follows:
The image is first scaled according to a randomly sampled scale factor. The
width and height of the image are then resized according to the sampled
scale. This is done to introduce noise into the local scale of features in
the image. A subset of the image is then cropped randomly according to
`crop_size`. This crop is then padded to be `target_size`. Bounding boxes
are translated and scaled according to the random scaling and random
cropping.
Args:
target_size: A tuple representing the output size of images.
scale_factor: A tuple of two floats or a `keras_cv.FactorSampler`. For
each augmented image a value is sampled from the provided range.
This factor is used to scale the input image.
To replicate the results of the MaskRCNN paper pass `(0.8, 1.25)`.
crop_size: (Optional) the size of the image to crop from the scaled
image, defaults to `target_size` when not provided.
bounding_box_format: The format of bounding boxes of input boxes.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
interpolation: String, the interpolation method, defaults to
`"bilinear"`. Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
`"area"`, `"lanczos3"`, `"lanczos5"`, `"gaussian"`,
`"mitchellcubic"`.
seed: (Optional) integer to use as the random seed.
Usage:
```python
train_ds = load_object_detection_dataset()
jittered_resize = layers.JitteredResize(
target_size=(640, 640),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
)
train_ds = train_ds.map(
jittered_resize, num_parallel_calls=tf.data.AUTOTUNE
)
# images now are (640, 640, 3)
# an example using crop size
train_ds = load_object_detection_dataset()
jittered_resize = layers.JitteredResize(
target_size=(640, 640),
crop_size=(250, 250),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
)
train_ds = train_ds.map(
jittered_resize, num_parallel_calls=tf.data.AUTOTUNE
)
# images now are (640, 640, 3), but they were resized from a 250x250 crop.
```
""" # noqa: E501
def __init__(
self,
target_size,
scale_factor,
crop_size=None,
bounding_box_format=None,
interpolation="bilinear",
seed=None,
**kwargs,
):
super().__init__(**kwargs)
if not isinstance(target_size, tuple) or len(target_size) != 2:
raise ValueError(
"JitteredResize() expects `target_size` to be a tuple of two "
f"integers. Received `target_size={target_size}`"
)
crop_size = crop_size or target_size
self.interpolation = preprocessing_utils.get_interpolation(
interpolation
)
self.scale_factor = preprocessing_utils.parse_factor(
scale_factor,
min_value=0.0,
max_value=None,
param_name="scale_factor",
seed=seed,
)
self.crop_size = crop_size
self.target_size = target_size
self.bounding_box_format = bounding_box_format
self.seed = seed
self.force_output_dense_images = True
def compute_ragged_image_signature(self, images):
ragged_spec = tf.RaggedTensorSpec(
shape=list(self.target_size) + [images.shape[-1]],
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
def get_random_transformation_batch(
self, batch_size, images=None, **kwargs
):
heights, widths = self._get_image_shape(images)
image_shapes = tf.cast(
tf.concat((heights, widths), axis=-1), dtype=tf.float32
)
scaled_sizes = tf.round(
image_shapes * self.scale_factor(shape=(batch_size, 1))
)
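        # Take the smaller of the per-axis resize ratios so both dimensions
        # are rescaled by a single factor, preserving the aspect ratio after
        # rounding.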
scales = tf.where(
tf.less(
scaled_sizes[..., 0] / image_shapes[..., 0],
scaled_sizes[..., 1] / image_shapes[..., 1],
),
scaled_sizes[..., 0] / image_shapes[..., 0],
scaled_sizes[..., 1] / image_shapes[..., 1],
)
scaled_sizes = tf.round(image_shapes * scales[..., tf.newaxis])
image_scales = scaled_sizes / image_shapes
max_offsets = scaled_sizes - self.crop_size
max_offsets = tf.where(
tf.less(max_offsets, 0), tf.zeros_like(max_offsets), max_offsets
)
offsets = max_offsets * self._random_generator.uniform(
shape=(batch_size, 2), minval=0, maxval=1, dtype=tf.float32
)
offsets = tf.cast(offsets, tf.int32)
return {
"image_scales": image_scales,
"scaled_sizes": scaled_sizes,
"offsets": offsets,
}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
scaled_sizes = transformation["scaled_sizes"]
offsets = transformation["offsets"]
transformation = {
"scaled_sizes": tf.expand_dims(scaled_sizes, axis=0),
"offsets": tf.expand_dims(offsets, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(
self, images, transformations, resize_method="bilinear", **kwargs
):
# unpackage augmentation arguments
scaled_sizes = transformations["scaled_sizes"]
offsets = transformations["offsets"]
inputs_for_resize_and_crop_single_image = {
"images": images,
"scaled_sizes": scaled_sizes,
"offsets": offsets,
}
scaled_images = tf.map_fn(
lambda x: self.resize_and_crop_single_image(
x, resize_method=resize_method
),
inputs_for_resize_and_crop_single_image,
fn_output_signature=tf.float32,
)
return tf.cast(scaled_images, self.compute_dtype)
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return self.augment_images(
segmentation_masks, transformations, resize_method="nearest"
)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformations, raw_images=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"Please provide a `bounding_box_format` when augmenting "
"bounding boxes with `JitteredResize()`."
)
if isinstance(bounding_boxes["boxes"], tf.RaggedTensor):
bounding_boxes = bounding_box.to_dense(bounding_boxes)
result = bounding_boxes.copy()
image_scales = tf.cast(
transformations["image_scales"], self.compute_dtype
)
offsets = tf.cast(transformations["offsets"], self.compute_dtype)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
images=raw_images,
source=self.bounding_box_format,
target="yxyx",
)
# Adjusts box coordinates based on image_scale and offset.
yxyx = bounding_boxes["boxes"]
yxyx *= tf.tile(image_scales, [1, 2])[..., tf.newaxis, :]
yxyx -= tf.tile(offsets, [1, 2])[..., tf.newaxis, :]
result["boxes"] = yxyx
result = bounding_box.clip_to_image(
result,
image_shape=self.target_size + (3,),
bounding_box_format="yxyx",
)
result = bounding_box.convert_format(
result,
image_shape=self.target_size + (3,),
source="yxyx",
target=self.bounding_box_format,
)
return result
def _get_image_shape(self, images):
if isinstance(images, tf.RaggedTensor):
heights = tf.reshape(images.row_lengths(), (-1, 1))
widths = tf.reshape(
tf.reduce_max(images.row_lengths(axis=2), 1), (-1, 1)
)
else:
batch_size = tf.shape(images)[0]
heights = tf.repeat(tf.shape(images)[H_AXIS], repeats=[batch_size])
heights = tf.reshape(heights, shape=(-1, 1))
widths = tf.repeat(tf.shape(images)[W_AXIS], repeats=[batch_size])
widths = tf.reshape(widths, shape=(-1, 1))
return tf.cast(heights, dtype=tf.int32), tf.cast(widths, dtype=tf.int32)
def resize_and_crop_single_image(self, inputs, resize_method="bilinear"):
image = inputs.get("images", None)
scaled_size = inputs.get("scaled_sizes", None)
offset = inputs.get("offsets", None)
scaled_image = tf.image.resize(
image, tf.cast(scaled_size, tf.int32), method=resize_method
)
scaled_image = scaled_image[
offset[0] : offset[0] + self.crop_size[0],
offset[1] : offset[1] + self.crop_size[1],
:,
]
scaled_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, self.target_size[0], self.target_size[1]
)
return scaled_image
def get_config(self):
config = super().get_config()
config.update(
{
"target_size": self.target_size,
"scale_factor": self.scale_factor,
"crop_size": self.crop_size,
"bounding_box_format": self.bounding_box_format,
"interpolation": self.interpolation,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
return cls(**config)
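# A scalar sketch (not part of the original file) of the bounding box
# adjustment in `augment_bounding_boxes`: coordinates are scaled by the
# per-image scale factor, then shifted by the crop offset.
def _box_adjustment_demo():
    import numpy as np

    yxyx = np.array([100.0, 50.0, 300.0, 250.0])
    image_scale = np.array([1.2, 1.2])  # (y, x) scale factors
    offset = np.array([40.0, 10.0])  # (y, x) crop offset, in scaled pixels
    adjusted = yxyx * np.tile(image_scale, 2) - np.tile(offset, 2)
    # adjusted == [80.0, 50.0, 320.0, 290.0]
    return adjusted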
| keras-cv/keras_cv/layers/preprocessing/jittered_resize.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/jittered_resize.py",
"repo_id": "keras-cv",
"token_count": 5232
} | 65 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
class AddOneToInputs(BaseImageAugmentationLayer):
"""Add 1 to all image values, for testing purposes."""
def __init__(self):
super(AddOneToInputs, self).__init__()
def augment_image(self, image, transformation=None, **kwargs):
return image + 1
class RandomAugmentationPipelineTest(TestCase):
@parameterized.named_parameters(("1", 1), ("3", 3), ("5", 5))
def test_calls_layers_augmentations_per_image_times(
self, augmentations_per_image
):
layer = AddOneToInputs()
pipeline = layers.RandomAugmentationPipeline(
layers=[layer],
augmentations_per_image=augmentations_per_image,
rate=1.0,
)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + augmentations_per_image, os)
def test_supports_empty_layers_argument(self):
pipeline = layers.RandomAugmentationPipeline(
layers=[], augmentations_per_image=1, rate=1.0
)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs, os)
@pytest.mark.tf_keras_only
def test_calls_layers_augmentations_in_graph(self):
layer = AddOneToInputs()
pipeline = layers.RandomAugmentationPipeline(
layers=[layer], augmentations_per_image=3, rate=1.0
)
@tf.function()
def call_pipeline(xs):
return pipeline(xs)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = call_pipeline(xs)
self.assertAllClose(xs + 3, os)
@parameterized.named_parameters(("1", 1), ("3", 3), ("5", 5))
def test_calls_layers_augmentations_per_image_times_single_image(
self, augmentations_per_image
):
layer = AddOneToInputs()
pipeline = layers.RandomAugmentationPipeline(
layers=[layer],
augmentations_per_image=augmentations_per_image,
rate=1.0,
)
xs = tf.random.uniform((5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + augmentations_per_image, os)
@parameterized.named_parameters(("1", 1), ("3", 3), ("5", 5))
def test_respects_rate(self, augmentations_per_image):
layer = AddOneToInputs()
pipeline = layers.RandomAugmentationPipeline(
layers=[layer],
augmentations_per_image=augmentations_per_image,
rate=0.0,
)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs, os)
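# A short usage sketch (not part of the original test file): with rate=1.0
# the pipeline applies a randomly chosen layer `augmentations_per_image`
# times per image, which is why the tests above expect
# `xs + augmentations_per_image`.
def _pipeline_demo():
    pipeline = layers.RandomAugmentationPipeline(
        layers=[AddOneToInputs()], augmentations_per_image=2, rate=1.0
    )
    xs = tf.zeros((1, 4, 4, 3))
    return pipeline(xs)  # every value becomes 2.0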
| keras-cv/keras_cv/layers/preprocessing/random_augmentation_pipeline_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_augmentation_pipeline_test.py",
"repo_id": "keras-cv",
"token_count": 1463
} | 66 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers as cv_layers
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.random_crop import RandomCrop
from keras_cv.tests.test_case import TestCase
class RandomCropTest(TestCase):
@parameterized.named_parameters(
("random_crop_4_by_6", 4, 6),
("random_crop_3_by_2", 3, 2),
("random_crop_full_height", 5, 2),
("random_crop_full_width", 3, 8),
)
def test_output_shape(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
input = tf.random.uniform(
shape=[num_samples, orig_height, orig_width, channels],
)
layer = RandomCrop(expected_height, expected_width)
actual_output = layer(input)
expected_output = tf.random.uniform(
shape=(
num_samples,
expected_height,
expected_width,
channels,
),
)
self.assertAllEqual(expected_output.shape, actual_output.shape)
def test_input_smaller_than_crop_box(self):
np.random.seed(1337)
height, width = 10, 8
inp = np.random.random((12, 3, 3, 3))
layer = RandomCrop(height, width)
actual_output = layer(inp)
        # In this case, the output should equal resizing with
        # crop_to_aspect_ratio=True.
resizing_layer = cv_layers.Resizing(height, width)
expected_output = resizing_layer(inp)
self.assertAllEqual(expected_output, actual_output)
@pytest.mark.skip(reason="need to update tests for keras 3")
def test_training_with_mock(self):
np.random.seed(1337)
batch_size = 12
height, width = 3, 4
height_offset = np.random.randint(low=0, high=3)
width_offset = np.random.randint(low=0, high=5)
# manually compute transformations which shift height_offset and
# width_offset respectively
tops = np.ones((batch_size, 1)) * (height_offset / (5 - height))
lefts = np.ones((batch_size, 1)) * (width_offset / (8 - width))
transformations = {"tops": tops, "lefts": lefts}
layer = RandomCrop(height, width)
with unittest.mock.patch.object(
layer,
"get_random_transformation_batch",
return_value=transformations,
):
inp = np.random.random((12, 5, 8, 3))
actual_output = layer(inp, training=True)
expected_output = inp[
:,
height_offset : (height_offset + height),
width_offset : (width_offset + width),
:,
]
self.assertAllClose(expected_output, actual_output)
def test_random_crop_full(self):
np.random.seed(1337)
height, width = 8, 16
inp = np.random.random((12, 8, 16, 3))
layer = RandomCrop(height, width)
actual_output = layer(inp, training=False)
self.assertAllClose(inp, actual_output)
def test_unbatched_image(self):
np.random.seed(1337)
inp = np.random.random((16, 16, 3))
# manually compute transformations which shift 2 pixels
mock_offset = np.ones(shape=(1, 1), dtype="float32") * 0.25
layer = RandomCrop(8, 8)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_offset,
):
actual_output = layer(inp, training=True)
self.assertAllClose(inp[2:10, 2:10, :], actual_output)
def test_batched_input(self):
np.random.seed(1337)
inp = np.random.random((20, 16, 16, 3))
# manually compute transformations which shift 2 pixels
mock_offset = np.ones(shape=(20, 1), dtype="float32") * 2 / (16 - 8)
layer = RandomCrop(8, 8)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_offset,
):
actual_output = layer(inp, training=True)
self.assertAllClose(inp[:, 2:10, 2:10, :], actual_output)
def test_compute_ragged_output_signature(self):
inputs = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
layer = RandomCrop(2, 2)
output = layer(inputs)
output_signature = layer.compute_ragged_image_signature(inputs).shape
self.assertAllEqual(output.shape[1:], output_signature)
def test_augment_bounding_boxes_crop(self):
orig_height, orig_width = 512, 512
height, width = 100, 200
input_image = np.random.random((orig_height, orig_width, 3)).astype(
np.float32
)
bboxes = {
"boxes": np.array([[200, 200, 400, 400]]),
"classes": np.array([1]),
}
input = {"images": input_image, "bounding_boxes": bboxes}
# for top = 300 and left = 305
height_offset = 300
width_offset = 305
tops = np.ones((1, 1)) * (height_offset / (orig_height - height))
lefts = np.ones((1, 1)) * (width_offset / (orig_width - width))
transformations = {"tops": tops, "lefts": lefts}
layer = RandomCrop(
height=height, width=width, bounding_box_format="xyxy"
)
with unittest.mock.patch.object(
layer,
"get_random_transformation_batch",
return_value=transformations,
):
output = layer(input)
expected_output = np.asarray(
[[0.0, 0.0, 95.0, 100.0]],
)
self.assertAllClose(expected_output, output["bounding_boxes"]["boxes"])
def test_augment_bounding_boxes_resize(self):
input_image = np.random.random((256, 256, 3)).astype(np.float32)
bboxes = {
"boxes": np.array([[100, 100, 200, 200]]),
"classes": np.array([1]),
}
input = {"images": input_image, "bounding_boxes": bboxes}
layer = RandomCrop(height=512, width=512, bounding_box_format="xyxy")
output = layer(input)
expected_output = np.asarray(
[[200.0, 200.0, 400.0, 400.0]],
)
self.assertAllClose(expected_output, output["bounding_boxes"]["boxes"])
@pytest.mark.tf_only
def test_in_tf_function(self):
np.random.seed(1337)
inp = np.random.random((20, 16, 16, 3))
mock_offset = np.ones(shape=(20, 1), dtype="float32") * 2 / (16 - 8)
layer = RandomCrop(8, 8)
@tf.function
def augment(x):
return layer(x, training=True)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=mock_offset,
):
actual_output = augment(inp)
self.assertAllClose(inp[:, 2:10, 2:10, :], actual_output)
def test_random_crop_on_batched_images_independently(self):
image = tf.random.uniform((100, 100, 3))
batched_images = tf.stack((image, image), axis=0)
layer = RandomCrop(height=25, width=25)
results = layer(batched_images)
self.assertNotAllClose(results[0], results[1])
def test_random_crop_on_batched_ragged_images_and_bounding_boxes(self):
images = tf.ragged.constant(
[np.ones((8, 8, 3)), np.ones((4, 8, 3))], dtype="float32"
)
boxes = {
"boxes": tf.ragged.stack(
[
np.ones((3, 4), dtype="float32"),
np.ones((3, 4), dtype="float32"),
],
),
"classes": tf.ragged.stack(
[
np.ones((3,), dtype="float32"),
np.ones((3,), dtype="float32"),
],
),
}
inputs = {"images": images, "bounding_boxes": boxes}
layer = RandomCrop(height=2, width=2, bounding_box_format="xyxy")
results = layer(inputs)
self.assertTrue(isinstance(results["images"], tf.Tensor))
self.assertTrue(
isinstance(results["bounding_boxes"]["boxes"], tf.RaggedTensor)
)
self.assertTrue(
isinstance(results["bounding_boxes"]["classes"], tf.RaggedTensor)
)
def test_config_with_custom_name(self):
layer = RandomCrop(5, 5, name="image_preproc")
config = layer.get_config()
layer_1 = RandomCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = RandomCrop(2, 2)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = RandomCrop(2, 2, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
def test_config(self):
layer = RandomCrop(height=2, width=3, bounding_box_format="xyxy")
config = layer.get_config()
self.assertEqual(config["height"], 2)
self.assertEqual(config["width"], 3)
self.assertEqual(config["bounding_box_format"], "xyxy")
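# A small sketch (not part of the original test file) of the offset
# convention mocked in the tests above: RandomCrop draws a fractional offset
# in [0, 1] that is scaled by (original_size - crop_size) to get a pixel
# start coordinate.
def _offset_demo(fraction=0.25, original=16, crop=8):
    start = int(fraction * (original - crop))  # 0.25 * (16 - 8) = 2
    return start, start + crop  # rows/cols [2, 10) are kept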
| keras-cv/keras_cv/layers/preprocessing/random_crop_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_crop_test.py",
"repo_id": "keras-cv",
"token_count": 4709
} | 67 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
class VectorizedRandomAddLayer(VectorizedBaseImageAugmentationLayer):
def __init__(self, add_range=(0.0, 1.0), fixed_value=None, **kwargs):
super().__init__(**kwargs)
self.add_range = add_range
self.fixed_value = fixed_value
def augment_ragged_image(self, image, transformation, **kwargs):
return image + transformation[None, None]
def get_random_transformation_batch(self, batch_size, **kwargs):
if self.fixed_value:
return tf.ones((batch_size,)) * self.fixed_value
return self._random_generator.uniform(
(batch_size,), minval=self.add_range[0], maxval=self.add_range[1]
)
def augment_images(self, images, transformations, **kwargs):
return images + transformations[:, None, None, None]
def augment_labels(self, labels, transformations, **kwargs):
return labels + transformations[:, None]
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return {
"boxes": bounding_boxes["boxes"] + transformations[:, None, None],
"classes": bounding_boxes["classes"] + transformations[:, None],
}
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints + transformations[:, None, None]
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks + transformations[:, None, None, None]
TF_ALL_TENSOR_TYPES = (tf.Tensor, tf.RaggedTensor, tf.SparseTensor)
class VectorizedAssertionLayer(VectorizedBaseImageAugmentationLayer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def augment_ragged_image(
self,
image,
label=None,
bounding_boxes=None,
keypoints=None,
segmentation_mask=None,
transformation=None,
**kwargs
):
assert isinstance(image, TF_ALL_TENSOR_TYPES)
assert isinstance(label, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(keypoints, TF_ALL_TENSOR_TYPES)
assert isinstance(segmentation_mask, TF_ALL_TENSOR_TYPES)
assert isinstance(transformation, TF_ALL_TENSOR_TYPES)
return image
def get_random_transformation_batch(
self,
batch_size,
images=None,
labels=None,
bounding_boxes=None,
keypoints=None,
segmentation_masks=None,
**kwargs
):
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(keypoints, TF_ALL_TENSOR_TYPES)
assert isinstance(segmentation_masks, TF_ALL_TENSOR_TYPES)
return self._random_generator.uniform((batch_size,))
def augment_images(
self,
images,
transformations=None,
bounding_boxes=None,
labels=None,
**kwargs
):
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
return images
def augment_labels(
self,
labels,
transformations=None,
bounding_boxes=None,
images=None,
raw_images=None,
**kwargs
):
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(raw_images, TF_ALL_TENSOR_TYPES)
return labels
def augment_bounding_boxes(
self,
bounding_boxes,
transformations=None,
labels=None,
images=None,
raw_images=None,
**kwargs
):
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(raw_images, TF_ALL_TENSOR_TYPES)
return bounding_boxes
def augment_keypoints(
self,
keypoints,
transformations=None,
labels=None,
bounding_boxes=None,
images=None,
raw_images=None,
**kwargs
):
assert isinstance(keypoints, TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(raw_images, TF_ALL_TENSOR_TYPES)
return keypoints
def augment_segmentation_masks(
self,
segmentation_masks,
transformations=None,
labels=None,
bounding_boxes=None,
images=None,
raw_images=None,
**kwargs
):
assert isinstance(segmentation_masks, TF_ALL_TENSOR_TYPES)
assert isinstance(transformations, TF_ALL_TENSOR_TYPES)
assert isinstance(labels, TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["boxes"], TF_ALL_TENSOR_TYPES)
assert isinstance(bounding_boxes["classes"], TF_ALL_TENSOR_TYPES)
assert isinstance(images, TF_ALL_TENSOR_TYPES)
assert isinstance(raw_images, TF_ALL_TENSOR_TYPES)
return segmentation_masks
class VectorizedBaseImageAugmentationLayerTest(TestCase):
def test_augment_single_image(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
output = add_layer(image)
self.assertAllClose(image + 2.0, output)
def test_augment_dict_return_type(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
output = add_layer({"images": image})
self.assertIsInstance(output, dict)
def test_augment_casts_dtypes(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
images = np.ones((2, 8, 8, 3), dtype="uint8")
output = add_layer(images)
self.assertAllClose(
np.ones((2, 8, 8, 3), dtype="float32") * 3.0, output
)
def test_augment_batch_images(self):
add_layer = VectorizedRandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
output = add_layer(images)
diff = ops.convert_to_numpy(output) - images
# Make sure the first image and second image get different augmentation
self.assertNotAllClose(diff[0], diff[1])
def test_augment_image_and_label(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
label = np.random.random(size=(1,)).astype("float32")
output = add_layer({"images": image, "targets": label})
expected_output = {"images": image + 2.0, "targets": label + 2.0}
self.assertAllClose(output, expected_output)
def test_augment_image_and_target(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype("float32")
label = np.random.random(size=(1,)).astype("float32")
output = add_layer({"images": image, "targets": label})
expected_output = {"images": image + 2.0, "targets": label + 2.0}
self.assertAllClose(output, expected_output)
def test_augment_batch_images_and_targets(self):
add_layer = VectorizedRandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
targets = np.random.random(size=(2, 1)).astype("float32")
output = add_layer({"images": images, "targets": targets})
image_diff = ops.convert_to_numpy(output["images"]) - images
label_diff = ops.convert_to_numpy(output["targets"]) - targets
# Make sure the first image and second image get different augmentation
self.assertNotAllClose(image_diff[0], image_diff[1])
self.assertNotAllClose(label_diff[0], label_diff[1])
def test_augment_leaves_extra_dict_entries_unmodified(self):
add_layer = VectorizedRandomAddLayer(fixed_value=0.5)
images = np.random.random(size=(8, 8, 3)).astype("float32")
timestamps = np.array(123123123)
inputs = {"images": images, "timestamps": timestamps}
output = add_layer(inputs)
self.assertAllEqual(output["timestamps"], timestamps)
def test_augment_ragged_images(self):
images = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
add_layer = VectorizedRandomAddLayer(fixed_value=0.5)
result = add_layer(images)
self.assertAllClose(images + 0.5, result)
def test_augment_image_and_localization_data(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
images = np.random.random(size=(8, 8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(8, 3, 4)).astype("float32"),
"classes": np.random.random(size=(8, 3)).astype("float32"),
}
keypoints = np.random.random(size=(8, 5, 2)).astype("float32")
segmentation_mask = np.random.random(size=(8, 8, 8, 1)).astype(
"float32"
)
output = add_layer(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_mask,
}
)
expected_output = {
"images": images + 2.0,
"bounding_boxes": bounding_box.to_dense(
{
"boxes": bounding_boxes["boxes"] + 2.0,
"classes": bounding_boxes["classes"] + 2.0,
}
),
"keypoints": keypoints + 2.0,
"segmentation_masks": segmentation_mask + 2.0,
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(output["images"], expected_output["images"])
self.assertAllClose(output["keypoints"], expected_output["keypoints"])
self.assertAllClose(
output["bounding_boxes"]["boxes"],
expected_output["bounding_boxes"]["boxes"],
)
self.assertAllClose(
output["bounding_boxes"]["classes"],
expected_output["bounding_boxes"]["classes"],
)
self.assertAllClose(
output["segmentation_masks"], expected_output["segmentation_masks"]
)
def test_augment_batch_image_and_localization_data(self):
add_layer = VectorizedRandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
output = add_layer(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
        # The test finishes here for the non-TensorFlow backends.
if (
getattr(keras.config, "backend", lambda: "tensorflow")()
!= "tensorflow"
):
return
@tf.function
def in_tf_function(inputs):
return add_layer(inputs)
output = in_tf_function(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
@pytest.mark.tf_only
def test_augment_all_data_in_tf_function(self):
add_layer = VectorizedRandomAddLayer()
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
@tf.function
def in_tf_function(inputs):
return add_layer(inputs)
output = in_tf_function(
{
"images": images,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
bounding_boxes_diff = (
output["bounding_boxes"]["boxes"] - bounding_boxes["boxes"]
)
keypoints_diff = output["keypoints"] - keypoints
segmentation_mask_diff = (
output["segmentation_masks"] - segmentation_masks
)
self.assertNotAllClose(bounding_boxes_diff[0], bounding_boxes_diff[1])
self.assertNotAllClose(keypoints_diff[0], keypoints_diff[1])
self.assertNotAllClose(
segmentation_mask_diff[0], segmentation_mask_diff[1]
)
def test_augment_unbatched_all_data(self):
add_layer = VectorizedRandomAddLayer(fixed_value=2.0)
images = np.random.random(size=(8, 8, 3)).astype("float32")
bounding_boxes = {
"boxes": np.random.random(size=(3, 4)).astype("float32"),
"classes": np.random.random(size=(3)).astype("float32"),
}
keypoints = np.random.random(size=(5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(8, 8, 1)).astype("float32")
        inputs = {
            "images": images,
            "bounding_boxes": bounding_boxes,
            "keypoints": keypoints,
            "segmentation_masks": segmentation_masks,
        }
        output = add_layer(inputs, training=True)
self.assertAllClose(output["images"], images + 2.0)
self.assertAllClose(output["keypoints"], keypoints + 2.0)
self.assertAllClose(
output["bounding_boxes"]["boxes"],
np.squeeze(bounding_boxes["boxes"]) + 2.0,
)
self.assertAllClose(
output["bounding_boxes"]["classes"],
np.squeeze(bounding_boxes["classes"]) + 2.0,
)
self.assertAllClose(
output["segmentation_masks"], segmentation_masks + 2.0
)
def test_augment_all_data_for_assertion(self):
images = np.random.random(size=(2, 8, 8, 3)).astype("float32")
labels = np.squeeze(np.eye(10)[np.array([0, 1]).reshape(-1)])
bounding_boxes = {
"boxes": np.random.random(size=(2, 3, 4)).astype("float32"),
"classes": np.random.random(size=(2, 3)).astype("float32"),
}
keypoints = np.random.random(size=(2, 5, 2)).astype("float32")
segmentation_masks = np.random.random(size=(2, 8, 8, 1)).astype(
"float32"
)
assertion_layer = VectorizedAssertionLayer()
_ = assertion_layer(
{
"images": images,
"labels": labels,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
        # Assertions happen inside VectorizedAssertionLayer's methods.
def test_augment_all_data_with_ragged_images_for_assertion(self):
images = tf.ragged.stack(
[
tf.random.uniform(shape=(8, 8, 3)),
tf.random.uniform(shape=(16, 8, 3)),
]
)
labels = tf.constant(
np.squeeze(np.eye(10)[np.array([0, 1]).reshape(-1)])
)
bounding_boxes = {
"boxes": tf.random.uniform(shape=(2, 3, 4)),
"classes": tf.random.uniform(shape=(2, 3)),
}
keypoints = tf.random.uniform(shape=(2, 5, 2))
segmentation_masks = tf.random.uniform(shape=(2, 8, 8, 1))
assertion_layer = VectorizedAssertionLayer()
_ = assertion_layer(
{
"images": images,
"labels": labels,
"bounding_boxes": bounding_boxes,
"keypoints": keypoints,
"segmentation_masks": segmentation_masks,
}
)
        # Assertions happen inside VectorizedAssertionLayer's methods.
def test_converts_ragged_to_dense_images(self):
images = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
add_layer = VectorizedRandomAddLayer(fixed_value=0.5)
add_layer.force_output_dense_images = True
result = add_layer(images)
self.assertTrue(isinstance(result, tf.Tensor))
def test_converts_ragged_to_dense_segmentation_masks(self):
images = tf.ragged.stack(
[
np.random.random(size=(8, 8, 3)).astype("float32"),
np.random.random(size=(16, 8, 3)).astype("float32"),
]
)
segmentation_masks = tf.ragged.stack(
[
np.random.randint(0, 10, size=(8, 8, 1)).astype("float32"),
np.random.randint(0, 10, size=(16, 8, 1)).astype("float32"),
]
)
add_layer = VectorizedRandomAddLayer(fixed_value=0.5)
add_layer.force_output_dense_segmentation_masks = True
result = add_layer(
{"images": images, "segmentation_masks": segmentation_masks}
)
self.assertTrue(isinstance(result["segmentation_masks"], tf.Tensor))
def test_in_tf_data_pipeline(self):
images = np.random.randn(4, 100, 100, 3).astype("float32")
train_ds = tf.data.Dataset.from_tensor_slices(images)
train_ds = train_ds.map(lambda x: {"images": x})
train_ds = train_ds.map(
VectorizedRandomAddLayer(fixed_value=2.0)
).batch(4)
for output in train_ds.take(1):
pass
self.assertTrue(isinstance(output["images"], tf.Tensor))
self.assertAllClose(output["images"], images + 2.0)
| keras-cv/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer_test.py",
"repo_id": "keras-cv",
"token_count": 9932
} | 68 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import wrap_angle_radians
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GlobalRandomFlip")
class GlobalRandomFlip(base_augmentation_layer_3d.BaseAugmentationLayer3D):
"""A preprocessing layer which flips point clouds and bounding boxes with
respect to the specified axis during training.
This layer will flip the whole scene with respect to the specified axes.
Note that this layer currently only supports flipping over the Y axis.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
Args:
flip_x: whether to flip over the X axis, defaults to False.
flip_y: whether to flip over the Y axis, defaults to True.
flip_z: whether to flip over the Z axis, defaults to False.
"""
def __init__(self, flip_x=False, flip_y=True, flip_z=False, **kwargs):
if flip_x or flip_z:
raise ValueError(
"GlobalRandomFlip currently only supports flipping over the Y "
f"axis. Received flip_x={flip_x}, flip_y={flip_y}, "
f"flip_z={flip_z}."
)
if not (flip_x or flip_y or flip_z):
raise ValueError("GlobalRandomFlip must flip over at least 1 axis.")
self.flip_x = flip_x
self.flip_y = flip_y
self.flip_z = flip_z
super().__init__(**kwargs)
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
point_clouds_y = -point_clouds[..., 1:2]
point_clouds = tf.concat(
[point_clouds[..., 0:1], point_clouds_y, point_clouds[..., 2:]],
axis=-1,
)
# Flip boxes.
bounding_boxes_y = -bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.Y : CENTER_XYZ_DXDYDZ_PHI.Y + 1
]
bounding_boxes_xyz = tf.concat(
[
bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.X : CENTER_XYZ_DXDYDZ_PHI.X + 1
],
bounding_boxes_y,
bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.Z : CENTER_XYZ_DXDYDZ_PHI.Z + 1
],
],
axis=-1,
)
# Compensate rotation.
bounding_boxes_heading = wrap_angle_radians(
-bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.PHI : CENTER_XYZ_DXDYDZ_PHI.PHI + 1
]
)
bounding_boxes = tf.concat(
[
bounding_boxes_xyz,
bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.DX : CENTER_XYZ_DXDYDZ_PHI.DZ + 1
],
bounding_boxes_heading,
bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.CLASS :],
],
axis=-1,
)
return (point_clouds, bounding_boxes)
def get_config(self):
return {
"flip_x": self.flip_x,
"flip_y": self.flip_y,
"flip_z": self.flip_z,
}
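# A minimal usage sketch (assumed shapes and call signature; illustrative
# only, not part of the original module):
#
#     layer = GlobalRandomFlip()
#     outputs = layer(
#         {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes},
#         training=True,
#     )
#
# After the flip, the y coordinates of points and box centers are negated,
# and each box heading phi is replaced with wrap_angle_radians(-phi), so the
# flipped boxes stay consistent with the flipped points.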
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_flip.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_flip.py",
"repo_id": "keras-cv",
"token_count": 1981
} | 69 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import ops
from keras_cv.layers.vit_det_layers import AddPositionalEmbedding
from keras_cv.layers.vit_det_layers import MultiHeadAttentionWithRelativePE
from keras_cv.layers.vit_det_layers import ViTDetPatchingAndEmbedding
from keras_cv.layers.vit_det_layers import WindowedTransformerEncoder
from keras_cv.tests.test_case import TestCase
class TestViTDetLayers(TestCase):
def test_multi_head_attention_with_relative_pe(self):
attention_with_rel_pe = MultiHeadAttentionWithRelativePE(
num_heads=16,
key_dim=1280 // 16,
use_bias=True,
input_size=(64, 64),
)
x = np.ones(shape=(1, 64, 64, 1280))
x_out = ops.convert_to_numpy(attention_with_rel_pe(x))
self.assertEqual(x_out.shape, (1, 64, 64, 1280))
def test_windowed_transformer_encoder(self):
windowed_transformer_encoder = WindowedTransformerEncoder(
project_dim=1280,
mlp_dim=1280 * 4,
num_heads=16,
use_bias=True,
use_rel_pos=True,
window_size=14,
input_size=(64, 64),
)
x = np.ones((1, 64, 64, 1280))
x_out = ops.convert_to_numpy(windowed_transformer_encoder(x))
self.assertEqual(x_out.shape, (1, 64, 64, 1280))
self.assertAllClose(x_out, np.ones_like(x_out))
def test_vit_patching_and_embedding(self):
vit_patching_and_embedding = ViTDetPatchingAndEmbedding()
x = np.ones((1, 1024, 1024, 3))
x_out = vit_patching_and_embedding(x)
self.assertEqual(x_out.shape, (1, 64, 64, 768))
def test_add_positional_embedding(self):
add_positional_embedding = AddPositionalEmbedding(
img_size=1024, patch_size=16, embed_dim=256
)
x = np.ones((1, 64, 64, 256))
x_out = add_positional_embedding(x)
self.assertEqual(x_out.shape, (1, 64, 64, 256))
| keras-cv/keras_cv/layers/vit_det_layers_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/vit_det_layers_test.py",
"repo_id": "keras-cv",
"token_count": 1093
} | 70 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.losses import BinaryPenaltyReducedFocalCrossEntropy
from keras_cv.tests.test_case import TestCase
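# For reference, the penalty-reduced focal loss exercised below is assumed to
# follow the CenterNet formulation (inferred from the expected values in
# these tests, not stated in this file):
#
#     L = -(1 - p)^alpha * log(p)                 if y == 1
#     L = -(1 - y)^beta * p^alpha * log(1 - p)    otherwise
#
# with alpha=2 and beta=4 as the assumed defaults.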
class BinaryPenaltyReducedFocalLossTest(TestCase):
def test_output_shape(self):
        y_true = np.random.uniform(size=[2, 5], low=0, high=2)
y_pred = np.random.uniform(size=[2, 5], low=0, high=1)
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
self.assertAllEqual(focal_loss(y_true, y_pred).shape, [])
def test_output_shape_reduction_none(self):
y_true = np.random.uniform(size=[2, 5], low=0, high=2)
y_pred = np.random.uniform(size=[2, 5], low=0, high=2)
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="none")
self.assertAllEqual(
[2, 5],
focal_loss(y_true, y_pred).shape,
)
def test_output_with_pos_label_pred(self):
y_true = np.array([1.0])
y_pred = np.array([1.0])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
self.assertAllClose(0.0, focal_loss(y_true, y_pred))
def test_output_with_pos_label_neg_pred(self):
y_true = np.array([1.0])
y_pred = np.array([np.exp(-1.0)])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
        # -(1 - 1/e)^2 * log(1/e) = (1 - 1/e)^2
self.assertAllClose(
np.square(1 - np.exp(-1.0)), focal_loss(y_true, y_pred)
)
def test_output_with_neg_label_pred(self):
y_true = np.array([0.0])
y_pred = np.array([0.0])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
self.assertAllClose(0.0, focal_loss(y_true, y_pred))
def test_output_with_neg_label_pos_pred(self):
y_true = np.array([0.0])
y_pred = np.array([1.0 - np.exp(-1.0)])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
        # -(1 - 0)^4 * (1 - 1/e)^2 * log(1/e) = (1 - 1/e)^2
self.assertAllClose(
np.square(1 - np.exp(-1.0)), focal_loss(y_true, y_pred)
)
def test_output_with_weak_label_pos_pred(self):
y_true = np.array([0.5])
y_pred = np.array([1.0 - np.exp(-1.0)])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(
beta=2.0, reduction="sum"
)
        # -(1 - 0.5)^2 * (1 - 1/e)^2 * log(1/e) = 0.25 * (1 - 1/e)^2
self.assertAllClose(
0.25 * np.square(1 - np.exp(-1.0)), focal_loss(y_true, y_pred)
)
def test_output_with_sample_weight(self):
y_true = np.array([0.0])
y_pred = np.array([1.0 - np.exp(-1.0)])
sample_weight = np.array([0.5])
focal_loss = BinaryPenaltyReducedFocalCrossEntropy(reduction="sum")
        # 0.5 * -(1 - 0)^4 * (1 - 1/e)^2 * log(1/e) = 0.5 * (1 - 1/e)^2
self.assertAllClose(
0.5 * np.square(1 - np.exp(-1.0)),
focal_loss(y_true, y_pred, sample_weight=sample_weight),
)
| keras-cv/keras_cv/losses/penalty_reduced_focal_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/penalty_reduced_focal_loss_test.py",
"repo_id": "keras-cv",
"token_count": 1593
} | 71 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All Backbone presets"""
from keras_cv.models.backbones.csp_darknet import csp_darknet_backbone_presets
from keras_cv.models.backbones.densenet import densenet_backbone_presets
from keras_cv.models.backbones.efficientnet_lite import (
efficientnet_lite_backbone_presets,
)
from keras_cv.models.backbones.efficientnet_v1 import (
efficientnet_v1_backbone_presets,
)
from keras_cv.models.backbones.efficientnet_v2 import (
efficientnet_v2_backbone_presets,
)
from keras_cv.models.backbones.mobilenet_v3 import mobilenet_v3_backbone_presets
from keras_cv.models.backbones.resnet_v1 import resnet_v1_backbone_presets
from keras_cv.models.backbones.resnet_v2 import resnet_v2_backbone_presets
from keras_cv.models.backbones.vit_det import vit_det_backbone_presets
from keras_cv.models.object_detection.yolo_v8 import yolo_v8_backbone_presets
backbone_presets_no_weights = {
**resnet_v1_backbone_presets.backbone_presets_no_weights,
**resnet_v2_backbone_presets.backbone_presets_no_weights,
**mobilenet_v3_backbone_presets.backbone_presets_no_weights,
**csp_darknet_backbone_presets.backbone_presets_no_weights,
**efficientnet_v1_backbone_presets.backbone_presets_no_weights,
**efficientnet_v2_backbone_presets.backbone_presets_no_weights,
**densenet_backbone_presets.backbone_presets_no_weights,
**efficientnet_lite_backbone_presets.backbone_presets_no_weights,
**yolo_v8_backbone_presets.backbone_presets_no_weights,
**vit_det_backbone_presets.backbone_presets_no_weights,
}
backbone_presets_with_weights = {
**resnet_v1_backbone_presets.backbone_presets_with_weights,
**resnet_v2_backbone_presets.backbone_presets_with_weights,
**mobilenet_v3_backbone_presets.backbone_presets_with_weights,
**csp_darknet_backbone_presets.backbone_presets_with_weights,
**efficientnet_v1_backbone_presets.backbone_presets_with_weights,
**efficientnet_v2_backbone_presets.backbone_presets_with_weights,
**densenet_backbone_presets.backbone_presets_with_weights,
**efficientnet_lite_backbone_presets.backbone_presets_with_weights,
**yolo_v8_backbone_presets.backbone_presets_with_weights,
**vit_det_backbone_presets.backbone_presets_with_weights,
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
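# Illustrative lookup sketch (assumed usage; `from_preset` lives on the
# backbone classes, not in this module):
#
#     name = "resnet50_imagenet"
#     load_weights = name in backbone_presets_with_weights
#     # e.g. ResNetBackbone.from_preset(name, load_weights=load_weights)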
| keras-cv/keras_cv/models/backbones/backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1078
} | 72 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet Lite backbone model.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
- [Based on the original EfficientNet Lite's](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
""" # noqa: E501
import copy
import math
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
BN_AXIS = 3
@keras.saving.register_keras_serializable(package="keras_cv.models")
class EfficientNetLiteBackbone(Backbone):
"""Instantiates the EfficientNetLite architecture using given scaling
coefficients.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
- [Based on the original EfficientNet Lite's](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
Args:
include_rescaling: whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(1/255.0)` layer.
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections. The
default value is set to 0.2.
depth_divisor: integer, a unit of network width. The default value
is set to 8.
activation: activation function.
        input_shape: optional shape tuple. It should have exactly 3 input
            channels.
input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`)
to use as image input for the model.
Usage:
```python
# Construct an EfficientNetLite from a preset:
efficientnet = models.EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b0"
)
images = np.ones((1, 256, 256, 3))
outputs = efficientnet.predict(images)
# Alternatively, you can also customize the EfficientNetLite architecture:
model = EfficientNetLiteBackbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
images = np.ones((1, 256, 256, 3))
outputs = model.predict(images)
```
""" # noqa: E501
def __init__(
self,
*,
include_rescaling,
width_coefficient,
depth_coefficient,
stackwise_kernel_sizes,
stackwise_num_repeats,
stackwise_input_filters,
stackwise_output_filters,
stackwise_expansion_ratios,
stackwise_strides,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
input_shape=(None, None, 3),
input_tensor=None,
activation="relu6",
**kwargs,
):
img_input = utils.parse_model_inputs(input_shape, input_tensor)
# Build stem
x = img_input
if include_rescaling:
# Use common rescaling strategy across keras_cv
x = keras.layers.Rescaling(1.0 / 255.0)(x)
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, 3), name="stem_conv_pad"
)(x)
x = keras.layers.Conv2D(
32,
3,
strides=2,
padding="valid",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name="stem_conv",
)(x)
x = keras.layers.BatchNormalization(axis=BN_AXIS, name="stem_bn")(x)
x = keras.layers.Activation(activation, name="stem_activation")(x)
# Build blocks
block_id = 0
blocks = float(sum(stackwise_num_repeats))
pyramid_level_inputs = []
for i in range(len(stackwise_kernel_sizes)):
num_repeats = stackwise_num_repeats[i]
input_filters = stackwise_input_filters[i]
output_filters = stackwise_output_filters[i]
# Update block input and output filters based on depth multiplier.
input_filters = round_filters(
filters=input_filters,
width_coefficient=width_coefficient,
depth_divisor=depth_divisor,
)
output_filters = round_filters(
filters=output_filters,
width_coefficient=width_coefficient,
depth_divisor=depth_divisor,
)
if i == 0 or i == (len(stackwise_kernel_sizes) - 1):
repeats = num_repeats
else:
repeats = round_repeats(
repeats=num_repeats,
depth_coefficient=depth_coefficient,
)
strides = stackwise_strides[i]
for j in range(repeats):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
strides = 1
input_filters = output_filters
if strides != 1:
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
                # 97 is the ASCII code for lowercase "a".
letter_identifier = chr(j + 97)
x = apply_efficient_net_lite_block(
inputs=x,
filters_in=input_filters,
filters_out=output_filters,
kernel_size=stackwise_kernel_sizes[i],
strides=strides,
expand_ratio=stackwise_expansion_ratios[i],
activation=activation,
dropout_rate=drop_connect_rate * block_id / blocks,
name="block{}{}_".format(i + 1, letter_identifier),
)
block_id += 1
# Build top
x = keras.layers.Conv2D(
1280,
1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name="top_conv",
)(x)
x = keras.layers.BatchNormalization(axis=BN_AXIS, name="top_bn")(x)
x = keras.layers.Activation(activation, name="top_activation")(x)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# Create model.
super().__init__(inputs=img_input, outputs=x, **kwargs)
self.include_rescaling = include_rescaling
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.depth_divisor = depth_divisor
self.activation = activation
self.input_tensor = input_tensor
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
self.stackwise_kernel_sizes = stackwise_kernel_sizes
self.stackwise_num_repeats = stackwise_num_repeats
self.stackwise_input_filters = stackwise_input_filters
self.stackwise_output_filters = stackwise_output_filters
self.stackwise_expansion_ratios = stackwise_expansion_ratios
self.stackwise_strides = stackwise_strides
def get_config(self):
config = super().get_config()
config.update(
{
"include_rescaling": self.include_rescaling,
"width_coefficient": self.width_coefficient,
"depth_coefficient": self.depth_coefficient,
"dropout_rate": self.dropout_rate,
"drop_connect_rate": self.drop_connect_rate,
"depth_divisor": self.depth_divisor,
"activation": self.activation,
"input_tensor": self.input_tensor,
"input_shape": self.input_shape[1:],
"stackwise_kernel_sizes": self.stackwise_kernel_sizes,
"stackwise_num_repeats": self.stackwise_num_repeats,
"stackwise_input_filters": self.stackwise_input_filters,
"stackwise_output_filters": self.stackwise_output_filters,
"stackwise_expansion_ratios": self.stackwise_expansion_ratios,
"stackwise_strides": self.stackwise_strides,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
def conv_kernel_initializer(scale=2.0):
return keras.initializers.VarianceScaling(
scale=scale, mode="fan_out", distribution="truncated_normal"
)
def round_filters(filters, depth_divisor, width_coefficient):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(
depth_divisor,
int(filters + depth_divisor / 2) // depth_divisor * depth_divisor,
)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += depth_divisor
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
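# Worked example (illustrative; values assumed): with depth_divisor=8 and
# width_coefficient=1.4, 32 filters scale to 44.8 and round to the nearest
# multiple of 8, giving 48; since 48 >= 0.9 * 44.8 no correction is applied.
#
#     round_filters(32, depth_divisor=8, width_coefficient=1.4)  # -> 48
#     round_repeats(4, depth_coefficient=1.8)  # -> ceil(7.2) = 8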
def apply_efficient_net_lite_block(
inputs,
activation="relu6",
dropout_rate=0.0,
name=None,
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
expand_ratio=1,
):
"""An inverted residual block, without SE phase.
Args:
inputs: input tensor.
activation: activation function.
dropout_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
Returns:
output tensor for the block.
""" # noqa: E501
if name is None:
name = f"block_{keras.backend.get_uid('block_')}_"
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = keras.layers.Conv2D(
filters,
1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "expand_conv",
)(inputs)
x = keras.layers.BatchNormalization(
axis=BN_AXIS, name=name + "expand_bn"
)(x)
x = keras.layers.Activation(
activation, name=name + "expand_activation"
)(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, kernel_size),
name=name + "dwconv_pad",
)(x)
conv_pad = "valid"
else:
conv_pad = "same"
x = keras.layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=conv_kernel_initializer(),
name=name + "dwconv",
)(x)
x = keras.layers.BatchNormalization(axis=BN_AXIS, name=name + "bn")(x)
x = keras.layers.Activation(activation, name=name + "activation")(x)
# Output phase
x = keras.layers.Conv2D(
filters_out,
1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "project_conv",
)(x)
x = keras.layers.BatchNormalization(axis=BN_AXIS, name=name + "project_bn")(
x
)
if strides == 1 and filters_in == filters_out:
if dropout_rate > 0:
x = keras.layers.Dropout(
dropout_rate, noise_shape=(None, 1, 1, 1), name=name + "drop"
)(x)
x = keras.layers.Add(name=name + "add")([x, inputs])
return x
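# A minimal functional-API sketch (assumed shapes; illustrative only). With
# strides=1 and filters_in == filters_out the block is residual; otherwise
# the output is just the projected tensor:
#
#     inputs = keras.layers.Input((64, 64, 16))
#     outputs = apply_efficient_net_lite_block(
#         inputs, filters_in=16, filters_out=16, kernel_size=3, strides=1
#     )  # -> shape (None, 64, 64, 16), with a residual Add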
| keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_backbone.py",
"repo_id": "keras-cv",
"token_count": 6056
} | 73 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import numpy as np
import pytest
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet50Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class ResNetPresetSmokeTest(TestCase):
"""
A smoke test for ResNet presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets_test.py --run_large` # noqa: E501
"""
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_backbone_output(self):
model = ResNetBackbone.from_preset("resnet50")
model(self.input_batch)
def test_backbone_output_with_weights(self):
model = ResNetBackbone.from_preset("resnet50_imagenet")
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = model(np.ones(shape=(1, 512, 512, 3)))
expected = [0.0, 0.0, 0.0, 0.05175382, 0.0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs[0, 0, 0, :5]),
expected,
atol=0.01,
rtol=0.01,
)
def test_applications_model_output(self):
model = ResNet50Backbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = ResNet50Backbone.from_preset("resnet50_imagenet")
model(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in ResNetBackbone.presets:
self.assertRegex(ResNetBackbone.from_preset.__doc__, name)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
ResNetBackbone.from_preset("resnet50_clowntown")
def test_load_weights_error(self):
# Try to load weights when none available
with self.assertRaises(ValueError):
ResNetBackbone.from_preset("resnet50", load_weights=True)
@pytest.mark.extra_large
class ResNetPresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This tests every preset for ResNet and is only run manually.
Run with:
`pytest keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets_test.py --run_extra_large` # noqa: E501
"""
def test_load_resnet(self):
input_data = np.ones(shape=(2, 224, 224, 3))
for preset in ResNetBackbone.presets:
model = ResNetBackbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1412
} | 74 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConvMixer models for Keras.
References:
- [Patches Are All You Need?](https://arxiv.org/abs/2201.09792)
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.models.legacy import utils
from keras_cv.models.legacy.weights import parse_weights
MODEL_CONFIGS = {
"ConvMixer_1536_20": {
"dim": 1536,
"depth": 20,
"patch_size": 7,
"kernel_size": 9,
},
"ConvMixer_1536_24": {
"dim": 1536,
"depth": 24,
"patch_size": 14,
"kernel_size": 9,
},
"ConvMixer_768_32": {
"dim": 768,
"depth": 32,
"patch_size": 7,
"kernel_size": 7,
},
"ConvMixer_1024_16": {
"dim": 1024,
"depth": 16,
"patch_size": 7,
"kernel_size": 9,
},
"ConvMixer_512_16": {
"dim": 512,
"depth": 16,
"patch_size": 7,
"kernel_size": 8,
},
}
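# Naming convention: ConvMixer_{dim}_{depth}, mirroring the ConvMixer-h/d
# notation from the paper (e.g. ConvMixer-1536/20 has width 1536, depth 20).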
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [Patches Are All You Need?](https://arxiv.org/abs/2201.09792)
This class represents a Keras {name} model.
For transfer learning use cases, make sure to read the [guide to transfer
learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
    include_top: bool, whether to include the fully-connected layer at the
        top of the network. If set to True, `num_classes` must be provided.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights (e.g.
'imagenet/classification')(see available pre-trained weights in
weights.py)
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
name: string, optional name to pass to the model, defaults to "{name}".
Returns:
A `keras.Model` instance.
"""
def apply_conv_mixer_layer(x, dim, kernel_size):
"""ConvMixerLayer module.
Args:
x: input tensor.
dim: integer, filters of the layer in a block.
kernel_size: integer, kernel size of the Conv2D layers.
Returns:
the updated input tensor.
"""
residual = x
x = layers.DepthwiseConv2D(kernel_size=kernel_size, padding="same")(x)
x = tf.nn.gelu(x)
x = layers.BatchNormalization()(x)
x = layers.Add()([x, residual])
x = layers.Conv2D(dim, kernel_size=1)(x)
x = tf.nn.gelu(x)
x = layers.BatchNormalization()(x)
return x
def apply_patch_embed(x, dim, patch_size):
"""Implementation for Extracting Patch Embeddings.
Args:
x: input tensor.
dim: integer, filters of the layer in a block.
patch_size: integer, Size of patches.
Returns:
the updated input tensor.
"""
x = layers.Conv2D(filters=dim, kernel_size=patch_size, strides=patch_size)(
x
)
x = tf.nn.gelu(x)
x = layers.BatchNormalization()(x)
return x
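# Shape sketch (illustrative): a 224x224x3 input with patch_size=7 and
# dim=768 yields a (224 / 7) x (224 / 7) x 768 = 32x32x768 feature map after
# `apply_patch_embed`; every subsequent `apply_conv_mixer_layer` preserves
# that shape, since the depthwise conv uses "same" padding and the pointwise
# conv is 1x1.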
@keras.utils.register_keras_serializable(package="keras_cv.models")
class ConvMixer(keras.Model):
"""Instantiates the ConvMixer architecture.
Args:
dim: integer, number of filters.
depth: integer, number of ConvMixer Layer.
patch_size: integer, size of the patches.
kernel_size: integer, kernel size for Conv2D layers.
include_top: bool, whether to include the fully-connected layer at the
top of the network.
include_rescaling: bool, whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(1/255.0)` layer.
name: string, optional name to pass to the model, defaults to
"ConvMixer".
weights: one of `None` (random initialization) or the path to the
weights file to be loaded.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional layer.
- `avg` means that global average pooling will be applied to the
output of the last convolutional layer, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
**kwargs: Pass-through keyword arguments to `keras.Model`.
Returns:
A `keras.Model` instance.
"""
def __init__(
self,
dim,
depth,
patch_size,
kernel_size,
include_top,
include_rescaling,
name="ConvMixer",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either `None` or the path to "
"the weights file to be loaded. Weights file not found at "
f"location: {weights}"
)
        if include_top and not num_classes:
            raise ValueError(
                "If `include_top` is True, you should specify `num_classes`. "
                f"Received: num_classes={num_classes}"
            )
        if include_top and pooling:
            raise ValueError(
                "`pooling` must be `None` when `include_top=True`. "
                f"Received pooling={pooling} and include_top={include_top}."
            )
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1 / 255.0)(x)
x = apply_patch_embed(x, dim, patch_size)
for _ in range(depth):
x = apply_conv_mixer_layer(x, dim, kernel_size)
if include_top:
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
x = layers.Dense(
num_classes,
activation=classifier_activation,
name="predictions",
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
super().__init__(inputs=inputs, outputs=x, name=name, **kwargs)
if weights is not None:
self.load_weights(weights)
self.dim = dim
self.depth = depth
self.patch_size = patch_size
self.kernel_size = kernel_size
self.include_top = include_top
self.include_rescaling = include_rescaling
self.input_tensor = input_tensor
self.pooling = pooling
self.num_classes = num_classes
self.classifier_activation = classifier_activation
def get_config(self):
return {
"dim": self.dim,
"depth": self.depth,
"patch_size": self.patch_size,
"kernel_size": self.kernel_size,
"include_top": self.include_top,
"include_rescaling": self.include_rescaling,
"name": self.name,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"num_classes": self.num_classes,
"classifier_activation": self.classifier_activation,
"trainable": self.trainable,
}
def ConvMixer_1536_20(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_1536_20",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_1536_20"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_1536_20"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_1536_20"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_1536_20"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def ConvMixer_1536_24(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_1536_24",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_1536_24"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_1536_24"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_1536_24"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_1536_24"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def ConvMixer_768_32(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_768_32",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_768_32"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_768_32"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_768_32"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_768_32"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def ConvMixer_1024_16(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_1024_16",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_1024_16"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_1024_16"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_1024_16"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_1024_16"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
def ConvMixer_512_16(
include_rescaling,
include_top,
num_classes=None,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
classifier_activation="softmax",
name="ConvMixer_512_16",
**kwargs,
):
return ConvMixer(
dim=MODEL_CONFIGS["ConvMixer_512_16"]["dim"],
depth=MODEL_CONFIGS["ConvMixer_512_16"]["depth"],
patch_size=MODEL_CONFIGS["ConvMixer_512_16"]["patch_size"],
kernel_size=MODEL_CONFIGS["ConvMixer_512_16"]["kernel_size"],
include_rescaling=include_rescaling,
include_top=include_top,
name=name,
weights=parse_weights(weights, include_top, "convmixer_512_16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
**kwargs,
)
setattr(
ConvMixer_1536_20,
"__doc__",
BASE_DOCSTRING.format(name="ConvMixer_1536_20"),
)
setattr(
ConvMixer_1536_24,
"__doc__",
BASE_DOCSTRING.format(name="ConvMixer_1536_24"),
)
setattr(
ConvMixer_768_32, "__doc__", BASE_DOCSTRING.format(name="ConvMixer_768_32")
)
setattr(
ConvMixer_1024_16,
"__doc__",
BASE_DOCSTRING.format(name="ConvMixer_1024_16"),
)
setattr(
ConvMixer_512_16, "__doc__", BASE_DOCSTRING.format(name="ConvMixer_512_16")
)
| keras-cv/keras_cv/models/legacy/convmixer.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/convmixer.py",
"repo_id": "keras-cv",
"token_count": 6519
} | 75 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for models"""
from tensorflow import keras
from tensorflow.keras import layers
def parse_model_inputs(input_shape, input_tensor):
if input_tensor is None:
return layers.Input(shape=input_shape)
else:
if not keras.backend.is_keras_tensor(input_tensor):
return layers.Input(tensor=input_tensor, shape=input_shape)
else:
return input_tensor
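# Usage sketch (illustrative only): both calls return a symbolic tensor
# usable with the functional API; an existing KerasTensor passes through
# unchanged.
#
#     x = parse_model_inputs((224, 224, 3), None)  # fresh `layers.Input`
#     y = parse_model_inputs((224, 224, 3), x)     # returns x as-is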
def as_backbone(self, min_level=None, max_level=None):
"""Convert the application model into a model backbone for other tasks.
The backbone model will usually take same inputs as the original application
model, but produce multiple outputs, one for each feature level. Those
outputs can be feed to network downstream, like FPN and RPN. The output of
the backbone model will be a dict with int as key and tensor as value. The
int key represent the level of the feature output. A typical feature pyramid
has five levels corresponding to scales P3, P4, P5, P6, P7 in the backbone.
Scale Pn represents a feature map 2n times smaller in width and height than
the input image.
Args:
min_level: optional int, the lowest level of feature to be included in
the output, defaults to model's lowest feature level
(based on the model structure).
max_level: optional int, the highest level of feature to be included in
the output, defaults to model's highest feature level
(based on the model structure).
Returns:
a `keras.Model` which has dict as outputs.
Raises:
ValueError: When the model is lack of information for feature level, and
can't be converted to backbone model, or the min_level/max_level param
is out of range based on the model structure.
"""
if hasattr(self, "_backbone_level_outputs"):
backbone_level_outputs = self._backbone_level_outputs
model_levels = list(sorted(backbone_level_outputs.keys()))
if min_level is not None:
if min_level < model_levels[0]:
raise ValueError(
f"The min_level provided: {min_level} should be in "
f"the range of {model_levels}"
)
else:
min_level = model_levels[0]
if max_level is not None:
if max_level > model_levels[-1]:
raise ValueError(
f"The max_level provided: {max_level} should be in "
f"the range of {model_levels}"
)
else:
max_level = model_levels[-1]
outputs = {}
for level in range(min_level, max_level + 1):
outputs[level] = backbone_level_outputs[level]
return keras.Model(inputs=self.inputs, outputs=outputs)
else:
        raise ValueError(
            "The current model doesn't have any feature level "
            "information and can't be converted to a backbone model."
        )
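# Usage sketch (illustrative; assumes this function is bound as a method on a
# model exposing `_backbone_level_outputs`):
#
#     backbone = model.as_backbone(min_level=3, max_level=5)
#     features = backbone(images)  # {3: P3, 4: P4, 5: P5} feature maps
#
# where the level-n map is 2^n times smaller than the input in each spatial
# dimension.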
| keras-cv/keras_cv/models/legacy/utils.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/utils.py",
"repo_id": "keras-cv",
"token_count": 1350
} | 76 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from keras_cv import bounding_box
from keras_cv import layers as cv_layers
from keras_cv import losses
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.bounding_box.converters import _decode_deltas_to_boxes
from keras_cv.models.backbones.backbone_presets import backbone_presets
from keras_cv.models.backbones.backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.object_detection.__internal__ import unpack_input
from keras_cv.models.object_detection.retinanet import FeaturePyramid
from keras_cv.models.object_detection.retinanet import PredictionHead
from keras_cv.models.object_detection.retinanet import RetinaNetLabelEncoder
from keras_cv.models.object_detection.retinanet.retinanet_presets import (
retinanet_presets,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
from keras_cv.utils.train import get_feature_extractor
BOX_VARIANCE = [0.1, 0.1, 0.2, 0.2]
@keras_cv_export(
["keras_cv.models.RetinaNet", "keras_cv.models.object_detection.RetinaNet"]
)
class RetinaNet(Task):
"""A Keras model implementing the RetinaNet meta-architecture.
Implements the RetinaNet architecture for object detection. The constructor
requires `num_classes`, `bounding_box_format`, and a backbone. Optionally,
a custom label encoder, and prediction decoder may be provided.
Examples:
```python
images = np.ones((1, 512, 512, 3))
labels = {
"boxes": tf.cast([
[
[0, 0, 100, 100],
[100, 100, 200, 200],
[300, 300, 100, 100],
]
], dtype=tf.float32),
"classes": tf.cast([[1, 1, 1]], dtype=tf.float32),
}
model = keras_cv.models.RetinaNet(
num_classes=20,
bounding_box_format="xywh",
backbone=keras_cv.models.ResNet50Backbone.from_preset(
"resnet50_imagenet"
)
)
# Evaluate model without box decoding and NMS
model(images)
# Prediction with box decoding and NMS
model.predict(images)
# Train model
model.compile(
classification_loss='focal',
box_loss='smoothl1',
optimizer=keras.optimizers.SGD(global_clipnorm=10.0),
jit_compile=False,
)
model.fit(images, labels)
```
Args:
num_classes: the number of classes in your dataset excluding the
background class. Classes should be represented by integers in the
range [0, num_classes).
bounding_box_format: The format of bounding boxes of input dataset.
Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
        backbone: `keras.Model`. If the default `feature_pyramid` is used,
            must implement the `pyramid_level_inputs` property with keys
            "P3", "P4", and "P5" and layer names as values. A sensible
            backbone to use in many cases is
            `keras_cv.models.ResNetBackbone.from_preset("resnet50_imagenet")`.
anchor_generator: (Optional) a `keras_cv.layers.AnchorGenerator`. If
provided, the anchor generator will be passed to both the
`label_encoder` and the `prediction_decoder`. Only to be used when
both `label_encoder` and `prediction_decoder` are both `None`.
Defaults to an anchor generator with the parameterization:
`strides=[2**i for i in range(3, 8)]`,
`scales=[2**x for x in [0, 1 / 3, 2 / 3]]`,
`sizes=[32.0, 64.0, 128.0, 256.0, 512.0]`,
and `aspect_ratios=[0.5, 1.0, 2.0]`.
label_encoder: (Optional) a keras.Layer that accepts an image Tensor, a
bounding box Tensor and a bounding box class Tensor to its `call()`
method, and returns RetinaNet training targets. By default, a
KerasCV standard `RetinaNetLabelEncoder` is created and used.
Results of this object's `call()` method are passed to the `loss`
object for `box_loss` and `classification_loss` the `y_true`
argument.
prediction_decoder: (Optional) A `keras.layers.Layer` that is
responsible for transforming RetinaNet predictions into usable
bounding box Tensors. If not provided, a default is provided. The
            default `prediction_decoder` layer is a
            `keras_cv.layers.NonMaxSuppression` layer, which uses
            Non-Max Suppression for box pruning.
feature_pyramid: (Optional) A `keras.layers.Layer` that produces
a list of 4D feature maps (batch dimension included)
when called on the pyramid-level outputs of the `backbone`.
If not provided, the reference implementation from the paper will be used.
classification_head: (Optional) A `keras.Layer` that performs
classification of the bounding boxes. If not provided, a simple
ConvNet with 3 layers will be used.
box_head: (Optional) A `keras.Layer` that performs regression of the
bounding boxes. If not provided, a simple ConvNet with 3 layers
will be used.
""" # noqa: E501
def __init__(
self,
backbone,
num_classes,
bounding_box_format,
anchor_generator=None,
label_encoder=None,
prediction_decoder=None,
feature_pyramid=None,
classification_head=None,
box_head=None,
**kwargs,
):
if anchor_generator is not None and label_encoder is not None:
raise ValueError(
"`anchor_generator` is only to be provided when "
"`label_encoder` is `None`. Received `anchor_generator="
f"{anchor_generator}`, label_encoder={label_encoder}`. To "
"customize the behavior of the anchor_generator inside of a "
"custom `label_encoder` you should provide both to `RetinaNet`"
"provide both to `RetinaNet`, and ensure that the "
"`anchor_generator` provided to both is identical"
)
if label_encoder is None:
anchor_generator = (
anchor_generator
or RetinaNet.default_anchor_generator(bounding_box_format)
)
label_encoder = RetinaNetLabelEncoder(
bounding_box_format=bounding_box_format,
anchor_generator=anchor_generator,
box_variance=BOX_VARIANCE,
)
extractor_levels = ["P3", "P4", "P5"]
extractor_layer_names = [
backbone.pyramid_level_inputs[i] for i in extractor_levels
]
feature_extractor = get_feature_extractor(
backbone, extractor_layer_names, extractor_levels
)
feature_pyramid = feature_pyramid or FeaturePyramid()
prior_probability = keras.initializers.Constant(
-np.log((1 - 0.01) / 0.01)
)
classification_head = classification_head or PredictionHead(
output_filters=9 * num_classes,
bias_initializer=prior_probability,
)
box_head = box_head or PredictionHead(
output_filters=9 * 4, bias_initializer=keras.initializers.Zeros()
)
# Begin construction of forward pass
images = keras.layers.Input(
feature_extractor.input_shape[1:], name="images"
)
backbone_outputs = feature_extractor(images)
features = feature_pyramid(backbone_outputs)
cls_pred = []
box_pred = []
for feature in features:
box_pred.append(keras.layers.Reshape((-1, 4))(box_head(feature)))
cls_pred.append(
keras.layers.Reshape((-1, num_classes))(
classification_head(feature)
)
)
cls_pred = keras.layers.Concatenate(axis=1, name="classification")(
cls_pred
)
box_pred = keras.layers.Concatenate(axis=1, name="box")(box_pred)
        # box_pred is always a delta-encoded "center_yxhw" prediction, no
        # matter which bounding box format is passed in.
inputs = {"images": images}
outputs = {"box": box_pred, "classification": cls_pred}
super().__init__(
inputs=inputs,
outputs=outputs,
**kwargs,
)
self.label_encoder = label_encoder
self.anchor_generator = label_encoder.anchor_generator
self.bounding_box_format = bounding_box_format
self.num_classes = num_classes
self.backbone = backbone
self.feature_extractor = feature_extractor
self._prediction_decoder = (
prediction_decoder
or cv_layers.NonMaxSuppression(
bounding_box_format=bounding_box_format,
from_logits=True,
)
)
self.feature_pyramid = feature_pyramid
self.classification_head = classification_head
self.box_head = box_head
self.build(backbone.input_shape)
def predict_step(self, *args):
outputs = super().predict_step(*args)
if type(outputs) is tuple:
return self.decode_predictions(outputs[0], args[-1]), outputs[1]
else:
return self.decode_predictions(outputs, args[-1])
@property
def prediction_decoder(self):
return self._prediction_decoder
@prediction_decoder.setter
def prediction_decoder(self, prediction_decoder):
if prediction_decoder.bounding_box_format != self.bounding_box_format:
raise ValueError(
"Expected `prediction_decoder` and RetinaNet to "
"use the same `bounding_box_format`, but got "
"`prediction_decoder.bounding_box_format="
f"{prediction_decoder.bounding_box_format}`, and "
"`self.bounding_box_format="
f"{self.bounding_box_format}`."
)
self._prediction_decoder = prediction_decoder
self.make_predict_function(force=True)
self.make_train_function(force=True)
self.make_test_function(force=True)
@staticmethod
def default_anchor_generator(bounding_box_format):
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [32.0, 64.0, 128.0, 256.0, 512.0]
aspect_ratios = [0.5, 1.0, 2.0]
return cv_layers.AnchorGenerator(
bounding_box_format=bounding_box_format,
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
clip_boxes=True,
)
def decode_predictions(self, predictions, images):
box_pred, cls_pred = predictions["box"], predictions["classification"]
        # box_pred is in "center_yxhw" format, convert to target format.
image_shape = tuple(images[0].shape)
anchors = self.anchor_generator(image_shape=image_shape)
anchors = ops.concatenate([a for a in anchors.values()], axis=0)
box_pred = _decode_deltas_to_boxes(
anchors=anchors,
boxes_delta=box_pred,
anchor_format=self.anchor_generator.bounding_box_format,
box_format=self.bounding_box_format,
variance=BOX_VARIANCE,
image_shape=image_shape,
)
# box_pred is now in "self.bounding_box_format" format
box_pred = bounding_box.convert_format(
box_pred,
source=self.bounding_box_format,
target=self.prediction_decoder.bounding_box_format,
image_shape=image_shape,
)
y_pred = self.prediction_decoder(
box_pred, cls_pred, image_shape=image_shape
)
y_pred["boxes"] = bounding_box.convert_format(
y_pred["boxes"],
source=self.prediction_decoder.bounding_box_format,
target=self.bounding_box_format,
image_shape=image_shape,
)
return y_pred
def compile(
self,
box_loss=None,
classification_loss=None,
loss=None,
metrics=None,
**kwargs,
):
"""compiles the RetinaNet.
compile() mirrors the standard Keras compile() method, but has a few key
distinctions. Primarily, all metrics must support bounding boxes, and
two losses must be provided: `box_loss` and `classification_loss`.
Args:
box_loss: a Keras loss to use for box offset regression.
Preconfigured losses are provided when the string "huber" or
"smoothl1" are passed.
classification_loss: a Keras loss to use for box classification.
A preconfigured `FocalLoss` is provided when the string "focal"
is passed.
metrics: KerasCV object detection metrics that accept decoded
bounding boxes as their inputs. Examples of this metric type
are `keras_cv.metrics.BoxRecall()` and
`keras_cv.metrics.BoxMeanAveragePrecision()`. When `metrics` are
included in the call to `compile()`, the RetinaNet will perform
non-max suppression decoding during the forward pass. By
default, the RetinaNet uses a
                `keras_cv.layers.NonMaxSuppression()` layer to
perform decoding. This behavior can be customized by passing in
a `prediction_decoder` to the constructor or by modifying the
`prediction_decoder` attribute on the model. It should be noted
that the default non-max suppression operation does not have
TPU support, and thus when training on TPU metrics must be
evaluated in a `keras.utils.SidecarEvaluator` or a
`keras.callbacks.Callback`.
kwargs: most other `keras.Model.compile()` arguments are supported
and propagated to the `keras.Model` class.
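        Example:
        A minimal sketch; assumes `model` is an already-constructed
        `keras_cv.models.RetinaNet`.
        ```python
        model.compile(
            classification_loss="focal",
            box_loss="smoothl1",
            optimizer=keras.optimizers.SGD(global_clipnorm=10.0),
        )
        ```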
"""
if loss is not None:
raise ValueError(
"`RetinaNet` does not accept a `loss` to `compile()`. "
"Instead, please pass `box_loss` and `classification_loss`. "
"`loss` will be ignored during training."
)
box_loss = _parse_box_loss(box_loss)
classification_loss = _parse_classification_loss(classification_loss)
if hasattr(classification_loss, "from_logits"):
if not classification_loss.from_logits:
raise ValueError(
"RetinaNet.compile() expects `from_logits` to be True for "
"`classification_loss`. Got "
"`classification_loss.from_logits="
f"{classification_loss.from_logits}`"
)
if hasattr(box_loss, "bounding_box_format"):
if box_loss.bounding_box_format != self.bounding_box_format:
raise ValueError(
"Wrong `bounding_box_format` passed to `box_loss` in "
"`RetinaNet.compile()`. Got "
"`box_loss.bounding_box_format="
f"{box_loss.bounding_box_format}`, want "
"`box_loss.bounding_box_format="
f"{self.bounding_box_format}`"
)
self.box_loss = box_loss
self.classification_loss = classification_loss
losses = {
"box": self.box_loss,
"classification": self.classification_loss,
}
self._has_user_metrics = metrics is not None and len(metrics) != 0
self._user_metrics = metrics
super().compile(loss=losses, **kwargs)
def compute_loss(self, x, y, y_pred, sample_weight, **kwargs):
y_for_label_encoder = bounding_box.convert_format(
y,
source=self.bounding_box_format,
target=self.label_encoder.bounding_box_format,
images=x,
)
boxes, classes = self.label_encoder(x, y_for_label_encoder)
box_pred = y_pred["box"]
cls_pred = y_pred["classification"]
if boxes.shape[-1] != 4:
raise ValueError(
"boxes should have shape (None, None, 4). Got "
f"boxes.shape={tuple(boxes.shape)}"
)
if box_pred.shape[-1] != 4:
raise ValueError(
"box_pred should have shape (None, None, 4). Got "
f"box_pred.shape={tuple(box_pred.shape)}. Does your model's "
"`num_classes` parameter match your losses `num_classes` "
"parameter?"
)
if cls_pred.shape[-1] != self.num_classes:
raise ValueError(
"cls_pred should have shape (None, None, 4). Got "
f"cls_pred.shape={tuple(cls_pred.shape)}. Does your model's "
"`num_classes` parameter match your losses `num_classes` "
"parameter?"
)
cls_labels = ops.one_hot(
ops.cast(classes, "int32"), self.num_classes, dtype="float32"
)
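        # By the label encoder's convention (stated here as an assumption for
        # clarity): matched anchors carry a class id in [0, num_classes),
        # unmatched background anchors carry -1.0, and anchors to ignore
        # carry -2.0. Positives contribute to both losses, background anchors
        # only to the classification loss, and ignored anchors to neither.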
positive_mask = ops.cast(ops.greater(classes, -1.0), dtype="float32")
normalizer = ops.sum(positive_mask)
cls_weights = ops.cast(ops.not_equal(classes, -2.0), dtype="float32")
cls_weights /= normalizer
box_weights = positive_mask / normalizer
y_true = {
"box": boxes,
"classification": cls_labels,
}
sample_weights = {
"box": box_weights,
"classification": cls_weights,
}
zero_weight = {
"box": ops.zeros_like(box_weights),
"classification": ops.zeros_like(cls_weights),
}
sample_weights = ops.cond(
normalizer == 0,
lambda: zero_weight,
lambda: sample_weights,
)
return super().compute_loss(
x=x, y=y_true, y_pred=y_pred, sample_weight=sample_weights
)
def train_step(self, *args):
data = args[-1]
args = args[:-1]
x, y = unpack_input(data)
return super().train_step(*args, (x, y))
def test_step(self, *args):
data = args[-1]
args = args[:-1]
x, y = unpack_input(data)
return super().test_step(*args, (x, y))
def compute_metrics(self, x, y, y_pred, sample_weight):
metrics = {}
metrics.update(super().compute_metrics(x, {}, {}, sample_weight={}))
if not self._has_user_metrics:
return metrics
y_pred = self.decode_predictions(y_pred, x)
for metric in self._user_metrics:
metric.update_state(y, y_pred, sample_weight=sample_weight)
for metric in self._user_metrics:
result = metric.result()
if isinstance(result, dict):
metrics.update(result)
else:
metrics[metric.name] = result
return metrics
def get_config(self):
return {
"num_classes": self.num_classes,
"bounding_box_format": self.bounding_box_format,
"backbone": keras.saving.serialize_keras_object(self.backbone),
"label_encoder": keras.saving.serialize_keras_object(
self.label_encoder
),
"prediction_decoder": self._prediction_decoder,
"classification_head": keras.saving.serialize_keras_object(
self.classification_head
),
"box_head": keras.saving.serialize_keras_object(self.box_head),
}
@classmethod
def from_config(cls, config):
if "box_head" in config and isinstance(config["box_head"], dict):
config["box_head"] = keras.layers.deserialize(config["box_head"])
if "classification_head" in config and isinstance(
config["classification_head"], dict
):
config["classification_head"] = keras.layers.deserialize(
config["classification_head"]
)
if "label_encoder" in config and isinstance(
config["label_encoder"], dict
):
config["label_encoder"] = keras.layers.deserialize(
config["label_encoder"]
)
if "prediction_decoder" in config and isinstance(
config["prediction_decoder"], dict
):
config["prediction_decoder"] = keras.layers.deserialize(
config["prediction_decoder"]
)
return super().from_config(config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy({**backbone_presets, **retinanet_presets})
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(
{**backbone_presets_with_weights, **retinanet_presets}
)
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configurations of compatible
backbones."""
return copy.deepcopy(backbone_presets)
def _parse_box_loss(loss):
if not isinstance(loss, str):
# support arbitrary callables
return loss
# case insensitive comparison
if loss.lower() == "smoothl1":
return losses.SmoothL1Loss(l1_cutoff=1.0, reduction="sum")
if loss.lower() == "huber":
return keras.losses.Huber(reduction="sum")
raise ValueError(
"Expected `box_loss` to be either a Keras Loss, "
f"callable, or the string 'SmoothL1'. Got loss={loss}."
)
def _parse_classification_loss(loss):
if not isinstance(loss, str):
# support arbitrary callables
return loss
# case insensitive comparison
if loss.lower() == "focal":
return losses.FocalLoss(from_logits=True, reduction="sum")
raise ValueError(
"Expected `classification_loss` to be either a Keras Loss, "
f"callable, or the string 'Focal'. Got loss={loss}."
)
| keras-cv/keras_cv/models/object_detection/retinanet/retinanet.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/retinanet/retinanet.py",
"repo_id": "keras-cv",
"token_count": 10496
} | 77 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
import keras_cv.layers as cv_layers
from keras_cv import bounding_box
class YoloXPredictionDecoder(keras.layers.Layer):
"""Decodes the predictions from YoloX head.
This layer is similar to the decoding code in `YoloX.compute_losses`. This
is followed by a bounding box suppression layer.
Arguments:
bounding_box_format: The format of bounding boxes of input dataset.
Refer to https://keras.io/api/keras_cv/bounding_box/formats/
for more details on supported bounding box formats.
num_classes: The number of classes to be considered for the
classification head.
suppression_layer: A `keras.layers.Layer` that follows the same API
signature of the `keras_cv.layers.MultiClassNonMaxSuppression`
            layer. This layer should perform a suppression operation such
            as Non-Max Suppression or Soft Non-Max Suppression.
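    Example:
    A minimal sketch; `images` is assumed to be an image batch and
    `predictions` the list of raw per-level YoloX head outputs, each of
    shape `(batch, h, w, 5 + num_classes)`.
    ```python
    decoder = YoloXPredictionDecoder(
        bounding_box_format="xywh", num_classes=80
    )
    decoded = decoder(images, predictions)
    ```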
"""
def __init__(
self, bounding_box_format, num_classes, suppression_layer=None, **kwargs
):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.num_classes = num_classes
self.suppression_layer = (
suppression_layer
or cv_layers.MultiClassNonMaxSuppression(
bounding_box_format=bounding_box_format,
from_logits=False,
confidence_threshold=0.01,
iou_threshold=0.65,
max_detections=100,
max_detections_per_class=100,
)
)
if (
self.suppression_layer.bounding_box_format
!= self.bounding_box_format
):
raise ValueError(
"`suppression_layer` must have the same `bounding_box_format` "
"as the `YoloXPredictionDecoder()` layer. "
"Received `YoloXPredictionDecoder.bounding_box_format="
f"{self.bounding_box_format}`, "
f"`suppression_layer={suppression_layer}`."
)
self.built = True
def call(self, images, predictions):
image_shape = tf.cast(tf.shape(images), dtype=self.compute_dtype)[1:-1]
batch_size = tf.shape(predictions[0])[0]
grids = []
strides = []
shapes = [x.shape[1:3] for x in predictions]
# 5 + self.num_classes is a concatenation of bounding boxes (length=4)
# + objectness score (length=1) + num_classes
# this reshape is simply collapsing axes 1 and 2 of x into a single
# dimension
predictions = [
tf.reshape(x, [batch_size, -1, 5 + self.num_classes])
for x in predictions
]
predictions = tf.cast(
tf.concat(predictions, axis=1), dtype=self.compute_dtype
)
predictions_shape = tf.cast(
tf.shape(predictions), dtype=self.compute_dtype
)
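        # Build one (x, y) cell grid per feature map level, plus a matching
        # per-level stride (input image size / feature map size), so the raw
        # cell-relative predictions can be mapped back to image coordinates.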
for i in range(len(shapes)):
shape_x, shape_y = shapes[i]
grid_x, grid_y = tf.meshgrid(tf.range(shape_y), tf.range(shape_x))
grid = tf.reshape(tf.stack((grid_x, grid_y), 2), (1, -1, 2))
shape = grid.shape[:2]
grids.append(tf.cast(grid, self.compute_dtype))
strides.append(
tf.ones((shape[0], shape[1], 1))
* image_shape[0]
/ tf.cast(shape_x, self.compute_dtype)
)
grids = tf.concat(grids, axis=1)
strides = tf.concat(strides, axis=1)
box_xy = tf.expand_dims(
(predictions[..., :2] + grids) * strides / image_shape, axis=-2
)
box_xy = tf.broadcast_to(
box_xy, [batch_size, predictions_shape[1], self.num_classes, 2]
)
box_wh = tf.expand_dims(
tf.exp(predictions[..., 2:4]) * strides / image_shape, axis=-2
)
box_wh = tf.broadcast_to(
box_wh, [batch_size, predictions_shape[1], self.num_classes, 2]
)
box_confidence = tf.math.sigmoid(predictions[..., 4:5])
box_class_probs = tf.math.sigmoid(predictions[..., 5:])
# create and broadcast classes for every box before nms
box_classes = tf.expand_dims(
tf.range(self.num_classes, dtype=self.compute_dtype), axis=-1
)
box_classes = tf.broadcast_to(
box_classes, [batch_size, predictions_shape[1], self.num_classes, 1]
)
box_scores = tf.expand_dims(box_confidence * box_class_probs, axis=-1)
outputs = tf.concat([box_xy, box_wh, box_classes, box_scores], axis=-1)
outputs = tf.reshape(outputs, [batch_size, -1, 6])
outputs = {
"boxes": outputs[..., :4],
"classes": outputs[..., 4],
"confidence": outputs[..., 5],
}
# this conversion is rel_center_xywh to rel_xywh
# small workaround because rel_center_xywh isn't supported yet
outputs = bounding_box.convert_format(
outputs,
source="center_xywh",
target="xywh",
images=images,
)
outputs = bounding_box.convert_format(
outputs,
source="rel_xywh",
target=self.suppression_layer.bounding_box_format,
images=images,
)
# preparing the predictions for TF NMS op
class_predictions = tf.cast(outputs["classes"], tf.int32)
class_predictions = tf.one_hot(class_predictions, self.num_classes)
scores = (
tf.expand_dims(outputs["confidence"], axis=-1) * class_predictions
)
return self.suppression_layer(outputs["boxes"], scores)
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_decoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_decoder.py",
"repo_id": "keras-cv",
"token_count": 2879
} | 78 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BASNet model preset configurations."""
from keras_cv.models.backbones.resnet_v1 import resnet_v1_backbone_presets
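# A hypothetical usage sketch (assumes these presets are registered on the
# `keras_cv.models.BASNet` task, as with other KerasCV tasks):
# model = keras_cv.models.BASNet.from_preset("basnet_resnet18")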
presets_no_weights = {
"basnet_resnet18": {
"metadata": {
"description": "BASNet with a ResNet18 v1 backbone.",
"params": 98780872,
"official_name": "BASNet",
"path": "basnet_resnet18",
},
"config": {
"backbone": resnet_v1_backbone_presets.backbone_presets["resnet18"],
"num_classes": 1,
"input_shape": (288, 288, 3),
},
},
"basnet_resnet34": {
"metadata": {
"description": "BASNet with a ResNet34 v1 backbone.",
"params": 108896456,
"official_name": "BASNet",
"path": "basnet_resnet34",
},
"config": {
"backbone": resnet_v1_backbone_presets.backbone_presets["resnet34"],
"num_classes": 1,
"input_shape": (288, 288, 3),
},
},
}
presets_with_weights = {
# TODO: Add BASNet preset with weights
}
basnet_presets = {**presets_no_weights, **presets_with_weights}
| keras-cv/keras_cv/models/segmentation/basnet/basnet_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/basnet/basnet_presets.py",
"repo_id": "keras-cv",
"token_count": 723
} | 79 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.vit_det_layers import MLP
from keras_cv.models.segmentation.segment_anything.sam_transformer import (
TwoWayTransformer,
)
@keras_cv_export("keras_cv.models.SAMMaskDecoder", package="keras_cv.models")
class SAMMaskDecoder(keras.layers.Layer):
"""Mask decoder for the Segment Anything Model (SAM).
This lightweight module efficiently maps the image embedding and a set of
prompt embeddings to an output mask. Before applying the transformer
decoder, the layer first inserts into the set of prompt embeddings a
learned output token embedding that will be used at the decoder's output.
For simplicity, these embeddings (not including the image embedding) are
collectively called "tokens".
The image embeddings, positional image embeddings, and tokens are passed
through a transformer decoder. After running the decoder, the layer
upsamples the updated image embedding by 4x with two transposed
convolutional layers (now it's downscaled 4x relative to the input
image). Then, the tokens attend once more to the image embedding and
    the updated output token embeddings are passed to a small 3-layer MLP that
outputs a vector matching the channel dimension of the upscaled image
embedding. Finally, a mask is predicted with a spatially point-wise
product between the upscaled image embedding and the MLP's output.
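    Example:
    A minimal sketch; the shapes shown assume the default SAM configuration,
    which produces a `(B, 64, 64, 256)` image embedding.
    ```python
    mask_decoder = SAMMaskDecoder()
    outputs = mask_decoder(
        {
            "image_embeddings": image_embeddings,  # (B, 64, 64, 256)
            "image_pe": image_pe,  # (B, 64, 64, 256)
            "sparse_prompt_embeddings": sparse_embeddings,  # (B, N, 256)
            "dense_prompt_embeddings": dense_embeddings,  # (B, 64, 64, 256)
        }
    )
    masks, iou_pred = outputs["masks"], outputs["iou_pred"]
    ```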
Args:
transformer_dim (int, optional): The number of input features to the
transformer decoder. Defaults to `256`.
transformer (keras.layers.Layer, optional): A transformer decoder.
Defaults to `None`. When `None`, a
`keras_cv.models.TwoWayTransformer` layer is used.
num_multimask_outputs (int, optional): Number of multimask outputs.
The model would generate these many extra masks. The total masks
generated by the model are `1 + num_multimask_outputs`. Defaults
to `3`.
iou_head_depth (int, optional): The depth of the dense net used to
predict the IoU confidence score. Defaults to `3`.
iou_head_hidden_dim (int, optional): The number of units in the hidden
layers used in the dense net to predict the IoU confidence score.
Defaults to `256`.
activation (str, optional): Activation to use in the mask upscaler
network. Defaults to `"gelu"`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
""" # noqa: E501
def __init__(
self,
*,
transformer_dim=256,
transformer=None,
num_multimask_outputs=3,
iou_head_depth=3,
iou_head_hidden_dim=256,
activation="gelu",
**kwargs,
):
super().__init__(**kwargs)
self.transformer_dim = transformer_dim
if transformer is None:
transformer = TwoWayTransformer()
self.transformer = transformer
self.num_multimask_outputs = num_multimask_outputs
self.iou_head_depth = iou_head_depth
self.iou_head_hidden_dim = iou_head_hidden_dim
self.activation = activation
self.iou_token = keras.layers.Embedding(1, transformer_dim)
self.num_mask_tokens = num_multimask_outputs + 1
self.mask_tokens = keras.layers.Embedding(
self.num_mask_tokens, transformer_dim
)
self.output_upscaling = keras.models.Sequential(
[
keras.layers.Conv2DTranspose(
transformer_dim // 4, kernel_size=2, strides=2
),
keras.layers.LayerNormalization(epsilon=1e-6),
keras.layers.Activation(activation),
keras.layers.Conv2DTranspose(
transformer_dim // 8, kernel_size=2, strides=2
),
keras.layers.Activation(activation),
]
)
self.output_hypernetworks_mlps = [
MLP(transformer_dim, transformer_dim // 8, 3)
for _ in range(self.num_mask_tokens)
]
self.iou_prediction_head = MLP(
iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
)
def build(self, input_shape=None):
self.transformer.build()
self.iou_token.build([None])
self.mask_tokens.build([None])
self.output_upscaling.build([None, None, None, self.transformer_dim])
for mlp in self.output_hypernetworks_mlps:
mlp.build([None, self.transformer_dim])
self.iou_prediction_head.build([None, self.transformer_dim])
self.built = True
def call(self, inputs):
image_embeddings = inputs["image_embeddings"]
image_pe = inputs["image_pe"]
sparse_prompt_embeddings = inputs["sparse_prompt_embeddings"]
dense_prompt_embeddings = inputs["dense_prompt_embeddings"]
masks, iou_pred = self._predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
sparse_prompt_embeddings=sparse_prompt_embeddings,
dense_prompt_embeddings=dense_prompt_embeddings,
)
return {"masks": masks, "iou_pred": iou_pred}
def _predict_masks(
self,
image_embeddings,
image_pe,
sparse_prompt_embeddings,
dense_prompt_embeddings,
):
indices_iou = ops.arange(1, dtype="int32")
indices_mask = ops.arange(self.num_mask_tokens, dtype="int32")
output_tokens = ops.concatenate(
[self.iou_token(indices_iou), self.mask_tokens(indices_mask)],
axis=0,
)
output_tokens = ops.broadcast_to(
output_tokens[None, ...],
shape=(
ops.shape(sparse_prompt_embeddings)[0],
ops.shape(output_tokens)[0],
ops.shape(output_tokens)[1],
),
)
tokens = ops.concatenate(
[output_tokens, sparse_prompt_embeddings], axis=1
)
source = ops.broadcast_to(
image_embeddings,
shape=(
ops.shape(tokens)[0],
ops.shape(image_embeddings)[1],
ops.shape(image_embeddings)[2],
ops.shape(image_embeddings)[3],
),
)
source = source + dense_prompt_embeddings
positional_source = ops.broadcast_to(
image_pe,
shape=(
ops.shape(tokens)[0],
ops.shape(image_embeddings)[1],
ops.shape(image_embeddings)[2],
ops.shape(image_embeddings)[3],
),
)
shape = ops.shape(source)
B, H, W, C = shape[0], shape[1], shape[2], shape[3]
hidden_state, source = self.transformer(
source, positional_source, tokens
)
iou_token_out = hidden_state[:, 0, :]
mask_tokens_out = hidden_state[:, 1 : (1 + self.num_mask_tokens), :]
source = ops.reshape(source, (B, H, W, C))
upscaled_embeddings = self.output_upscaling(source)
hyper_in_list = []
for i in range(self.num_mask_tokens):
hyper_in_list.append(
self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])
)
hyper_in = ops.stack(hyper_in_list, axis=1)
shape = ops.shape(upscaled_embeddings)
B, H, W, C = shape[0], shape[1], shape[2], shape[3]
upscaled_embeddings = ops.reshape(
ops.transpose(upscaled_embeddings, axes=(0, 3, 1, 2)),
(B, C, H * W),
)
masks = ops.reshape(
hyper_in @ upscaled_embeddings, (B, self.num_mask_tokens, H, W)
)
iou_pred = self.iou_prediction_head(iou_token_out)
return masks, iou_pred
def get_config(self):
config = super().get_config()
config.update(
{
"transformer_dim": self.transformer_dim,
"transformer": keras.saving.serialize_keras_object(
self.transformer
),
"num_multimask_outputs": self.num_multimask_outputs,
"iou_head_depth": self.iou_head_depth,
"iou_head_hidden_dim": self.iou_head_hidden_dim,
"activation": self.activation,
}
)
return config
@classmethod
def from_config(cls, config):
config.update(
{"transformer": keras.layers.deserialize(config["transformer"])}
)
return super().from_config(config)
| keras-cv/keras_cv/models/segmentation/segment_anything/sam_mask_decoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_mask_decoder.py",
"repo_id": "keras-cv",
"token_count": 4266
} | 80 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras implementation of StableDiffusion.
Credits:
- Original implementation:
https://github.com/CompVis/stable-diffusion
- Initial TF/Keras port:
https://github.com/divamgupta/stable-diffusion-tensorflow
The current implementation is a rewrite of the initial TF/Keras port by
Divam Gupta.
"""
import math
import numpy as np
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend import random
from keras_cv.models.stable_diffusion.clip_tokenizer import SimpleTokenizer
from keras_cv.models.stable_diffusion.constants import _ALPHAS_CUMPROD
from keras_cv.models.stable_diffusion.constants import _UNCONDITIONAL_TOKENS
from keras_cv.models.stable_diffusion.decoder import Decoder
from keras_cv.models.stable_diffusion.diffusion_model import DiffusionModel
from keras_cv.models.stable_diffusion.diffusion_model import DiffusionModelV2
from keras_cv.models.stable_diffusion.image_encoder import ImageEncoder
from keras_cv.models.stable_diffusion.text_encoder import TextEncoder
from keras_cv.models.stable_diffusion.text_encoder import TextEncoderV2
MAX_PROMPT_LENGTH = 77
class StableDiffusionBase:
"""Base class for stable diffusion and stable diffusion v2 model."""
def __init__(
self,
img_height=512,
img_width=512,
jit_compile=True,
):
# UNet requires multiples of 2**7 = 128
img_height = round(img_height / 128) * 128
img_width = round(img_width / 128) * 128
self.img_height = img_height
self.img_width = img_width
# lazy initialize the component models and the tokenizer
self._image_encoder = None
self._text_encoder = None
self._diffusion_model = None
self._decoder = None
self._tokenizer = None
self.jit_compile = jit_compile
def text_to_image(
self,
prompt,
negative_prompt=None,
batch_size=1,
num_steps=50,
unconditional_guidance_scale=7.5,
seed=None,
):
encoded_text = self.encode_text(prompt)
return self.generate_image(
encoded_text,
negative_prompt=negative_prompt,
batch_size=batch_size,
num_steps=num_steps,
unconditional_guidance_scale=unconditional_guidance_scale,
seed=seed,
)
def encode_text(self, prompt):
"""Encodes a prompt into a latent text encoding.
The encoding produced by this method should be used as the
`encoded_text` parameter of `StableDiffusion.generate_image`. Encoding
text separately from generating an image can be used to arbitrarily
modify the text encoding prior to image generation, e.g. for walking
between two prompts.
Args:
prompt: a string to encode, must be 77 tokens or shorter.
Example:
```python
from keras_cv.models import StableDiffusion
model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)
encoded_text = model.encode_text("Tacos at dawn")
img = model.generate_image(encoded_text)
```
"""
# Tokenize prompt (i.e. starting context)
inputs = self.tokenizer.encode(prompt)
if len(inputs) > MAX_PROMPT_LENGTH:
raise ValueError(
f"Prompt is too long (should be <= {MAX_PROMPT_LENGTH} tokens)"
)
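        # Pad the tokenized prompt to MAX_PROMPT_LENGTH with 49407, the CLIP
        # tokenizer's <|endoftext|> token id.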
phrase = inputs + [49407] * (MAX_PROMPT_LENGTH - len(inputs))
phrase = ops.convert_to_tensor([phrase], dtype="int32")
context = self.text_encoder.predict_on_batch(
{"tokens": phrase, "positions": self._get_pos_ids()}
)
return context
def generate_image(
self,
encoded_text,
negative_prompt=None,
batch_size=1,
num_steps=50,
unconditional_guidance_scale=7.5,
diffusion_noise=None,
seed=None,
):
"""Generates an image based on encoded text.
The encoding passed to this method should be derived from
`StableDiffusion.encode_text`.
Args:
encoded_text: Tensor of shape (`batch_size`, 77, 768), or a Tensor
of shape (77, 768). When the batch axis is omitted, the same
encoded text will be used to produce every generated image.
batch_size: int, number of images to generate, defaults to 1.
negative_prompt: a string containing information to negatively guide
the image generation (e.g. by removing or altering certain
aspects of the generated image), defaults to None.
num_steps: int, number of diffusion steps (controls image quality),
defaults to 50.
unconditional_guidance_scale: float, controlling how closely the
image should adhere to the prompt. Larger values result in more
closely adhering to the prompt, but will make the image noisier.
Defaults to 7.5.
diffusion_noise: Tensor of shape (`batch_size`, img_height // 8,
img_width // 8, 4), or a Tensor of shape (img_height // 8,
img_width // 8, 4). Optional custom noise to seed the diffusion
process. When the batch axis is omitted, the same noise will be
used to seed diffusion for every generated image.
seed: integer which is used to seed the random generation of
diffusion noise, only to be specified if `diffusion_noise` is
None.
Example:
```python
from keras_cv.models import StableDiffusion
from keras_core import ops
batch_size = 8
model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)
e_tacos = model.encode_text("Tacos at dawn")
e_watermelons = model.encode_text("Watermelons at dusk")
e_interpolated = ops.linspace(e_tacos, e_watermelons, batch_size)
images = model.generate_image(e_interpolated, batch_size=batch_size)
```
"""
if diffusion_noise is not None and seed is not None:
raise ValueError(
"`diffusion_noise` and `seed` should not both be passed to "
"`generate_image`. `seed` is only used to generate diffusion "
"noise when it's not already user-specified."
)
context = self._expand_tensor(encoded_text, batch_size)
if negative_prompt is None:
unconditional_context = ops.repeat(
self._get_unconditional_context(), batch_size, axis=0
)
else:
unconditional_context = self.encode_text(negative_prompt)
unconditional_context = self._expand_tensor(
unconditional_context, batch_size
)
if diffusion_noise is not None:
diffusion_noise = ops.squeeze(diffusion_noise)
if len(ops.shape(diffusion_noise)) == 3:
diffusion_noise = ops.repeat(
ops.expand_dims(diffusion_noise, axis=0), batch_size, axis=0
)
latent = diffusion_noise
else:
latent = self._get_initial_diffusion_noise(batch_size, seed)
# Iterative reverse diffusion stage
num_timesteps = 1000
ratio = (num_timesteps - 1) / (num_steps - 1)
timesteps = (np.arange(0, num_steps) * ratio).round().astype(np.int64)
alphas, alphas_prev = self._get_initial_alphas(timesteps)
progbar = keras.utils.Progbar(len(timesteps))
iteration = 0
for index, timestep in list(enumerate(timesteps))[::-1]:
latent_prev = latent # Set aside the previous latent vector
t_emb = self._get_timestep_embedding(timestep, batch_size)
unconditional_latent = self.diffusion_model.predict_on_batch(
{
"latent": latent,
"timestep_embedding": t_emb,
"context": unconditional_context,
}
)
latent = self.diffusion_model.predict_on_batch(
{
"latent": latent,
"timestep_embedding": t_emb,
"context": context,
}
)
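            # Classifier-free guidance: move the conditional noise prediction
            # away from the unconditional one, scaled by the guidance factor.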
latent = ops.array(
unconditional_latent
+ unconditional_guidance_scale * (latent - unconditional_latent)
)
a_t, a_prev = alphas[index], alphas_prev[index]
            # Keras backend arrays need to be cast explicitly
target_dtype = latent_prev.dtype
latent = ops.cast(latent, target_dtype)
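            # DDIM-style update: estimate the fully-denoised latent (pred_x0)
            # from the current noise prediction, then re-noise it to the
            # previous timestep's noise level.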
pred_x0 = (latent_prev - math.sqrt(1 - a_t) * latent) / math.sqrt(
a_t
)
latent = (
ops.array(latent) * math.sqrt(1.0 - a_prev)
+ math.sqrt(a_prev) * pred_x0
)
iteration += 1
progbar.update(iteration)
# Decoding stage
decoded = self.decoder.predict_on_batch(latent)
decoded = ((decoded + 1) / 2) * 255
return np.clip(decoded, 0, 255).astype("uint8")
def _get_unconditional_context(self):
unconditional_tokens = ops.convert_to_tensor(
[_UNCONDITIONAL_TOKENS],
dtype="int32",
)
unconditional_context = self.text_encoder.predict_on_batch(
{"tokens": unconditional_tokens, "positions": self._get_pos_ids()}
)
return unconditional_context
def _expand_tensor(self, text_embedding, batch_size):
"""Extends a tensor by repeating it to fit the shape of the given batch
size."""
text_embedding = ops.squeeze(text_embedding)
if len(text_embedding.shape) == 2:
text_embedding = ops.repeat(
ops.expand_dims(text_embedding, axis=0), batch_size, axis=0
)
return text_embedding
@property
def image_encoder(self):
"""image_encoder returns the VAE Encoder with pretrained weights.
Usage:
```python
sd = keras_cv.models.StableDiffusion()
my_image = np.ones((512, 512, 3))
latent_representation = sd.image_encoder.predict(my_image)
```
"""
if self._image_encoder is None:
self._image_encoder = ImageEncoder()
if self.jit_compile:
self._image_encoder.compile(jit_compile=True)
return self._image_encoder
@property
def text_encoder(self):
pass
@property
def diffusion_model(self):
pass
@property
def decoder(self):
"""decoder returns the diffusion image decoder model with pretrained
        weights. Can be overridden for tasks where the decoder needs to be
modified.
"""
if self._decoder is None:
self._decoder = Decoder(self.img_height, self.img_width)
if self.jit_compile:
self._decoder.compile(jit_compile=True)
return self._decoder
@property
def tokenizer(self):
"""tokenizer returns the tokenizer used for text inputs.
        Can be overridden for tasks like textual inversion where the tokenizer
needs to be modified.
"""
if self._tokenizer is None:
self._tokenizer = SimpleTokenizer()
return self._tokenizer
def _get_timestep_embedding(
self, timestep, batch_size, dim=320, max_period=10000
):
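        # Sinusoidal timestep embedding, as in Transformer positional
        # encodings: pair `dim // 2` geometrically-spaced frequencies with
        # the scalar timestep, then concatenate the cosines and sines of the
        # resulting phases.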
half = dim // 2
        frequencies = ops.cast(ops.arange(0, half), "float32")
        freqs = ops.exp(-math.log(max_period) * frequencies / half)
args = ops.convert_to_tensor([timestep], dtype="float32") * freqs
embedding = ops.concatenate([ops.cos(args), ops.sin(args)], 0)
embedding = ops.reshape(embedding, [1, -1])
return ops.repeat(embedding, batch_size, axis=0)
def _get_initial_alphas(self, timesteps):
alphas = [_ALPHAS_CUMPROD[t] for t in timesteps]
alphas_prev = [1.0] + alphas[:-1]
return alphas, alphas_prev
def _get_initial_diffusion_noise(self, batch_size, seed):
return random.normal(
(batch_size, self.img_height // 8, self.img_width // 8, 4),
seed=seed,
)
@staticmethod
def _get_pos_ids():
return ops.expand_dims(ops.arange(MAX_PROMPT_LENGTH, dtype="int32"), 0)
@keras_cv_export("keras_cv.models.StableDiffusion")
class StableDiffusion(StableDiffusionBase):
"""Keras implementation of Stable Diffusion.
Note that the StableDiffusion API, as well as the APIs of the sub-components
of StableDiffusion (e.g. ImageEncoder, DiffusionModel) should be considered
    unstable at this point. We do not guarantee backwards compatibility for
future changes to these APIs.
Stable Diffusion is a powerful image generation model that can be used,
among other things, to generate pictures according to a short text
description (called a "prompt").
Arguments:
img_height: int, height of the images to generate, in pixel. Note that
only multiples of 128 are supported; the value provided will be
rounded to the nearest valid value. Defaults to 512.
img_width: int, width of the images to generate, in pixel. Note that
only multiples of 128 are supported; the value provided will be
rounded to the nearest valid value. Defaults to 512.
jit_compile: bool, whether to compile the underlying models to XLA.
This can lead to a significant speedup on some systems. Defaults to
            True.
Example:
```python
from keras_cv.models import StableDiffusion
from PIL import Image
model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)
img = model.text_to_image(
prompt="A beautiful horse running through a field",
batch_size=1, # How many images to generate at once
num_steps=25, # Number of iterations (controls image quality)
seed=123, # Set this to always get the same image from the same prompt
)
Image.fromarray(img[0]).save("horse.png")
print("saved at horse.png")
```
References:
- [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement)
- [Original implementation](https://github.com/CompVis/stable-diffusion)
""" # noqa: E501
def __init__(
self,
img_height=512,
img_width=512,
jit_compile=True,
):
super().__init__(img_height, img_width, jit_compile)
print(
"By using this model checkpoint, you acknowledge that its usage is "
"subject to the terms of the CreativeML Open RAIL-M license at "
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE" # noqa: E501
)
@property
def text_encoder(self):
"""text_encoder returns the text encoder with pretrained weights.
        Can be overridden for tasks like textual inversion where the text encoder
needs to be modified.
"""
if self._text_encoder is None:
self._text_encoder = TextEncoder(MAX_PROMPT_LENGTH)
if self.jit_compile:
self._text_encoder.compile(jit_compile=True)
return self._text_encoder
@property
def diffusion_model(self):
"""diffusion_model returns the diffusion model with pretrained weights.
        Can be overridden for tasks where the diffusion model needs to be
modified.
"""
if self._diffusion_model is None:
self._diffusion_model = DiffusionModel(
self.img_height, self.img_width, MAX_PROMPT_LENGTH
)
if self.jit_compile:
self._diffusion_model.compile(jit_compile=True)
return self._diffusion_model
@keras_cv_export("keras_cv.models.StableDiffusionV2")
class StableDiffusionV2(StableDiffusionBase):
"""Keras implementation of Stable Diffusion v2.
Note that the StableDiffusion API, as well as the APIs of the sub-components
of StableDiffusionV2 (e.g. ImageEncoder, DiffusionModelV2) should be
considered unstable at this point. We do not guarantee backwards
    compatibility for future changes to these APIs.
Stable Diffusion is a powerful image generation model that can be used,
among other things, to generate pictures according to a short text
description (called a "prompt").
Arguments:
img_height: int, height of the images to generate, in pixel. Note that
only multiples of 128 are supported; the value provided will be
rounded to the nearest valid value. Defaults to 512.
img_width: int, width of the images to generate, in pixel. Note that
only multiples of 128 are supported; the value provided will be
rounded to the nearest valid value. Defaults to 512.
jit_compile: bool, whether to compile the underlying models to XLA.
This can lead to a significant speedup on some systems. Defaults to
            True.
Example:
```python
from keras_cv.models import StableDiffusionV2
from PIL import Image
model = StableDiffusionV2(img_height=512, img_width=512, jit_compile=True)
img = model.text_to_image(
prompt="A beautiful horse running through a field",
batch_size=1, # How many images to generate at once
num_steps=25, # Number of iterations (controls image quality)
seed=123, # Set this to always get the same image from the same prompt
)
Image.fromarray(img[0]).save("horse.png")
print("saved at horse.png")
```
References:
- [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement)
- [Original implementation](https://github.com/Stability-AI/stablediffusion)
""" # noqa: E501
def __init__(
self,
img_height=512,
img_width=512,
jit_compile=True,
):
super().__init__(img_height, img_width, jit_compile)
print(
"By using this model checkpoint, you acknowledge that its usage is "
"subject to the terms of the CreativeML Open RAIL++-M license at "
"https://github.com/Stability-AI/stablediffusion/blob/main/LICENSE-MODEL" # noqa: E501
)
@property
def text_encoder(self):
"""text_encoder returns the text encoder with pretrained weights.
        Can be overridden for tasks like textual inversion where the text encoder
needs to be modified.
"""
if self._text_encoder is None:
self._text_encoder = TextEncoderV2(MAX_PROMPT_LENGTH)
if self.jit_compile:
self._text_encoder.compile(jit_compile=True)
return self._text_encoder
@property
def diffusion_model(self):
"""diffusion_model returns the diffusion model with pretrained weights.
        Can be overridden for tasks where the diffusion model needs to be
modified.
"""
if self._diffusion_model is None:
self._diffusion_model = DiffusionModelV2(
self.img_height, self.img_width, MAX_PROMPT_LENGTH
)
if self.jit_compile:
self._diffusion_model.compile(jit_compile=True)
return self._diffusion_model
| keras-cv/keras_cv/models/stable_diffusion/stable_diffusion.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/stable_diffusion.py",
"repo_id": "keras-cv",
"token_count": 8533
} | 81 |
<jupyter_start><jupyter_text>Setup<jupyter_code>!pip install -q git+https://github.com/divyashreepathihalli/keras-cv.git@CLIP_refactor
!pip install -q keras-nlp
!pip install -q tf-keras
!pip install -q tensorflow-text
!pip install keras==3.0.2<jupyter_output>Installing build dependencies ... done
Getting requirements to build wheel ... done
Preparing metadata (pyproject.toml) ... done
Building wheel for keras-cv (pyproject.toml) ... done
Collecting keras==3.0.2
Downloading keras-3.0.2-py3-none-any.whl (1.0 MB)
Requirement already satisfied: absl-py in /usr/local/lib/python3.10/dist-packages (from keras==3.0.2) (1.4.0)
Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packa[...]<jupyter_text>Import<jupyter_code>from keras_cv.models.feature_extractor.clip import CLIPProcessor
import keras
from keras_cv.models import CLIP
!wget https://i.imgur.com/8H7XCH0.jpg -O cat.jpg
!wget http://images.cocodataset.org/val2017/000000039769.jpg -O test.jpg
# @title Select which model weights you would like to convert
MODEL_CONFIGS = {
"CLIP_B32": {
"embed_dim": 512,
"context_length": 77,
"vocab_size": 49408,
"transformer_width": 512,
"transformer_heads": 8,
"transformer_layers": 12,
"vision_layers": 12,
"vision_width": 768,
"image_resolution": 224,
"vision_patch_size": 32,
},
"CLIP_B16": {
"embed_dim": 512,
"context_length": 77,
"vocab_size": 49408,
"transformer_width": 512,
"transformer_heads": 8,
"transformer_layers": 12,
"vision_layers": 12,
"vision_width": 768,
"image_resolution": 224,
"vision_patch_size": 16,
},
"CLIP_L14": {
"embed_dim": 768,
"context_length": 77,
"vocab_size": 49408,
"transformer_width": 768,
"transformer_heads": 12,
"transformer_layers": 12,
"vision_layers": 24,
"vision_width": 1024,
"image_resolution": 224,
"vision_patch_size": 14,
},
"CLIP_L14_336": {
"embed_dim": 768,
"context_length": 77,
"vocab_size": 49408,
"transformer_width": 768,
"transformer_heads": 12,
"transformer_layers": 12,
"vision_layers": 24,
"vision_width": 1024,
"image_resolution": 336,
"vision_patch_size": 14,
},
}
model_map_hf = {
"CLIP_B16": "openai/clip-vit-base-patch32",
"CLIP_B32": "openai/clip-vit-base-patch16",
"CLIP_L14": "openai/clip-vit-large-patch14",
"CLIP_L14_336": "openai/clip-vit-large-patch14-336",
}
config_name = "CLIP_L14_336" # @param ["CLIP_B16", "CLIP_B32", "CLIP_L14", "CLIP_L14_336"]
config_name_hf = model_map_hf[config_name]<jupyter_output><empty_output><jupyter_text>Keras 3 CLIP<jupyter_code>embed_dim = MODEL_CONFIGS[config_name]["embed_dim"]
context_length = MODEL_CONFIGS[config_name]["context_length"]
vocab_size = MODEL_CONFIGS[config_name]["vocab_size"]
transformer_width = MODEL_CONFIGS[config_name]["transformer_width"]
transformer_heads = MODEL_CONFIGS[config_name]["transformer_heads"]
transformer_layers = MODEL_CONFIGS[config_name]["transformer_layers"]
vision_layers = MODEL_CONFIGS[config_name]["vision_layers"]
vision_width = MODEL_CONFIGS[config_name]["vision_width"]
vision_patch_size = MODEL_CONFIGS[config_name]["vision_patch_size"]
image_resolution = MODEL_CONFIGS[config_name]["image_resolution"]
model = CLIP(
embed_dim,
image_resolution,
vision_layers,
vision_width,
vision_patch_size,
context_length,
vocab_size,
transformer_width,
transformer_heads,
transformer_layers,
)
model.summary()
processor = CLIPProcessor(224, "vocab.json", "merges.txt")
image = processor.process_images(["cat.jpg"])
text_input = [
"photo of a cat on a tortoise",
"tortoise on a dog",
"a photo of a tortoise",
]
text = processor.process_texts(text_input)
image_logits, text_logits = model(image, text)
output = keras.layers.Softmax()(image_logits)
print(image_logits)
print(text_input[keras.ops.argmax(output)])
model.summary()<jupyter_output><empty_output><jupyter_text>HF CLIP<jupyter_code>from PIL import Image
import requests
from transformers import CLIPProcessor as CP
from transformers import CLIPModel as CM
model_hf = CM.from_pretrained(config_name_hf)
processor = CP.from_pretrained(config_name_hf)
url = "https://i.imgur.com/8H7XCH0.jpg"
image_hf = Image.open(requests.get(url, stream=True).raw)
text_inputs = [
"photo of a cat on a tortoise",
"tortoise on a dog",
"a photo of a tortoise",
]
inputs = processor(
text=text_inputs, images=image_hf, return_tensors="pt", padding=True
)
outputs = model_hf(**inputs)
logits_per_image = (
outputs.logits_per_image
) # this is the image-text similarity score
probs = logits_per_image.softmax(
    dim=1
)  # we can take the softmax to get the label probabilities
probs
# hugging face weights
hf_wts = model_hf.state_dict()<jupyter_output><empty_output><jupyter_text>Copy weights vision encoder<jupyter_code>model.logit_scale.assign(hf_wts.pop("logit_scale").numpy())
model.get_layer("image_encoder").get_layer(
"clip_patching_and_embedding"
).class_embedding.assign(
hf_wts.pop("vision_model.embeddings.class_embedding").numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_patching_and_embedding"
).positional_embedding.assign(
hf_wts.pop("vision_model.embeddings.position_embedding.weight").numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_patching_and_embedding"
).conv1.weights[0].assign(
hf_wts.pop("vision_model.embeddings.patch_embedding.weight")
.permute(3, 2, 1, 0)
.numpy()
)
model.get_layer("image_encoder").get_layer("ln_1").weights[0].assign(
hf_wts.pop("vision_model.pre_layrnorm.weight").numpy()
)
model.get_layer("image_encoder").get_layer("ln_1").weights[1].assign(
hf_wts.pop("vision_model.pre_layrnorm.bias").numpy()
)
model.get_layer("image_encoder").get_layer("ln_2").weights[0].assign(
hf_wts.pop("vision_model.post_layernorm.weight").numpy()
)
model.get_layer("image_encoder").get_layer("ln_2").weights[1].assign(
hf_wts.pop("vision_model.post_layernorm.bias").numpy()
)
model.get_layer("image_encoder").get_layer("vision_projector").weights[
0
].assign(hf_wts.pop("visual_projection.weight").transpose(1, 0).numpy())
for i in range(0, MODEL_CONFIGS[config_name]["vision_layers"]):
if i == 0:
residual_attention = f"residual_attention"
else:
residual_attention = f"residual_attention_{i}"
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).attn.q_proj.weights[0].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.q_proj.weight")
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).attn.q_proj.weights[1].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.q_proj.bias")
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).attn.k_proj.weights[0].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.k_proj.weight")
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).attn.k_proj.weights[1].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.k_proj.bias")
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).attn.v_proj.weights[0].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.v_proj.weight")
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).attn.v_proj.weights[1].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.self_attn.v_proj.bias")
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).attn.out_proj.weights[1].assign(
hf_wts.pop(
f"vision_model.encoder.layers.{i}.self_attn.out_proj.bias"
).numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).attn.out_proj.weights[0].assign(
hf_wts.pop(
f"vision_model.encoder.layers.{i}.self_attn.out_proj.weight"
).numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).ln_1.weights[0].assign(
hf_wts.pop(
f"vision_model.encoder.layers.{i}.layer_norm1.weight"
).numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).ln_1.weights[1].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.layer_norm1.bias").numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).ln_2.weights[0].assign(
hf_wts.pop(
f"vision_model.encoder.layers.{i}.layer_norm2.weight"
).numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).ln_2.weights[1].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.layer_norm2.bias").numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).mlp.get_layer("c_fc").weights[
0
].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.mlp.fc1.weight")
.transpose(1, 0)
.numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).mlp.get_layer("c_fc").weights[
1
].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.mlp.fc1.bias").numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).mlp.get_layer("c_proj").weights[
0
].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.mlp.fc2.weight")
.transpose(1, 0)
.numpy()
)
model.get_layer("image_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(residual_attention).mlp.get_layer("c_proj").weights[
1
].assign(
hf_wts.pop(f"vision_model.encoder.layers.{i}.mlp.fc2.bias").numpy()
)<jupyter_output><empty_output><jupyter_text>Text encoder<jupyter_code>num_transformer_layers = MODEL_CONFIGS[config_name]["vision_layers"]
model.get_layer("text_encoder").get_layer("text_projector").weights[0].assign(
hf_wts.pop("text_projection.weight").numpy()
)
model.get_layer("text_encoder").get_layer("token_embedding").weights[0].assign(
hf_wts.pop("text_model.embeddings.token_embedding.weight").numpy()
)
model.get_layer("text_encoder").positional_embedding.assign(
hf_wts.pop("text_model.embeddings.position_embedding.weight").numpy()
)
model.get_layer("text_encoder").get_layer("ln_final").weights[0].assign(
hf_wts.pop("text_model.final_layer_norm.weight")
)
model.get_layer("text_encoder").get_layer("ln_final").weights[1].assign(
hf_wts.pop("text_model.final_layer_norm.bias")
)
for i in range(MODEL_CONFIGS[config_name]["transformer_layers"]):
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).attn.k_proj.weights[
0
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.k_proj.weight")
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).attn.k_proj.weights[
1
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.k_proj.bias")
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).attn.q_proj.weights[
0
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.q_proj.weight")
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).attn.q_proj.weights[
1
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.q_proj.bias")
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).attn.v_proj.weights[
0
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.v_proj.weight")
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).attn.v_proj.weights[
1
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.v_proj.bias")
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).attn.out_proj.weights[
0
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.out_proj.weight")
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).attn.out_proj.weights[
1
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.self_attn.out_proj.bias")
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).ln_1.weights[
0
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.layer_norm1.weight").numpy()
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).ln_1.weights[
1
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.layer_norm1.bias").numpy()
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).ln_2.weights[
0
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.layer_norm2.weight").numpy()
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).ln_2.weights[
1
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.layer_norm2.bias").numpy()
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).mlp.get_layer(
"c_fc"
).weights[
0
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.mlp.fc1.weight")
.transpose(1, 0)
.numpy()
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).mlp.get_layer(
"c_fc"
).weights[
1
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.mlp.fc1.bias").numpy()
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).mlp.get_layer(
"c_proj"
).weights[
0
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.mlp.fc2.weight")
.transpose(1, 0)
.numpy()
)
model.get_layer("text_encoder").get_layer(
"clip_encoder"
).resblocks.get_layer(
f"residual_attention_{num_transformer_layers+i}"
).mlp.get_layer(
"c_proj"
).weights[
1
].assign(
hf_wts.pop(f"text_model.encoder.layers.{i}.mlp.fc2.bias").numpy()
)
# verify that we copied all weights
hf_wts.keys()<jupyter_output><empty_output><jupyter_text>save weights<jupyter_code>model.save_weights("clip-vit-base-patch32.weights.h5")<jupyter_output><empty_output> | keras-cv/keras_cv/tools/checkpoint_conversion/clip_weights_conversion.ipynb/0 | {
"file_path": "keras-cv/keras_cv/tools/checkpoint_conversion/clip_weights_conversion.ipynb",
"repo_id": "keras-cv",
"token_count": 8283
} | 82 |