"""JSON utilities for legacy saving formats (h5 and SavedModel)"""
import collections
import enum
import functools
import json
import numpy as np
from keras.legacy.saving import serialization
from keras.saving import serialization_lib
from keras.utils.module_utils import tensorflow as tf
_EXTENSION_TYPE_SPEC = "_EXTENSION_TYPE_SPEC"
class Encoder(json.JSONEncoder):
"""JSON encoder and decoder that handles TensorShapes and tuples."""
def default(self, obj):
"""Encodes objects for types that aren't handled by the default
encoder."""
if tf.available and isinstance(obj, tf.TensorShape):
items = obj.as_list() if obj.rank is not None else None
return {"class_name": "TensorShape", "items": items}
return get_json_type(obj)
def encode(self, obj):
return super().encode(_encode_tuple(obj))
def _encode_tuple(x):
if isinstance(x, tuple):
return {
"class_name": "__tuple__",
"items": tuple(_encode_tuple(i) for i in x),
}
elif isinstance(x, list):
return [_encode_tuple(i) for i in x]
elif isinstance(x, dict):
return {key: _encode_tuple(value) for key, value in x.items()}
else:
return x
def decode(json_string):
return json.loads(json_string, object_hook=_decode_helper)
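# --- Illustrative sketch (not part of the upstream module) -----------------
# Tuples are not a native JSON type, so `Encoder.encode` rewrites them into
# `{"class_name": "__tuple__", ...}` marker dicts that `_decode_helper`
# converts back into tuples. The helper below is hypothetical and exists only
# to demonstrate that round trip; it relies solely on the stdlib `json`
# module imported above.
def _example_tuple_roundtrip():
    config = {"shape": (None, 32), "layers": ["dense", "relu"]}
    json_string = json.dumps(config, cls=Encoder)
    # json_string encodes the tuple as:
    #   {"shape": {"class_name": "__tuple__", "items": [null, 32]}, ...}
    restored = decode(json_string)
    assert restored["shape"] == (None, 32)
    return restored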
def decode_and_deserialize(
json_string, module_objects=None, custom_objects=None
):
"""Decodes the JSON and deserializes any Keras objects found in the dict."""
return json.loads(
json_string,
object_hook=functools.partial(
_decode_helper,
deserialize=True,
module_objects=module_objects,
custom_objects=custom_objects,
),
)
def _decode_helper(
obj, deserialize=False, module_objects=None, custom_objects=None
):
"""A decoding helper that is TF-object aware.
Args:
obj: A decoded dictionary that may represent an object.
deserialize: Boolean. When True, deserializes any Keras
objects found in `obj`. Defaults to `False`.
module_objects: A dictionary of built-in objects to look the name up in.
Generally, `module_objects` is provided by midlevel library
implementers.
custom_objects: A dictionary of custom objects to look the name up in.
Generally, `custom_objects` is provided by the end user.
Returns:
The decoded object.
"""
if isinstance(obj, dict) and "class_name" in obj:
if tf.available:
if obj["class_name"] == "TensorShape":
return tf.TensorShape(obj["items"])
elif obj["class_name"] == "TypeSpec":
from tensorflow.python.framework import type_spec_registry
return type_spec_registry.lookup(obj["type_spec"])._deserialize(
_decode_helper(obj["serialized"])
)
elif obj["class_name"] == "CompositeTensor":
spec = obj["spec"]
tensors = []
for dtype, tensor in obj["tensors"]:
tensors.append(
tf.constant(tensor, dtype=tf.dtypes.as_dtype(dtype))
)
return tf.nest.pack_sequence_as(
_decode_helper(spec), tensors, expand_composites=True
)
if obj["class_name"] == "__tuple__":
return tuple(_decode_helper(i) for i in obj["items"])
elif obj["class_name"] == "__ellipsis__":
return Ellipsis
elif deserialize and "__passive_serialization__" in obj:
# __passive_serialization__ is added by the JSON encoder when
# encoding an object that has a `get_config()` method.
try:
if (
"module" not in obj
): # TODO(nkovela): Add TF SavedModel scope
return serialization.deserialize_keras_object(
obj,
module_objects=module_objects,
custom_objects=custom_objects,
)
else:
return serialization_lib.deserialize_keras_object(
obj,
module_objects=module_objects,
custom_objects=custom_objects,
)
except ValueError:
pass
elif obj["class_name"] == "__bytes__":
return obj["value"].encode("utf-8")
return obj
def get_json_type(obj):
"""Serializes any object to a JSON-serializable structure.
Args:
obj: the object to serialize
Returns:
JSON-serializable structure representing `obj`.
Raises:
TypeError: if `obj` cannot be serialized.
"""
# if obj is a serializable Keras class instance
# e.g. optimizer, layer
if hasattr(obj, "get_config"):
# TODO(nkovela): Replace with legacy serialization
serialized = serialization.serialize_keras_object(obj)
serialized["__passive_serialization__"] = True
return serialized
# if obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
# misc functions (e.g. loss function)
if callable(obj):
return obj.__name__
# if obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
if tf.available and isinstance(obj, tf.compat.v1.Dimension):
return obj.value
if tf.available and isinstance(obj, tf.TensorShape):
return obj.as_list()
if tf.available and isinstance(obj, tf.DType):
return obj.name
if isinstance(obj, collections.abc.Mapping):
return dict(obj)
if obj is Ellipsis:
return {"class_name": "__ellipsis__"}
# if isinstance(obj, wrapt.ObjectProxy):
# return obj.__wrapped__
if tf.available and isinstance(obj, tf.TypeSpec):
from tensorflow.python.framework import type_spec_registry
try:
type_spec_name = type_spec_registry.get_name(type(obj))
return {
"class_name": "TypeSpec",
"type_spec": type_spec_name,
"serialized": obj._serialize(),
}
except ValueError:
raise ValueError(
f"Unable to serialize {obj} to JSON, because the TypeSpec "
f"class {type(obj)} has not been registered."
)
if tf.available and isinstance(obj, tf.__internal__.CompositeTensor):
spec = tf.type_spec_from_value(obj)
tensors = []
for tensor in tf.nest.flatten(obj, expand_composites=True):
tensors.append((tensor.dtype.name, tensor.numpy().tolist()))
return {
"class_name": "CompositeTensor",
"spec": get_json_type(spec),
"tensors": tensors,
}
if isinstance(obj, enum.Enum):
return obj.value
if isinstance(obj, bytes):
return {"class_name": "__bytes__", "value": obj.decode("utf-8")}
raise TypeError(
f"Unable to serialize {obj} to JSON. Unrecognized type {type(obj)}."
)
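# --- Illustrative sketch (not part of the upstream module) -----------------
# A minimal demonstration of what `get_json_type` returns for a few common
# non-JSON-native values; only numpy and the standard library are exercised.
# The function name is hypothetical and exists purely for illustration.
def _example_get_json_type():
    assert get_json_type(np.float32(0.5)) == 0.5  # numpy scalar -> Python scalar
    assert get_json_type(np.array([1, 2])) == [1, 2]  # ndarray -> nested list
    assert get_json_type(b"abc") == {"class_name": "__bytes__", "value": "abc"}
    assert get_json_type(Ellipsis) == {"class_name": "__ellipsis__"}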
| keras/keras/legacy/saving/json_utils.py/0 | {
"file_path": "keras/keras/legacy/saving/json_utils.py",
"repo_id": "keras",
"token_count": 3352
} | 182 |
import json
import numpy as np
import pytest
from absl import logging
from absl.testing import parameterized
from keras import layers
from keras import metrics
from keras import models
from keras import ops
from keras import testing
from keras.metrics import metrics_utils
class FalsePositivesTest(testing.TestCase):
def test_config(self):
fp_obj = metrics.FalsePositives(name="my_fp", thresholds=[0.4, 0.9])
self.assertEqual(fp_obj.name, "my_fp")
self.assertLen(fp_obj.variables, 1)
self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
self.assertEqual(fp_obj2.name, "my_fp")
self.assertLen(fp_obj2.variables, 1)
self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fp_obj = metrics.FalsePositives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
fp_obj.update_state(y_true, y_pred)
self.assertAllClose(7.0, fp_obj.result())
def test_weighted(self):
fp_obj = metrics.FalsePositives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = np.array((1.0, 1.5, 2.0, 2.5))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
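# Per-row false positive counts are [1, 2, 0, 4]; weighting by
# [1.0, 1.5, 2.0, 2.5] gives 1*1.0 + 2*1.5 + 0*2.0 + 4*2.5 = 14.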
self.assertAllClose(14.0, result)
def test_unweighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
fp_obj.update_state(y_true, y_pred)
self.assertAllClose([7.0, 4.0, 2.0], fp_obj.result())
def test_weighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
sample_weight = (
(1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0),
(5.0, 15.0, 10.0, 0),
)
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([125.0, 42.0, 12.0], result)
def test_threshold_limit(self):
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
):
metrics.FalsePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[None\]",
):
metrics.FalsePositives(thresholds=[None])
class FalseNegativesTest(testing.TestCase):
def test_config(self):
fn_obj = metrics.FalseNegatives(name="my_fn", thresholds=[0.4, 0.9])
self.assertEqual(fn_obj.name, "my_fn")
self.assertLen(fn_obj.variables, 1)
self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
self.assertEqual(fn_obj2.name, "my_fn")
self.assertLen(fn_obj2.variables, 1)
self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fn_obj = metrics.FalseNegatives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
fn_obj.update_state(y_true, y_pred)
self.assertAllClose(3.0, fn_obj.result())
def test_weighted(self):
fn_obj = metrics.FalseNegatives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = np.array((1.0, 1.5, 2.0, 2.5))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(5.0, result)
def test_unweighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
fn_obj.update_state(y_true, y_pred)
self.assertAllClose([1.0, 4.0, 6.0], fn_obj.result())
def test_weighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([4.0, 16.0, 23.0], result)
def test_threshold_limit(self):
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
):
metrics.FalseNegatives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[None\]",
):
metrics.FalseNegatives(thresholds=[None])
class TrueNegativesTest(testing.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name="my_tn", thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, "my_tn")
self.assertLen(tn_obj.variables, 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, "my_tn")
self.assertLen(tn_obj2.variables, 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
tn_obj.update_state(y_true, y_pred)
self.assertAllClose(3.0, tn_obj.result())
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = np.array((1.0, 1.5, 2.0, 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4.0, result)
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
tn_obj.update_state(y_true, y_pred)
self.assertAllClose([2.0, 5.0, 7.0], tn_obj.result())
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5.0, 15.0, 23.0], result)
def test_threshold_limit(self):
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
):
metrics.TrueNegatives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[None\]",
):
metrics.TrueNegatives(thresholds=[None])
class TruePositiveTest(testing.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name="my_tp", thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, "my_tp")
self.assertLen(tp_obj.variables, 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, "my_tp")
self.assertLen(tp_obj2.variables, 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
tp_obj.update_state(y_true, y_pred)
self.assertAllClose(7.0, tp_obj.result())
def test_weighted(self):
tp_obj = metrics.TruePositives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = np.array((1.0, 1.5, 2.0, 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
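# Per-row true positive counts are [1, 3, 2, 1]; weighting by
# [1.0, 1.5, 2.0, 2.5] gives 1*1.0 + 3*1.5 + 2*2.0 + 1*2.5 = 12.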
self.assertAllClose(12.0, result)
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
tp_obj.update_state(y_true, y_pred)
self.assertAllClose([6.0, 3.0, 1.0], tp_obj.result())
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
sample_weight = 37.0
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([222.0, 111.0, 37.0], result)
def test_threshold_limit(self):
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
):
metrics.TruePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[None\]",
):
metrics.TruePositives(thresholds=[None])
class PrecisionTest(testing.TestCase):
def test_config(self):
p_obj = metrics.Precision(
name="my_precision", thresholds=[0.4, 0.9], top_k=15, class_id=12
)
self.assertEqual(p_obj.name, "my_precision")
self.assertLen(p_obj.variables, 2)
self.assertEqual(
[v.name for v in p_obj.variables],
["true_positives", "false_positives"],
)
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
self.assertEqual(p_obj.top_k, 15)
self.assertEqual(p_obj.class_id, 12)
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, "my_precision")
self.assertLen(p_obj2.variables, 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
self.assertEqual(p_obj2.top_k, 15)
self.assertEqual(p_obj2.class_id, 12)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = np.array([1, 0, 1, 0])
y_true = np.array([0, 1, 1, 0])
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, result)
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs)
y_true = np.array(1 - inputs)
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, result)
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = np.array([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = np.array([[0, 1, 1, 0], [1, 0, 0, 1]])
result = p_obj(
y_true,
y_pred,
sample_weight=np.array([[1, 2, 3, 4], [4, 3, 2, 1]]),
)
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, result)
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = np.array([0, 0, 0, 0])
y_true = np.array([0, 0, 0, 0])
result = p_obj(y_true, y_pred)
self.assertEqual(0, result)
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = np.array([1, 0, 0.6, 0])
y_true = np.array([0, 1, 1, 0])
result = p_obj(y_true, y_pred)
self.assertAlmostEqual([0.5, 0.0], result, 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.0])
y_true = np.array([[0, 1], [1, 0]])
y_pred = np.array([[1, 0], [0.6, 0]], dtype="float32")
weights = np.array([[4, 0], [3, 1]], dtype="float32")
result = p_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.0
weighted_positives = (0 + 3.0) + (4.0 + 0.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual([expected_precision, 0], result, 1e-3)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.0])
y_true = np.array([[0, 1], [1, 0]])
y_pred = np.array([[1, 0], [0.6, 0]], dtype="float32")
weights = np.array([[4, 0], [3, 1]], dtype="float32")
for _ in range(2):
p_obj.update_state(y_true, y_pred, sample_weight=weights)
weighted_tp = (0 + 3.0) + (0 + 3.0)
weighted_positives = ((0 + 3.0) + (4.0 + 0.0)) + (
(0 + 3.0) + (4.0 + 0.0)
)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual([expected_precision, 0], p_obj.result(), 1e-3)
def test_unweighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred = np.array([0.2, 0.1, 0.5, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 0])
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1.0 / 3, result)
def test_weighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred1 = np.array([[0.2, 0.1, 0.4, 0, 0.2]])
y_true1 = np.array([[0, 1, 1, 0, 1]])
p_obj(y_true1, y_pred1, sample_weight=np.array([[1, 4, 2, 3, 5]]))
y_pred2 = np.array([0.2, 0.6, 0.4, 0.2, 0.2])
y_true2 = np.array([1, 0, 1, 1, 1])
result = p_obj(y_true2, y_pred2, sample_weight=np.array(3))
tp = (2 + 5) + (3 + 3)
predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
expected_precision = tp / predicted_positives
self.assertAlmostEqual(expected_precision, result)
def test_unweighted_class_id_should_throw_error_1d(self):
p_obj = metrics.Precision(class_id=2)
y_pred = np.array([0.2, 0.1, 0.6, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 0])
with self.assertRaisesRegex(
ValueError,
r"When class_id is provided, y_pred must be a 2D array "
r"with shape \(num_samples, num_classes\), found shape:.*",
):
p_obj(y_true, y_pred)
def test_unweighted_class_id_multiclass(self):
p_obj = metrics.Precision(class_id=1)
y_pred = np.array(
[
[0.1, 0.2, 0.7],
[0.5, 0.3, 0.2],
[0.2, 0.6, 0.2],
[0.7, 0.2, 0.1],
[0.1, 0.1, 0.8],
]
)
y_true = np.array(
[
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
)
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1.0, result)
self.assertAlmostEqual(1.0, p_obj.true_positives)
self.assertAlmostEqual(0.0, p_obj.false_positives)
def test_unweighted_top_k_and_threshold(self):
p_obj = metrics.Precision(thresholds=0.7, top_k=2)
y_pred = np.array([0.2, 0.8, 0.6, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 1])
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, result)
self.assertAlmostEqual(1, p_obj.true_positives)
self.assertAlmostEqual(0, p_obj.false_positives)
class RecallTest(testing.TestCase):
def test_config(self):
r_obj = metrics.Recall(
name="my_recall", thresholds=[0.4, 0.9], top_k=15, class_id=12
)
self.assertEqual(r_obj.name, "my_recall")
self.assertLen(r_obj.variables, 2)
self.assertEqual(
[v.name for v in r_obj.variables],
["true_positives", "false_negatives"],
)
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
self.assertEqual(r_obj.top_k, 15)
self.assertEqual(r_obj.class_id, 12)
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, "my_recall")
self.assertLen(r_obj2.variables, 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
self.assertEqual(r_obj2.top_k, 15)
self.assertEqual(r_obj2.class_id, 12)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = np.array([1, 0, 1, 0])
y_true = np.array([0, 1, 1, 0])
self.assertAlmostEqual(0.5, r_obj(y_true, y_pred))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs)
y_true = np.array(1 - inputs)
self.assertAlmostEqual(0, r_obj(y_true, y_pred))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = np.array([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = np.array([[0, 1, 1, 0], [1, 0, 0, 1]])
result = r_obj(
y_true,
y_pred,
sample_weight=np.array([[1, 2, 3, 4], [4, 3, 2, 1]]),
)
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, result)
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = np.array([0, 0, 0, 0])
y_true = np.array([0, 0, 0, 0])
self.assertEqual(0, r_obj(y_true, y_pred))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = np.array([1, 0, 0.6, 0])
y_true = np.array([0, 1, 1, 0])
self.assertAllClose([0.5, 0.0], r_obj(y_true, y_pred), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.0])
y_true = np.array([[0, 1], [1, 0]])
y_pred = np.array([[1, 0], [0.6, 0]], dtype="float32")
weights = np.array([[1, 4], [3, 2]], dtype="float32")
result = r_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.0
weighted_positives = (0 + 3.0) + (4.0 + 0.0)
expected_recall = weighted_tp / weighted_positives
self.assertAllClose([expected_recall, 0], result, 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.0])
y_true = np.array([[0, 1], [1, 0]])
y_pred = np.array([[1, 0], [0.6, 0]], dtype="float32")
weights = np.array([[1, 4], [3, 2]], dtype="float32")
for _ in range(2):
r_obj.update_state(y_true, y_pred, sample_weight=weights)
weighted_tp = (0 + 3.0) + (0 + 3.0)
weighted_positives = ((0 + 3.0) + (4.0 + 0.0)) + (
(0 + 3.0) + (4.0 + 0.0)
)
expected_recall = weighted_tp / weighted_positives
self.assertAllClose([expected_recall, 0], r_obj.result(), 1e-3)
def test_unweighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred = np.array([0.2, 0.1, 0.5, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 0])
self.assertAlmostEqual(0.5, r_obj(y_true, y_pred))
def test_weighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred1 = np.array([[0.2, 0.1, 0.4, 0, 0.2]])
y_true1 = np.array([[0, 1, 1, 0, 1]])
r_obj(y_true1, y_pred1, sample_weight=np.array([[1, 4, 2, 3, 5]]))
y_pred2 = np.array([0.2, 0.6, 0.4, 0.2, 0.2])
y_true2 = np.array([1, 0, 1, 1, 1])
result = r_obj(y_true2, y_pred2, sample_weight=np.array(3))
tp = (2 + 5) + (3 + 3)
positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
expected_recall = tp / positives
self.assertAlmostEqual(expected_recall, result)
def test_unweighted_class_id_should_throw_error_1d(self):
r_obj = metrics.Recall(class_id=2)
y_pred = np.array([0.2, 0.1, 0.6, 0, 0.2])
y_true = np.array([0, 1, 1, 0, 0])
with self.assertRaisesRegex(
ValueError,
r"When class_id is provided, y_pred must be a 2D array "
r"with shape \(num_samples, num_classes\), found shape:.*",
):
r_obj(y_true, y_pred)
def test_unweighted_class_id_multiclass(self):
r_obj = metrics.Recall(class_id=1)
y_pred = np.array(
[
[0.1, 0.2, 0.7],
[0.5, 0.3, 0.2],
[0.2, 0.6, 0.2],
[0.7, 0.2, 0.1],
[0.1, 0.1, 0.8],
]
)
y_true = np.array(
[
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
)
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1.0, result)
self.assertAlmostEqual(1.0, r_obj.true_positives)
self.assertAlmostEqual(0.0, r_obj.false_negatives)
def test_unweighted_top_k_and_threshold(self):
r_obj = metrics.Recall(thresholds=0.7, top_k=2)
y_pred = np.array([0.2, 0.8, 0.6, 0, 0.2])
y_true = np.array([1, 1, 1, 0, 1])
self.assertAlmostEqual(0.25, r_obj(y_true, y_pred))
self.assertAlmostEqual(1, r_obj.true_positives)
self.assertAlmostEqual(3, r_obj.false_negatives)
class SensitivityAtSpecificityTest(testing.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4,
num_thresholds=100,
class_id=12,
name="sensitivity_at_specificity_1",
)
self.assertEqual(s_obj.name, "sensitivity_at_specificity_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(
s_obj.get_config()
)
self.assertEqual(s_obj2.name, "sensitivity_at_specificity_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
self.assertEqual(s_obj2.class_id, 12)
def test_unweighted_all_correct(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs, dtype="float32")
y_true = np.array(inputs)
self.assertAlmostEqual(1, s_obj(y_true, y_pred))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
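# Specificity >= 0.8 allows at most one of the five negatives above the
# threshold; any threshold in (0.4, 0.45) rejects all negatives while
# keeping 4 of the 5 positives, so the best sensitivity is 0.8.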
self.assertAlmostEqual(0.8, s_obj(y_true, y_pred))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
self.assertAlmostEqual(0.6, s_obj(y_true, y_pred))
def test_unweighted_class_id(self):
s_obj = metrics.SensitivityAtSpecificity(0.4, class_id=2)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
y_pred = ops.transpose(np.array([pred_values] * 3))
y_true = ops.one_hot(np.array(label_values), num_classes=3)
self.assertAlmostEqual(0.6, s_obj(y_true, y_pred))
@parameterized.parameters(["bool", "int32", "float32"])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = np.array(pred_values, dtype="float32")
y_true = ops.cast(label_values, dtype=label_dtype)
weights = np.array(weight_values)
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, result)
def test_invalid_specificity(self):
with self.assertRaisesRegex(
ValueError, r"`specificity` must be in the range \[0, 1\]."
):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
class SpecificityAtSensitivityTest(testing.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4,
num_thresholds=100,
class_id=12,
name="specificity_at_sensitivity_1",
)
self.assertEqual(s_obj.name, "specificity_at_sensitivity_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(
s_obj.get_config()
)
self.assertEqual(s_obj2.name, "specificity_at_sensitivity_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
self.assertEqual(s_obj2.class_id, 12)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs, dtype="float32")
y_true = np.array(inputs)
self.assertAlmostEqual(1, s_obj(y_true, y_pred))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(1.0)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
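# Sensitivity 1.0 forces the threshold below the smallest positive
# prediction (0.01); there only the 0.0 negative is rejected, so
# specificity is 1/5 = 0.2.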
self.assertAlmostEqual(0.2, s_obj(y_true, y_pred))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
self.assertAlmostEqual(0.6, s_obj(y_true, y_pred))
def test_unweighted_class_id(self):
s_obj = metrics.SpecificityAtSensitivity(0.4, class_id=2)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
y_pred = ops.transpose(np.array([pred_values] * 3))
y_true = ops.one_hot(np.array(label_values), num_classes=3)
self.assertAlmostEqual(0.6, s_obj(y_true, y_pred))
@parameterized.parameters(["bool", "int32", "float32"])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = np.array(pred_values, dtype="float32")
y_true = ops.cast(label_values, dtype=label_dtype)
weights = np.array(weight_values)
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, result)
def test_invalid_sensitivity(self):
with self.assertRaisesRegex(
ValueError, r"`sensitivity` must be in the range \[0, 1\]."
):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
class PrecisionAtRecallTest(testing.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.PrecisionAtRecall(
0.4, num_thresholds=100, class_id=12, name="precision_at_recall_1"
)
self.assertEqual(s_obj.name, "precision_at_recall_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.recall, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.PrecisionAtRecall.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, "precision_at_recall_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.recall, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
self.assertEqual(s_obj2.class_id, 12)
def test_unweighted_all_correct(self):
s_obj = metrics.PrecisionAtRecall(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs, dtype="float32")
y_true = np.array(inputs)
self.assertAlmostEqual(1, s_obj(y_true, y_pred))
def test_unweighted_high_recall(self):
s_obj = metrics.PrecisionAtRecall(0.8)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
# For 0.2 < decision threshold < 0.5.
self.assertAlmostEqual(2.0 / 3, s_obj(y_true, y_pred))
def test_unweighted_low_recall(self):
s_obj = metrics.PrecisionAtRecall(0.6)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
# For 0.5 < decision threshold < 0.6.
self.assertAlmostEqual(0.75, s_obj(y_true, y_pred))
def test_unweighted_class_id(self):
s_obj = metrics.PrecisionAtRecall(0.6, class_id=2)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
y_pred = ops.transpose(np.array([pred_values] * 3))
y_true = ops.one_hot(np.array(label_values), num_classes=3)
# For 0.5 < decision threshold < 0.6.
self.assertAlmostEqual(0.75, s_obj(y_true, y_pred))
@parameterized.parameters(["bool", "int32", "float32"])
def test_weighted(self, label_dtype):
s_obj = metrics.PrecisionAtRecall(7.0 / 8)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [2, 1, 2, 1, 2, 1, 2, 2, 1, 2]
y_pred = np.array(pred_values, dtype="float32")
y_true = ops.cast(label_values, dtype=label_dtype)
weights = np.array(weight_values)
result = s_obj(y_true, y_pred, sample_weight=weights)
# For 0.2 < decision threshold < 0.5.
self.assertAlmostEqual(0.7, result)
def test_invalid_sensitivity(self):
with self.assertRaisesRegex(
ValueError, r"`recall` must be in the range \[0, 1\]."
):
metrics.PrecisionAtRecall(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.PrecisionAtRecall(0.4, num_thresholds=-1)
class RecallAtPrecisionTest(testing.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.RecallAtPrecision(
0.4, num_thresholds=100, class_id=12, name="recall_at_precision_1"
)
self.assertEqual(s_obj.name, "recall_at_precision_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.precision, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.RecallAtPrecision.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, "recall_at_precision_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.precision, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
self.assertEqual(s_obj2.class_id, 12)
def test_unweighted_all_correct(self):
s_obj = metrics.RecallAtPrecision(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs, dtype="float32")
y_true = np.array(inputs)
self.assertAlmostEqual(1, s_obj(y_true, y_pred))
def test_unweighted_high_precision(self):
s_obj = metrics.RecallAtPrecision(0.75)
pred_values = [
0.05,
0.1,
0.2,
0.3,
0.3,
0.35,
0.4,
0.45,
0.5,
0.6,
0.9,
0.95,
]
label_values = [0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1]
# precisions: [1/2, 6/11, 1/2, 5/9, 5/8, 5/7, 2/3, 3/5, 3/5, 2/3, 1/2,
# 1].
# recalls: [1, 1, 5/6, 5/6, 5/6, 5/6, 2/3, 1/2, 1/2, 1/3, 1/6,
# 1/6].
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
# The precision 0.75 can be reached at thresholds 0.45<=t<0.5.
self.assertAlmostEqual(0.5, s_obj(y_true, y_pred))
def test_unweighted_low_precision(self):
s_obj = metrics.RecallAtPrecision(2.0 / 3)
pred_values = [
0.05,
0.1,
0.2,
0.3,
0.3,
0.35,
0.4,
0.45,
0.5,
0.6,
0.9,
0.95,
]
label_values = [0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1]
# precisions: [1/2, 6/11, 1/2, 5/9, 5/8, 5/7, 2/3, 3/5, 3/5, 2/3, 1/2,
# 1].
# recalls: [1, 1, 5/6, 5/6, 5/6, 5/6, 2/3, 1/2, 1/2, 1/3, 1/6,
# 1/6].
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
# The precision 5/7 can be reached at thresholds 0.3<=t<0.35.
self.assertAlmostEqual(5.0 / 6, s_obj(y_true, y_pred))
def test_unweighted_class_id(self):
s_obj = metrics.RecallAtPrecision(2.0 / 3, class_id=2)
pred_values = [
0.05,
0.1,
0.2,
0.3,
0.3,
0.35,
0.4,
0.45,
0.5,
0.6,
0.9,
0.95,
]
label_values = [0, 2, 0, 0, 0, 2, 2, 0, 2, 2, 0, 2]
# precisions: [1/2, 6/11, 1/2, 5/9, 5/8, 5/7, 2/3, 3/5, 3/5, 2/3, 1/2,
# 1].
# recalls: [1, 1, 5/6, 5/6, 5/6, 5/6, 2/3, 1/2, 1/2, 1/3, 1/6,
# 1/6].
y_pred = ops.transpose(np.array([pred_values] * 3))
y_true = ops.one_hot(np.array(label_values), num_classes=3)
# The precision 5/7 can be reached at thresholds 0.3<=t<0.35.
self.assertAlmostEqual(5.0 / 6, s_obj(y_true, y_pred))
@parameterized.parameters(["bool", "int32", "float32"])
def test_weighted(self, label_dtype):
s_obj = metrics.RecallAtPrecision(0.75)
pred_values = [0.1, 0.2, 0.3, 0.5, 0.6, 0.9, 0.9]
label_values = [0, 1, 0, 0, 0, 1, 1]
weight_values = [1, 2, 1, 2, 1, 2, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = ops.cast(label_values, dtype=label_dtype)
weights = np.array(weight_values)
result = s_obj(y_true, y_pred, sample_weight=weights)
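# Weighted positives total 2 + 2 + 1 = 5. A threshold between 0.6 and 0.9
# keeps only the two 0.9 predictions (weight 3), giving precision 1.0 and
# recall 3/5 = 0.6; capturing the remaining positive at 0.2 would also
# admit enough negatives to push precision below 0.75.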
self.assertAlmostEqual(0.6, result)
def test_unachievable_precision(self):
s_obj = metrics.RecallAtPrecision(2.0 / 3)
pred_values = [0.1, 0.2, 0.3, 0.9]
label_values = [1, 1, 0, 0]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
# The highest possible precision is 1/2 which is below the required
# value, expect 0 recall.
self.assertAlmostEqual(0, s_obj(y_true, y_pred))
def test_invalid_sensitivity(self):
with self.assertRaisesRegex(
ValueError, r"`precision` must be in the range \[0, 1\]."
):
metrics.RecallAtPrecision(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.RecallAtPrecision(0.4, num_thresholds=-1)
@pytest.mark.requires_trainable_backend
def test_end_to_end(self):
# Test for https://github.com/keras-team/keras/issues/718
model = models.Sequential(
[
layers.Input((1,)),
layers.Dense(1),
]
)
model.compile(
optimizer="rmsprop", loss="mse", metrics=[metrics.Precision()]
)
model.fit(np.ones((5, 1)), np.ones((5, 1)))
class AUCTest(testing.TestCase):
def setUp(self):
self.num_thresholds = 3
self.y_pred = np.array([0, 0.5, 0.3, 0.9], dtype="float32")
self.y_pred_multi_label = np.array(
[[0.0, 0.4], [0.5, 0.7], [0.3, 0.2], [0.9, 0.3]], dtype="float32"
)
epsilon = 1e-12
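# Inverse of the sigmoid (logit transform), so that
# sigmoid(y_pred_logits) recovers y_pred up to epsilon.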
self.y_pred_logits = -ops.log(1.0 / (self.y_pred + epsilon) - 1.0)
self.y_true = np.array([0, 0, 1, 1])
self.y_true_multi_label = np.array([[0, 0], [1, 1], [1, 1], [1, 0]])
self.sample_weight = [1, 2, 3, 4]
# threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
# y_pred when threshold = 0 - 1e-7 : [1, 1, 1, 1]
# y_pred when threshold = 0.5 : [0, 0, 0, 1]
# y_pred when threshold = 1 + 1e-7 : [0, 0, 0, 0]
# without sample_weight:
# tp = np.sum([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]], axis=1)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# with sample_weight:
# tp = np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 3, 0], [0, 0, 3, 4]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 2, 0, 0], [1, 2, 0, 0]], axis=1)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
def test_config(self):
auc_obj = metrics.AUC(
num_thresholds=100,
curve="PR",
summation_method="majoring",
name="auc_1",
dtype="float64",
multi_label=True,
num_labels=2,
from_logits=True,
)
auc_obj.update_state(self.y_true_multi_label, self.y_pred_multi_label)
self.assertEqual(auc_obj.name, "auc_1")
self.assertEqual(auc_obj._dtype, "float64")
self.assertLen(auc_obj.variables, 4)
self.assertEqual(auc_obj.num_thresholds, 100)
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(
auc_obj.summation_method, metrics_utils.AUCSummationMethod.MAJORING
)
self.assertTrue(auc_obj.multi_label)
self.assertEqual(auc_obj.num_labels, 2)
self.assertTrue(auc_obj._from_logits)
old_config = auc_obj.get_config()
self.assertNotIn("thresholds", old_config)
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
auc_obj2.update_state(self.y_true_multi_label, self.y_pred_multi_label)
self.assertEqual(auc_obj2.name, "auc_1")
self.assertLen(auc_obj2.variables, 4)
self.assertEqual(auc_obj2.num_thresholds, 100)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(
auc_obj2.summation_method, metrics_utils.AUCSummationMethod.MAJORING
)
self.assertTrue(auc_obj2.multi_label)
self.assertEqual(auc_obj2.num_labels, 2)
self.assertTrue(auc_obj2._from_logits)
new_config = auc_obj2.get_config()
self.assertNotIn("thresholds", new_config)
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_config_manual_thresholds(self):
auc_obj = metrics.AUC(
num_thresholds=None,
curve="PR",
summation_method="majoring",
name="auc_1",
thresholds=[0.3, 0.5],
)
auc_obj.update_state(self.y_true, self.y_pred)
self.assertEqual(auc_obj.name, "auc_1")
self.assertLen(auc_obj.variables, 4)
self.assertEqual(auc_obj.num_thresholds, 4)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.3, 0.5, 1.0])
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(
auc_obj.summation_method, metrics_utils.AUCSummationMethod.MAJORING
)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
auc_obj2.update_state(self.y_true, self.y_pred)
self.assertEqual(auc_obj2.name, "auc_1")
self.assertLen(auc_obj2.variables, 4)
self.assertEqual(auc_obj2.num_thresholds, 4)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(
auc_obj2.summation_method, metrics_utils.AUCSummationMethod.MAJORING
)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_unweighted_all_correct(self):
auc_obj = metrics.AUC()
self.assertEqual(auc_obj(self.y_true, self.y_true), 1)
def test_unweighted(self):
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.75 * 1 + 0.25 * 0
self.assertAllClose(result, expected_result, 1e-3)
def test_unweighted_from_logits(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, from_logits=True
)
result = auc_obj(self.y_true, self.y_pred_logits)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.75 * 1 + 0.25 * 0
self.assertAllClose(result, expected_result, 1e-3)
def test_manual_thresholds(self):
# Verify that when specified, thresholds are used instead of
# num_thresholds.
auc_obj = metrics.AUC(num_thresholds=2, thresholds=[0.5])
self.assertEqual(auc_obj.num_thresholds, 3)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.75 * 1 + 0.25 * 0
self.assertAllClose(result, expected_result, 1e-3)
def test_weighted_roc_interpolation(self):
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.571)/2, (0.571 + 0)/2] = [0.7855, 0.2855]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.7855 * 1 + 0.2855 * 0
self.assertAllClose(result, expected_result, 1e-3)
def test_weighted_roc_majoring(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method="majoring"
)
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [max(1, 0.571), max(0.571, 0)] = [1, 0.571]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 1 * 1 + 0.571 * 0
self.assertAllClose(result, expected_result, 1e-3)
def test_weighted_roc_minoring(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method="minoring"
)
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [min(1, 0.571), min(0.571, 0)] = [0.571, 0]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = 0.571 * 1 + 0 * 0
self.assertAllClose(result, expected_result, 1e-3)
def test_weighted_pr_majoring(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve="PR",
summation_method="majoring",
)
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [max(0.7, 1), max(1, 0)] = [1, 1]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = 1 * 0.429 + 1 * 0.571
self.assertAllClose(result, expected_result, 1e-3)
def test_weighted_pr_minoring(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve="PR",
summation_method="minoring",
)
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [min(0.7, 1), min(1, 0)] = [0.7, 0]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = 0.7 * 0.429 + 0 * 0.571
self.assertAllClose(result, expected_result, 1e-3)
def test_weighted_pr_interpolation(self):
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve="PR")
result = auc_obj(
self.y_true, self.y_pred, sample_weight=self.sample_weight
)
# auc = (slope / Total Pos) * [dTP + intercept * log(Pb/Pa)]
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# P = tp + fp = [10, 4, 0]
# dTP = [7-4, 4-0] = [3, 4]
# dP = [10-4, 4-0] = [6, 4]
# slope = dTP/dP = [0.5, 1]
# intercept = TPa - slope * Pa = [(4 - 0.5*4), (0 - 1*0)] = [2, 0]
# (Pb/Pa) = (Pb/Pa) if Pb > 0 AND Pa > 0 else 1 = [10/4, 4/0] = [2.5, 1]
# auc * TotalPos = [(0.5 * (3 + 2 * log(2.5))), (1 * (4 + 0))]
# = [2.416, 4]
# auc = [2.416, 4]/(tp[1:]+fn[1:])
expected_result = 2.416 / 7 + 4 / 7
self.assertAllClose(result, expected_result, 1e-3)
def test_weighted_pr_interpolation_negative_weights(self):
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve="PR")
sample_weight = [-1, -2, -3, -4]
result = auc_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
# Divisor in auc formula is max(tp[1:]+fn[1:], 0), which is all zeros
# because the all values in tp and fn are negative, divide_no_nan will
# produce all zeros.
self.assertAllClose(result, 0.0, 1e-3)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 1"
):
metrics.AUC(num_thresholds=-1)
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 1."
):
metrics.AUC(num_thresholds=1)
def test_invalid_curve(self):
with self.assertRaisesRegex(
ValueError, 'Invalid AUC curve value: "Invalid".'
):
metrics.AUC(curve="Invalid")
def test_invalid_summation_method(self):
with self.assertRaisesRegex(
ValueError, 'Invalid AUC summation method value: "Invalid".'
):
metrics.AUC(summation_method="Invalid")
def test_extra_dims(self):
try:
from scipy import special
logits = special.expit(
-np.array(
[
[[-10.0, 10.0, -10.0], [10.0, -10.0, 10.0]],
[[-12.0, 12.0, -12.0], [12.0, -12.0, 12.0]],
],
dtype=np.float32,
)
)
labels = np.array(
[[[1, 0, 0], [1, 0, 0]], [[0, 1, 1], [0, 1, 1]]], dtype=np.int64
)
auc_obj = metrics.AUC()
result = auc_obj(labels, logits)
self.assertEqual(result, 0.5)
except ImportError as e:
logging.warning(f"Cannot test special functions: {str(e)}")
class MultiAUCTest(testing.TestCase):
def setUp(self):
self.num_thresholds = 5
self.y_pred = np.array(
[[0, 0.5, 0.3, 0.9], [0.1, 0.2, 0.3, 0.4]], dtype="float32"
).T
epsilon = 1e-12
self.y_pred_logits = -ops.log(1.0 / (self.y_pred + epsilon) - 1.0)
self.y_true_good = np.array([[0, 0, 1, 1], [0, 0, 1, 1]]).T
self.y_true_bad = np.array([[0, 0, 1, 1], [1, 1, 0, 0]]).T
self.sample_weight = [1, 2, 3, 4]
# threshold values are [0 - 1e-7, 0.25, 0.5, 0.75, 1 + 1e-7]
# y_pred when threshold = 0 - 1e-7 : [[1, 1, 1, 1], [1, 1, 1, 1]]
# y_pred when threshold = 0.25 : [[0, 1, 1, 1], [0, 0, 1, 1]]
# y_pred when threshold = 0.5 : [[0, 0, 0, 1], [0, 0, 0, 0]]
# y_pred when threshold = 0.75 : [[0, 0, 0, 1], [0, 0, 0, 0]]
# y_pred when threshold = 1 + 1e-7 : [[0, 0, 0, 0], [0, 0, 0, 0]]
# for y_true_good, over thresholds:
# tp = [[2, 2, 1, 1, 0], [2, 2, 0, 0, 0]]
# fp = [[2, 1, 0, 0, 0], [2, 0, 0, 0, 0]]
# fn = [[0, 0, 1, 1, 2], [0, 0, 2, 2, 2]]
# tn = [[0, 1, 2, 2, 2], [0, 2, 2, 2, 2]]
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
# for y_true_bad:
# tp = [[2, 2, 1, 1, 0], [2, 0, 0, 0, 0]]
# fp = [[2, 1, 0, 0, 0], [2, 2, 0, 0, 0]]
# fn = [[0, 0, 1, 1, 2], [0, 2, 2, 2, 2]]
# tn = [[0, 1, 2, 2, 2], [0, 0, 2, 2, 2]]
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 0, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 1, 0, 0, 0]]
# for y_true_good with sample_weights:
# tp = [[7, 7, 4, 4, 0], [7, 7, 0, 0, 0]]
# fp = [[3, 2, 0, 0, 0], [3, 0, 0, 0, 0]]
# fn = [[0, 0, 3, 3, 7], [0, 0, 7, 7, 7]]
# tn = [[0, 1, 3, 3, 3], [0, 3, 3, 3, 3]]
# tpr = [[1, 1, 0.57, 0.57, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.67, 0, 0, 0], [1, 0, 0, 0, 0]]
def test_unweighted_all_correct(self):
auc_obj = metrics.AUC(multi_label=True)
result = auc_obj(self.y_true_good, self.y_true_good)
self.assertEqual(result, 1)
def test_unweighted_all_correct_flat(self):
auc_obj = metrics.AUC(multi_label=False)
result = auc_obj(self.y_true_good, self.y_true_good)
self.assertEqual(result, 1)
def test_unweighted(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=True
)
result = auc_obj(self.y_true_good, self.y_pred)
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
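# Trapezoidal ROC area per label: label 0 gives 0.5 * 1 + 0.5 * 0.75 = 0.875,
# label 1 gives 1.0; the multi-label AUC averages them.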
expected_result = (0.875 + 1.0) / 2.0
self.assertAllClose(result, expected_result, 1e-3)
def test_unweighted_from_logits(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
multi_label=True,
from_logits=True,
)
result = auc_obj(self.y_true_good, self.y_pred_logits)
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
expected_result = (0.875 + 1.0) / 2.0
self.assertAllClose(result, expected_result, 1e-3)
def test_sample_weight_flat(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=False
)
result = auc_obj(
self.y_true_good, self.y_pred, sample_weight=[1, 2, 3, 4]
)
# tpr = [1, 1, 0.2857, 0.2857, 0]
# fpr = [1, 0.3333, 0, 0, 0]
expected_result = 1.0 - (0.3333 * (1.0 - 0.2857) / 2.0)
self.assertAllClose(result, expected_result, 1e-3)
def test_full_sample_weight_flat(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=False
)
sw = np.arange(4 * 2)
sw = sw.reshape(4, 2)
result = auc_obj(self.y_true_good, self.y_pred, sample_weight=sw)
# tpr = [1, 1, 0.2727, 0.2727, 0]
# fpr = [1, 0.3333, 0, 0, 0]
expected_result = 1.0 - (0.3333 * (1.0 - 0.2727) / 2.0)
self.assertAllClose(result, expected_result, 1e-3)
def test_label_weights(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
multi_label=True,
label_weights=[0.75, 0.25],
)
result = auc_obj(self.y_true_good, self.y_pred)
# tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
expected_result = (0.875 * 0.75 + 1.0 * 0.25) / (0.75 + 0.25)
self.assertAllClose(result, expected_result, 1e-3)
def test_label_weights_flat(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
multi_label=False,
label_weights=[0.75, 0.25],
)
result = auc_obj(self.y_true_good, self.y_pred)
# tpr = [1, 1, 0.375, 0.375, 0]
# fpr = [1, 0.375, 0, 0, 0]
expected_result = 1.0 - ((1.0 - 0.375) * 0.375 / 2.0)
self.assertAllClose(result, expected_result, 1e-2)
def test_unweighted_flat(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=False
)
result = auc_obj(self.y_true_good, self.y_pred)
# tp = [4, 4, 1, 1, 0]
# fp = [4, 1, 0, 0, 0]
# fn = [0, 0, 3, 3, 4]
# tn = [0, 3, 4, 4, 4]
# tpr = [1, 1, 0.25, 0.25, 0]
# fpr = [1, 0.25, 0, 0, 0]
expected_result = 1.0 - (3.0 / 32.0)
self.assertAllClose(result, expected_result, 1e-3)
def test_unweighted_flat_from_logits(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
multi_label=False,
from_logits=True,
)
result = auc_obj(self.y_true_good, self.y_pred_logits)
# tp = [4, 4, 1, 1, 0]
# fp = [4, 1, 0, 0, 0]
# fn = [0, 0, 3, 3, 4]
# tn = [0, 3, 4, 4, 4]
# tpr = [1, 1, 0.25, 0.25, 0]
# fpr = [1, 0.25, 0, 0, 0]
expected_result = 1.0 - (3.0 / 32.0)
self.assertAllClose(result, expected_result, 1e-3)
def test_manual_thresholds(self):
# Verify that when specified, thresholds are used instead of
# num_thresholds.
auc_obj = metrics.AUC(
num_thresholds=2, thresholds=[0.5], multi_label=True
)
self.assertEqual(auc_obj.num_thresholds, 3)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
result = auc_obj(self.y_true_good, self.y_pred)
# tp = [[2, 1, 0], [2, 0, 0]]
# fp = [[2, 0, 0], [2, 0, 0]]
# fn = [[0, 1, 2], [0, 2, 2]]
# tn = [[0, 2, 2], [0, 2, 2]]
# tpr = [[1, 0.5, 0], [1, 0, 0]]
# fpr = [[1, 0, 0], [1, 0, 0]]
# auc by slice = [0.75, 0.5]
expected_result = (0.75 + 0.5) / 2.0
self.assertAllClose(result, expected_result, 1e-3)
def test_weighted_roc_interpolation(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=True
)
result = auc_obj(
self.y_true_good, self.y_pred, sample_weight=self.sample_weight
)
# tpr = [[1, 1, 0.57, 0.57, 0], [1, 1, 0, 0, 0]]
# fpr = [[1, 0.67, 0, 0, 0], [1, 0, 0, 0, 0]]
expected_result = 1.0 - 0.5 * 0.43 * 0.67
self.assertAllClose(result, expected_result, 1e-1)
def test_pr_interpolation_unweighted(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, curve="PR", multi_label=True
)
good_result = auc_obj(self.y_true_good, self.y_pred)
with self.subTest(name="good"):
# PR AUCs are 0.917 and 1.0 respectively
self.assertAllClose(good_result, (0.91667 + 1.0) / 2.0, 1e-1)
bad_result = auc_obj(self.y_true_bad, self.y_pred)
with self.subTest(name="bad"):
# PR AUCs are 0.917 and 0.5 respectively
self.assertAllClose(bad_result, (0.91667 + 0.5) / 2.0, 1e-1)
def test_pr_interpolation(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, curve="PR", multi_label=True
)
good_result = auc_obj(
self.y_true_good, self.y_pred, sample_weight=self.sample_weight
)
# PR AUCs are 0.939 and 1.0 respectively
self.assertAllClose(good_result, (0.939 + 1.0) / 2.0, 1e-1)
@pytest.mark.requires_trainable_backend
def test_keras_model_compiles(self):
inputs = layers.Input(shape=(10,), batch_size=1)
output = layers.Dense(3, activation="sigmoid")(inputs)
model = models.Model(inputs=inputs, outputs=output)
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=[metrics.AUC(multi_label=True)],
)
def test_reset_state(self):
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, multi_label=True
)
auc_obj(self.y_true_good, self.y_pred)
auc_obj.reset_state()
self.assertAllClose(auc_obj.true_positives, np.zeros((5, 2)))
| keras/keras/metrics/confusion_metrics_test.py/0 | {
"file_path": "keras/keras/metrics/confusion_metrics_test.py",
"repo_id": "keras",
"token_count": 34634
} | 183 |
from keras.models.functional import Functional
from keras.models.model import Model
from keras.models.sequential import Sequential
| keras/keras/models/__init__.py/0 | {
"file_path": "keras/keras/models/__init__.py",
"repo_id": "keras",
"token_count": 32
} | 184 |
from keras import backend
from keras import ops
from keras.api_export import keras_export
from keras.backend import KerasTensor
from keras.backend import any_symbolic_tensors
from keras.ops.operation import Operation
from keras.ops.operation_utils import compute_conv_output_shape
class Resize(Operation):
def __init__(
self,
size,
interpolation="bilinear",
antialias=False,
data_format="channels_last",
):
super().__init__()
self.size = tuple(size)
self.interpolation = interpolation
self.antialias = antialias
self.data_format = data_format
def call(self, image):
return backend.image.resize(
image,
self.size,
interpolation=self.interpolation,
antialias=self.antialias,
data_format=self.data_format,
)
def compute_output_spec(self, image):
if len(image.shape) == 3:
return KerasTensor(
self.size + (image.shape[-1],), dtype=image.dtype
)
elif len(image.shape) == 4:
if self.data_format == "channels_last":
return KerasTensor(
(image.shape[0],) + self.size + (image.shape[-1],),
dtype=image.dtype,
)
else:
return KerasTensor(
(image.shape[0], image.shape[1]) + self.size,
dtype=image.dtype,
)
raise ValueError(
"Invalid input rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
@keras_export("keras.ops.image.resize")
def resize(
image,
size,
interpolation="bilinear",
antialias=False,
data_format="channels_last",
):
"""Resize images to size using the specified interpolation method.
Args:
image: Input image or batch of images. Must be 3D or 4D.
size: Size of output image in `(height, width)` format.
interpolation: Interpolation method. Available methods are `"nearest"`,
`"bilinear"`, and `"bicubic"`. Defaults to `"bilinear"`.
antialias: Whether to use an antialiasing filter when downsampling an
image. Defaults to `False`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Returns:
Resized image or batch of images.
Examples:
>>> x = np.random.random((2, 4, 4, 3)) # batch of 2 RGB images
>>> y = keras.ops.image.resize(x, (2, 2))
>>> y.shape
(2, 2, 2, 3)
>>> x = np.random.random((4, 4, 3)) # single RGB image
>>> y = keras.ops.image.resize(x, (2, 2))
>>> y.shape
(2, 2, 3)
>>> x = np.random.random((2, 3, 4, 4)) # batch of 2 RGB images
>>> y = keras.ops.image.resize(x, (2, 2),
... data_format="channels_first")
>>> y.shape
(2, 3, 2, 2)
"""
if any_symbolic_tensors((image,)):
return Resize(
size,
interpolation=interpolation,
antialias=antialias,
data_format=data_format,
).symbolic_call(image)
return backend.image.resize(
image,
size,
interpolation=interpolation,
antialias=antialias,
data_format=data_format,
)
class AffineTransform(Operation):
def __init__(
self,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format="channels_last",
):
super().__init__()
self.interpolation = interpolation
self.fill_mode = fill_mode
self.fill_value = fill_value
self.data_format = data_format
def call(self, image, transform):
return backend.image.affine_transform(
image,
transform,
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
data_format=self.data_format,
)
def compute_output_spec(self, image, transform):
if len(image.shape) not in (3, 4):
raise ValueError(
"Invalid image rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
if len(transform.shape) not in (1, 2):
raise ValueError(
"Invalid transform rank: expected rank 1 (single transform) "
"or rank 2 (batch of transforms). Received input with shape: "
f"transform.shape={transform.shape}"
)
return KerasTensor(image.shape, dtype=image.dtype)
@keras_export("keras.ops.image.affine_transform")
def affine_transform(
image,
transform,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format="channels_last",
):
"""Applies the given transform(s) to the image(s).
Args:
image: Input image or batch of images. Must be 3D or 4D.
transform: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transform is
`[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the output point
`(x, y)` to a transformed input point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`. The transform is inverted compared to
the transform mapping input points to output points. Note that
gradients are not backpropagated into transformation parameters.
Note that `c0` and `c1` are only effective when using TensorFlow
backend and will be considered as `0` when using other backends.
interpolation: Interpolation method. Available methods are `"nearest"`,
and `"bilinear"`. Defaults to `"bilinear"`.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode. Available methods are `"constant"`,
`"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last
pixel.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
fill_value: Value used for points outside the boundaries of the input if
`fill_mode="constant"`. Defaults to `0`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Returns:
Applied affine transform image or batch of images.
Examples:
>>> x = np.random.random((2, 64, 80, 3)) # batch of 2 RGB images
>>> transform = np.array(
... [
... [1.5, 0, -20, 0, 1.5, -16, 0, 0], # zoom
... [1, 0, -20, 0, 1, -16, 0, 0], # translation
... ]
... )
>>> y = keras.ops.image.affine_transform(x, transform)
>>> y.shape
(2, 64, 80, 3)
>>> x = np.random.random((64, 80, 3)) # single RGB image
>>> transform = np.array([1.0, 0.5, -20, 0.5, 1.0, -16, 0, 0]) # shear
>>> y = keras.ops.image.affine_transform(x, transform)
>>> y.shape
(64, 80, 3)
>>> x = np.random.random((2, 3, 64, 80)) # batch of 2 RGB images
>>> transform = np.array(
... [
... [1.5, 0, -20, 0, 1.5, -16, 0, 0], # zoom
... [1, 0, -20, 0, 1, -16, 0, 0], # translation
... ]
... )
>>> y = keras.ops.image.affine_transform(x, transform,
... data_format="channels_first")
>>> y.shape
(2, 3, 64, 80)
"""
if any_symbolic_tensors((image, transform)):
return AffineTransform(
interpolation=interpolation,
fill_mode=fill_mode,
fill_value=fill_value,
data_format=data_format,
).symbolic_call(image, transform)
return backend.image.affine_transform(
image,
transform,
interpolation=interpolation,
fill_mode=fill_mode,
fill_value=fill_value,
data_format=data_format,
)
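# Illustrative sketch (not part of the Keras API): the coordinate mapping
# described in the `affine_transform` docstring, applied to a single output
# point. The helper name is hypothetical.
def _affine_map_output_to_input(transform, x, y):
    """Returns the input point (x', y') sampled for output point (x, y)."""
    a0, a1, a2, b0, b1, b2, c0, c1 = transform
    k = c0 * x + c1 * y + 1.0
    return (a0 * x + a1 * y + a2) / k, (b0 * x + b1 * y + b2) / k
# For the translation transform [1, 0, -20, 0, 1, -16, 0, 0] from the example
# above, output pixel (0, 0) samples input point (-20, -16), which lies
# outside the image and is therefore filled according to `fill_mode`.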
class ExtractPatches(Operation):
def __init__(
self,
size,
strides=None,
dilation_rate=1,
padding="valid",
data_format="channels_last",
):
super().__init__()
if isinstance(size, int):
size = (size, size)
self.size = size
self.strides = strides
self.dilation_rate = dilation_rate
self.padding = padding
self.data_format = data_format
def call(self, image):
return _extract_patches(
image=image,
size=self.size,
strides=self.strides,
dilation_rate=self.dilation_rate,
padding=self.padding,
data_format=self.data_format,
)
def compute_output_spec(self, image):
image_shape = image.shape
        strides = self.strides
        if not strides:
            strides = (self.size[0], self.size[1])
if self.data_format == "channels_last":
channels_in = image.shape[-1]
else:
channels_in = image.shape[-3]
if len(image.shape) == 3:
image_shape = (1,) + image_shape
filters = self.size[0] * self.size[1] * channels_in
kernel_size = (self.size[0], self.size[1])
out_shape = compute_conv_output_shape(
image_shape,
filters,
kernel_size,
strides=strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
if len(image.shape) == 3:
out_shape = out_shape[1:]
return KerasTensor(shape=out_shape, dtype=image.dtype)
@keras_export("keras.ops.image.extract_patches")
def extract_patches(
image,
size,
strides=None,
dilation_rate=1,
padding="valid",
data_format="channels_last",
):
"""Extracts patches from the image(s).
Args:
image: Input image or batch of images. Must be 3D or 4D.
        size: Patch size int or tuple (patch_height, patch_width).
strides: strides along height and width. If not specified, or
if `None`, it defaults to the same value as `size`.
        dilation_rate: This is the input stride, specifying how far two
            consecutive patch samples are in the input. For any value other
            than 1, strides must be 1. NOTE: `strides > 1` is not supported in
            conjunction with `dilation_rate > 1`.
padding: The type of padding algorithm to use: `"same"` or `"valid"`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Returns:
Extracted patches 3D (if not batched) or 4D (if batched)
Examples:
>>> image = np.random.random(
... (2, 20, 20, 3)
... ).astype("float32") # batch of 2 RGB images
>>> patches = keras.ops.image.extract_patches(image, (5, 5))
>>> patches.shape
(2, 4, 4, 75)
>>> image = np.random.random((20, 20, 3)).astype("float32") # 1 RGB image
>>> patches = keras.ops.image.extract_patches(image, (3, 3), (1, 1))
>>> patches.shape
(18, 18, 27)
"""
if any_symbolic_tensors((image,)):
return ExtractPatches(
size=size,
strides=strides,
dilation_rate=dilation_rate,
padding=padding,
data_format=data_format,
).symbolic_call(image)
return _extract_patches(
image, size, strides, dilation_rate, padding, data_format=data_format
)
def _extract_patches(
image,
size,
strides=None,
dilation_rate=1,
padding="valid",
data_format="channels_last",
):
if isinstance(size, int):
patch_h = patch_w = size
elif len(size) == 2:
patch_h, patch_w = size[0], size[1]
else:
raise TypeError(
"Invalid `size` argument. Expected an "
f"int or a tuple of length 2. Received: size={size}"
)
if data_format == "channels_last":
channels_in = image.shape[-1]
elif data_format == "channels_first":
channels_in = image.shape[-3]
if not strides:
strides = size
out_dim = patch_h * patch_w * channels_in
kernel = backend.numpy.eye(out_dim)
kernel = backend.numpy.reshape(
kernel, (patch_h, patch_w, channels_in, out_dim)
)
_unbatched = False
if len(image.shape) == 3:
_unbatched = True
image = backend.numpy.expand_dims(image, axis=0)
patches = backend.nn.conv(
inputs=image,
kernel=kernel,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
if _unbatched:
patches = backend.numpy.squeeze(patches, axis=0)
return patches
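# Illustrative sketch (not part of the Keras API): `_extract_patches` realizes
# patch extraction as a convolution with an identity kernel of shape
# (patch_h, patch_w, channels_in, patch_h * patch_w * channels_in), so each
# output position holds one flattened patch. The helper below only checks the
# shape arithmetic for "valid" padding; its name and arguments are
# hypothetical.
def _expected_patch_output_shape(image_hw, patch_hw, channels, strides=None):
    strides = strides or patch_hw
    out_h = (image_hw[0] - patch_hw[0]) // strides[0] + 1
    out_w = (image_hw[1] - patch_hw[1]) // strides[1] + 1
    return (out_h, out_w, patch_hw[0] * patch_hw[1] * channels)
# _expected_patch_output_shape((20, 20), (5, 5), 3) -> (4, 4, 75), matching
# the docstring example for `extract_patches`.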
class MapCoordinates(Operation):
def __init__(self, order, fill_mode="constant", fill_value=0):
super().__init__()
self.order = order
self.fill_mode = fill_mode
self.fill_value = fill_value
def call(self, image, coordinates):
return backend.image.map_coordinates(
image,
coordinates,
order=self.order,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
def compute_output_spec(self, image, coordinates):
if coordinates.shape[0] != len(image.shape):
raise ValueError(
"First dim of `coordinates` must be the same as the rank of "
"`image`. "
f"Received image with shape: {image.shape} and coordinate "
f"leading dim of {coordinates.shape[0]}"
)
if len(coordinates.shape) < 2:
raise ValueError(
"Invalid coordinates rank: expected at least rank 2."
f" Received input with shape: {coordinates.shape}"
)
return KerasTensor(coordinates.shape[1:], dtype=image.dtype)
@keras_export("keras.ops.image.map_coordinates")
def map_coordinates(
input, coordinates, order, fill_mode="constant", fill_value=0
):
"""Map the input array to new coordinates by interpolation..
Note that interpolation near boundaries differs from the scipy function,
because we fixed an outstanding bug
[scipy/issues/2640](https://github.com/scipy/scipy/issues/2640).
Args:
input: The input array.
coordinates: The coordinates at which input is evaluated.
order: The order of the spline interpolation. The order must be `0` or
`1`. `0` indicates the nearest neighbor and `1` indicates the linear
interpolation.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode. Available methods are `"constant"`,
`"nearest"`, `"wrap"` and `"mirror"` and `"reflect"`. Defaults to
`"constant"`.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"mirror"`: `(c d c b | a b c d | c b a b)`
The input is extended by mirroring about the edge.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last
pixel.
fill_value: Value used for points outside the boundaries of the input if
`fill_mode="constant"`. Defaults to `0`.
Returns:
Output image or batch of images.
"""
if any_symbolic_tensors((input, coordinates)):
return MapCoordinates(
order,
fill_mode,
fill_value,
).symbolic_call(input, coordinates)
return backend.image.map_coordinates(
input,
coordinates,
order,
fill_mode,
fill_value,
)
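# Illustrative sketch (not part of the Keras API): a NumPy reference of what
# `map_coordinates` computes for `order=0` (nearest neighbor) with
# `fill_mode="constant"`. The helper name is hypothetical and it ignores the
# boundary subtleties handled by the real backends.
def _map_coordinates_nearest_reference(inputs, coordinates, fill_value=0):
    import numpy as np

    inputs = np.asarray(inputs)
    # `coordinates` has shape (rank(inputs), *output_shape).
    coords = np.rint(np.asarray(coordinates)).astype("int64")
    output = np.full(coords.shape[1:], fill_value, dtype=inputs.dtype)
    in_bounds = np.ones(coords.shape[1:], dtype=bool)
    for axis_coords, dim in zip(coords, inputs.shape):
        in_bounds &= (axis_coords >= 0) & (axis_coords < dim)
    gather = tuple(axis_coords[in_bounds] for axis_coords in coords)
    output[in_bounds] = inputs[gather]
    return output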
class PadImages(Operation):
def __init__(
self,
top_padding,
bottom_padding,
left_padding,
right_padding,
target_height,
target_width,
):
super().__init__()
self.top_padding = top_padding
self.bottom_padding = bottom_padding
self.left_padding = left_padding
self.right_padding = right_padding
self.target_height = target_height
self.target_width = target_width
def call(self, images):
return _pad_images(
images,
self.top_padding,
self.bottom_padding,
self.left_padding,
self.right_padding,
self.target_height,
self.target_width,
)
def compute_output_spec(self, images):
images_shape = ops.shape(images)
if self.target_height is None:
height_axis = 0 if len(images_shape) == 3 else 1
self.target_height = (
self.top_padding
+ images_shape[height_axis]
+ self.bottom_padding
)
if self.target_width is None:
width_axis = 0 if len(images_shape) == 3 else 2
self.target_width = (
self.left_padding
+ images_shape[width_axis]
+ self.right_padding
)
out_shape = (
images_shape[0],
self.target_height,
self.target_width,
images_shape[-1],
)
if len(images_shape) == 3:
out_shape = out_shape[1:]
return KerasTensor(
shape=out_shape,
dtype=images.dtype,
)
@keras_export("keras.ops.image.pad_images")
def pad_images(
images,
top_padding=None,
left_padding=None,
target_height=None,
target_width=None,
bottom_padding=None,
right_padding=None,
):
"""Pad `images` with zeros to the specified `height` and `width`.
Args:
images: 4D Tensor of shape `(batch, height, width, channels)` or 3D
Tensor of shape `(height, width, channels)`.
top_padding: Number of rows of zeros to add on top.
bottom_padding: Number of rows of zeros to add at the bottom.
left_padding: Number of columns of zeros to add on the left.
right_padding: Number of columns of zeros to add on the right.
target_height: Height of output images.
target_width: Width of output images.
Returns:
If `images` were 4D, a 4D float Tensor of shape
`(batch, target_height, target_width, channels)`
If `images` were 3D, a 3D float Tensor of shape
`(target_height, target_width, channels)`
Example:
>>> images = np.random.random((15, 25, 3))
>>> padded_images = keras.ops.image.pad_images(
... images, 2, 3, target_height=20, target_width=30
... )
>>> padded_images.shape
(20, 30, 3)
>>> batch_images = np.random.random((2, 15, 25, 3))
>>> padded_batch = keras.ops.image.pad_images(
... batch_images, 2, 3, target_height=20, target_width=30
... )
>>> padded_batch.shape
(2, 20, 30, 3)"""
if any_symbolic_tensors((images,)):
return PadImages(
top_padding,
bottom_padding,
left_padding,
right_padding,
target_height,
target_width,
).symbolic_call(images)
return _pad_images(
images,
top_padding,
bottom_padding,
left_padding,
right_padding,
target_height,
target_width,
)
def _pad_images(
images,
top_padding,
bottom_padding,
left_padding,
right_padding,
target_height,
target_width,
):
images = backend.convert_to_tensor(images)
is_batch = True
images_shape = ops.shape(images)
if len(images_shape) == 3:
is_batch = False
images = backend.numpy.expand_dims(images, 0)
elif len(images_shape) != 4:
raise ValueError(
f"Invalid shape for argument `images`: "
"it must have rank 3 or 4. "
f"Received: images.shape={images_shape}"
)
batch, height, width, depth = ops.shape(images)
if [top_padding, bottom_padding, target_height].count(None) != 1:
raise ValueError(
"Must specify exactly two of "
"top_padding, bottom_padding, target_height. "
f"Received: top_padding={top_padding}, "
f"bottom_padding={bottom_padding}, "
f"target_height={target_height}"
)
if [left_padding, right_padding, target_width].count(None) != 1:
raise ValueError(
"Must specify exactly two of "
"left_padding, right_padding, target_width. "
f"Received: left_padding={left_padding}, "
f"right_padding={right_padding}, "
f"target_width={target_width}"
)
if top_padding is None:
top_padding = target_height - bottom_padding - height
if bottom_padding is None:
bottom_padding = target_height - top_padding - height
if left_padding is None:
left_padding = target_width - right_padding - width
if right_padding is None:
right_padding = target_width - left_padding - width
if top_padding < 0:
raise ValueError(
"top_padding must be >= 0. " f"Received: top_padding={top_padding}"
)
if left_padding < 0:
raise ValueError(
"left_padding must be >= 0. "
f"Received: left_padding={left_padding}"
)
if right_padding < 0:
raise ValueError(
"right_padding must be >= 0. "
f"Received: right_padding={right_padding}"
)
if bottom_padding < 0:
raise ValueError(
"bottom_padding must be >= 0. "
f"Received: bottom_padding={bottom_padding}"
)
paddings = backend.numpy.reshape(
backend.numpy.stack(
[
0,
0,
top_padding,
bottom_padding,
left_padding,
right_padding,
0,
0,
]
),
[4, 2],
)
padded = backend.numpy.pad(images, paddings)
if target_height is None:
target_height = top_padding + height + bottom_padding
if target_width is None:
target_width = left_padding + width + right_padding
padded_shape = [batch, target_height, target_width, depth]
padded = backend.numpy.reshape(padded, padded_shape)
if not is_batch:
padded = backend.numpy.squeeze(padded, axis=[0])
return padded
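# Illustrative sketch (not part of the Keras API): how `pad_images` resolves
# the "specify exactly two of before/after/target" rule along one axis. The
# helper name is hypothetical.
def _resolve_padding_1d(size, before=None, after=None, target=None):
    if [before, after, target].count(None) != 1:
        raise ValueError("Specify exactly two of before, after, target.")
    if before is None:
        before = target - after - size
    if after is None:
        after = target - before - size
    if target is None:
        target = before + size + after
    return before, after, target
# _resolve_padding_1d(15, before=2, target=20) -> (2, 3, 20): a height-15
# image padded with 2 rows on top receives 3 rows at the bottom to reach the
# target height of 20, mirroring the docstring example above.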
class CropImages(Operation):
def __init__(
self,
top_cropping,
bottom_cropping,
left_cropping,
right_cropping,
target_height,
target_width,
):
super().__init__()
self.top_cropping = top_cropping
self.bottom_cropping = bottom_cropping
self.left_cropping = left_cropping
self.right_cropping = right_cropping
self.target_height = target_height
self.target_width = target_width
def call(self, images):
return _crop_images(
images,
self.top_cropping,
self.bottom_cropping,
self.left_cropping,
self.right_cropping,
self.target_height,
self.target_width,
)
def compute_output_spec(self, images):
images_shape = ops.shape(images)
out_shape = (
images_shape[0],
self.target_height,
self.target_width,
images_shape[-1],
)
if self.target_height is None:
height_axis = 0 if len(images_shape) == 3 else 1
            self.target_height = (
                images_shape[height_axis]
                - self.top_cropping
                - self.bottom_cropping
            )
if self.target_width is None:
width_axis = 0 if len(images_shape) == 3 else 2
            self.target_width = (
                images_shape[width_axis]
                - self.left_cropping
                - self.right_cropping
            )
out_shape = (
images_shape[0],
self.target_height,
self.target_width,
images_shape[-1],
)
if len(images_shape) == 3:
out_shape = out_shape[1:]
return KerasTensor(
shape=out_shape,
dtype=images.dtype,
)
@keras_export("keras.ops.image.crop_images")
def crop_images(
images,
top_cropping=None,
left_cropping=None,
target_height=None,
target_width=None,
bottom_cropping=None,
right_cropping=None,
):
"""Crop `images` to a specified `height` and `width`.
Args:
images: 4-D batch of images of shape `(batch, height, width, channels)`
or 3-D single image of shape `(height, width, channels)`.
        top_cropping: Number of rows to crop from the top.
        bottom_cropping: Number of rows to crop from the bottom.
left_cropping: Number of columns to crop from the left.
right_cropping: Number of columns to crop from the right.
target_height: Height of the output images.
target_width: Width of the output images.
Returns:
If `images` were 4D, a 4D float Tensor of shape
`(batch, target_height, target_width, channels)`
If `images` were 3D, a 3D float Tensor of shape
`(target_height, target_width, channels)`
Example:
>>> images = np.reshape(np.arange(1, 28, dtype="float32"), [3, 3, 3])
>>> images[:,:,0] # print the first channel of the images
array([[ 1., 4., 7.],
[10., 13., 16.],
[19., 22., 25.]], dtype=float32)
    >>> cropped_images = keras.ops.image.crop_images(images, 0, 0, 2, 2)
>>> cropped_images[:,:,0] # print the first channel of the cropped images
array([[ 1., 4.],
[10., 13.]], dtype=float32)"""
if any_symbolic_tensors((images,)):
return CropImages(
top_cropping,
bottom_cropping,
left_cropping,
right_cropping,
target_height,
target_width,
).symbolic_call(images)
return _crop_images(
images,
top_cropping,
bottom_cropping,
left_cropping,
right_cropping,
target_height,
target_width,
)
def _crop_images(
images,
top_cropping,
bottom_cropping,
left_cropping,
right_cropping,
target_height,
target_width,
):
images = backend.convert_to_tensor(images)
is_batch = True
images_shape = ops.shape(images)
if len(images_shape) == 3:
is_batch = False
images = backend.numpy.expand_dims(images, 0)
elif len(images_shape) != 4:
raise ValueError(
f"Invalid shape for argument `images`: "
"it must have rank 3 or 4. "
f"Received: images.shape={images_shape}"
)
batch, height, width, depth = ops.shape(images)
if [top_cropping, bottom_cropping, target_height].count(None) != 1:
raise ValueError(
"Must specify exactly two of "
"top_cropping, bottom_cropping, target_height. "
f"Received: top_cropping={top_cropping}, "
f"bottom_cropping={bottom_cropping}, "
f"target_height={target_height}"
)
if [left_cropping, right_cropping, target_width].count(None) != 1:
raise ValueError(
"Must specify exactly two of "
"left_cropping, right_cropping, target_width. "
f"Received: left_cropping={left_cropping}, "
f"right_cropping={right_cropping}, "
f"target_width={target_width}"
)
if top_cropping is None:
top_cropping = height - target_height - bottom_cropping
if target_height is None:
target_height = height - bottom_cropping - top_cropping
if left_cropping is None:
left_cropping = width - target_width - right_cropping
if target_width is None:
target_width = width - right_cropping - left_cropping
if top_cropping < 0:
raise ValueError(
"top_cropping must be >= 0. "
f"Received: top_cropping={top_cropping}"
)
if target_height < 0:
raise ValueError(
"target_height must be >= 0. "
f"Received: target_height={target_height}"
)
if left_cropping < 0:
raise ValueError(
"left_cropping must be >= 0. "
f"Received: left_cropping={left_cropping}"
)
if target_width < 0:
raise ValueError(
"target_width must be >= 0. "
f"Received: target_width={target_width}"
)
cropped = ops.slice(
images,
backend.numpy.stack([0, top_cropping, left_cropping, 0]),
backend.numpy.stack([batch, target_height, target_width, depth]),
)
cropped_shape = [batch, target_height, target_width, depth]
cropped = backend.numpy.reshape(cropped, cropped_shape)
if not is_batch:
cropped = backend.numpy.squeeze(cropped, axis=[0])
return cropped
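# Illustrative sketch (not part of the Keras API): the region selected by
# `_crop_images` along one axis. Given a before-cropping amount and a target
# size, the slice covers indices [before, before + target). The helper name is
# hypothetical.
def _crop_slice_1d(size, before=None, after=None, target=None):
    if [before, after, target].count(None) != 1:
        raise ValueError("Specify exactly two of before, after, target.")
    if before is None:
        before = size - target - after
    if target is None:
        target = size - before - after
    return slice(before, before + target)
# _crop_slice_1d(3, before=0, target=2) -> slice(0, 2), matching the 3x3 to
# 2x2 example in the `crop_images` docstring.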
| keras/keras/ops/image.py/0 | {
"file_path": "keras/keras/ops/image.py",
"repo_id": "keras",
"token_count": 14592
} | 185 |
import tree
from keras.backend import KerasTensor
class SymbolicArguments:
def __init__(self, *args, **kwargs):
self.args = tree.map_structure(lambda x: x, args)
self.kwargs = tree.map_structure(lambda x: x, kwargs)
self._flat_arguments = tree.flatten((self.args, self.kwargs))
# Used to avoid expensive `tree` operations in the most common case.
if (
not self.kwargs
and len(self.args) == 1
and isinstance(self.args[0], KerasTensor)
):
self._single_positional_tensor = self.args[0]
else:
self._single_positional_tensor = None
self.keras_tensors = []
for arg in self._flat_arguments:
if isinstance(arg, KerasTensor):
self.keras_tensors.append(arg)
def convert(self, conversion_fn):
args = tree.map_structure(conversion_fn, self.args)
kwargs = tree.map_structure(conversion_fn, self.kwargs)
return args, kwargs
def fill_in(self, tensor_dict):
"""Maps KerasTensors to computed values using `tensor_dict`.
`tensor_dict` maps `KerasTensor` instances to their current values.
"""
if self._single_positional_tensor is not None:
# Performance optimization for most common case.
# Approx. 70x faster.
return (tensor_dict[id(self._single_positional_tensor)],), {}
def switch_fn(x):
if isinstance(x, KerasTensor):
val = tensor_dict.get(id(x), None)
if val is not None:
return val
return x
return self.convert(switch_fn)
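# Illustrative sketch (not part of the Keras API): how `SymbolicArguments` is
# typically used when executing a functional graph. The names and values below
# are hypothetical.
def _symbolic_arguments_example():
    import numpy as np

    x = KerasTensor(shape=(2, 3))
    sym_args = SymbolicArguments(x, scale=2.0)
    concrete = np.ones((2, 3))
    # `fill_in` replaces each KerasTensor with the value registered under its
    # id and passes every other argument through unchanged.
    args, kwargs = sym_args.fill_in({id(x): concrete})
    assert args[0] is concrete and kwargs == {"scale": 2.0}
    return args, kwargs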
| keras/keras/ops/symbolic_arguments.py/0 | {
"file_path": "keras/keras/ops/symbolic_arguments.py",
"repo_id": "keras",
"token_count": 783
} | 186 |
from keras import initializers
from keras import ops
from keras.api_export import keras_export
from keras.optimizers import optimizer
@keras_export(["keras.optimizers.Ftrl"])
class Ftrl(optimizer.Optimizer):
r"""Optimizer that implements the FTRL algorithm.
"Follow The Regularized Leader" (FTRL) is an optimization algorithm
developed at Google for click-through rate prediction in the early 2010s. It
is most suitable for shallow models with large and sparse feature spaces.
The algorithm is described by
[McMahan et al., 2013](https://research.google.com/pubs/archive/41159.pdf).
The Keras version has support for both online L2 regularization
(the L2 regularization described in the paper
above) and shrinkage-type L2 regularization
(which is the addition of an L2 penalty to the loss function).
Initialization:
```python
n = 0
sigma = 0
z = 0
```
Update rule for one variable `w`:
```python
prev_n = n
n = n + g ** 2
sigma = (n ** -lr_power - prev_n ** -lr_power) / lr
z = z + g - sigma * w
if abs(z) < lambda_1:
w = 0
else:
w = (sgn(z) * lambda_1 - z) / ((beta + sqrt(n)) / alpha + lambda_2)
```
Notation:
- `lr` is the learning rate
- `g` is the gradient for the variable
- `lambda_1` is the L1 regularization strength
- `lambda_2` is the L2 regularization strength
- `lr_power` is the power to scale n.
Check the documentation for the `l2_shrinkage_regularization_strength`
parameter for more details when shrinkage is enabled, in which case gradient
is replaced with a gradient with shrinkage.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
        learning_rate_power: A float value, must be less than or equal to
            zero. Controls how the learning rate decreases during training.
            Use zero for a fixed learning rate.
initial_accumulator_value: The starting value for accumulators. Only
zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or equal
to zero. Defaults to `0.0`.
l2_regularization_strength: A float value, must be greater than or equal
to zero. Defaults to `0.0`.
l2_shrinkage_regularization_strength: A float value, must be greater
than or equal to zero. This differs from L2 above in that the L2
above is a stabilization penalty, whereas this L2 shrinkage is a
            magnitude penalty. When the input is sparse, shrinkage will only
            happen on the active weights.
beta: A float value, representing the beta value from the paper.
Defaults to `0.0`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.001,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
l2_shrinkage_regularization_strength=0.0,
beta=0.0,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
name="ftrl",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
**kwargs,
)
if initial_accumulator_value < 0.0:
raise ValueError(
"`initial_accumulator_value` needs to be positive or zero. "
"Received: initial_accumulator_value="
f"{initial_accumulator_value}."
)
if learning_rate_power > 0.0:
raise ValueError(
"`learning_rate_power` needs to be negative or zero. Received: "
f"learning_rate_power={learning_rate_power}."
)
if l1_regularization_strength < 0.0:
raise ValueError(
"`l1_regularization_strength` needs to be positive or zero. "
"Received: l1_regularization_strength="
f"{l1_regularization_strength}."
)
if l2_regularization_strength < 0.0:
raise ValueError(
"`l2_regularization_strength` needs to be positive or zero. "
"Received: l2_regularization_strength="
f"{l2_regularization_strength}."
)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
"`l2_shrinkage_regularization_strength` needs to be positive "
"or zero. Received: l2_shrinkage_regularization_strength"
f"={l2_shrinkage_regularization_strength}."
)
self.learning_rate_power = learning_rate_power
self.initial_accumulator_value = initial_accumulator_value
self.l1_regularization_strength = l1_regularization_strength
self.l2_regularization_strength = l2_regularization_strength
self.l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength
)
self.beta = beta
def build(self, var_list):
"""Initialize optimizer variables.
Args:
var_list: list of model variables to build Ftrl variables on.
"""
if self.built:
return
super().build(var_list)
self._accumulators = []
self._linears = []
for var in var_list:
self._accumulators.append(
self.add_variable(
shape=var.shape,
dtype=var.dtype,
name="accumulator",
initializer=initializers.Constant(
self.initial_accumulator_value,
),
)
)
self._linears.append(
self.add_variable_from_reference(
reference_variable=var, name="linear"
)
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
accum = self._accumulators[self._get_variable_index(variable)]
linear = self._linears[self._get_variable_index(variable)]
lr_power = self.learning_rate_power
l2_reg = self.l2_regularization_strength
l2_reg = l2_reg + self.beta / (2.0 * lr)
grad_to_use = ops.add(
gradient,
ops.multiply(
2 * self.l2_shrinkage_regularization_strength, variable
),
)
new_accum = ops.add(accum, ops.square(gradient))
self.assign_add(
linear,
ops.subtract(
grad_to_use,
ops.multiply(
ops.divide(
ops.subtract(
ops.power(new_accum, -lr_power),
ops.power(accum, -lr_power),
),
lr,
),
variable,
),
),
)
quadratic = ops.add(
ops.divide(ops.power(new_accum, (-lr_power)), lr), 2 * l2_reg
)
linear_clipped = ops.clip(
linear,
-self.l1_regularization_strength,
self.l1_regularization_strength,
)
self.assign(
variable,
ops.divide(ops.subtract(linear_clipped, linear), quadratic),
)
self.assign(accum, new_accum)
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate_power": self.learning_rate_power,
"initial_accumulator_value": self.initial_accumulator_value,
"l1_regularization_strength": self.l1_regularization_strength,
"l2_regularization_strength": self.l2_regularization_strength,
"l2_shrinkage_regularization_strength": self.l2_shrinkage_regularization_strength, # noqa: E501
"beta": self.beta,
}
)
return config
Ftrl.__doc__ = Ftrl.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
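# Illustrative sketch (not part of the Keras API): the pseudocode from the
# docstring above, written for a single scalar weight. The function name and
# arguments are hypothetical; the real `update_step` operates on whole tensors
# and folds `beta` into the L2 term instead.
def _ftrl_scalar_reference(w, g, n, z, lr, lr_power, lambda_1, lambda_2, beta):
    import math

    prev_n = n
    n = n + g**2
    sigma = (n**-lr_power - prev_n**-lr_power) / lr
    z = z + g - sigma * w
    if abs(z) < lambda_1:
        w = 0.0
    else:
        w = (math.copysign(lambda_1, z) - z) / (
            (beta + math.sqrt(n)) / lr + lambda_2
        )
    return w, n, z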
| keras/keras/optimizers/ftrl.py/0 | {
"file_path": "keras/keras/optimizers/ftrl.py",
"repo_id": "keras",
"token_count": 4232
} | 187 |
from keras import ops
from keras.api_export import keras_export
from keras.optimizers import optimizer
@keras_export("keras.optimizers.SGD")
class SGD(optimizer.Optimizer):
"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum` is 0:
```python
w = w - learning_rate * g
```
Update rule when `momentum` is larger than 0:
```python
velocity = momentum * velocity - learning_rate * g
w = w + velocity
```
When `nesterov=True`, this rule becomes:
```python
velocity = momentum * velocity - learning_rate * g
w = w + momentum * velocity - learning_rate * g
```
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.01`.
momentum: float hyperparameter >= 0 that accelerates gradient descent in
the relevant direction and dampens oscillations. 0 is vanilla
gradient descent. Defaults to `0.0`.
nesterov: boolean. Whether to apply Nesterov momentum.
Defaults to `False`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.01,
momentum=0.0,
nesterov=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
name="SGD",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
**kwargs,
)
if not isinstance(momentum, float) or momentum < 0 or momentum > 1:
raise ValueError("`momentum` must be a float between [0, 1].")
self.momentum = momentum
self.nesterov = nesterov
def build(self, variables):
"""Initialize optimizer variables.
SGD optimizer has one variable `momentums`, only set if `self.momentum`
is not 0.
Args:
            variables: list of model variables to build SGD variables on.
"""
if self.built:
return
super().build(variables)
self.momentums = []
if self.momentum != 0:
for variable in variables:
self.momentums.append(
self.add_variable_from_reference(
reference_variable=variable, name="momentum"
)
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
learning_rate = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
m = None
if self.momentum != 0:
m = self.momentums[self._get_variable_index(variable)]
if m is not None:
momentum = ops.cast(self.momentum, variable.dtype)
self.assign(
m,
ops.subtract(
ops.multiply(m, momentum),
ops.multiply(gradient, learning_rate),
),
)
if self.nesterov:
self.assign_add(
variable,
ops.subtract(
ops.multiply(m, momentum),
ops.multiply(gradient, learning_rate),
),
)
else:
self.assign_add(variable, m)
else:
self.assign_sub(variable, ops.multiply(gradient, learning_rate))
def get_config(self):
config = super().get_config()
config.update(
{
"momentum": self.momentum,
"nesterov": self.nesterov,
}
)
return config
SGD.__doc__ = SGD.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
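# Illustrative sketch (not part of the Keras API): the update rules from the
# docstring above, written for a single scalar weight. The function name is
# hypothetical.
def _sgd_scalar_reference(w, g, velocity, learning_rate, momentum, nesterov):
    if momentum == 0:
        return w - learning_rate * g, velocity
    velocity = momentum * velocity - learning_rate * g
    if nesterov:
        return w + momentum * velocity - learning_rate * g, velocity
    return w + velocity, velocity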
| keras/keras/optimizers/sgd.py/0 | {
"file_path": "keras/keras/optimizers/sgd.py",
"repo_id": "keras",
"token_count": 2131
} | 188 |
"""Python-based idempotent model-saving functionality."""
import datetime
import io
import json
import tempfile
import warnings
import zipfile
import ml_dtypes
import numpy as np
from keras import backend
from keras.backend.common import global_state
from keras.layers.layer import Layer
from keras.losses.loss import Loss
from keras.metrics.metric import Metric
from keras.optimizers.optimizer import Optimizer
from keras.saving.serialization_lib import ObjectSharingScope
from keras.saving.serialization_lib import deserialize_keras_object
from keras.saving.serialization_lib import serialize_keras_object
from keras.trainers.compile_utils import CompileMetrics
from keras.utils import file_utils
from keras.utils import naming
from keras.version import __version__ as keras_version
try:
import h5py
except ImportError:
h5py = None
_CONFIG_FILENAME = "config.json"
_METADATA_FILENAME = "metadata.json"
_VARS_FNAME = "model.weights" # Will become e.g. "model.weights.h5"
_ASSETS_DIRNAME = "assets"
def save_model(model, filepath, weights_format="h5"):
"""Save a zip-archive representing a Keras model to the given filepath.
The zip-based archive contains the following structure:
- JSON-based configuration file (config.json): Records of model, layer, and
other trackables' configuration.
    - A single state file (model.weights.h5 by default, or model.weights.npz)
      holding the variables of the model and of all nested trackables.
- Metadata file.
The states of Keras trackables (layers, optimizers, loss, and metrics) are
automatically saved as long as they can be discovered through the attributes
returned by `dir(Model)`. Typically, the state includes the variables
associated with the trackable, but some specially purposed layers may
contain more such as the vocabularies stored in the hashmaps. The trackables
define how their states are saved by exposing `save_state()` and
`load_state()` APIs.
For the case of layer states, the variables will be visited as long as
they are either 1) referenced via layer attributes, or 2) referenced via a
container (list, tuple, or dict), and the container is referenced via a
layer attribute.
"""
filepath = str(filepath)
if not filepath.endswith(".keras"):
raise ValueError(
"Invalid `filepath` argument: expected a `.keras` extension. "
f"Received: filepath={filepath}"
)
if weights_format == "h5" and h5py is None:
raise ImportError("h5py must be installed in order to save a model.")
if not model.built:
warnings.warn(
"You are saving a model that has not yet been built. "
"It might not contain any weights yet. "
"Consider building the model first by calling it "
"on some data.",
stacklevel=2,
)
with ObjectSharingScope():
serialized_model_dict = serialize_keras_object(model)
config_json = json.dumps(serialized_model_dict)
metadata_json = json.dumps(
{
"keras_version": keras_version,
"date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
}
)
if file_utils.is_remote_path(filepath):
# Remote path. Zip to local memory byte io and copy to remote
zip_filepath = io.BytesIO()
else:
zip_filepath = filepath
with zipfile.ZipFile(zip_filepath, "w") as zf:
with zf.open(_METADATA_FILENAME, "w") as f:
f.write(metadata_json.encode())
with zf.open(_CONFIG_FILENAME, "w") as f:
f.write(config_json.encode())
if weights_format == "h5":
weights_store = H5IOStore(_VARS_FNAME + ".h5", archive=zf, mode="w")
elif weights_format == "npz":
weights_store = NpzIOStore(
_VARS_FNAME + ".npz", archive=zf, mode="w"
)
else:
raise ValueError(
"Unknown `weights_format` argument. "
"Expected 'h5' or 'npz'. "
f"Received: weights_format={weights_format}"
)
asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="w")
_save_state(
model,
weights_store=weights_store,
assets_store=asset_store,
inner_path="",
visited_trackables=set(),
)
weights_store.close()
asset_store.close()
if file_utils.is_remote_path(filepath):
with file_utils.File(filepath, "wb") as f:
f.write(zip_filepath.getvalue())
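# Illustrative sketch (not part of the Keras API): the archive written by
# `save_model` can be inspected with the standard `zipfile` module. The helper
# name and the file path below are hypothetical.
def _list_keras_archive_contents(filepath="model.keras"):
    with zipfile.ZipFile(filepath, "r") as zf:
        # Typically contains "metadata.json", "config.json" and
        # "model.weights.h5" (plus an "assets/" directory when present).
        return zf.namelist()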
def load_model(filepath, custom_objects=None, compile=True, safe_mode=True):
"""Load a zip archive representing a Keras model."""
filepath = str(filepath)
if not filepath.endswith(".keras"):
raise ValueError(
"Invalid filename: expected a `.keras` extension. "
f"Received: filepath={filepath}"
)
with file_utils.File(filepath, mode="r+b") as gfile_handle, zipfile.ZipFile(
gfile_handle, "r"
) as zf:
with zf.open(_CONFIG_FILENAME, "r") as f:
config_json = f.read()
# Note: we should NOT use a custom JSON decoder. Anything that
# needs custom decoding must be handled in deserialize_keras_object.
config_dict = json.loads(config_json)
if not compile:
# Disable compilation
config_dict["compile_config"] = None
# Construct the model from the configuration file in the archive.
with ObjectSharingScope():
model = deserialize_keras_object(
config_dict, custom_objects, safe_mode=safe_mode
)
all_filenames = zf.namelist()
if _VARS_FNAME + ".h5" in all_filenames:
weights_store = H5IOStore(_VARS_FNAME + ".h5", archive=zf, mode="r")
elif _VARS_FNAME + ".npz" in all_filenames:
weights_store = NpzIOStore(
_VARS_FNAME + ".npz", archive=zf, mode="r"
)
else:
raise ValueError(
f"Expected a {_VARS_FNAME}.h5 or {_VARS_FNAME}.npz file."
)
if len(all_filenames) > 3:
asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="r")
else:
asset_store = None
failed_trackables = set()
error_msgs = {}
_load_state(
model,
weights_store=weights_store,
assets_store=asset_store,
inner_path="",
visited_trackables=set(),
failed_trackables=failed_trackables,
error_msgs=error_msgs,
)
weights_store.close()
if asset_store:
asset_store.close()
if failed_trackables:
_raise_loading_failure(error_msgs)
return model
def save_weights_only(model, filepath):
"""Save only the weights of a model to a target filepath (.weights.h5).
Note: only supports h5 for now.
"""
# TODO: if h5 filepath is remote, create the file in a temporary directory
# then upload it
filepath = str(filepath)
if not filepath.endswith(".weights.h5"):
raise ValueError(
"Invalid `filepath` argument: expected a `.weights.h5` extension. "
f"Received: filepath={filepath}"
)
weights_store = H5IOStore(filepath, mode="w")
_save_state(
model,
weights_store=weights_store,
assets_store=None,
inner_path="",
visited_trackables=set(),
)
weights_store.close()
def load_weights_only(model, filepath, skip_mismatch=False):
"""Load the weights of a model from a filepath (.keras or .weights.h5).
Note: only supports h5 for now.
"""
temp_dir = None
archive = None
filepath = str(filepath)
if filepath.endswith(".weights.h5"):
# TODO: download file if h5 filepath is remote
weights_store = H5IOStore(filepath, mode="r")
elif filepath.endswith(".keras"):
archive = zipfile.ZipFile(filepath, "r")
weights_store = H5IOStore(
_VARS_FNAME + ".h5", archive=archive, mode="r"
)
failed_trackables = set()
error_msgs = {}
_load_state(
model,
weights_store=weights_store,
assets_store=None,
inner_path="",
skip_mismatch=skip_mismatch,
visited_trackables=set(),
failed_trackables=failed_trackables,
error_msgs=error_msgs,
)
weights_store.close()
if temp_dir and file_utils.exists(temp_dir):
file_utils.rmtree(temp_dir)
if archive:
archive.close()
if failed_trackables:
_raise_loading_failure(error_msgs, warn_only=skip_mismatch)
def _raise_loading_failure(error_msgs, warn_only=False):
first_key = list(error_msgs.keys())[0]
ex_trackable, ex_error = error_msgs[first_key]
msg = (
f"A total of {len(error_msgs)} objects could not "
"be loaded. Example error message for "
f"object {ex_trackable}:\n\n"
f"{ex_error}\n\n"
"List of objects that could not be loaded:\n"
f"{[x[0] for x in error_msgs.values()]}"
)
if warn_only:
warnings.warn(msg)
else:
raise ValueError(msg)
def _write_to_zip_recursively(zipfile_to_save, system_path, zip_path):
if not file_utils.isdir(system_path):
zipfile_to_save.write(system_path, zip_path)
else:
for file_name in file_utils.listdir(system_path):
system_file_path = file_utils.join(system_path, file_name).replace(
"\\", "/"
)
zip_file_path = file_utils.join(zip_path, file_name).replace(
"\\", "/"
)
_write_to_zip_recursively(
zipfile_to_save, system_file_path, zip_file_path
)
def _name_key(name):
"""Make sure that private attributes are visited last."""
if name.startswith("_"):
return "~" + name
return name
def _walk_trackable(trackable):
from keras.models import Functional
from keras.models import Sequential
if isinstance(trackable, Sequential):
obj_type = "Sequential"
elif isinstance(trackable, Functional):
obj_type = "Functional"
elif isinstance(trackable, Layer):
obj_type = "Layer"
elif isinstance(trackable, Optimizer):
obj_type = "Optimizer"
elif isinstance(trackable, Metric):
obj_type = "Metric"
elif isinstance(trackable, Loss):
obj_type = "Loss"
else:
raise ValueError(f"Invalid obj_type: {obj_type}")
attr_skiplist = get_attr_skiplist(obj_type)
# Save all layers directly tracked by Sequential and Functional first.
# This helps avoid ordering concerns for subclassed Sequential or Functional
# models with extra attributes--the internal Keras state take precedence.
if obj_type in ("Sequential", "Functional"):
yield "layers", trackable.layers
for child_attr in sorted(dir(trackable), key=lambda x: _name_key(x)):
if child_attr.startswith("__") or child_attr in attr_skiplist:
continue
try:
child_obj = getattr(trackable, child_attr)
except Exception:
# Avoid raising the exception when visiting the attributes.
continue
yield child_attr, child_obj
def _save_state(
trackable,
weights_store,
assets_store,
inner_path,
visited_trackables,
):
# If the trackable has already been saved, skip it.
if id(trackable) in visited_trackables:
return
if hasattr(trackable, "save_own_variables") and weights_store:
trackable.save_own_variables(weights_store.make(inner_path))
if hasattr(trackable, "save_assets") and assets_store:
trackable.save_assets(assets_store.make(inner_path))
visited_trackables.add(id(trackable))
# Recursively save state of children trackables (layers, optimizers, etc.)
for child_attr, child_obj in _walk_trackable(trackable):
if _is_keras_trackable(child_obj):
_save_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr).replace(
"\\", "/"
),
visited_trackables=visited_trackables,
)
elif isinstance(child_obj, (list, dict, tuple, set)):
_save_container_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr).replace(
"\\", "/"
),
visited_trackables=visited_trackables,
)
def _load_state(
trackable,
weights_store,
assets_store,
inner_path,
skip_mismatch=False,
visited_trackables=None,
failed_trackables=None,
error_msgs=None,
):
if visited_trackables and id(trackable) in visited_trackables:
return
failure = False
if hasattr(trackable, "load_own_variables") and weights_store:
if skip_mismatch or failed_trackables is not None:
try:
trackable.load_own_variables(weights_store.get(inner_path))
except Exception as e:
failed_trackables.add(id(trackable))
error_msgs[id(trackable)] = trackable, e
failure = True
else:
trackable.load_own_variables(weights_store.get(inner_path))
if hasattr(trackable, "load_assets") and assets_store:
if skip_mismatch or failed_trackables is not None:
try:
trackable.load_assets(assets_store.get(inner_path))
except Exception as e:
failed_trackables.add(id(trackable))
error_msgs[id(trackable)] = trackable, e
failure = True
else:
trackable.load_assets(assets_store.get(inner_path))
if failed_trackables is not None:
currently_failed = len(failed_trackables)
else:
currently_failed = 0
# Recursively load states for Keras trackables such as layers/optimizers.
for child_attr, child_obj in _walk_trackable(trackable):
if _is_keras_trackable(child_obj):
_load_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr).replace(
"\\", "/"
),
skip_mismatch=skip_mismatch,
visited_trackables=visited_trackables,
failed_trackables=failed_trackables,
error_msgs=error_msgs,
)
elif isinstance(child_obj, (list, dict, tuple, set)):
_load_container_state(
child_obj,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, child_attr).replace(
"\\", "/"
),
skip_mismatch=skip_mismatch,
visited_trackables=visited_trackables,
failed_trackables=failed_trackables,
error_msgs=error_msgs,
)
if failed_trackables is not None:
newly_failed = len(failed_trackables) - currently_failed
else:
newly_failed = 0
if not failure:
if visited_trackables is not None and newly_failed <= 0:
visited_trackables.add(id(trackable))
if id(trackable) in failed_trackables:
failed_trackables.remove(id(trackable))
error_msgs.pop(id(trackable))
def _save_container_state(
container, weights_store, assets_store, inner_path, visited_trackables
):
used_names = {}
if isinstance(container, dict):
container = list(container.values())
for trackable in container:
if _is_keras_trackable(trackable):
# Do NOT address the trackable via `trackable.name`, since
# names are usually autogenerated and thus not reproducible
# (i.e. they may vary across two instances of the same model).
name = naming.to_snake_case(trackable.__class__.__name__)
if name in used_names:
used_names[name] += 1
name = f"{name}_{used_names[name]}"
else:
used_names[name] = 0
_save_state(
trackable,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, name).replace("\\", "/"),
visited_trackables=visited_trackables,
)
def _load_container_state(
container,
weights_store,
assets_store,
inner_path,
skip_mismatch,
visited_trackables,
failed_trackables,
error_msgs,
):
used_names = {}
if isinstance(container, dict):
container = list(container.values())
for trackable in container:
if _is_keras_trackable(trackable):
name = naming.to_snake_case(trackable.__class__.__name__)
if name in used_names:
used_names[name] += 1
name = f"{name}_{used_names[name]}"
else:
used_names[name] = 0
_load_state(
trackable,
weights_store,
assets_store,
inner_path=file_utils.join(inner_path, name).replace("\\", "/"),
skip_mismatch=skip_mismatch,
visited_trackables=visited_trackables,
failed_trackables=failed_trackables,
error_msgs=error_msgs,
)
class DiskIOStore:
"""Asset store backed by disk storage.
If `archive` is specified, then `root_path` refers to the filename
inside the archive.
If `archive` is not specified, then `root_path` refers to the full path of
the target directory.
"""
def __init__(self, root_path, archive=None, mode=None):
self.mode = mode
self.root_path = root_path
self.archive = archive
self.tmp_dir = None
if self.archive:
self.tmp_dir = get_temp_dir()
if self.mode == "r":
self.archive.extractall(path=self.tmp_dir)
self.working_dir = file_utils.join(
self.tmp_dir, self.root_path
).replace("\\", "/")
if self.mode == "w":
file_utils.makedirs(self.working_dir)
else:
if mode == "r":
self.working_dir = root_path
else:
self.tmp_dir = get_temp_dir()
self.working_dir = file_utils.join(
self.tmp_dir, self.root_path
).replace("\\", "/")
file_utils.makedirs(self.working_dir)
def make(self, path):
if not path:
return self.working_dir
path = file_utils.join(self.working_dir, path).replace("\\", "/")
if not file_utils.exists(path):
file_utils.makedirs(path)
return path
def get(self, path):
if not path:
return self.working_dir
path = file_utils.join(self.working_dir, path).replace("\\", "/")
if file_utils.exists(path):
return path
return None
def close(self):
if self.mode == "w" and self.archive:
_write_to_zip_recursively(
self.archive, self.working_dir, self.root_path
)
if self.tmp_dir and file_utils.exists(self.tmp_dir):
file_utils.rmtree(self.tmp_dir)
class H5IOStore:
def __init__(self, root_path, archive=None, mode="r"):
"""Numerical variable store backed by HDF5.
If `archive` is specified, then `root_path` refers to the filename
inside the archive.
If `archive` is not specified, then `root_path` refers to the path of
the h5 file on disk.
"""
self.root_path = root_path
self.mode = mode
self.archive = archive
self.io_file = None
if self.archive:
if self.mode == "w":
self.io_file = io.BytesIO()
else:
self.io_file = self.archive.open(self.root_path, "r")
self.h5_file = h5py.File(self.io_file, mode=self.mode)
else:
self.h5_file = h5py.File(root_path, mode=self.mode)
def make(self, path):
return H5Entry(self.h5_file, path, mode="w")
def get(self, path):
return H5Entry(self.h5_file, path, mode="r")
def close(self):
self.h5_file.close()
if self.mode == "w" and self.archive:
self.archive.writestr(self.root_path, self.io_file.getvalue())
if self.io_file:
self.io_file.close()
class H5Entry:
"""Leaf entry in a H5IOStore."""
def __init__(self, h5_file, path, mode):
self.h5_file = h5_file
self.path = path
self.mode = mode
if mode == "w":
if not path:
self.group = self.h5_file.create_group("vars")
else:
self.group = self.h5_file.create_group(self.path).create_group(
"vars"
)
else:
found = False
if not path:
self.group = self.h5_file["vars"]
found = True
elif path in self.h5_file and "vars" in self.h5_file[path]:
self.group = self.h5_file[path]["vars"]
found = True
else:
# No hit.
# Fix for 2.13 compatibility
if "_layer_checkpoint_dependencies" in self.h5_file:
path = path.replace(
"layers", "_layer_checkpoint_dependencies"
)
self.path = path
if path in self.h5_file and "vars" in self.h5_file[path]:
self.group = self.h5_file[path]["vars"]
found = True
if not found:
self.group = {}
def __len__(self):
return self.group.__len__()
def keys(self):
return self.group.keys()
def items(self):
return self.group.items()
def values(self):
return self.group.values()
def __setitem__(self, key, value):
if self.mode != "w":
raise ValueError("Setting a value is only allowed in write mode.")
value = backend.convert_to_numpy(value)
if backend.standardize_dtype(value.dtype) == "bfloat16":
ds = self.group.create_dataset(key, data=value)
ds.attrs["dtype"] = "bfloat16"
else:
self.group[key] = value
def __getitem__(self, name):
value = self.group[name]
if "dtype" in value.attrs and value.attrs["dtype"] == "bfloat16":
value = np.array(value, dtype=ml_dtypes.bfloat16)
return value
class NpzIOStore:
def __init__(self, root_path, archive=None, mode="r"):
"""Numerical variable store backed by NumPy.savez/load.
If `archive` is specified, then `root_path` refers to the filename
inside the archive.
If `archive` is not specified, then `root_path` refers to the path of
the npz file on disk.
"""
self.root_path = root_path
self.mode = mode
self.archive = archive
if mode == "w":
self.contents = {}
else:
if self.archive:
self.f = archive.open(root_path, mode="r")
else:
self.f = open(root_path, mode="rb")
self.contents = np.load(self.f, allow_pickle=True)
def make(self, path):
if not path:
self.contents["__root__"] = {}
return self.contents["__root__"]
self.contents[path] = {}
return self.contents[path]
def get(self, path):
if not path:
if "__root__" in self.contents:
return dict(self.contents["__root__"])
return {}
if path in self.contents:
return self.contents[path].tolist()
return {}
def close(self):
if self.mode == "w":
if self.archive:
self.f = self.archive.open(
self.root_path, mode="w", force_zip64=True
)
else:
self.f = open(self.root_path, mode="wb")
np.savez(self.f, **self.contents)
self.f.close()
def get_temp_dir():
temp_dir = tempfile.mkdtemp()
testfile = tempfile.TemporaryFile(dir=temp_dir)
testfile.close()
return temp_dir
def get_attr_skiplist(obj_type):
skiplist = global_state.get_global_attribute(
f"saving_attr_skiplist_{obj_type}", None
)
if skiplist is not None:
return skiplist
skiplist = [
"_self_unconditional_dependency_names",
]
if obj_type == "Layer":
ref_obj = Layer()
skiplist += dir(ref_obj)
elif obj_type == "Functional":
ref_obj = Layer()
skiplist += dir(ref_obj) + ["operations", "_operations"]
elif obj_type == "Sequential":
ref_obj = Layer()
skiplist += dir(ref_obj) + ["_functional"]
elif obj_type == "Metric":
ref_obj_a = Metric()
ref_obj_b = CompileMetrics([], [])
skiplist += dir(ref_obj_a) + dir(ref_obj_b)
elif obj_type == "Optimizer":
ref_obj = Optimizer(1.0)
skiplist += dir(ref_obj)
skiplist.remove("variables")
elif obj_type == "Loss":
ref_obj = Loss()
skiplist += dir(ref_obj)
else:
raise ValueError(f"Invalid obj_type: {obj_type}")
global_state.set_global_attribute(
f"saving_attr_skiplist_{obj_type}", skiplist
)
return skiplist
def _is_keras_trackable(obj):
return isinstance(
obj,
(
Layer,
Optimizer,
Metric,
Loss,
),
)
| keras/keras/saving/saving_lib.py/0 | {
"file_path": "keras/keras/saving/saving_lib.py",
"repo_id": "keras",
"token_count": 12304
} | 189 |
import itertools
import numpy as np
import tree
from keras import backend
from keras.trainers.data_adapters import data_adapter_utils
from keras.trainers.data_adapters.data_adapter import DataAdapter
class GeneratorDataAdapter(DataAdapter):
"""Adapter for Python generators."""
def __init__(self, generator):
first_batch, generator = peek_and_restore(generator)
self.generator = generator
self._first_batch = first_batch
self._output_signature = None
if not isinstance(first_batch, tuple):
raise ValueError(
"When passing a Python generator to a Keras model, "
"the generator must return a tuple, either "
"(input,) or (inputs, targets) or "
"(inputs, targets, sample_weights). "
f"Received: {first_batch}"
)
def _set_tf_output_signature(self):
from keras.utils.module_utils import tensorflow as tf
def get_tensor_spec(x):
shape = x.shape
if len(shape) < 1:
raise ValueError(
"When passing a Python generator to a Keras model, "
"the arrays returned by the generator "
"must be at least rank 1. Received: "
f"{x} of rank {len(x.shape)}"
)
shape = list(shape)
shape[0] = None # The batch size is not guaranteed to be static.
dtype = backend.standardize_dtype(x.dtype)
if isinstance(x, tf.RaggedTensor):
return tf.RaggedTensorSpec(shape=shape, dtype=dtype)
if (
isinstance(x, tf.SparseTensor)
or is_scipy_sparse(x)
or is_jax_sparse(x)
):
return tf.SparseTensorSpec(shape=shape, dtype=dtype)
else:
return tf.TensorSpec(shape=shape, dtype=dtype)
self._output_signature = tree.map_structure(
get_tensor_spec, self._first_batch
)
def get_numpy_iterator(self):
return data_adapter_utils.get_numpy_iterator(self.generator)
def get_jax_iterator(self):
from keras.backend.jax.core import convert_to_tensor
def convert_to_jax(x):
if is_scipy_sparse(x):
return scipy_sparse_to_jax_sparse(x)
elif is_tf_sparse(x):
return tf_sparse_to_jax_sparse(x)
return convert_to_tensor(x)
for batch in self.generator:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
from keras.utils.module_utils import tensorflow as tf
def convert_to_tf(x):
if is_scipy_sparse(x):
x = scipy_sparse_to_tf_sparse(x)
elif is_jax_sparse(x):
x = jax_sparse_to_tf_sparse(x)
return x
def get_tf_iterator():
for batch in self.generator:
batch = tree.map_structure(convert_to_tf, batch)
yield batch
if self._output_signature is None:
self._set_tf_output_signature()
ds = tf.data.Dataset.from_generator(
get_tf_iterator,
output_signature=self._output_signature,
)
ds = ds.prefetch(tf.data.AUTOTUNE)
return ds
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self.generator)
@property
def num_batches(self):
return None
@property
def batch_size(self):
return None
def peek_and_restore(generator):
element = next(generator)
return element, itertools.chain([element], generator)
def is_scipy_sparse(x):
return x.__class__.__module__.startswith("scipy.sparse") and hasattr(
x, "tocoo"
)
def is_tf_sparse(x):
return (
x.__class__.__name__ == "SparseTensor"
and x.__class__.__module__.startswith("tensorflow")
)
def is_jax_sparse(x):
return x.__class__.__module__.startswith("jax.experimental.sparse")
def scipy_sparse_to_tf_sparse(x):
from keras.utils.module_utils import tensorflow as tf
coo = x.tocoo()
indices = np.concatenate(
(np.expand_dims(coo.row, 1), np.expand_dims(coo.col, 1)),
axis=1,
)
return tf.SparseTensor(indices, coo.data, coo.shape)
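# Hedged usage sketch (illustrative values only): converting a SciPy COO
# matrix with a single nonzero at row 0, column 2 yields a tf.SparseTensor
# with indices [[0, 2]], values [1.0], and dense_shape [2, 4].
#
#   import scipy.sparse as sp
#   st = scipy_sparse_to_tf_sparse(
#       sp.coo_matrix(([1.0], ([0], [2])), shape=(2, 4))
#   )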
def scipy_sparse_to_jax_sparse(x):
import jax.experimental.sparse as jax_sparse
coo = x.tocoo()
indices = np.concatenate(
(np.expand_dims(coo.row, 1), np.expand_dims(coo.col, 1)),
axis=1,
)
return jax_sparse.BCOO((coo.data, indices), shape=coo.shape)
def tf_sparse_to_jax_sparse(x):
import jax.experimental.sparse as jax_sparse
from keras.backend.tensorflow.core import convert_to_numpy
values = convert_to_numpy(x.values)
indices = convert_to_numpy(x.indices)
return jax_sparse.BCOO((values, indices), shape=x.shape)
def jax_sparse_to_tf_sparse(x):
from keras.utils.module_utils import tensorflow as tf
return tf.SparseTensor(x.indices, x.data, x.shape)
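# Minimal self-contained demo (not part of the adapter API) of the
# peek-and-restore pattern that `GeneratorDataAdapter.__init__` relies on
# above: the first batch is consumed to inspect its structure, then chained
# back so downstream iteration still sees every batch. Guarded so importing
# this module stays side-effect free.
if __name__ == "__main__":
    def _demo_batches():
        yield (np.array([1.0]),)
        yield (np.array([2.0]),)

    first, restored = peek_and_restore(_demo_batches())
    print("peeked:", first)              # (array([1.]),)
    print("restored:", list(restored))   # both batches, in order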
| keras/keras/trainers/data_adapters/generator_data_adapter.py/0 | {
"file_path": "keras/keras/trainers/data_adapters/generator_data_adapter.py",
"repo_id": "keras",
"token_count": 2492
} | 190 |
import sys
from keras import backend as backend_module
from keras.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tensorflow" in sys.modules:
from keras.utils.module_utils import tensorflow as tf
return not tf.executing_eagerly()
return False
def convert_tf_tensor(outputs, dtype=None):
if backend_module.backend() != "tensorflow" and not in_tf_graph():
outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
return outputs
class TFGraphScope:
def __init__(self):
self._original_value = global_state.get_global_attribute(
"in_tf_graph_scope", False
)
def __enter__(self):
global_state.set_global_attribute("in_tf_graph_scope", True)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"in_tf_graph_scope", self._original_value
)
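# Hedged usage sketch: TFGraphScope forces `in_tf_graph()` to report True for
# the duration of the `with` block (e.g. while tracing a tf.data pipeline) and
# restores the previous flag on exit.
#
#   with TFGraphScope():
#       assert in_tf_graph()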
class DynamicBackend:
"""A class that can be used to switch from one backend to another.
Usage:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
def __getattr__(self, name):
if self._backend == "tensorflow":
from keras.backend import tensorflow as tf_backend
return getattr(tf_backend, name)
if self._backend == "jax":
from keras.backend import jax as jax_backend
return getattr(jax_backend, name)
if self._backend == "torch":
from keras.backend import torch as torch_backend
return getattr(torch_backend, name)
if self._backend == "numpy":
# TODO (ariG23498):
# The import `from keras.backend import numpy as numpy_backend`
# is not working. This is a temporary fix.
# The import is redirected to `keras.backend.numpy.numpy.py`
from keras import backend as numpy_backend
return getattr(numpy_backend, name)
| keras/keras/utils/backend_utils.py/0 | {
"file_path": "keras/keras/utils/backend_utils.py",
"repo_id": "keras",
"token_count": 1024
} | 191 |
import importlib
class LazyModule:
def __init__(self, name, pip_name=None):
self.name = name
pip_name = pip_name or name
self.pip_name = pip_name
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
try:
self.initialize()
self._available = True
except ImportError:
self._available = False
return self._available
def initialize(self):
try:
self.module = importlib.import_module(self.name)
except ImportError:
raise ImportError(
f"This requires the {self.name} module. "
f"You can install it via `pip install {self.pip_name}`"
)
def __getattr__(self, name):
if name == "_api_export_path":
raise AttributeError
if self.module is None:
self.initialize()
return getattr(self.module, name)
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
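# Minimal self-contained demo (not part of the public API): LazyModule defers
# the real import until the first attribute access. The standard-library
# `json` module is used here purely for illustration. Guarded so importing
# this module stays side-effect free.
if __name__ == "__main__":
    json_lazy = LazyModule("json")
    assert json_lazy.module is None      # nothing has been imported yet
    assert json_lazy.available           # triggers importlib.import_module
    assert json_lazy.dumps({"a": 1}) == '{"a": 1}'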
| keras/keras/utils/module_utils.py/0 | {
"file_path": "keras/keras/utils/module_utils.py",
"repo_id": "keras",
"token_count": 559
} | 192 |
import numpy as np
from keras.api_export import keras_export
from keras.utils import dataset_utils
from keras.utils.module_utils import tensorflow as tf
@keras_export(
[
"keras.utils.text_dataset_from_directory",
"keras.preprocessing.text_dataset_from_directory",
]
)
def text_dataset_from_directory(
directory,
labels="inferred",
label_mode="int",
class_names=None,
batch_size=32,
max_length=None,
shuffle=True,
seed=None,
validation_split=None,
subset=None,
follow_links=False,
):
"""Generates a `tf.data.Dataset` from text files in a directory.
If your directory structure is:
```
main_directory/
...class_a/
......a_text_1.txt
......a_text_2.txt
...class_b/
......b_text_1.txt
......b_text_2.txt
```
Then calling `text_dataset_from_directory(main_directory,
labels='inferred')` will return a `tf.data.Dataset` that yields batches of
texts from the subdirectories `class_a` and `class_b`, together with labels
0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).
Only `.txt` files are supported at this time.
Args:
directory: Directory where the data is located.
If `labels` is `"inferred"`, it should contain
subdirectories, each containing text files for a class.
Otherwise, the directory structure is ignored.
labels: Either `"inferred"`
(labels are generated from the directory structure),
`None` (no labels),
or a list/tuple of integer labels of the same size as the number of
text files found in the directory. Labels should be sorted according
to the alphanumeric order of the text file paths
(obtained via `os.walk(directory)` in Python).
label_mode: String describing the encoding of `labels`. Options are:
- `"int"`: means that the labels are encoded as integers
(e.g. for `sparse_categorical_crossentropy` loss).
- `"categorical"` means that the labels are
encoded as a categorical vector
(e.g. for `categorical_crossentropy` loss).
- `"binary"` means that the labels (there can be only 2)
are encoded as `float32` scalars with values 0 or 1
(e.g. for `binary_crossentropy`).
- `None` (no labels).
class_names: Only valid if `"labels"` is `"inferred"`.
This is the explicit list of class names
(must match names of subdirectories). Used to control the order
of the classes (otherwise alphanumerical order is used).
batch_size: Size of the batches of data. Defaults to 32.
If `None`, the data will not be batched
(the dataset will yield individual samples).
max_length: Maximum size of a text string. Texts longer than this will
be truncated to `max_length`.
shuffle: Whether to shuffle the data. Defaults to `True`.
If set to `False`, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: Subset of the data to return.
One of `"training"`, `"validation"` or `"both"`.
Only used if `validation_split` is set.
When `subset="both"`, the utility returns a tuple of two datasets
(the training and validation datasets respectively).
follow_links: Whether to visit subdirectories pointed to by symlinks.
Defaults to `False`.
Returns:
A `tf.data.Dataset` object.
- If `label_mode` is `None`, it yields `string` tensors of shape
`(batch_size,)`, containing the contents of a batch of text files.
- Otherwise, it yields a tuple `(texts, labels)`, where `texts`
has shape `(batch_size,)` and `labels` follows the format described
below.
Rules regarding labels format:
- if `label_mode` is `int`, the labels are an `int32` tensor of shape
`(batch_size,)`.
- if `label_mode` is `binary`, the labels are a `float32` tensor of
1s and 0s of shape `(batch_size, 1)`.
- if `label_mode` is `categorical`, the labels are a `float32` tensor
of shape `(batch_size, num_classes)`, representing a one-hot
encoding of the class index.
"""
if labels not in ("inferred", None):
if not isinstance(labels, (list, tuple)):
raise ValueError(
"`labels` argument should be a list/tuple of integer labels, "
"of the same size as the number of text files in the target "
"directory. If you wish to infer the labels from the "
"subdirectory names in the target directory, "
'pass `labels="inferred"`. '
"If you wish to get a dataset that only contains text samples "
f"(no labels), pass `labels=None`. Received: labels={labels}"
)
if class_names:
raise ValueError(
"You can only pass `class_names` if "
f'`labels="inferred"`. Received: labels={labels}, and '
f"class_names={class_names}"
)
if label_mode not in {"int", "categorical", "binary", None}:
raise ValueError(
'`label_mode` argument must be one of "int", '
'"categorical", "binary", '
f"or None. Received: label_mode={label_mode}"
)
if labels is None or label_mode is None:
labels = None
label_mode = None
dataset_utils.check_validation_split_arg(
validation_split, subset, shuffle, seed
)
if seed is None:
seed = np.random.randint(1e6)
file_paths, labels, class_names = dataset_utils.index_directory(
directory,
labels,
formats=(".txt",),
class_names=class_names,
shuffle=shuffle,
seed=seed,
follow_links=follow_links,
)
if label_mode == "binary" and len(class_names) != 2:
raise ValueError(
'When passing `label_mode="binary"`, there must be exactly 2 '
f"class_names. Received: class_names={class_names}"
)
if subset == "both":
(
file_paths_train,
labels_train,
) = dataset_utils.get_training_or_validation_split(
file_paths, labels, validation_split, "training"
)
(
file_paths_val,
labels_val,
) = dataset_utils.get_training_or_validation_split(
file_paths, labels, validation_split, "validation"
)
if not file_paths_train:
raise ValueError(
f"No training text files found in directory {directory}. "
"Allowed format: .txt"
)
if not file_paths_val:
raise ValueError(
f"No validation text files found in directory {directory}. "
"Allowed format: .txt"
)
train_dataset = paths_and_labels_to_dataset(
file_paths=file_paths_train,
labels=labels_train,
label_mode=label_mode,
num_classes=len(class_names) if class_names else 0,
max_length=max_length,
)
val_dataset = paths_and_labels_to_dataset(
file_paths=file_paths_val,
labels=labels_val,
label_mode=label_mode,
num_classes=len(class_names) if class_names else 0,
max_length=max_length,
)
train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
if batch_size is not None:
if shuffle:
# Shuffle locally at each iteration
train_dataset = train_dataset.shuffle(
buffer_size=batch_size * 8, seed=seed
)
train_dataset = train_dataset.batch(batch_size)
val_dataset = val_dataset.batch(batch_size)
else:
if shuffle:
train_dataset = train_dataset.shuffle(
buffer_size=1024, seed=seed
)
# Users may need to reference `class_names`.
train_dataset.class_names = class_names
val_dataset.class_names = class_names
dataset = [train_dataset, val_dataset]
else:
file_paths, labels = dataset_utils.get_training_or_validation_split(
file_paths, labels, validation_split, subset
)
if not file_paths:
raise ValueError(
f"No text files found in directory {directory}. "
"Allowed format: .txt"
)
dataset = paths_and_labels_to_dataset(
file_paths=file_paths,
labels=labels,
label_mode=label_mode,
num_classes=len(class_names) if class_names else 0,
max_length=max_length,
)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
if batch_size is not None:
if shuffle:
# Shuffle locally at each iteration
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
dataset = dataset.batch(batch_size)
else:
if shuffle:
dataset = dataset.shuffle(buffer_size=1024, seed=seed)
# Users may need to reference `class_names`.
dataset.class_names = class_names
return dataset
def paths_and_labels_to_dataset(
file_paths, labels, label_mode, num_classes, max_length
):
"""Constructs a dataset of text strings and labels."""
path_ds = tf.data.Dataset.from_tensor_slices(file_paths)
string_ds = path_ds.map(
lambda x: path_to_string_content(x, max_length),
num_parallel_calls=tf.data.AUTOTUNE,
)
if label_mode:
label_ds = dataset_utils.labels_to_dataset(
labels, label_mode, num_classes
)
string_ds = tf.data.Dataset.zip((string_ds, label_ds))
return string_ds
def path_to_string_content(path, max_length):
txt = tf.io.read_file(path)
if max_length is not None:
txt = tf.strings.substr(txt, 0, max_length)
return txt
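# Hedged usage sketch (the directory path is hypothetical): build training and
# validation datasets from a directory of .txt files laid out one class per
# subdirectory, as described in the docstring above.
#
#   train_ds, val_ds = text_dataset_from_directory(
#       "path/to/main_directory",
#       batch_size=32,
#       validation_split=0.2,
#       subset="both",
#       seed=1337,
#   )
#   for texts, labels in train_ds.take(1):
#       print(texts.shape, labels.shape)  # (32,) (32,) with `label_mode="int"`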
| keras/keras/utils/text_dataset_utils.py/0 | {
"file_path": "keras/keras/utils/text_dataset_utils.py",
"repo_id": "keras",
"token_count": 4671
} | 193 |
# Description:
# Contains the TF-Keras API (internal TensorFlow version).
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
# copybara:uncomment_begin(google-only)
# load("//tools/build_defs/license:license.bzl", "license")
# copybara:uncomment_end
package(
# copybara:uncomment default_applicable_licenses = [":license"],
default_visibility = [":friends"],
licenses = ["notice"],
)
# TF-Keras code that doesn't live in the core TF-Keras directory, but still
# needs to directly access the keras code.
# We shouldn't add any client side package to this list.
package_group(
name = "friends",
packages = ["//tf_keras/..."],
)
exports_files(["LICENSE"])
config_setting(
name = "no_keras_py_deps",
define_values = {"no_keras_py_deps": "true"},
visibility = ["//visibility:public"],
)
py_library(
name = "tf_keras",
srcs = [
"__init__.py",
],
srcs_version = "PY3",
deps = [
":backend",
":engine",
"//:expect_h5py_installed",
"//:expect_numpy_installed",
"//:expect_pydot_installed",
"//:expect_scipy_installed",
"//:expect_tensorflow_installed",
"//:expect_yaml_installed",
"//tf_keras/applications",
"//tf_keras/datasets",
"//tf_keras/distribute",
"//tf_keras/estimator",
"//tf_keras/feature_column",
"//tf_keras/layers",
"//tf_keras/layers/rnn:legacy_cell_wrappers",
"//tf_keras/layers/rnn:legacy_cells",
"//tf_keras/legacy_tf_layers:layers",
"//tf_keras/mixed_precision:mixed_precision_experimental",
"//tf_keras/models",
"//tf_keras/optimizers",
"//tf_keras/premade_models",
"//tf_keras/preprocessing",
"//tf_keras/saving",
"//tf_keras/testing_infra:keras_doctest_lib",
"//tf_keras/testing_infra:test_utils", # For keras.__internal__ API
"//tf_keras/utils",
],
)
py_library(
name = "backend",
srcs = ["backend.py"],
srcs_version = "PY3",
deps = [
":backend_config",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/distribute:distribute_coordinator_utils",
"//tf_keras/engine:keras_tensor",
"//tf_keras/utils:control_flow_util",
"//tf_keras/utils:object_identity",
"//tf_keras/utils:tf_contextlib",
"//tf_keras/utils:tf_inspect",
],
)
py_library(
name = "backend_config",
srcs = ["backend_config.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
],
)
# TODO(scottzhu): Cleanup this target and point all the user to keras/engine.
py_library(
name = "engine",
srcs = [
"//tf_keras/metrics",
"//tf_keras/models",
],
srcs_version = "PY3",
deps = [
"//tf_keras/engine",
],
)
py_library(
name = "activations",
srcs = [
"activations.py",
],
srcs_version = "PY3",
deps = [
":backend",
"//tf_keras/layers/activation",
"//tf_keras/utils:engine_utils",
],
)
# TODO(scottzhu): Cleanup this target and point all the user to keras/engine.
py_library(
name = "base_layer",
srcs = [],
srcs_version = "PY3",
deps = [
"//tf_keras/engine:base_layer",
],
)
py_library(
name = "callbacks",
srcs = [
"callbacks.py",
],
srcs_version = "PY3",
deps = [
":backend",
"//:expect_tensorboard_installed",
"//:expect_tensorflow_installed",
"//tf_keras/distribute:distributed_file_utils",
"//tf_keras/distribute:worker_training_state",
"//tf_keras/protobuf:projector_config_proto_py_pb2",
"//tf_keras/utils:engine_utils",
"//tf_keras/utils:mode_keys",
"//tf_keras/utils:timed_threads",
],
)
py_library(
name = "callbacks_v1",
srcs = [
"callbacks_v1.py",
],
srcs_version = "PY3",
deps = [
":backend",
"//:expect_tensorboard_installed",
"//:expect_tensorflow_installed",
"//tf_keras/utils:engine_utils",
],
)
py_library(
name = "constraints",
srcs = [
"constraints.py",
],
srcs_version = "PY3",
deps = [
":backend",
"//tf_keras/utils:engine_utils",
],
)
py_library(
name = "losses",
srcs = [
"losses.py",
],
srcs_version = "PY3",
deps = [
":backend",
"//:expect_tensorflow_installed",
"//tf_keras/saving:saving_lib",
"//tf_keras/utils:engine_utils",
"//tf_keras/utils:generic_utils",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "regularizers",
srcs = [
"regularizers.py",
],
srcs_version = "PY3",
deps = [
":backend",
"//tf_keras/utils:engine_utils",
],
)
# Internally, the urllib.request.urlretrieve library requires a Google
# SSL context to be provided to work in Python 3. This isn't needed in OSS.
# copybara:uncomment_begin(google-only)
# py_library(
# name = "url_utils",
# srcs = ["google/url_utils.py"],
# srcs_version = "PY3",
# deps = ["//pyglib/contrib/google_ssl"],
# )
# copybara:uncomment_end
# Some tf.distribute related feature requires detecting platform.
# Internally we'd like to recognize Borg, which is not needed in OSS.
# copybara:uncomment_begin(google-only)
# py_library(
# name = "distribute_utils",
# srcs = ["google/distribute_utils.py"],
# deps = [
# "//:expect_six_installed",
# "//:expect_tensorflow_installed",
# "//third_party/py/requests",
# ],
# )
# copybara:uncomment_end
tf_py_test(
name = "activations_test",
size = "small",
srcs = ["activations_test.py"],
python_version = "PY3",
deps = [
":activations",
":backend",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_scipy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/layers",
"//tf_keras/layers/activation",
"//tf_keras/layers/core",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "constraints_test",
size = "small",
srcs = ["constraints_test.py"],
python_version = "PY3",
deps = [
":backend",
":constraints",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "regularizers_test",
size = "medium",
srcs = ["regularizers_test.py"],
python_version = "PY3",
deps = [
":tf_keras",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "losses_test",
size = "small",
srcs = ["losses_test.py"],
python_version = "PY3",
shard_count = 4,
tags = [
"noasan", # b/186128525
],
deps = [
":backend",
":losses",
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/utils:engine_utils",
],
)
tf_py_test(
name = "callbacks_test",
size = "medium",
srcs = ["callbacks_test.py"],
python_version = "PY3",
shard_count = 6,
tags = [
"no_pip", # TODO(b/276923757)
"no_tfrt", # TODO(b/179690526)
"notsan",
],
deps = [
":tf_keras",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "callbacks_v1_test",
size = "medium",
srcs = ["callbacks_v1_test.py"],
python_version = "PY3",
tags = [
"nomac", # Using profiler causes segfault in MacOS runs.
"notsan",
],
deps = [
":callbacks",
":callbacks_v1",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/engine",
"//tf_keras/layers",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
"//tf_keras/utils:np_utils",
],
)
tf_py_test(
name = "backend_test",
size = "medium",
srcs = ["backend_test.py"],
python_version = "PY3",
shard_count = 4,
deps = [
":backend",
":engine",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_scipy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/testing_infra:test_combinations",
],
)
tf_py_test(
name = "backend_config_test",
size = "medium",
srcs = ["backend_config_test.py"],
python_version = "PY3",
deps = [
":backend",
":backend_config",
"//:expect_tensorflow_installed",
"//tf_keras/testing_infra:test_combinations",
],
)
# copybara:uncomment_begin(google-only)
# tf_py_test(
# name = "url_utils_test",
# srcs = ["google/url_utils_test.py"],
# python_version = "PY3",
# deps = [
# ":url_utils",
# "//:expect_tensorflow_installed",
# "//testing/pymocks:matchers",
# ],
# )
#
# tf_py_test(
# name = "distribute_utils_test",
# srcs = ["google/distribute_utils_test.py"],
# python_version = "PY3",
# deps = [
# ":distribute_utils",
# "//:expect_tensorflow_installed",
# "//testing/pymocks:matchers",
# "//tf_keras/distribute",
# ],
# )
#
# license(
# name = "license",
# package_name = "tf_keras",
# )
# copybara:uncomment_end
| tf-keras/tf_keras/BUILD/0 | {
"file_path": "tf-keras/tf_keras/BUILD",
"repo_id": "tf-keras",
"token_count": 4912
} | 194 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ConvNeXt models for TF-Keras.
References:
- [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545)
(CVPR 2022)
"""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import initializers
from tf_keras import layers
from tf_keras import utils
from tf_keras.applications import imagenet_utils
from tf_keras.engine import sequential
from tf_keras.engine import training as training_lib
# isort: off
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/convnext/"
)
WEIGHTS_HASHES = {
"convnext_tiny": (
"8ae6e78ce2933352b1ef4008e6dd2f17bc40771563877d156bc6426c7cf503ff",
"d547c096cabd03329d7be5562c5e14798aa39ed24b474157cef5e85ab9e49ef1",
),
"convnext_small": (
"ce1277d8f1ee5a0ef0e171469089c18f5233860ceaf9b168049cb9263fd7483c",
"6fc8009faa2f00c1c1dfce59feea9b0745eb260a7dd11bee65c8e20843da6eab",
),
"convnext_base": (
"52cbb006d3dadd03f6e095a8ca1aca47aecdd75acb4bc74bce1f5c695d0086e6",
"40a20c5548a5e9202f69735ecc06c990e6b7c9d2de39f0361e27baeb24cb7c45",
),
"convnext_large": (
"070c5ed9ed289581e477741d3b34beffa920db8cf590899d6d2c67fba2a198a6",
"96f02b6f0753d4f543261bc9d09bed650f24dd6bc02ddde3066135b63d23a1cd",
),
"convnext_xlarge": (
"c1f5ccab661354fc3a79a10fa99af82f0fbf10ec65cb894a3ae0815f17a889ee",
"de3f8a54174130e0cecdc71583354753d557fcf1f4487331558e2a16ba0cfe05",
),
}
MODEL_CONFIGS = {
"tiny": {
"depths": [3, 3, 9, 3],
"projection_dims": [96, 192, 384, 768],
"default_size": 224,
},
"small": {
"depths": [3, 3, 27, 3],
"projection_dims": [96, 192, 384, 768],
"default_size": 224,
},
"base": {
"depths": [3, 3, 27, 3],
"projection_dims": [128, 256, 512, 1024],
"default_size": 224,
},
"large": {
"depths": [3, 3, 27, 3],
"projection_dims": [192, 384, 768, 1536],
"default_size": 224,
},
"xlarge": {
"depths": [3, 3, 27, 3],
"projection_dims": [256, 512, 1024, 2048],
"default_size": 224,
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
References:
- [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545)
(CVPR 2022)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The `base`, `large`, and `xlarge` models were first pre-trained on the
ImageNet-21k dataset and then fine-tuned on the ImageNet-1k dataset. The
pre-trained parameters of the models were assembled from the
[official repository](https://github.com/facebookresearch/ConvNeXt). To get a
sense of how these parameters were converted to TF-Keras compatible
parameters, please refer to
[this repository](https://github.com/sayakpaul/keras-convnext-conversion).
Note: Each TF-Keras Application expects a specific kind of input
preprocessing. For ConvNeXt, preprocessing is included in the model using a
`Normalization` layer. ConvNeXt models expect their inputs to be float or
uint8 tensors of pixels with values in the [0-255] range.
When calling the `summary()` method after instantiating a ConvNeXt model,
prefer setting the `expand_nested` argument of `summary()` to `True` to better
investigate the instantiated model.
Args:
include_top: Whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: One of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet-1k), or the path to the weights
file to be loaded. Defaults to `"imagenet"`.
input_tensor: Optional TF-Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False.
It should have exactly 3 inputs channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
Defaults to `None`.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified. 1000 is how many
ImageNet classes there are. Defaults to `1000`.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`. Defaults to `"softmax"`.
Returns:
A `keras.Model` instance.
"""
class StochasticDepth(layers.Layer):
"""Stochastic Depth module.
It performs batch-wise dropping rather than sample-wise. In libraries like
`timm`, it's similar to `DropPath` layers that drop residual paths
sample-wise.
References:
- https://github.com/rwightman/pytorch-image-models
Args:
drop_path_rate (float): Probability of dropping paths. Should be within
[0, 1].
Returns:
Tensor either with the residual path dropped or kept.
"""
def __init__(self, drop_path_rate, **kwargs):
super().__init__(**kwargs)
self.drop_path_rate = drop_path_rate
def call(self, x, training=None):
if training:
keep_prob = 1 - self.drop_path_rate
shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
random_tensor = tf.floor(random_tensor)
return (x / keep_prob) * random_tensor
return x
def get_config(self):
config = super().get_config()
config.update({"drop_path_rate": self.drop_path_rate})
return config
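# Hedged behavior sketch (values are illustrative): with drop_path_rate=0.25
# and training=True, each sample in the batch independently keeps its residual
# branch with probability 0.75, and kept samples are scaled by 1 / 0.75 so the
# expected output matches the input.
#
#   layer = StochasticDepth(0.25)
#   out = layer(tf.ones((4, 8)), training=True)
#   # each row of `out` is either all zeros or all 1/0.75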
class LayerScale(layers.Layer):
"""Layer scale module.
References:
- https://arxiv.org/abs/2103.17239
Args:
init_values (float): Initial value for layer scale. Should be within
[0, 1].
projection_dim (int): Projection dimensionality.
Returns:
Tensor multiplied by the scale factor.
"""
def __init__(self, init_values, projection_dim, **kwargs):
super().__init__(**kwargs)
self.init_values = init_values
self.projection_dim = projection_dim
def build(self, input_shape):
self.gamma = self.add_weight(
name="gamma",
shape=(self.projection_dim,),
initializer=initializers.Constant(self.init_values),
trainable=True,
)
def call(self, x):
return x * self.gamma
def get_config(self):
config = super().get_config()
config.update(
{
"init_values": self.init_values,
"projection_dim": self.projection_dim,
}
)
return config
def ConvNeXtBlock(
projection_dim, drop_path_rate=0.0, layer_scale_init_value=1e-6, name=None
):
"""ConvNeXt block.
References:
- https://arxiv.org/abs/2201.03545
- https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py
Notes:
In the original ConvNeXt implementation (linked above), the authors use
`Dense` layers for pointwise convolutions for increased efficiency.
Following that, this implementation also uses the same.
Args:
projection_dim (int): Number of filters for convolution layers. In the
ConvNeXt paper, this is referred to as projection dimension.
drop_path_rate (float): Probability of dropping paths. Should be within
[0, 1].
layer_scale_init_value (float): Layer scale value. Should be a small float
number.
name: name prefix for the TF-Keras layer.
Returns:
A function representing a ConvNeXt block.
"""
if name is None:
name = "prestem" + str(backend.get_uid("prestem"))
def apply(inputs):
x = inputs
x = layers.Conv2D(
filters=projection_dim,
kernel_size=7,
padding="same",
groups=projection_dim,
name=name + "_depthwise_conv",
)(x)
x = layers.LayerNormalization(epsilon=1e-6, name=name + "_layernorm")(x)
x = layers.Dense(4 * projection_dim, name=name + "_pointwise_conv_1")(x)
x = layers.Activation("gelu", name=name + "_gelu")(x)
x = layers.Dense(projection_dim, name=name + "_pointwise_conv_2")(x)
if layer_scale_init_value is not None:
x = LayerScale(
layer_scale_init_value,
projection_dim,
name=name + "_layer_scale",
)(x)
if drop_path_rate:
layer = StochasticDepth(
drop_path_rate, name=name + "_stochastic_depth"
)
else:
layer = layers.Activation("linear", name=name + "_identity")
return inputs + layer(x)
return apply
def PreStem(name=None):
"""Normalizes inputs with ImageNet-1k mean and std.
Args:
name (str): Name prefix.
Returns:
A PreStem function.
"""
if name is None:
name = "prestem" + str(backend.get_uid("prestem"))
def apply(x):
x = layers.Normalization(
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
variance=[
(0.229 * 255) ** 2,
(0.224 * 255) ** 2,
(0.225 * 255) ** 2,
],
name=name + "_prestem_normalization",
)(x)
return x
return apply
def Head(num_classes=1000, classifier_activation=None, name=None):
"""Implementation of classification head of ConvNeXt.
Args:
num_classes: number of classes for Dense layer
classifier_activation: activation function for the Dense layer
name: name prefix
Returns:
Classification head function.
"""
if name is None:
name = str(backend.get_uid("head"))
def apply(x):
x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x)
x = layers.LayerNormalization(
epsilon=1e-6, name=name + "_head_layernorm"
)(x)
x = layers.Dense(
num_classes,
activation=classifier_activation,
name=name + "_head_dense",
)(x)
return x
return apply
def ConvNeXt(
depths,
projection_dims,
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
default_size=224,
model_name="convnext",
include_preprocessing=True,
include_top=True,
weights=None,
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates ConvNeXt architecture given specific configuration.
Args:
depths: An iterable containing depths for each individual stages.
projection_dims: An iterable containing output number of channels of
each individual stages.
drop_path_rate: Stochastic depth probability. If 0.0, then stochastic
depth won't be used.
layer_scale_init_value: Layer scale coefficient. If 0.0, layer scaling
won't be used.
default_size: Default input image size.
model_name: An optional name for the model.
include_preprocessing: boolean denoting whether to include preprocessing in
the model. When `weights="imagenet"` this should be always set to True.
But for other models (e.g., randomly initialized) users should set it
to False and apply preprocessing to data accordingly.
include_top: Boolean denoting whether to include classification head to
the model.
weights: one of `None` (random initialization), `"imagenet"` (pre-training
on ImageNet-1k), or the path to the weights file to be loaded.
input_tensor: optional TF-Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified if `include_top`
is False. It should have exactly 3 inputs channels.
pooling: optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor output
of the last convolutional layer.
- `avg` means that global average pooling will be applied to the output
of the last convolutional layer, and thus the output of the model will
be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax`, or `None`
when using a pretrained top layer.
ValueError: if `include_top` is True but `classes` is not 1000
when using ImageNet.
"""
if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded."
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
"If using `weights` as `'imagenet'` with `include_top`"
" as true, `classes` should be 1000"
)
# Determine proper input shape.
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if input_tensor is not None:
inputs = utils.layer_utils.get_source_inputs(input_tensor)[0]
else:
inputs = img_input
x = inputs
if include_preprocessing:
channel_axis = (
3 if backend.image_data_format() == "channels_last" else 1
)
num_channels = input_shape[channel_axis - 1]
if num_channels == 3:
x = PreStem(name=model_name)(x)
# Stem block.
stem = sequential.Sequential(
[
layers.Conv2D(
projection_dims[0],
kernel_size=4,
strides=4,
name=model_name + "_stem_conv",
),
layers.LayerNormalization(
epsilon=1e-6, name=model_name + "_stem_layernorm"
),
],
name=model_name + "_stem",
)
# Downsampling blocks.
downsample_layers = []
downsample_layers.append(stem)
num_downsample_layers = 3
for i in range(num_downsample_layers):
downsample_layer = sequential.Sequential(
[
layers.LayerNormalization(
epsilon=1e-6,
name=model_name + "_downsampling_layernorm_" + str(i),
),
layers.Conv2D(
projection_dims[i + 1],
kernel_size=2,
strides=2,
name=model_name + "_downsampling_conv_" + str(i),
),
],
name=model_name + "_downsampling_block_" + str(i),
)
downsample_layers.append(downsample_layer)
# Stochastic depth schedule.
# This is referred from the original ConvNeXt codebase:
# https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py#L86
depth_drop_rates = [
float(x) for x in np.linspace(0.0, drop_path_rate, sum(depths))
]
# First apply downsampling blocks and then apply ConvNeXt stages.
cur = 0
num_convnext_blocks = 4
for i in range(num_convnext_blocks):
x = downsample_layers[i](x)
for j in range(depths[i]):
x = ConvNeXtBlock(
projection_dim=projection_dims[i],
drop_path_rate=depth_drop_rates[cur + j],
layer_scale_init_value=layer_scale_init_value,
name=model_name + f"_stage_{i}_block_{j}",
)(x)
cur += depths[i]
if include_top:
imagenet_utils.validate_activation(classifier_activation, weights)
x = Head(
num_classes=classes,
classifier_activation=classifier_activation,
name=model_name,
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
x = layers.LayerNormalization(epsilon=1e-6)(x)
model = training_lib.Model(inputs=inputs, outputs=x, name=model_name)
# Load weights.
if weights == "imagenet":
if include_top:
file_suffix = ".h5"
file_hash = WEIGHTS_HASHES[model_name][0]
else:
file_suffix = "_notop.h5"
file_hash = WEIGHTS_HASHES[model_name][1]
file_name = model_name + file_suffix
weights_path = utils.data_utils.get_file(
file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir="models",
file_hash=file_hash,
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
## Instantiating variants ##
@keras_export(
"keras.applications.convnext.ConvNeXtTiny",
"keras.applications.ConvNeXtTiny",
)
def ConvNeXtTiny(
model_name="convnext_tiny",
include_top=True,
include_preprocessing=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
return ConvNeXt(
depths=MODEL_CONFIGS["tiny"]["depths"],
projection_dims=MODEL_CONFIGS["tiny"]["projection_dims"],
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
default_size=MODEL_CONFIGS["tiny"]["default_size"],
model_name=model_name,
include_top=include_top,
include_preprocessing=include_preprocessing,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
)
@keras_export(
"keras.applications.convnext.ConvNeXtSmall",
"keras.applications.ConvNeXtSmall",
)
def ConvNeXtSmall(
model_name="convnext_small",
include_top=True,
include_preprocessing=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
return ConvNeXt(
depths=MODEL_CONFIGS["small"]["depths"],
projection_dims=MODEL_CONFIGS["small"]["projection_dims"],
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
default_size=MODEL_CONFIGS["small"]["default_size"],
model_name=model_name,
include_top=include_top,
include_preprocessing=include_preprocessing,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
)
@keras_export(
"keras.applications.convnext.ConvNeXtBase",
"keras.applications.ConvNeXtBase",
)
def ConvNeXtBase(
model_name="convnext_base",
include_top=True,
include_preprocessing=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
return ConvNeXt(
depths=MODEL_CONFIGS["base"]["depths"],
projection_dims=MODEL_CONFIGS["base"]["projection_dims"],
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
default_size=MODEL_CONFIGS["base"]["default_size"],
model_name=model_name,
include_top=include_top,
include_preprocessing=include_preprocessing,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
)
@keras_export(
"keras.applications.convnext.ConvNeXtLarge",
"keras.applications.ConvNeXtLarge",
)
def ConvNeXtLarge(
model_name="convnext_large",
include_top=True,
include_preprocessing=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
return ConvNeXt(
depths=MODEL_CONFIGS["large"]["depths"],
projection_dims=MODEL_CONFIGS["large"]["projection_dims"],
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
default_size=MODEL_CONFIGS["large"]["default_size"],
model_name=model_name,
include_top=include_top,
include_preprocessing=include_preprocessing,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
)
@keras_export(
"keras.applications.convnext.ConvNeXtXLarge",
"keras.applications.ConvNeXtXLarge",
)
def ConvNeXtXLarge(
model_name="convnext_xlarge",
include_top=True,
include_preprocessing=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
return ConvNeXt(
depths=MODEL_CONFIGS["xlarge"]["depths"],
projection_dims=MODEL_CONFIGS["xlarge"]["projection_dims"],
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
default_size=MODEL_CONFIGS["xlarge"]["default_size"],
model_name=model_name,
include_top=include_top,
include_preprocessing=include_preprocessing,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
)
ConvNeXtTiny.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtTiny")
ConvNeXtSmall.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtSmall")
ConvNeXtBase.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtBase")
ConvNeXtLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtLarge")
ConvNeXtXLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtXLarge")
@keras_export("keras.applications.convnext.preprocess_input")
def preprocess_input(x, data_format=None):
"""A placeholder method for backward compatibility.
The preprocessing logic has been included in the convnext model
implementation. Users are no longer required to call this method to
normalize the input data. This method does nothing and is only kept as a
placeholder to align the API surface between old and new versions of the model.
Args:
x: A floating point `numpy.array` or a `tf.Tensor`.
data_format: Optional data format of the image tensor/array. `None` means
the global setting `tf.keras.backend.image_data_format()` is used
(unless you changed it, it uses "channels_last").
Defaults to `None`.
Returns:
Unchanged `numpy.array` or `tf.Tensor`.
"""
return x
@keras_export("keras.applications.convnext.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
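# Hedged usage sketch (inputs are synthetic): ConvNeXt models include their
# own Normalization pre-stem, so raw pixel values in the [0-255] range can be
# passed directly, as noted in BASE_DOCSTRING above. `weights=None` avoids any
# download and uses random initialization.
#
#   model = ConvNeXtTiny(weights=None, classes=10)
#   images = np.random.randint(0, 256, size=(2, 224, 224, 3)).astype("float32")
#   preds = model.predict(images)   # shape: (2, 10)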
| tf-keras/tf_keras/applications/convnext.py/0 | {
"file_path": "tf-keras/tf_keras/applications/convnext.py",
"repo_id": "tf-keras",
"token_count": 11031
} | 195 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG16 model for TF-Keras.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition]
(https://arxiv.org/abs/1409.1556) (ICLR 2015)
"""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.applications import imagenet_utils
from tf_keras.engine import training
from tf_keras.layers import VersionAwareLayers
from tf_keras.utils import data_utils
from tf_keras.utils import layer_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
WEIGHTS_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/"
"vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5"
)
WEIGHTS_PATH_NO_TOP = (
"https://storage.googleapis.com/tensorflow/"
"keras-applications/vgg16/"
"vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
layers = VersionAwareLayers()
@keras_export("keras.applications.vgg16.VGG16", "keras.applications.VGG16")
def VGG16(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the VGG16 model.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](
https://arxiv.org/abs/1409.1556) (ICLR 2015)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
The default input size for this model is 224x224.
Note: each TF-Keras Application expects a specific kind of input
preprocessing. For VGG16, call
`tf.keras.applications.vgg16.preprocess_input` on your inputs before passing
them to the model. `vgg16.preprocess_input` will convert the input images
from RGB to BGR, then will zero-center each color channel with respect to
the ImageNet dataset, without scaling.
Args:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional TF-Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer. When loading pretrained weights, `classifier_activation` can
only be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded. Received: "
f"weights={weights}"
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
'If using `weights` as `"imagenet"` with `include_top` '
"as true, `classes` should be 1000. "
f"Received `classes={classes}`"
)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(
64, (3, 3), activation="relu", padding="same", name="block1_conv1"
)(img_input)
x = layers.Conv2D(
64, (3, 3), activation="relu", padding="same", name="block1_conv2"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block1_pool")(x)
# Block 2
x = layers.Conv2D(
128, (3, 3), activation="relu", padding="same", name="block2_conv1"
)(x)
x = layers.Conv2D(
128, (3, 3), activation="relu", padding="same", name="block2_conv2"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block2_pool")(x)
# Block 3
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv1"
)(x)
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv2"
)(x)
x = layers.Conv2D(
256, (3, 3), activation="relu", padding="same", name="block3_conv3"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block3_pool")(x)
# Block 4
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv1"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv2"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block4_conv3"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block4_pool")(x)
# Block 5
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv1"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv2"
)(x)
x = layers.Conv2D(
512, (3, 3), activation="relu", padding="same", name="block5_conv3"
)(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name="block5_pool")(x)
if include_top:
# Classification block
x = layers.Flatten(name="flatten")(x)
x = layers.Dense(4096, activation="relu", name="fc1")(x)
x = layers.Dense(4096, activation="relu", name="fc2")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes, activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name="vgg16")
# Load weights.
if weights == "imagenet":
if include_top:
weights_path = data_utils.get_file(
"vgg16_weights_tf_dim_ordering_tf_kernels.h5",
WEIGHTS_PATH,
cache_subdir="models",
file_hash="64373286793e3c8b2b4e3219cbf3544b",
)
else:
weights_path = data_utils.get_file(
"vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5",
WEIGHTS_PATH_NO_TOP,
cache_subdir="models",
file_hash="6d6bbae143d832006294945121d1f1fc",
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export("keras.applications.vgg16.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="caffe"
)
@keras_export("keras.applications.vgg16.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
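# Hedged usage sketch (the image file path is hypothetical, and the
# tf.keras.utils.load_img / img_to_array helpers are assumed to be available,
# as in TF 2.9+): classify a single image with pretrained ImageNet weights,
# using the preprocess_input and decode_predictions helpers defined in this
# module. Weights are downloaded on first use.
#
#   import numpy as np
#   import tensorflow as tf
#
#   model = VGG16(weights="imagenet")
#   img = tf.keras.utils.load_img("elephant.jpg", target_size=(224, 224))
#   x = np.expand_dims(tf.keras.utils.img_to_array(img), axis=0)
#   preds = model.predict(preprocess_input(x))
#   print(decode_predictions(preds, top=3))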
| tf-keras/tf_keras/applications/vgg16.py/0 | {
"file_path": "tf-keras/tf_keras/applications/vgg16.py",
"repo_id": "tf-keras",
"token_count": 4285
} | 196 |
# Benchmarks for keras model examples
- [Benchmarks for keras model examples](#benchmarks-for-keras-model-examples)
- [TF-Keras benchmarks](#tf-keras-benchmarks)
- [Available models](#available-models)
- [Computer Vision examples](#computer-vision-examples)
- [Text & Sequence examples](#text--sequence-examples)
- [Other examples](#other-examples)
- [Available benchmark results](#available-benchmark-results)
- [Cifar10 CNN benchmark](#cifar10-cnn-benchmark)
- [MNIST Conv benchmark](#mnist-conv-benchmark)
- [MNIST Hierarchical RNN (HRNN) benchmark](#mnist-hierarchical-rnn-hrnn-benchmark)
- [Bidirectional LSTM benchmark](#bidirectional-lstm-benchmark)
- [Text classification with transformer benchmark](#text-classification-with-transformer-benchmark)
- [MLP benchmark](#mlp-benchmark)
- [Antirectifier benchmark](#antirectifier-benchmark)
- [IRNN benchmark](#irnn-benchmark)
- [Install Bazel](#install-bazel)
- [Run benchmarks](#run-benchmarks)
- [Add new benchmarks](#add-new-benchmarks)
- [Troubleshooting](#troubleshooting)
## TF-Keras benchmarks
These benchmark tests run on TF-Keras models from
[keras/examples](https://github.com/keras-team/tf-keras/tree/master/examples).
Benchmarks in the current folder
(`https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks`) use Keras
[built-in dataset](https://keras.io/api/datasets/). In addition, these
benchmarks support different
[distribution strategies](https://www.tensorflow.org/guide/distributed_training)
on multiple GPUs.
### Available models
These examples are implemented with the Functional API and the Sequential API.
#### Computer Vision examples
- [cifar10_cnn_benchmark_test.py](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py):
Simple CNN on CIFAR10 image dataset.
- [mnist_conv_benchmark_test.py](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks/mnist_conv_benchmark_test.py):
Simple Convnet that achieves ~99% test accuracy on MNIST.
- [mnist_hierarchical_rnn_benchmark_test.py](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks/mnist_hierarchical_rnn_benchmark_test.py):
Hierarchical RNN (HRNN) to classify MNIST digits.
#### Text & Sequence examples
- [Bidirectional_lstm_benchmark_test.py](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py):
2-layer bidirectional LSTM on IMDB movie review dataset.
- [text_classification_transformer_benchmark_test.py](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py):
Text classification with custom transformer block.
- [reuters_mlp_benchmark_test.py](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks/reuters_mlp_benchmark_test.py):
Simple MLP on Reuters newswire topic classification dataset.
#### Other examples
- [antirectifier_benchmark_test.py](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py):
Simple custom layer example.
- [mnist_irnn_benchmark_test.py](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks/mnist_irnn_benchmark_test.py):
  Reproduction of the IRNN experiment with pixel-by-pixel sequential MNIST in
["A Simple Way to Initialize Recurrent Networks of Rectified Linear Units"](https://arxiv.org/abs/1504.00941)
by Le et al.
### Available benchmark results
The listed benchmark results were obtained by running on Google Cloud Platform (GCP) with the following setup: <br/>
- GPU: 2 x Tesla V100 <br/>
- OS: Ubuntu 18.04 <br/>
- CPU: 8 x vCPUs, 30 GB memory <br/>
- CUDA: 10.1 <br/>
- Bazel: 3.1.0 <br/>
If you want to run benchmark tests on GPU, please make sure you have already installed CUDA and the other dependencies by following the instructions from the [official tutorial](https://www.tensorflow.org/install/gpu) for GPU support. <br/>
Metrics for the following benchmarks: <br/>
- Batch_size: Number of samples per batch of computation. <br/>
- Wall_time: Total time to run the benchmark test, in seconds. <br/>
- Avg_epoch_time: Average time for each epoch. <br/>
- Exp_per_sec: Examples per second, i.e. the number of examples processed in one second. <br/>
- Distribution_Strategy: The [distribution strategy](https://www.tensorflow.org/guide/distributed_training) used in the benchmark. <br/>
#### Cifar10 CNN benchmark
| Batch_size | Wall_time | Avg_epoch_time | Exp_per_sec | Distribution_Strategy
:---: | :--------: | :-------: | :------------: | :---------: | :-------------------:
CPU | 256 | 1393.4896 | 3.21 | 15397.69 | `off`
GPU:2 | 256 | 76.49 | 2.59 | 18758.01 | `mirrored`
#### MNIST Conv benchmark
| Batch_size | Wall_time | Avg_epoch_time | Exp_per_sec | Distribution_Strategy
:---: | :--------: | :-------: | :------------: | :---------: | :-------------------:
CPU | 256 | 196.52 | 12.19 | 4915.26 | `off`
GPU:2 | 256 | 24.5794 | 1.21 | 47899.32 | `mirrored`
#### MNIST Hierarchical RNN (HRNN) benchmark
| Batch_size | Wall_time | Avg_epoch_time | Exp_per_sec | Distribution_Strategy
:---: | :--------: | :-------: | :------------: | :---------: | :-------------------:
CPU | 256 | 654.05 | 218.68 | 274.24 | `off`
GPU:2 | 256 | 20.77 | 3.73 | 15088.06 | `mirrored`
#### Bidirectional LSTM benchmark
| Batch_size | Wall_time | Avg_epoch_time | Exp_per_sec | Distribution_Strategy
:---: | :--------: | :-------: | :------------: | :---------: | :-------------------:
CPU | 512 | 225.57 | 72.55 | 344.70 | `off`
GPU:2 | 512 | 23.54 | 3.23 | 7532.53 | `mirrored`
#### Text classification with transformer benchmark
| Batch_size | Wall_time | Avg_epoch_time | Exp_per_sec | Distribution_Strategy
:---: | :--------: | :-------: | :------------: | :---------: | :-------------------:
CPU | 512 | 109.22 | 35.93 | 698.10 | `off`
GPU:2 | 512 | 9.28 | 0.83 | 26567.54 | `mirrored`
#### MLP benchmark
| Batch_size | Wall_time | Avg_epoch_time | Exp_per_sec | Distribution_Strategy
:---: | :--------: | :-------: | :------------: | :---------: | :-------------------:
CPU | 128 | 3.76 | 0.54 | 17678.54 | `off`
GPU:2 | 128 | 5.91 | 0.30 | 25435.14 | `mirrored`
#### Antirectifier benchmark
| Batch_size | Wall_time | Avg_epoch_time | Exp_per_sec | Distribution_Strategy
:---: | :--------: | :-------: | :------------: | :---------: | :-------------------:
CPU | 512 | 6.77 | 1.79 | 30916.39 | `off`
GPU:2 | 512 | 6.81 | 0.66 | 66563.17 | `mirrored`
#### IRNN benchmark
| Batch_size | Wall_time | Avg_epoch_time | Exp_per_sec | Distribution_Strategy
:---: | :--------: | :-------: | :------------: | :---------: | :-------------------:
CPU | 1024 | 213.00 | 69.01 | 868.08 | `off`
GPU:2 | 1024 | 92.71 | 29.12 | 2042.94 | `mirrored`
**Note**: For the small models, running on GPU might be even slower than CPU.
The potential reason is, training small models is not computation dominant, and
there might be some overhead on model replication and data sharding with
distributed training on GPUs.
## Install Bazel
This step can be skipped if Bazel is already installed. <br/>
[Bazel](https://bazel.build/) is used to build targets based on BUILD files.
The first build will take a while because Bazel compiles all dependencies from
your BUILD file; subsequent builds are much faster because Bazel reuses its
cache. For Ubuntu, please use the following steps for Bazel installation. For
other platforms, you may follow the corresponding installation guide.
1. Add Bazel as a package source
```shell
sudo apt install curl gnupg
```
```shell
curl https://bazel.build/bazel-release.pub.gpg | sudo apt-key add -
```
```shell
echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list
```
Before we install the bazel, We should take a look for a bazel version that
can build the specific tensorflow version, you can check it from
[here](https://www.tensorflow.org/install/source#tested_build_configurations).
In addition, you can follow the instructions from
[Bazel website](https://docs.bazel.build/versions/3.4.0/install.html).
2. Install Bazel
```shell
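# Replace `version` with the Bazel version identified in the previous step
# (e.g. 3.1.0, the version used for the benchmark results above).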
sudo apt update && sudo apt install bazel-`version`
```
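You can then check that the installation succeeded (a sketch; depending on the
package, the binary may be installed under a versioned name such as
`bazel-3.1.0` rather than `bazel`):
```shell
bazel --version
```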
## Run benchmarks
To run benchmarks in
[keras/benchmarks](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/tf_keras/benchmarks),
please take the following steps:
1. Pull the latest tensorflow repo from GitHub.
2. Install a Bazel version that works with tensorflow; please take a look at
   the [Install Bazel](#install-bazel) section.
3. To run benchmarks with Bazel, use the `--benchmarks=.` flag to specify the
   benchmarks to run.
- To run all benchmarks on CPU
```shell
bazel run -c opt benchmark_test -- --benchmarks=.
```
- To run all benchmarks on GPU
```shell
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- --benchmarks=.
```
  - To run a subset of benchmarks, use the `--benchmarks` flag to specify
    which benchmarks to run. The specified value is interpreted as a regular
    expression, and any benchmark whose name contains a partial match to the
    expression is executed, e.g. `--benchmarks=".*lstm*."` will run all
    LSTM-layer-related benchmarks, as shown below.
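For example, combining the CPU command above with this flag runs only the
LSTM-related benchmarks (a sketch; adjust the target and build flags as needed):
```shell
bazel run -c opt benchmark_test -- --benchmarks=".*lstm*."
```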
## Add new benchmarks
To add a new benchmark, please take the following steps:
1. Create your own benchmark test file, `xxxx_benchmark_test.py`.
2. Import `benchmark_util` to measure and track performance if needed.
3. Create a class that inherits from `tf.test.Benchmark`.
4. Define and load dataset in `__init__` method.
5. Design and create a model in `_build_model` method.
6. Define a `benchmark_xxx` method to measure the performance of the benchmark
   with different hyperparameters, such as `batch_size`, `run_iters`, and
   `distribution_strategy`. You can check examples
   [here](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py#L60);
   a minimal skeleton is also sketched after this list.
7. Add the benchmark target to the
[BUILD](https://github.com/keras-team/tf-keras/blob/master/tf_keras/benchmarks/BUILD)
file.
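For reference, below is a minimal, hypothetical skeleton of such a benchmark
file. The class name `MyModelBenchmark`, the method name
`benchmark_my_model_bs_128`, and the toy data are placeholders, and the manual
timing is only there to keep the sketch self-contained; the benchmarks in this
folder measure performance through `benchmark_util` instead.
```python
"""A hypothetical skeleton for a new TF-Keras benchmark (placeholder names)."""
import time

import numpy as np
import tensorflow as tf

import tf_keras as keras


class MyModelBenchmark(tf.test.Benchmark):
    """Benchmarks a small MLP on random data."""

    def __init__(self):
        super().__init__()
        # Step 4: define and load the dataset in `__init__`.
        self.x_train = np.random.random((1000, 32)).astype("float32")
        self.y_train = np.random.random((1000, 1)).astype("float32")

    def _build_model(self):
        # Step 5: design and create the model.
        return keras.Sequential(
            [
                keras.layers.Dense(64, activation="relu", input_shape=(32,)),
                keras.layers.Dense(1),
            ]
        )

    def benchmark_my_model_bs_128(self):
        # Step 6: measure performance for one hyperparameter setting.
        batch_size = 128
        run_iters = 3
        wall_times = []
        for _ in range(run_iters):
            model = self._build_model()
            model.compile(optimizer="sgd", loss="mse")
            start = time.time()
            model.fit(
                self.x_train,
                self.y_train,
                batch_size=batch_size,
                epochs=2,
                verbose=0,
            )
            wall_times.append(time.time() - start)
        self.report_benchmark(
            iters=run_iters,
            wall_time=float(np.mean(wall_times)),
            name="my_model_bs_128",
        )


if __name__ == "__main__":
    tf.test.main()
```
Remember to also add a matching target to the BUILD file (step 7) so that the
new benchmark can be run with Bazel.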
## Troubleshooting
1. `tensorflow.python.framework.errors_impl.InternalError: CUDA runtime implicit
   initialization on GPU:0 failed. Status: device kernel image is invalid`
   - Make sure CUDA is installed on your machine.
   - Pull the latest tensorflow repo and run `./configure` in the root
     folder of the tensorflow repo. This creates a configuration file that
     reflects your local environment. Please check
     [this post](https://www.tensorflow.org/install/source#configure_the_build)
     for more details.
| tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/README.md/0 | {"file_path": "tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/README.md", "repo_id": "tf-keras", "token_count": 4330} | 197 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Memory profile on TF-Keras model.
To add a new model for memory profile:
1. Create the model.
2. Decorate it with `@memory_profiler.profile`.
3. Add the model function to the dict `models`.
"""
import numpy as np
from absl import app
from absl import flags
from absl import logging
import tf_keras as keras
try:
import memory_profiler
except ImportError:
memory_profiler = None
FLAGS = flags.FLAGS
flags.DEFINE_string("model", None, "The model to run memory profiler.")
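# Example invocation (a hypothetical command line; adapt it to however this
# script is built and run in your environment, e.g. through Bazel):
#
#   python model_memory_profile.py --model=lstm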
def main(_):
@memory_profiler.profile
def _imdb_lstm_model():
"""LSTM model."""
x_train = np.random.randint(0, 1999, size=(2500, 100))
y_train = np.random.random((2500, 1))
# IMDB LSTM model.
model = keras.Sequential()
model.add(keras.layers.Embedding(20000, 128))
model.add(keras.layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(keras.layers.Dense(1, activation="sigmoid"))
model.compile("sgd", "mse")
        # Run a few training epochs so that training-time memory usage is
        # captured by the profiler.
        model.fit(x_train, y_train, batch_size=512, epochs=3)
# Add the model for memory profile.
models = {
"lstm": _imdb_lstm_model,
}
if FLAGS.model in models:
logging.info("Run memory profile on %s.", FLAGS.model)
run_model = models[FLAGS.model]
run_model()
else:
logging.info("The model does not exist. Please verify the model name.")
if __name__ == "__main__":
flags.mark_flags_as_required(["model"])
if memory_profiler:
app.run(main)
| tf-keras/tf_keras/benchmarks/model_memory_profile.py/0 | {"file_path": "tf-keras/tf_keras/benchmarks/model_memory_profile.py", "repo_id": "tf-keras", "token_count": 809} | 198 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constraints: functions that impose constraints on weight values."""
import warnings
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.saving.legacy import serialization as legacy_serialization
from tf_keras.saving.serialization_lib import deserialize_keras_object
from tf_keras.saving.serialization_lib import serialize_keras_object
# isort: off
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export("keras.constraints.Constraint")
class Constraint:
"""Base class for weight constraints.
A `Constraint` instance works like a stateless function.
Users who subclass this
class should override the `__call__` method, which takes a single
weight parameter and return a projected version of that parameter
(e.g. normalized or clipped). Constraints can be used with various Keras
layers via the `kernel_constraint` or `bias_constraint` arguments.
Here's a simple example of a non-negative weight constraint:
>>> class NonNegative(tf.keras.constraints.Constraint):
...
... def __call__(self, w):
... return w * tf.cast(tf.math.greater_equal(w, 0.), w.dtype)
>>> weight = tf.constant((-1.0, 1.0))
>>> NonNegative()(weight)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 1.],
dtype=float32)>
>>> tf.keras.layers.Dense(4, kernel_constraint=NonNegative())
"""
def __call__(self, w):
"""Applies the constraint to the input weight variable.
By default, the inputs weight variable is not modified.
Users should override this method to implement their own projection
function.
Args:
w: Input weight variable.
Returns:
Projected variable (by default, returns unmodified inputs).
"""
return w
def get_config(self):
"""Returns a Python dict of the object config.
A constraint config is a Python dictionary (JSON-serializable) that can
be used to reinstantiate the same object.
Returns:
Python dict containing the configuration of the constraint object.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates a weight constraint from a configuration dictionary.
Example:
```python
constraint = UnitNorm()
config = constraint.get_config()
constraint = UnitNorm.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config`.
Returns:
A `tf.keras.constraints.Constraint` instance.
"""
return cls(**config)
@keras_export("keras.constraints.MaxNorm", "keras.constraints.max_norm")
class MaxNorm(Constraint):
"""MaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have a norm less than or equal to a desired value.
Also available via the shortcut function `tf.keras.constraints.max_norm`.
Args:
max_value: the maximum norm value for the incoming weights.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, max_value=2, axis=0):
self.max_value = max_value
self.axis = axis
@doc_controls.do_not_generate_docs
def __call__(self, w):
norms = backend.sqrt(
tf.reduce_sum(tf.square(w), axis=self.axis, keepdims=True)
)
desired = backend.clip(norms, 0, self.max_value)
return w * (desired / (backend.epsilon() + norms))
@doc_controls.do_not_generate_docs
def get_config(self):
return {"max_value": self.max_value, "axis": self.axis}
@keras_export("keras.constraints.NonNeg", "keras.constraints.non_neg")
class NonNeg(Constraint):
"""Constrains the weights to be non-negative.
Also available via the shortcut function `tf.keras.constraints.non_neg`.
"""
def __call__(self, w):
return w * tf.cast(tf.greater_equal(w, 0.0), backend.floatx())
@keras_export("keras.constraints.UnitNorm", "keras.constraints.unit_norm")
class UnitNorm(Constraint):
"""Constrains the weights incident to each hidden unit to have unit norm.
Also available via the shortcut function `tf.keras.constraints.unit_norm`.
Args:
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, axis=0):
self.axis = axis
@doc_controls.do_not_generate_docs
def __call__(self, w):
return w / (
backend.epsilon()
+ backend.sqrt(
tf.reduce_sum(tf.square(w), axis=self.axis, keepdims=True)
)
)
@doc_controls.do_not_generate_docs
def get_config(self):
return {"axis": self.axis}
@keras_export("keras.constraints.MinMaxNorm", "keras.constraints.min_max_norm")
class MinMaxNorm(Constraint):
"""MinMaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have the norm between a lower bound and an upper bound.
Also available via the shortcut function
`tf.keras.constraints.min_max_norm`.
Args:
min_value: the minimum norm for the incoming weights.
max_value: the maximum norm for the incoming weights.
rate: rate for enforcing the constraint: weights will be
rescaled to yield
`(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
Effectively, this means that rate=1.0 stands for strict
enforcement of the constraint, while rate<1.0 means that
weights will be rescaled at each step to slowly move
towards a value inside the desired interval.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
self.min_value = min_value
self.max_value = max_value
self.rate = rate
self.axis = axis
@doc_controls.do_not_generate_docs
def __call__(self, w):
norms = backend.sqrt(
tf.reduce_sum(tf.square(w), axis=self.axis, keepdims=True)
)
desired = (
self.rate * backend.clip(norms, self.min_value, self.max_value)
+ (1 - self.rate) * norms
)
return w * (desired / (backend.epsilon() + norms))
@doc_controls.do_not_generate_docs
def get_config(self):
return {
"min_value": self.min_value,
"max_value": self.max_value,
"rate": self.rate,
"axis": self.axis,
}
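# Example (a hypothetical usage sketch, not part of the library code): with
# `rate=0.5`, each update moves the kernel norms halfway towards the clipped
# range [0.5, 1.0].
#
#   dense = tf.keras.layers.Dense(
#       4, kernel_constraint=MinMaxNorm(min_value=0.5, max_value=1.0, rate=0.5)
#   )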
@keras_export(
"keras.constraints.RadialConstraint", "keras.constraints.radial_constraint"
)
class RadialConstraint(Constraint):
"""Constrains `Conv2D` kernel weights to be the same for each radius.
Also available via the shortcut function
`tf.keras.constraints.radial_constraint`.
For example, the desired output for the following 4-by-4 kernel:
```
kernel = [[v_00, v_01, v_02, v_03],
[v_10, v_11, v_12, v_13],
[v_20, v_21, v_22, v_23],
[v_30, v_31, v_32, v_33]]
```
    is this:
```
kernel = [[v_11, v_11, v_11, v_11],
[v_11, v_33, v_33, v_11],
[v_11, v_33, v_33, v_11],
[v_11, v_11, v_11, v_11]]
```
This constraint can be applied to any `Conv2D` layer version, including
`Conv2DTranspose` and `SeparableConv2D`, and with either `"channels_last"`
or `"channels_first"` data format. The method assumes the weight tensor is
of shape `(rows, cols, input_depth, output_depth)`.
"""
@doc_controls.do_not_generate_docs
def __call__(self, w):
w_shape = w.shape
if w_shape.rank is None or w_shape.rank != 4:
raise ValueError(
"The weight tensor must have rank 4. "
f"Received weight tensor with shape: {w_shape}"
)
height, width, channels, kernels = w_shape
w = backend.reshape(w, (height, width, channels * kernels))
# TODO(cpeter): Switch map_fn for a faster tf.vectorized_map once
# backend.switch is supported.
w = backend.map_fn(
self._kernel_constraint,
backend.stack(tf.unstack(w, axis=-1), axis=0),
)
return backend.reshape(
backend.stack(tf.unstack(w, axis=0), axis=-1),
(height, width, channels, kernels),
)
def _kernel_constraint(self, kernel):
"""Radially constraints a kernel with shape (height, width,
channels)."""
padding = backend.constant([[1, 1], [1, 1]], dtype="int32")
kernel_shape = backend.shape(kernel)[0]
start = backend.cast(kernel_shape / 2, "int32")
kernel_new = backend.switch(
backend.cast(tf.math.floormod(kernel_shape, 2), "bool"),
lambda: kernel[start - 1 : start, start - 1 : start],
lambda: kernel[start - 1 : start, start - 1 : start]
+ backend.zeros((2, 2), dtype=kernel.dtype),
)
index = backend.switch(
backend.cast(tf.math.floormod(kernel_shape, 2), "bool"),
lambda: backend.constant(0, dtype="int32"),
lambda: backend.constant(1, dtype="int32"),
)
while_condition = lambda index, *args: backend.less(index, start)
def body_fn(i, array):
return i + 1, tf.pad(
array, padding, constant_values=kernel[start + i, start + i]
)
_, kernel_new = tf.compat.v1.while_loop(
while_condition,
body_fn,
[index, kernel_new],
shape_invariants=[index.get_shape(), tf.TensorShape([None, None])],
)
return kernel_new
# Aliases.
max_norm = MaxNorm
non_neg = NonNeg
unit_norm = UnitNorm
min_max_norm = MinMaxNorm
radial_constraint = RadialConstraint
# Legacy aliases.
maxnorm = max_norm
nonneg = non_neg
unitnorm = unit_norm
@keras_export("keras.constraints.serialize")
def serialize(constraint, use_legacy_format=False):
if constraint is None:
return None
if not isinstance(constraint, Constraint):
warnings.warn(
"The `keras.constraints.serialize()` API should only be used for "
"objects of type `keras.constraints.Constraint`. Found an instance "
f"of type {type(constraint)}, which may lead to improper "
"serialization."
)
if use_legacy_format:
return legacy_serialization.serialize_keras_object(constraint)
return serialize_keras_object(constraint)
@keras_export("keras.constraints.deserialize")
def deserialize(config, custom_objects=None, use_legacy_format=False):
if use_legacy_format:
return legacy_serialization.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="constraint",
)
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="constraint",
)
@keras_export("keras.constraints.get")
def get(identifier):
"""Retrieves a TF-Keras constraint function."""
if identifier is None:
return None
if isinstance(identifier, dict):
use_legacy_format = "module" not in identifier
return deserialize(identifier, use_legacy_format=use_legacy_format)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
return get(config)
elif callable(identifier):
return identifier
else:
raise ValueError(
f"Could not interpret constraint function identifier: {identifier}"
)
| tf-keras/tf_keras/constraints.py/0 | {"file_path": "tf-keras/tf_keras/constraints.py", "repo_id": "tf-keras", "token_count": 5758} | 199 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CollectiveAllReduceStrategy."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import layers
from tf_keras.engine import training
from tf_keras.optimizers.legacy import (
gradient_descent as gradient_descent_keras,
)
from tf_keras.testing_infra import test_utils
@test_utils.run_v2_only
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
strategy=[
tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu, # noqa: E501
tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu, # noqa: E501
],
mode=["eager"],
)
)
class MultiWorkerMirroredStrategyTest(tf.test.TestCase, parameterized.TestCase):
def testFitWithoutStepsPerEpochPartialBatch(self, strategy):
def _model_fn():
x = layers.Input(shape=(1,), name="input")
y = layers.Dense(1, name="dense")(x)
model = training.Model(x, y)
return model
def _get_dataset():
inputs = tf.expand_dims(tf.constant(range(10)), axis=1)
targets = tf.expand_dims(tf.constant(range(10)), axis=1)
# Make global batch size 12 for 2 replicas and a non-repeated
# dataset with 10 elements so that we have partial batch
dataset = tf.data.Dataset.from_tensor_slices(
(inputs, targets)
).batch(12, drop_remainder=False)
return dataset
with strategy.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = _model_fn()
loss = "mse"
metrics = ["mae"]
model.compile(optimizer, loss, metrics=metrics)
dataset = _get_dataset()
kernel_before = model.get_weights()[0][0]
model.fit(dataset, epochs=10)
kernel_after = model.get_weights()[0][0]
self.assertNotEqual(kernel_before, kernel_after)
self.assertGreater(abs(kernel_before - 1), abs(kernel_after - 1))
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main()
| tf-keras/tf_keras/distribute/collective_all_reduce_strategy_test.py/0 | {"file_path": "tf-keras/tf_keras/distribute/collective_all_reduce_strategy_test.py", "repo_id": "tf-keras", "token_count": 1139} | 200 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness tests for tf.keras DNN model using DistributionStrategy."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras import backend
from tf_keras.distribute import keras_correctness_test_base
from tf_keras.distribute import strategy_combinations
from tf_keras.optimizers.legacy import (
gradient_descent as gradient_descent_keras,
)
from tf_keras.testing_infra import test_utils
def all_strategy_combinations_with_eager_and_graph_modes():
return tf.__internal__.test.combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["graph", "eager"],
) + tf.__internal__.test.combinations.combine(
distribution=strategy_combinations.multi_worker_mirrored_strategies,
mode="eager",
)
def all_strategy_combinations_with_graph_mode():
return tf.__internal__.test.combinations.combine(
distribution=keras_correctness_test_base.all_strategies, mode=["graph"]
)
def is_default_strategy(strategy):
with strategy.scope():
return not tf.distribute.has_strategy()
@test_utils.run_all_without_tensor_float_32(
"Uses Dense layers, which call matmul"
)
class TestDistributionStrategyDnnCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase
):
def get_model(
self, initial_weights=None, distribution=None, input_shapes=None
):
with keras_correctness_test_base.MaybeDistributionScope(distribution):
            # We add a few non-linear layers to make it non-trivial.
model = keras.Sequential()
model.add(
keras.layers.Dense(10, activation="relu", input_shape=(1,))
)
model.add(
keras.layers.Dense(
10,
activation="relu",
kernel_regularizer=keras.regularizers.l2(1e-4),
)
)
model.add(keras.layers.Dense(10, activation="relu"))
model.add(keras.layers.Dense(1))
if initial_weights:
model.set_weights(initial_weights)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=["mse"],
)
return model
def get_data(self):
x_train = np.random.rand(9984, 1).astype("float32")
y_train = 3 * x_train
x_predict = np.array([[1.0], [2.0], [3.0], [4.0]], dtype=np.float32)
return x_train, y_train, x_predict
def get_data_with_partial_last_batch(self):
x_train = np.random.rand(10000, 1).astype("float32")
y_train = 3 * x_train
x_eval = np.random.rand(10000, 1).astype("float32")
y_eval = 3 * x_eval
x_predict = np.array([[1.0], [2.0], [3.0], [4.0]], dtype=np.float32)
return x_train, y_train, x_eval, y_eval, x_predict
def get_data_with_partial_last_batch_eval(self):
x_train = np.random.rand(9984, 1).astype("float32")
y_train = 3 * x_train
x_eval = np.random.rand(10000, 1).astype("float32")
y_eval = 3 * x_eval
x_predict = np.array([[1.0], [2.0], [3.0], [4.0]], dtype=np.float32)
return x_train, y_train, x_eval, y_eval, x_predict
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations()
+ keras_correctness_test_base.multi_worker_mirrored_eager()
)
def test_dnn_correctness(
self, distribution, use_numpy, use_validation_data
):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501
+ keras_correctness_test_base.multi_worker_mirrored_eager()
)
def test_dnn_correctness_with_partial_last_batch_eval(
self, distribution, use_numpy, use_validation_data
):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
partial_last_batch="eval",
)
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.strategy_minus_tpu_and_input_config_combinations_eager() # noqa: E501
+ keras_correctness_test_base.multi_worker_mirrored_eager()
)
def test_dnn_correctness_with_partial_last_batch(
self, distribution, use_numpy, use_validation_data
):
distribution.extended.experimental_enable_get_next_as_optional = True
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
partial_last_batch="train_and_eval",
training_epochs=1,
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations_with_graph_mode()
)
def test_dnn_with_dynamic_learning_rate(self, distribution):
self.run_dynamic_lr_test(distribution)
class TestDistributionStrategyDnnMetricCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase
):
def get_model(self, distribution=None, input_shapes=None):
with distribution.scope():
model = keras.Sequential()
model.add(
keras.layers.Dense(
1, input_shape=(1,), kernel_initializer="ones"
)
)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=[keras.metrics.BinaryAccuracy()],
)
return model
def run_metric_correctness_test(self, distribution):
with self.cached_session():
self.set_up_test_config()
x_train, y_train, _ = self.get_data()
model = self.get_model(distribution=distribution)
batch_size = 64
batch_size = keras_correctness_test_base.get_batch_size(
batch_size, distribution
)
train_dataset = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)
)
train_dataset = keras_correctness_test_base.batch_wrapper(
train_dataset, batch_size
)
history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10)
self.assertEqual(history.history["binary_accuracy"], [1.0, 1.0])
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations_with_eager_and_graph_modes()
)
def test_simple_dnn_metric_correctness(self, distribution):
self.run_metric_correctness_test(distribution)
class TestDistributionStrategyDnnMetricEvalCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase
):
def get_model(self, distribution=None, input_shapes=None):
with distribution.scope():
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation="relu", input_dim=4, kernel_initializer="ones"
)
)
model.add(
keras.layers.Dense(
1, activation="sigmoid", kernel_initializer="ones"
)
)
model.compile(
loss="mae",
metrics=["accuracy", keras.metrics.BinaryAccuracy()],
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.001),
)
return model
def run_eval_metrics_correctness_test(self, distribution):
with self.cached_session():
self.set_up_test_config()
model = self.get_model(distribution=distribution)
# verify correctness of stateful and stateless metrics.
x = np.ones((100, 4)).astype("float32")
y = np.ones((100, 1)).astype("float32")
dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat()
dataset = keras_correctness_test_base.batch_wrapper(dataset, 4)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 1.0)
self.assertEqual(outs[2], 1.0)
y = np.zeros((100, 1)).astype("float32")
dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat()
dataset = keras_correctness_test_base.batch_wrapper(dataset, 4)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 0.0)
self.assertEqual(outs[2], 0.0)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations_with_eager_and_graph_modes()
)
def test_identity_model_metric_eval_correctness(self, distribution):
self.run_eval_metrics_correctness_test(distribution)
class SubclassedModel(keras.Model):
def __init__(self, initial_weights, input_shapes):
super().__init__()
self.dense1 = keras.layers.Dense(
10, activation="relu", input_shape=(1,)
)
self.dense2 = keras.layers.Dense(
10,
activation="relu",
kernel_regularizer=keras.regularizers.l2(1e-4),
)
self.dense3 = keras.layers.Dense(10, activation="relu")
self.dense4 = keras.layers.Dense(1)
if input_shapes:
self.build(input_shapes)
else:
# This covers cases when the input is DatasetV1Adapter.
self.build((None, 1))
if initial_weights:
self.set_weights(initial_weights)
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
x = self.dense3(x)
return self.dense4(x)
@test_utils.run_all_without_tensor_float_32(
"Uses Dense layers, which call matmul"
)
class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
TestDistributionStrategyDnnCorrectness
):
def get_model(
self, initial_weights=None, distribution=None, input_shapes=None
):
with keras_correctness_test_base.MaybeDistributionScope(distribution):
model = SubclassedModel(initial_weights, input_shapes)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=["mse"],
)
return model
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations()
+ keras_correctness_test_base.multi_worker_mirrored_eager()
)
def test_dnn_correctness(
self, distribution, use_numpy, use_validation_data
):
if (tf.executing_eagerly()) or is_default_strategy(distribution):
self.run_correctness_test(
distribution, use_numpy, use_validation_data
)
elif (
backend.is_tpu_strategy(distribution) and not tf.executing_eagerly()
):
with self.assertRaisesRegex(
ValueError,
"Expected `model` argument to be a functional `Model` "
"instance, but got a subclassed model instead.",
):
self.run_correctness_test(
distribution, use_numpy, use_validation_data
)
else:
with self.assertRaisesRegex(
ValueError,
"We currently do not support distribution strategy with a "
"`Sequential` model that is created without `input_shape`/"
"`input_dim` set in its first layer or a subclassed model.",
):
self.run_correctness_test(
distribution, use_numpy, use_validation_data
)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations_with_graph_mode()
)
def test_dnn_with_dynamic_learning_rate(self, distribution):
if (
tf.executing_eagerly() and not backend.is_tpu_strategy(distribution)
) or is_default_strategy(distribution):
self.run_dynamic_lr_test(distribution)
elif backend.is_tpu_strategy(distribution):
with self.assertRaisesRegex(
ValueError,
"Expected `model` argument to be a functional `Model` "
"instance, but got a subclassed model instead.",
):
self.run_dynamic_lr_test(distribution)
else:
with self.assertRaisesRegex(
ValueError,
"We currently do not support distribution strategy with a "
"`Sequential` model that is created without `input_shape`/"
"`input_dim` set in its first layer or a subclassed model.",
):
self.run_dynamic_lr_test(distribution)
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies_graph() # noqa: E501
)
def test_dnn_correctness_with_partial_last_batch_eval(
self, distribution, use_numpy, use_validation_data
):
with self.assertRaisesRegex(
ValueError,
"Expected `model` argument to be a functional `Model` instance, "
"but got a subclassed model instead.",
):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
partial_last_batch="eval",
)
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main()
| tf-keras/tf_keras/distribute/keras_dnn_correctness_test.py/0 | {"file_path": "tf-keras/tf_keras/distribute/keras_dnn_correctness_test.py", "repo_id": "tf-keras", "token_count": 6706} | 201 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import layers
from tf_keras.dtensor import dtensor_api as dtensor
from tf_keras.dtensor import test_util
from tf_keras.dtensor import utils
class UtilsTest(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
global_ids = test_util.create_device_ids_array((2, 2))
local_device_ids = np.ravel(global_ids).tolist()
mesh_dict = {
"CPU": dtensor.Mesh(
["X", "Y"],
global_ids,
local_device_ids,
test_util.create_device_list((2, 2), "CPU"),
)
}
self.mesh = self.configTestMesh(mesh_dict)
self.layout = dtensor.Layout.replicated(self.mesh, rank=1)
@parameterized.named_parameters(
("Dense", layers.Dense, {"units": 4}, ["kernel_layout", "bias_layout"]),
(
"Conv2D",
layers.Conv2D,
{"filters": 2, "kernel_size": 3},
["kernel_layout", "bias_layout"],
),
(
"BatchNorm",
layers.BatchNormalization,
{},
[
"beta_layout",
"gamma_layout",
"moving_mean_layout",
"moving_variance_layout",
],
),
(
"Embedding",
layers.Embedding,
{"input_dim": 100, "output_dim": 20},
["embeddings_layout"],
),
(" PReLU", layers.PReLU, {}, ["alpha_layout"]),
(
"SeparableConv2D",
layers.SeparableConv2D,
{"filters": 2, "kernel_size": 3},
["depthwise_layout", "pointwise_layout", "bias_layout"],
),
# TODO(scottzhu): Probably add more coverage for all the layers.
)
def test_all_layout_decorator(self, layer_cls, init_args, layout_args):
layer_cls.__init__ = utils.allow_initializer_layout(layer_cls.__init__)
# Make sure we don't set the layout attribute if the init kwargs is not
# provided.
layer = layer_cls(**init_args)
for layout_arg in layout_args:
self.assertFalse(hasattr(layer, layout_arg))
layout_kwargs = {k: self.layout for k in layout_args}
init_args.update(layout_kwargs)
layer = layer_cls(**init_args)
for layout_arg in layout_args:
self.assertEqual(getattr(layer, layout_arg), self.layout)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/dtensor/utils_test.py/0 | {"file_path": "tf-keras/tf_keras/dtensor/utils_test.py", "repo_id": "tf-keras", "token_count": 1460} | 202 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to deferred-build `Sequential` models."""
import os
import unittest
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
try:
import h5py
except ImportError:
h5py = None
@test_utils.run_v2_only
class TestDeferredSequential(test_combinations.TestCase):
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_build_behavior(self):
# Test graph network creation after __call__
model = get_model()
model(np.random.random((2, 6)))
self.assertLen(model.weights, 4)
self.assertTrue(model._is_graph_network)
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [2, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [2, 2])
# Test effect of new __call__ with a different shape
model(np.random.random((3, 6)))
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
model(np.random.random((4, 6)))
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
# Test graph network creation after build
model = get_model()
model.build((None, 6))
self.assertLen(model.weights, 4)
self.assertTrue(model._is_graph_network)
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
# Test graph network creation after compile/fit
model = get_model()
model.compile(
loss="mse",
optimizer="rmsprop",
metrics=[keras.metrics.CategoricalAccuracy()],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(np.zeros((2, 6)), np.zeros((2, 2)))
self.assertLen(model.weights, 4)
self.assertTrue(model._is_graph_network)
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
# Inconsistency here: with eager `fit`, the model is built with shape
# (2, 6), but with graph function `fit`, it is built with shape `(None,
# 6)`. This is likely due to our assumption "the batch size should be
# dynamic" at the level of `Model`. TODO(fchollet): investigate and
# resolve.
self.assertEqual(model.inputs[0].shape.as_list()[-1], 6)
self.assertEqual(model.outputs[0].shape.as_list()[-1], 2)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_add_and_pop(self):
model = get_model()
model.build((None, 6))
self.assertTrue(model.built)
self.assertTrue(model._is_graph_network)
self.assertLen(model.layers, 3)
self.assertLen(model.weights, 4)
model.pop()
self.assertTrue(model.built)
self.assertTrue(model._is_graph_network)
self.assertLen(model.layers, 2)
self.assertLen(model.weights, 2)
model.add(keras.layers.Dense(2))
self.assertTrue(model.built)
self.assertTrue(model._is_graph_network)
self.assertLen(model.layers, 3)
self.assertLen(model.weights, 4)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_feature_extraction(self):
# This tests layer connectivity reset when rebuilding
model = get_model()
model(np.random.random((3, 6))) # First build
model(np.random.random((4, 6))) # Triggers a rebuild
# Classic feature extractor pattern
extractor = keras.Model(
inputs=model.inputs,
outputs=[layer.output for layer in model.layers],
)
# Check that inputs and outputs are connected
_ = extractor(np.random.random((4, 6)))
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_saving_keras_v3(self):
model = get_model()
model(np.random.random((3, 6))) # Build model
path = os.path.join(self.get_temp_dir(), "model_path.keras")
model.save(path)
new_model = keras.models.load_model(path)
model_layers = model._flatten_layers(include_self=True, recursive=False)
new_model_layers = new_model._flatten_layers(
include_self=True, recursive=False
)
for layer1, layer2 in zip(model_layers, new_model_layers):
self.assertEqual(layer1.name, layer2.name)
for w1, w2 in zip(layer1.weights, layer2.weights):
self.assertAllClose(w1, w2)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_saving_savedmodel(self):
model = get_model()
model(np.random.random((3, 6))) # Build model
path = os.path.join(self.get_temp_dir(), "model_path")
model.save(path)
new_model = keras.models.load_model(path)
model_layers = model._flatten_layers(include_self=True, recursive=False)
new_model_layers = new_model._flatten_layers(
include_self=True, recursive=False
)
for layer1, layer2 in zip(model_layers, new_model_layers):
self.assertEqual(layer1.name, layer2.name)
for w1, w2 in zip(layer1.weights, layer2.weights):
self.assertAllClose(w1, w2)
@unittest.skipIf(h5py is None, "Test requires h5py")
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_saving_h5(self):
model = get_model()
model(np.random.random((3, 6))) # Build model
path = os.path.join(self.get_temp_dir(), "model_path.h5")
model.save(path)
new_model = keras.models.load_model(path)
model_layers = model._flatten_layers(include_self=True, recursive=False)
new_model_layers = new_model._flatten_layers(
include_self=True, recursive=False
)
for layer1, layer2 in zip(model_layers, new_model_layers):
self.assertEqual(layer1.name, layer2.name)
for w1, w2 in zip(layer1.weights, layer2.weights):
self.assertAllClose(w1, w2)
@test_combinations.run_all_keras_modes
def test_shared_layer(self):
# This tests that preexisting layer connectivity is preserved
# when auto-building graph networks
shared_layer = keras.layers.Dense(2)
m1 = keras.Sequential([shared_layer])
m1(np.random.random((3, 6)))
m2 = keras.Sequential([shared_layer])
m2(np.random.random((3, 6)))
# Nesting case
shared_layer = keras.layers.Dense(2)
m1 = keras.Sequential([shared_layer])
m2 = keras.Sequential([shared_layer, m1])
m2(np.random.random((3, 2)))
@test_combinations.run_all_keras_modes
def test_loss_layer(self):
class LossLayer(keras.layers.Layer):
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs))
return inputs
# Test loss layer alone
model = keras.Sequential([LossLayer()])
model.compile("rmsprop", run_eagerly=test_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 2)))
self.assertAllClose(loss, 4.0)
model(np.random.random((4, 2))) # Triggers a rebuild
loss = model.train_on_batch(np.ones((1, 2)))
self.assertAllClose(loss, 2.0)
# Test loss layer combined with another layer
model = keras.Sequential(
[keras.layers.Dense(1, kernel_initializer="ones"), LossLayer()]
)
model.compile("rmsprop", run_eagerly=test_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 2)))
self.assertAllClose(loss, 4.0)
model(np.random.random((4, 2))) # Triggers a rebuild
loss = model.train_on_batch(np.ones((1, 2)))
self.assertLess(loss, 2.0)
# Test loss layer combined with external loss
model = keras.Sequential(
[keras.layers.Dense(1, kernel_initializer="ones"), LossLayer()]
)
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
loss = model.train_on_batch(np.ones((2, 2)), np.ones((2, 2)))
model(np.random.random((4, 2))) # Triggers a rebuild
loss = model.train_on_batch(np.ones((1, 2)), np.ones((1, 2)))
def get_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, name="first_layer"))
model.add(keras.layers.Dropout(0.3, name="dp"))
model.add(keras.layers.Dense(2, name="last_layer"))
return model
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/engine/deferred_sequential_test.py/0 | {"file_path": "tf-keras/tf_keras/engine/deferred_sequential_test.py", "repo_id": "tf-keras", "token_count": 4307} | 203 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""
import abc
import atexit
import collections
import functools
import multiprocessing.pool
import threading
import time
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import callbacks as cbks
from tf_keras import losses
from tf_keras import metrics as metrics_module
from tf_keras.utils import data_utils
from tf_keras.utils import generic_utils
from tf_keras.utils import losses_utils
from tf_keras.utils import tf_inspect
# isort: off
from tensorflow.python.platform import tf_logging as logging
def is_composite_or_composite_value(tensor):
"""Returns true if 'tensor' is a CompositeTensor or a CT Value object."""
# TODO(b/125094323): This should be isinstance(CompositeTensor) or
# isinstance(CompositeTensorValue) once we support that.
return isinstance(
tensor,
(
tf.__internal__.CompositeTensor,
tf.compat.v1.SparseTensorValue,
tf.compat.v1.ragged.RaggedTensorValue,
),
)
class Aggregator(object, metaclass=abc.ABCMeta):
"""Abstract base class used to aggregate batch-level outputs of a loop.
Attributes:
use_steps: Whether the loop is using `step` or `batch_size`.
num_samples: Total number of samples: `batch_size * num_batches`.
steps: Total number of steps.
batch_size: Batch size. It is used for validation checks between inputs
and outputs.
results: What to return at the end of the aggregation loop.
"""
def __init__(
self, use_steps, num_samples=None, steps=None, batch_size=None
):
self.use_steps = use_steps
self.num_samples = num_samples
self.steps = steps
self.batch_size = batch_size
self.results = []
@abc.abstractmethod
def create(self, batch_outs):
"""Creates the initial results from the first batch outputs.
Args:
batch_outs: A list of batch-level outputs.
"""
raise NotImplementedError("Must be implemented in subclasses.")
@abc.abstractmethod
def aggregate(self, batch_outs, batch_start=None, batch_end=None):
"""Aggregates batch-level results into total results.
Args:
batch_outs: A list of batch-level outputs.
batch_start: The start index of this batch. Always `None` if
`use_steps` is `True`.
batch_end: The end index of this batch. Always `None` if `use_steps`
is `True`.
"""
raise NotImplementedError("Must be implemented in subclasses.")
@abc.abstractmethod
def finalize(self):
"""Prepares the total results to be returned."""
raise NotImplementedError("Must be implemented in subclasses.")
class MetricsAggregator(Aggregator):
"""Aggregator that calculates loss and metrics info.
Attributes:
use_steps: Whether the loop is using `step` or `batch_size`.
num_samples: Total number of samples: `batch_size*num_batches`.
      steps: Total number of steps, i.e. the number of times to iterate over a dataset
to cover all samples.
"""
def __init__(self, use_steps, num_samples=None, steps=None):
super().__init__(
use_steps=use_steps,
num_samples=num_samples,
steps=steps,
batch_size=None,
)
def create(self, batch_outs):
self.results = [0.0] * len(batch_outs)
def aggregate(self, batch_outs, batch_start=None, batch_end=None):
# Loss.
if self.use_steps:
self.results[0] += batch_outs[0]
else:
self.results[0] += batch_outs[0] * (batch_end - batch_start)
# Metrics (always stateful, just grab current values.)
self.results[1:] = batch_outs[1:]
def finalize(self):
if not self.results:
raise ValueError("Empty training data.")
self.results[0] /= self.num_samples or self.steps
def _append_sparse_tensor_value(target, to_append):
"""Append sparse tensor value objects."""
# Make sure the sparse tensors are of the same size (except for the 0th
# dim).
if len(target.dense_shape) != len(to_append.dense_shape):
raise RuntimeError(
"Unable to concatenate %s and %s. The inner dense shapes do not "
"have the same number of dimensions (%s vs %s)"
% (target, to_append, target.dense_shape, to_append.dense_shape)
)
if target.dense_shape[1:] != to_append.dense_shape[1:]:
raise RuntimeError(
"Unable to concatenate %s and %s. The inner dense shapes do not "
"match inner dimensions (%s vs %s)"
% (
target,
to_append,
target.dense_shape[1:],
to_append.dense_shape[1:],
)
)
# Add the to_append indices to target, updating the 0th value, and keeping
# track of the maximum so we know the final dense_shape of this tensor.
base_dim0_value = target.dense_shape[0]
max_dim0_value = target.dense_shape[0]
new_indices = target.indices
for index in to_append.indices:
# Here, we iterate through the sparse indices of the tensor to append.
# For each index, we update its zeroth value (the batch index) by adding
# the number of batch items in the tensor we are appending to (so an
# index of [0, 0, 1] for a value that is being appended to a tensor with
# 0th dim size 3 would become [3, 0, 1].)
index[0] += base_dim0_value
max_dim0_value = max(max_dim0_value, index[0])
new_indices = np.append(new_indices, [index], axis=0)
# Extend the values array to contain all of the appended values. These will
# be in the same order as the indices added above.
new_values = np.concatenate((target.values, to_append.values), axis=0)
# Create a new dense shape by replacing the value for the 0th dimension
# with the new max dim0 value.
new_dense_shape = list(target.dense_shape)
new_dense_shape[0] = max_dim0_value + 1
new_dense_shape = tuple(new_dense_shape)
return tf.compat.v1.SparseTensorValue(
indices=new_indices, values=new_values, dense_shape=new_dense_shape
)
def _append_ragged_tensor_value(target, to_append):
"""Append ragged tensor value objects."""
# Make sure the ragged tensors are of the same size (save for the 0th dim).
if len(target.shape) != len(to_append.shape):
raise RuntimeError(f"Unable to concatenate {target} and {to_append}")
if target.shape[1:] != to_append.shape[1:]:
raise RuntimeError(f"Unable to concatenate {target} and {to_append}")
adjusted_row_splits = to_append.row_splits[1:] + target.row_splits[-1]
new_row_splits = np.append(target.row_splits, adjusted_row_splits)
if isinstance(target.values, tf.compat.v1.ragged.RaggedTensorValue):
new_values = _append_ragged_tensor_value(
target.values, to_append.values
)
else:
new_values = np.concatenate((target.values, to_append.values), axis=0)
return tf.compat.v1.ragged.RaggedTensorValue(new_values, new_row_splits)
def _append_composite_tensor(target, to_append):
"""Helper function to append composite tensors to each other in the 0 axis.
In order to support batching within a fit/evaluate/predict call, we need
to be able to aggregate within a CompositeTensor. Unfortunately, the CT
API currently does not make this easy - especially in V1 mode, where we're
working with CompositeTensor Value objects that have no connection with the
CompositeTensors that created them.
Args:
target: CompositeTensor or CompositeTensor value object that will be
appended to.
to_append: CompositeTensor or CompositeTensor value object to append to.
'target'.
Returns:
A CompositeTensor or CompositeTensor value object.
Raises:
RuntimeError: if concatenation is not possible.
"""
if type(target) is not type(to_append):
raise RuntimeError(
f"Unable to concatenate {type(target)} and {type(to_append)}"
)
# Perform type-specific concatenation.
# TODO(b/125094323): This should be replaced by a simple call to
# target.append() that should work on all of the below classes.
# If we're seeing a CompositeTensor here, we know it's because we're in
# Eager mode (or else we'd have evaluated the CT to a CT Value object
# already). Therefore, it's safe to call concat() on it without evaluating
# the result any further. If not - that is, if we're seeing a
# SparseTensorValue or a RaggedTensorValue - we need to hand-update it
# since we're outside of the graph anyways.
if isinstance(target, tf.SparseTensor):
# We need to invoke the sparse version of concatenate here - tf.concat
# won't work.
return tf.compat.v1.sparse_concat(sp_inputs=[target, to_append], axis=0)
elif isinstance(target, tf.RaggedTensor):
return tf.concat([target, to_append], axis=0)
elif isinstance(target, tf.compat.v1.SparseTensorValue):
return _append_sparse_tensor_value(target, to_append)
elif isinstance(target, tf.compat.v1.ragged.RaggedTensorValue):
return _append_ragged_tensor_value(target, to_append)
else:
raise RuntimeError(
f"Attempted to concatenate unsupported object {type(target)}."
)
class ConcatAggregator(Aggregator):
"""Combine tensor-likes which cannot be merged on the fly.
This class expects to aggregate a single tensor-like rather than a nested
structure of tensor-likes.
"""
def __init__(self, batch_size):
self.composite = None
super().__init__(
use_steps=True, num_samples=None, steps=None, batch_size=batch_size
)
def create(self, batch_element):
self.composite = is_composite_or_composite_value(batch_element)
def aggregate(self, batch_element, batch_start=None, batch_end=None):
# TODO(psv): Add num_samples check here to detect when output batch
# #samples is < batch size and != input batch #samples.
if self.batch_size and self.batch_size < batch_element.shape[0]:
raise ValueError(
"Mismatch between expected batch size and model output batch "
"size. Output shape = {}, "
"expected output shape = shape {}".format(
batch_element.shape,
(self.batch_size,) + batch_element.shape[1:],
)
)
self.results.append(batch_element)
def finalize(self):
# Special case of single batch inference which skips a copy.
if len(self.results) == 1:
self.results = self.results[0]
elif self.composite:
# TODO(taylorrobie): efficiently concatenate.
results = self.results[0]
for r in self.results[1:]:
results = _append_composite_tensor(results, r)
self.results = results
else:
self.results = np.concatenate(self.results, axis=0)
_COPY_THREADS = 4
_COPY_POOL = None
def get_copy_pool():
"""Shared threadpool for copying arrays.
Pool instantiation takes ~ 2ms, so a singleton pool is used rather than
creating a pool per SliceAggregator.
Returns:
The global copy threadpool.
"""
global _COPY_POOL
if _COPY_POOL is None:
_COPY_POOL = multiprocessing.pool.ThreadPool(_COPY_THREADS)
atexit.register(_COPY_POOL.close)
return _COPY_POOL
class SliceAggregator(Aggregator):
"""Combine arrays where the final size is known.
This class expects to aggregate a single tensor-like rather than a nested
structure of tensor-likes.
NumPy copies are an operation that threads handle quite well because all of
    the heavy lifting is in C and does not need the GIL. Moreover, we can
perform lock-free writes to the same buffer in multiple threads because the
nature of result aggregation guarantees that either the indices are disjoint
or the aggregator will throw an exception in finalize. Moreover, because
aggregation is performed on the slowest varying dimension, assignments for a
given batch will write to contiguous blocks of memory, further minimizing
contention.
There is, however, some scheduling and context switching overhead which will
offset the gains from pipelining the slice assignment. Below a given
threshold it is faster to simply assign in the main thread rather than
enqueue the assignment in a side thread. The exact threshold will vary from
system to system, but the time is not very sensitive to the exact transition
so a value of 2 ** 14 was chosen which should be reasonable on most systems.
"""
_BINARY_SIZE_THRESHOLD = 2**14
_MAX_COPY_SECONDS = 300
def __init__(self, num_samples, batch_size):
self._async_copies = []
self._pool = get_copy_pool()
self._errors = []
super().__init__(
use_steps=False,
num_samples=num_samples,
steps=None,
batch_size=batch_size,
)
def create(self, batch_element):
# This step does not need to be pipelined because NumPy empty array
# initialization is effectively instantaneous.
shape = (self.num_samples,) + batch_element.shape[1:]
dtype = batch_element.dtype
self.results = np.empty(shape=shape, dtype=dtype)
def aggregate(self, batch_element, batch_start, batch_end):
# Fail early.
if self._errors:
raise self._errors[0]
# In the special case of single batch inference, no copy is needed.
if batch_end - batch_start == self.num_samples:
if self.num_samples != batch_element.shape[0]:
raise ValueError(
"Mismatch between expected batch size and model "
"output batch size. Output shape = {}, "
"expected output shape = shape {}".format(
batch_element.shape, self.results.shape
)
)
self.results = batch_element
return
# This is an approximate threshold, so we don't need to consider the
# number of bytes per element.
num_elements = np.prod(batch_element.shape)
if num_elements < self._BINARY_SIZE_THRESHOLD:
self.results[batch_start:batch_end] = batch_element
else:
is_finished = threading.Event()
self._pool.apply_async(
self._slice_assign,
args=(batch_element, batch_start, batch_end, is_finished),
)
self._async_copies.append(is_finished)
def _slice_assign(self, batch_element, batch_start, batch_end, is_finished):
"""Legacy utility method to slice input arrays."""
try:
self.results[batch_start:batch_end] = batch_element
except Exception as e:
# `_slice_assign` should only be called in threads and exceptions
# raised in threads do not carry over to the main thread. So instead
            # we perform a broad catch in the thread and then store the
# exception to be re-raised in the main thread.
self._errors.append(e)
finally:
is_finished.set()
def finalize(self):
start_time = time.time()
for is_finished in self._async_copies:
timeout = max(
[0.0, self._MAX_COPY_SECONDS - (time.time() - start_time)]
)
if not is_finished.wait(timeout):
raise ValueError("Timed out waiting for copy to complete.")
if self._errors:
raise self._errors[0]
class OutputsAggregator(Aggregator):
"""Aggregator that concatenates outputs."""
_structure = None
def create(self, batch_outs):
# SparseTensorValue is a named tuple which nest will flatten, so we need
# to guard it to properly handle the structure.
self._structure = tf.__internal__.nest.get_traverse_shallow_structure(
lambda x: not is_composite_or_composite_value(x), batch_outs
)
batch_outs = tf.__internal__.nest.flatten_up_to(
self._structure, batch_outs
)
for batch_element in batch_outs:
if is_composite_or_composite_value(batch_element):
# If the output is not a ndarray, it will be either a composite
# tensor or a composite tensor's Value object. In either case,
# we can't allocate an array to hold the object - we'll handle
# it later.
self.results.append(ConcatAggregator(self.batch_size))
elif isinstance(batch_element, np.ndarray):
self.results.append(
(
ConcatAggregator(self.batch_size)
if self.use_steps
else SliceAggregator(self.num_samples, self.batch_size)
)
)
else:
# This is not a ndarray, a CompositeTensor, or a
# CompositeTensorValue. Fail fast rather than trying to
# concatenate it.
raise RuntimeError(
"Attempted to aggregate unsupported object {}.".format(
batch_element
)
)
self.results[-1].create(batch_element)
def aggregate(self, batch_outs, batch_start=None, batch_end=None):
batch_outs = tf.__internal__.nest.flatten_up_to(
self._structure, batch_outs
)
for batch_element, result in zip(batch_outs, self.results):
result.aggregate(batch_element, batch_start, batch_end)
def finalize(self):
for result in self.results:
result.finalize()
self.results = [i.results for i in self.results]
self.results = tf.nest.pack_sequence_as(self._structure, self.results)
def get_progbar(model, count_mode, include_metrics=True):
"""Get Progbar."""
if include_metrics:
stateful_metric_names = getattr(model, "metrics_names", None)
if stateful_metric_names:
stateful_metric_names = stateful_metric_names[1:] # Exclude `loss`
else:
stateful_metric_names = None
return cbks.ProgbarLogger(
count_mode, stateful_metrics=stateful_metric_names
)
def check_num_samples(ins, batch_size=None, steps=None, steps_name="steps"):
"""Determine the number of samples provided for training and evaluation.
The number of samples is not defined when running with `steps`,
in which case the number of samples is set to `None`.
Args:
ins: List of tensors to be fed to the TF-Keras function.
batch_size: Integer batch size or `None` if not defined.
steps: Total number of steps (batches of samples) before declaring
`_predict_loop` finished. Ignored with the default value of `None`.
steps_name: The public API's parameter name for `steps`.
Raises:
ValueError: when `steps` is `None` and the attribute `ins.shape`
does not exist. Also raises ValueError when `steps` is not `None`
and `batch_size` is not `None` because they are mutually
exclusive.
Returns:
When steps is `None`, returns the number of samples to be
processed based on the size of the first dimension of the
first input numpy array. When steps is not `None` and
`batch_size` is `None`, returns `None`.
"""
if steps is not None and batch_size is not None:
raise ValueError(
"If " + steps_name + " is set, the `batch_size` must be None."
)
if check_steps_argument(ins, steps, steps_name):
return None
if hasattr(ins[0], "shape"):
return int(ins[0].shape[0])
return None # Edge case where ins == [static_learning_phase]
def standardize_single_array(x, expected_shape=None):
"""Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1."""
if x is None:
return None
if is_composite_or_composite_value(x):
return x
if isinstance(x, int):
raise ValueError(
f"Expected an array data type but received an integer: {x}"
)
if (
x.shape is not None
and len(x.shape) == 1
and (expected_shape is None or len(expected_shape) != 1)
):
if tf.is_tensor(x):
x = tf.compat.v1.expand_dims(x, axis=1)
else:
x = np.expand_dims(x, 1)
return x
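# Illustrative sketch (not part of the original module): a rank-1 NumPy array
# is expanded to rank 2 unless the expected shape is itself rank 1.
def _example_standardize_single_array():  # pragma: no cover - illustrative
    x = np.array([1.0, 2.0, 3.0])
    expanded = standardize_single_array(x)  # shape (3, 1)
    unchanged = standardize_single_array(x, expected_shape=(3,))  # shape (3,)
    return expanded, unchanged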
def get_composite_shape(tensor):
"""Returns the shape of the passed composite tensor."""
if isinstance(tensor, tf.compat.v1.SparseTensorValue):
# SparseTensorValues use a 'dense_shape' attribute
return tensor.dense_shape
else:
return tensor.shape
def standardize_input_data(
data, names, shapes=None, check_batch_axis=True, exception_prefix=""
):
"""Normalizes inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
Args:
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that the batch axis of the
arrays matches the expected value found in `shapes`.
exception_prefix: String prefix used for exception formatting.
Returns:
List of standardized input arrays (one array per model input).
Raises:
ValueError: in case of improperly formatted user-provided data.
"""
try:
data_len = len(data)
except TypeError:
# For instance if data is `None` or a symbolic Tensor.
data_len = None
if not names:
if data_len and not isinstance(data, dict):
raise ValueError(
"Error when checking model "
+ exception_prefix
+ ": expected no data, but got:",
data,
)
return []
if data is None:
return [None for _ in range(len(names))]
if isinstance(data, dict):
try:
data = [
data[x].values
if data[x].__class__.__name__ == "DataFrame"
else data[x]
for x in names
]
except KeyError as e:
raise ValueError(
'No data provided for "'
+ e.args[0]
+ '". Need data for each key in: '
+ str(names)
)
elif isinstance(data, (list, tuple)):
if isinstance(data[0], (list, tuple)):
data = [np.asarray(d) for d in data]
elif len(names) == 1 and isinstance(data[0], (float, int)):
data = [np.asarray(data)]
else:
data = [
x.values if x.__class__.__name__ == "DataFrame" else x
for x in data
]
else:
data = data.values if data.__class__.__name__ == "DataFrame" else data
data = [data]
if shapes is not None:
data = [
standardize_single_array(x, shape)
for (x, shape) in zip(data, shapes)
]
else:
data = [standardize_single_array(x) for x in data]
if len(data) != len(names):
if data and hasattr(data[0], "shape"):
raise ValueError(
"Error when checking model "
+ exception_prefix
+ ": the list of Numpy arrays that you are passing to "
"your model is not the size the model expected. "
"Expected to see "
+ str(len(names))
+ " array(s), "
+ "for inputs "
+ str(names)
+ " but instead got the following list of "
+ str(len(data))
+ " arrays: "
+ str(data)[:200]
+ "..."
)
elif len(names) > 1:
raise ValueError(
"Error when checking model "
+ exception_prefix
+ ": you are passing a list as input to your model, "
"but the model expects a list of "
+ str(len(names))
+ " Numpy arrays instead. The list you passed was: "
+ str(data)[:200]
)
elif len(data) == 1 and not hasattr(data[0], "shape"):
raise TypeError(
"Error when checking model "
+ exception_prefix
+ ": data should be a Numpy array, or list/dict of "
"Numpy arrays. Found: " + str(data)[:200] + "..."
)
elif len(names) == 1:
data = [np.asarray(data)]
# Check shapes compatibility.
if shapes:
for i in range(len(names)):
if shapes[i] is not None:
if tf.is_tensor(data[i]):
tensorshape = data[i].shape
if not tensorshape:
continue
data_shape = tuple(tensorshape.as_list())
elif is_composite_or_composite_value(data[i]):
tensorshape = get_composite_shape(data[i])
data_shape = tuple(tensorshape.as_list())
else:
data_shape = data[i].shape
shape = shapes[i]
if len(data_shape) != len(shape):
raise ValueError(
"Error when checking "
+ exception_prefix
+ ": expected "
+ names[i]
+ " to have "
+ str(len(shape))
+ " dimensions, but got array with shape "
+ str(data_shape)
)
if not check_batch_axis:
data_shape = data_shape[1:]
shape = shape[1:]
for dim, ref_dim in zip(data_shape, shape):
if (
ref_dim != dim
and ref_dim is not None
and dim is not None
):
raise ValueError(
"Error when checking "
+ exception_prefix
+ ": expected "
+ names[i]
+ " to have shape "
+ str(shape)
+ " but got array with shape "
+ str(data_shape)
)
return data
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
"""Maps `sample_weight` or `class_weight` to model outputs.
Args:
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
Returns:
        A list of `sample_weight` or `class_weight` entries, with exactly one
        element per model output.
Raises:
ValueError: In case of invalid user-provided argument.
"""
if x_weight is None or (
isinstance(x_weight, (list, tuple)) and len(x_weight) == 0
):
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, (list, tuple)):
if len(x_weight) != len(output_names):
raise ValueError(
"Provided `"
+ weight_type
+ "` was a list of "
+ str(len(x_weight))
+ " elements, but the model has "
+ str(len(output_names))
+ " outputs. You should provide one `"
+ weight_type
+ "`array per model output."
)
return x_weight
if isinstance(x_weight, collections.abc.Mapping):
generic_utils.check_for_unexpected_keys(
weight_type, x_weight, output_names
)
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError(
"The model has multiple outputs, so `"
+ weight_type
+ "` should be either a list or a dict. Provided `"
+ weight_type
+ "` type not understood: "
+ str(x_weight)
)
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(
class_weight, output_names, "class_weight"
)
def standardize_sample_weights(sample_weight, output_names):
return standardize_sample_or_class_weights(
sample_weight, output_names, "sample_weight"
)
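# Illustrative sketch (not part of the original module; the output names are
# hypothetical): dict weights are re-ordered to match `output_names`, and
# outputs without an entry map to None.
def _example_standardize_sample_weights():  # pragma: no cover - illustrative
    weights = {"out_b": np.ones(4)}
    per_output = standardize_sample_weights(weights, ["out_a", "out_b"])
    # -> [None, array([1., 1., 1., 1.])]
    return per_output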
def check_array_lengths(inputs, targets, weights=None):
"""Does user input validation for numpy arrays.
Args:
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
Raises:
ValueError: in case of incorrectly formatted data.
"""
def is_tensor_or_composite_tensor(x):
return tf.is_tensor(x) or is_composite_or_composite_value(x)
def set_of_lengths(x):
# Returns a set with the variation between
# different shapes, with None => 0
if x is None:
return {}
else:
return set(
[
y.shape[0]
for y in x
if y is not None and not is_tensor_or_composite_tensor(y)
]
)
set_x = set_of_lengths(inputs)
set_y = set_of_lengths(targets)
set_w = set_of_lengths(weights)
if len(set_x) > 1:
raise ValueError(
"All input arrays (x) should have "
"the same number of samples. Got array shapes: "
+ str([x.shape for x in inputs])
)
if len(set_y) > 1:
raise ValueError(
"All target arrays (y) should have "
"the same number of samples. Got array shapes: "
+ str([y.shape for y in targets])
)
if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
raise ValueError(
"Input arrays should have "
"the same number of samples as target arrays. "
"Found "
+ str(list(set_x)[0])
+ " input samples and "
+ str(list(set_y)[0])
+ " target samples."
)
if len(set_w) > 1:
raise ValueError(
"All sample_weight arrays should have "
"the same number of samples. Got array shapes: "
+ str([w.shape for w in weights])
)
if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
raise ValueError(
"Sample_weight arrays should have "
"the same number of samples as target arrays. Got "
+ str(list(set_y)[0])
+ " input samples and "
+ str(list(set_w)[0])
+ " target samples."
)
def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatibility of targets and loss functions.
This helps prevent users from using loss functions incorrectly. This check
is purely for UX purposes.
Args:
targets: list of Numpy arrays of targets.
loss_fns: list of loss functions.
output_shapes: list of shapes of model outputs.
Raises:
ValueError: if a loss function or target array
is incompatible with an output.
"""
key_loss_fns = {
losses.mean_squared_error,
losses.binary_crossentropy,
losses.categorical_crossentropy,
}
key_loss_classes = (
losses.MeanSquaredError,
losses.BinaryCrossentropy,
losses.CategoricalCrossentropy,
)
for y, loss, shape in zip(targets, loss_fns, output_shapes):
if y is None or loss is None or tf.is_tensor(y):
continue
if losses.is_categorical_crossentropy(loss):
if y.shape[-1] == 1:
raise ValueError(
"You are passing a target array of shape "
+ str(y.shape)
+ " while using as loss `categorical_crossentropy`. "
"`categorical_crossentropy` expects "
"targets to be binary matrices (1s and 0s) "
"of shape (samples, classes). "
"If your targets are integer classes, "
"you can convert them to the expected format via:\n"
"```\n"
"from tf_keras.utils import to_categorical\n"
"y_binary = to_categorical(y_int)\n"
"```\n"
"\n"
"Alternatively, you can use the loss function "
"`sparse_categorical_crossentropy` instead, "
"which does expect integer targets."
)
is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)
if isinstance(loss, key_loss_classes) or (
is_loss_wrapper and (loss.fn in key_loss_fns)
):
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
loss_name = loss.name
if loss_name is None:
loss_type = loss.fn if is_loss_wrapper else type(loss)
loss_name = loss_type.__name__
raise ValueError(
"A target array with shape "
+ str(y.shape)
+ " was passed for an output of shape "
+ str(shape)
+ " while using as loss `"
+ loss_name
+ "`. "
"This loss expects targets to have the same shape "
"as the output."
)
def collect_per_output_metric_info(
metrics,
output_names,
output_shapes,
loss_fns,
from_serialized=False,
is_weighted=False,
):
"""Maps metric names and functions to model outputs.
Args:
metrics: a list or a list of lists or a dict of metric functions.
output_names: a list of the names (strings) of model outputs.
        output_shapes: a list of the shapes of model outputs.
loss_fns: a list of the loss functions corresponding to the model
outputs.
from_serialized: whether the model the metrics are being sourced from is
being initialized from a serialized format.
is_weighted: Boolean indicating whether the given metrics are weighted.
Returns:
A list (one entry per model output) of dicts.
For instance, if the model has 2 outputs, and for the first output
we want to compute "binary_accuracy" and "binary_crossentropy",
and just "binary_accuracy" for the second output,
the list would look like: `[{
'acc': binary_accuracy(),
'ce': binary_crossentropy(),
}, {
'acc': binary_accuracy(),
}]`
Raises:
TypeError: if an incorrect type is passed for the `metrics` argument.
"""
if not metrics:
return [{} for _ in output_names]
if isinstance(metrics, list):
any_sub_list = any(isinstance(m, list) for m in metrics)
if any_sub_list:
if len(metrics) != len(output_names):
raise ValueError(
"When passing a list of lists as `metrics`, "
"it should have one entry per model output. "
"The model has "
+ str(len(output_names))
+ " outputs, but you passed metrics="
+ str(metrics)
)
# User has provided a list of len = len(outputs).
nested_metrics = [generic_utils.to_list(m) for m in metrics]
else:
# If it is a single list we then apply all metrics to all outputs.
if len(output_names) > 1:
nested_metrics = []
for _ in output_names:
nested_metrics.append(
[metrics_module.clone_metric(m) for m in metrics]
)
else:
nested_metrics = [metrics]
elif isinstance(metrics, collections.abc.Mapping):
generic_utils.check_for_unexpected_keys(
"metrics", metrics, output_names
)
nested_metrics = []
for name in output_names:
output_metrics = generic_utils.to_list(metrics.get(name, []))
nested_metrics.append(output_metrics)
else:
raise TypeError(
"Type of `metrics` argument not understood. "
"Expected a list or dictionary, found: " + str(metrics)
)
per_output_metrics = []
for i, metrics in enumerate(nested_metrics):
metrics_dict = collections.OrderedDict()
for metric in metrics:
metric_name = get_metric_name(metric, is_weighted)
metric_fn = get_metric_function(
metric, output_shape=output_shapes[i], loss_fn=loss_fns[i]
)
metric_fn._from_serialized = from_serialized
# If the metric function is not stateful, we create a stateful
# version.
if not isinstance(metric_fn, metrics_module.Metric):
metric_fn = metrics_module.MeanMetricWrapper(
metric_fn, name=metric_name
)
# If the metric is being revived from something stateless, such
# as a string (e.g. "accuracy"), we may need to later reapply
# transformations such as renaming.
metric_fn._from_serialized = False
metrics_dict[metric_name] = metric_fn
per_output_metrics.append(metrics_dict)
return per_output_metrics
def batch_shuffle(index_array, batch_size):
"""Shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
Args:
index_array: array of indices to be shuffled.
batch_size: integer.
Returns:
The `index_array` array, shuffled in a batch-wise fashion.
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size :]
index_array = index_array[: batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
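# Illustrative sketch (not part of the original module): `batch_shuffle` keeps
# each batch contiguous, which is what HDF5-backed arrays need.
def _example_batch_shuffle():  # pragma: no cover - illustrative only
    index_array = np.arange(7)
    shuffled = batch_shuffle(index_array, batch_size=3)
    # Batches [0, 1, 2] and [3, 4, 5] are permuted as whole blocks; the
    # leftover index 6 is re-appended unshuffled at the end.
    return shuffled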
def standardize_weights(
y, sample_weight=None, class_weight=None, sample_weight_mode=None
):
"""Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array. If both `sample_weight` and `class_weight` are provided,
the weights are multiplied.
Args:
y: Numpy array or Tensor of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`. `"temporal"`
indicated that we expect 2D weight data that will be applied to the
last 2 dimensions of the targets (i.e. we are weighting timesteps, not
samples).
Returns:
A numpy array of target weights, one entry per sample to weight.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
# Iterator may return sample_weight as 1-tuple
if isinstance(sample_weight, tuple):
sample_weight = sample_weight[0]
if sample_weight_mode is not None and sample_weight_mode != "samplewise":
if sample_weight_mode != "temporal":
raise ValueError(
'"sample_weight_mode should be None or "temporal". Found: '
+ str(sample_weight_mode)
)
if len(y.shape) < 3:
raise ValueError(
"Found a sample_weight array for an input with shape "
+ str(y.shape)
+ ". "
"Timestep-wise sample weighting (use of "
'sample_weight_mode="temporal") is restricted to '
"outputs that are at least 3D, i.e. that have "
"a time dimension."
)
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError(
"Found a sample_weight array with shape "
+ str(sample_weight.shape)
+ ". "
"In order to use timestep-wise sample weighting, "
"you should pass a 2D sample_weight array."
)
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError(
"Found a sample_weight array with shape {}. In order to "
"use timestep-wise sample weights, you should specify "
'sample_weight_mode="temporal" in compile(); founssd "{}" '
"instead. If you just mean to use sample-wise weights, "
"make sure your sample_weight array is 1D.".format(
sample_weight.shape, sample_weight_mode
)
)
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
raise ValueError(
"Found a sample_weight with shape"
+ str(sample_weight.shape)
+ ".Expected sample_weight with rank less than or equal to "
+ str(len(y.shape))
)
if (
not tf.is_tensor(sample_weight)
and y.shape[: sample_weight.ndim] != sample_weight.shape
):
raise ValueError(
"Found a sample_weight array with shape "
+ str(sample_weight.shape)
+ " for an input with shape "
+ str(y.shape)
+ ". sample_weight cannot be broadcast."
)
# Class weights applied per-sample.
class_sample_weight = None
if isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError(
"`class_weight` not supported for 3+ dimensional targets."
)
if tf.is_tensor(y):
# Few classes are expected, so densifying is reasonable.
keys = np.array(sorted(class_weight.keys()))
values = np.array([class_weight[i] for i in keys])
weight_vector = np.zeros(np.max(keys) + 1)
weight_vector[:] = np.nan
weight_vector[keys] = values
y_classes = tf.__internal__.smart_cond.smart_cond(
len(y.shape.as_list()) == 2 and backend.shape(y)[1] > 1,
lambda: backend.argmax(y, axis=1),
lambda: tf.cast(backend.reshape(y, (-1,)), tf.int64),
)
class_sample_weight = tf.compat.v1.gather(weight_vector, y_classes)
tf.debugging.check_numerics(
class_sample_weight,
"Invalid classes or class weights detected. NaN values "
"indicate that an appropriate class weight could not be "
"determined.",
)
class_sample_weight = tf.cast(class_sample_weight, backend.floatx())
if sample_weight is not None:
sample_weight = tf.cast(
tf.convert_to_tensor(sample_weight), backend.floatx()
)
else:
y_classes = y
if len(y.shape) == 2:
if y.shape[1] > 1:
y_classes = np.argmax(y, axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
class_sample_weight = np.asarray(
[class_weight[cls] for cls in y_classes if cls in class_weight]
)
if len(class_sample_weight) != len(y_classes):
# subtract the sets to pick all missing classes
existing_classes = set(y_classes)
existing_class_weight = set(class_weight.keys())
raise ValueError(
"`class_weight` must contain all classes in the data."
" The classes %s exist in the data but not in "
"`class_weight`."
% (existing_classes - existing_class_weight)
)
if class_sample_weight is not None and sample_weight is not None:
# Multiply weights if both are provided.
return class_sample_weight * sample_weight
if sample_weight is not None:
return sample_weight
if class_sample_weight is not None:
return class_sample_weight
return None
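# Illustrative sketch (not part of the original module): with one-hot targets
# and a `class_weight` dict, the result is one weight per sample.
def _example_standardize_weights():  # pragma: no cover - illustrative only
    y = np.array([[1, 0], [0, 1], [0, 1]])  # one-hot targets for classes 0/1
    weights = standardize_weights(y, class_weight={0: 1.0, 1: 2.0})
    # -> array([1., 2., 2.]); had `sample_weight` also been passed, the two
    # would have been multiplied element-wise.
    return weights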
def has_symbolic_tensors(ls):
if tf.executing_eagerly():
return False
return has_tensors(ls)
def has_tensors(ls):
"""Returns true if `ls` contains tensors."""
# Note: at some point in time ragged tensors didn't count as tensors, so
# this returned false for ragged tensors. Making this return true fails some
# tests which would then require a steps_per_epoch argument.
if isinstance(ls, (list, tuple)):
return any(
tf.is_tensor(v) and not isinstance(v, tf.RaggedTensor) for v in ls
)
if isinstance(ls, dict):
return any(
tf.is_tensor(v) and not isinstance(v, tf.RaggedTensor)
for _, v in ls.items()
)
return tf.is_tensor(ls) and not isinstance(ls, tf.RaggedTensor)
def get_metric_name(metric, weighted=False):
"""Returns the name corresponding to the given metric input.
Args:
metric: Metric function name or reference.
weighted: Boolean indicating if the given metric is weighted.
Returns:
The metric name.
"""
if tf.__internal__.tf2.enabled():
# We keep the string that the user has set in compile as the metric
# name.
if isinstance(metric, str):
return metric
metric = metrics_module.get(metric)
return metric.name if hasattr(metric, "name") else metric.__name__
else:
metric_name_prefix = "weighted_" if weighted else ""
if metric in ("accuracy", "acc", "crossentropy", "ce"):
if metric in ("accuracy", "acc"):
suffix = "acc"
elif metric in ("crossentropy", "ce"):
suffix = "ce"
else:
metric_fn = metrics_module.get(metric)
# Get metric name as string
if hasattr(metric_fn, "name"):
suffix = metric_fn.name
else:
suffix = metric_fn.__name__
metric_name = metric_name_prefix + suffix
return metric_name
def get_metric_function(metric, output_shape=None, loss_fn=None):
"""Returns the metric function corresponding to the given metric input.
Args:
metric: Metric function name or reference.
output_shape: The shape of the output that this metric will be
calculated for.
loss_fn: The loss function used.
Returns:
The metric function.
"""
if metric not in ["accuracy", "acc", "crossentropy", "ce"]:
return metrics_module.get(metric)
is_sparse_categorical_crossentropy = isinstance(
loss_fn, losses.SparseCategoricalCrossentropy
) or (
isinstance(loss_fn, losses.LossFunctionWrapper)
and loss_fn.fn == losses.sparse_categorical_crossentropy
)
is_binary_crossentropy = isinstance(loss_fn, losses.BinaryCrossentropy) or (
isinstance(loss_fn, losses.LossFunctionWrapper)
and loss_fn.fn == losses.binary_crossentropy
)
if metric in ["accuracy", "acc"]:
if output_shape[-1] == 1 or is_binary_crossentropy:
return metrics_module.binary_accuracy
elif is_sparse_categorical_crossentropy:
return metrics_module.sparse_categorical_accuracy
# If the output_shape[-1] is not 1, then we know output is
# `categorical`. We assume it is sparse categorical only if loss is
# explicitly given as sparse categorical crossentropy loss.
return metrics_module.categorical_accuracy
else:
if output_shape[-1] == 1 or is_binary_crossentropy:
return metrics_module.binary_crossentropy
elif is_sparse_categorical_crossentropy:
return metrics_module.sparse_categorical_crossentropy
return metrics_module.categorical_crossentropy
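# Illustrative sketch (not part of the original module): how the generic
# "accuracy" string resolves to a concrete metric based on the output shape
# and the loss function.
def _example_resolve_accuracy_metric():  # pragma: no cover - illustrative
    binary = get_metric_function("accuracy", output_shape=(None, 1))
    sparse = get_metric_function(
        "accuracy",
        output_shape=(None, 10),
        loss_fn=losses.SparseCategoricalCrossentropy(),
    )
    categorical = get_metric_function("accuracy", output_shape=(None, 10))
    # binary -> binary_accuracy, sparse -> sparse_categorical_accuracy,
    # categorical -> categorical_accuracy.
    return binary, sparse, categorical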
def call_metric_function(
metric_fn, y_true, y_pred=None, weights=None, mask=None
):
"""Invokes metric function and returns the metric result tensor."""
if mask is not None:
mask = tf.cast(mask, y_pred.dtype)
if weights is None:
# Use mask as sample weight.
weights = mask
else:
# Update dimensions of weights to match with mask.
weights = tf.cast(weights, dtype=y_pred.dtype)
mask, _, weights = losses_utils.squeeze_or_expand_dimensions(
mask, sample_weight=weights
)
weights *= mask
if y_pred is not None:
return metric_fn(y_true, y_pred, sample_weight=weights)
# `Mean` metric only takes a single value.
return metric_fn(y_true, sample_weight=weights)
def get_loss_function(loss):
"""Returns the loss corresponding to the loss input in `compile` API."""
if loss is None or isinstance(loss, losses.Loss):
return loss
if tf_inspect.isclass(loss) and issubclass(loss, losses.Loss):
# It is not safe to assume that the loss takes no constructor arguments.
raise ValueError(
"Received uninstantiated Loss class: {}\n"
"Please call loss classes "
"before passing them to Model.compile.".format(loss)
)
# Deserialize loss configuration, if needed.
if isinstance(loss, collections.abc.Mapping):
loss = losses.get(loss)
# Custom callable class.
if callable(loss) and not hasattr(loss, "__name__"):
return loss
# Wrap loss function with signature `(y_true, y_pred, **kwargs)`
# in `LossFunctionWrapper` class.
loss_fn = losses.get(loss)
# For losses which are given as strings/functions in the compile API,
# we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`
# (both in distribution strategy context and otherwise).
return losses.LossFunctionWrapper(
loss_fn,
name=loss_fn.__name__,
reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
)
def validate_dataset_input(x, y, sample_weight, validation_split=None):
"""Validates user input arguments when a dataset iterator is passed.
Args:
x: Input data. A `tf.data` dataset or iterator.
y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).
Expected to be `None` when `x` is a dataset iterator.
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`. Expected to be `None` when
`x` is a dataset iterator
validation_split: Float between 0 and 1. Fraction of the training data to
be used as validation data. Expected to be `None` when `x` is a dataset
iterator.
Raises:
ValueError: if argument `y` or `sample_weight` or `validation_split` are
provided by user.
"""
if y is not None:
raise ValueError(
"You passed a dataset or dataset iterator (%s) as "
"input `x` to your model. In that case, you should "
"not specify a target (`y`) argument, since the dataset "
"or dataset iterator generates both input data and "
"target data. "
"Received: %s" % (x, y)
)
if sample_weight is not None:
raise ValueError(
"`sample_weight` argument is not supported when input "
"`x` is a dataset or a dataset iterator. Instead, you"
"can provide sample_weight as the third element of your"
"dataset, i.e. (inputs, targets, sample_weight). "
"Received: x=%s, sample_weight=%s" % (x, sample_weight)
)
if validation_split is not None and validation_split != 0.0:
raise ValueError(
"`validation_split` argument is not supported when "
"input `x` is a dataset or a dataset iterator. "
"Received: x=%s, validation_split=%f" % (x, validation_split)
)
def validate_input_types(inp, orig_inp, allow_dict=True, field_name="inputs"):
"""Helper function to validate either inputs or targets."""
if isinstance(inp, (list, tuple)):
if not all(isinstance(v, np.ndarray) or tf.is_tensor(v) for v in inp):
raise ValueError(
"Please provide as model inputs either a single array or a "
f"list of arrays. You passed: {field_name}={str(orig_inp)}"
)
elif isinstance(inp, dict):
if not allow_dict:
raise ValueError(
f"You cannot pass a dictionary as model {field_name}."
)
elif not isinstance(inp, np.ndarray) and not tf.is_tensor(inp):
raise ValueError(
"Please provide as model inputs either a single array or a list of "
"arrays. You passed: {}={}".format(field_name, orig_inp)
)
def check_generator_arguments(
y=None, sample_weight=None, validation_split=None
):
"""Validates arguments passed when using a generator."""
if y is not None:
raise ValueError(
"`y` argument is not supported when data is"
"a generator or Sequence instance. Instead pass targets"
" as the second element of the generator."
)
if sample_weight is not None:
raise ValueError(
"`sample_weight` argument is not supported when data is"
"a generator or Sequence instance. Instead pass sample"
" weights as the third element of the generator."
)
if validation_split:
raise ValueError(
"If your data is in the form of a Python generator, "
"you cannot use `validation_split`."
)
def check_steps_argument(input_data, steps, steps_name):
"""Validates `steps` argument based on input data's type.
The cases when `steps` value must be provided are when
1. input data passed is an iterator.
2. model was built on top of symbolic tensors, input data is not
required and is `None`.
3. input data passed is a symbolic tensor.
Args:
input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or
tf.data.Dataset iterator or `None`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
steps_name: The public API's parameter name for `steps`.
Returns:
boolean, True if `steps` argument is required, else False.
Raises:
ValueError: if `steps` argument is required for given input data type
but not provided.
"""
is_x_iterator = isinstance(
input_data, (tf.compat.v1.data.Iterator, tf.data.Iterator)
)
if (
input_data is None
or is_x_iterator
or has_symbolic_tensors(input_data)
or (isinstance(input_data, list) and not input_data)
):
if steps is None:
input_type_str = (
"a Dataset iterator" if is_x_iterator else "data tensors"
)
raise ValueError(
"When using {input_type} as input to a model, you should"
" specify the `{steps_name}` argument.".format(
input_type=input_type_str, steps_name=steps_name
)
)
return True
if isinstance(input_data, (tf.compat.v1.data.Dataset, tf.data.Dataset)):
return True
if steps is not None:
list_types = (np.ndarray, list, tuple)
if isinstance(input_data, list_types) or (
isinstance(input_data, dict)
and any(isinstance(v, list_types) for v in input_data.values())
):
logging.warning(
"When passing input data as arrays, do not specify "
"`steps_per_epoch`/`steps` argument. "
"Please use `batch_size` instead."
)
return False
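# Illustrative sketch (not part of the original module): `steps` is required
# for datasets and symbolic inputs, and ignored (with a warning) for arrays.
def _example_check_steps_argument():  # pragma: no cover - illustrative only
    dataset = tf.data.Dataset.from_tensor_slices(np.arange(8)).batch(2)
    needs_steps = check_steps_argument(dataset, steps=4, steps_name="steps")
    # -> True: the loop should draw `steps` batches from the dataset.
    arrays = [np.zeros((8, 3))]
    uses_batch_size = check_steps_argument(
        arrays, steps=None, steps_name="steps"
    )
    # -> False: array inputs are sliced with `batch_size` instead.
    return needs_steps, uses_batch_size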
def cast_single_tensor(x, dtype=None):
if isinstance(x, np.ndarray):
x = tf.convert_to_tensor(x)
dtype = dtype or backend.floatx()
if x.dtype.is_floating:
return tf.cast(x, dtype=dtype)
return x
def cast_if_floating_dtype_and_mismatch(targets, outputs):
"""Returns target data tensors using correct datatype.
Checks that each target and output pair are the same datatype. If not, casts
the target to the output's datatype.
Args:
targets: tensor or list of targets.
outputs: tensor or list of outputs.
Returns:
Targets in appropriate datatype.
"""
if tf.is_tensor(targets):
# There is one target, so output[0] should be the only output.
return cast_single_tensor(targets, dtype=outputs[0].dtype)
new_targets = []
for target, out in zip(targets, outputs):
if isinstance(target, np.ndarray):
target = tf.convert_to_tensor(target)
if target.dtype != out.dtype:
new_targets.append(cast_single_tensor(target, dtype=out.dtype))
else:
new_targets.append(target)
return new_targets
def cast_if_floating_dtype(x, dtype=None):
"""Casts the given data tensors to the default floating point type.
Casts only if the input is already a floating point type.
Args:
x: tensor or list/tuple of tensors.
dtype: The dtype to which Tensors should be cast.
Returns:
Converted input.
"""
return tf.nest.map_structure(
functools.partial(cast_single_tensor, dtype=dtype), x
)
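# Illustrative sketch (not part of the original module): floating-point inputs
# are cast to the requested dtype while integer inputs pass through unchanged.
def _example_cast_if_floating_dtype():  # pragma: no cover - illustrative only
    data = {
        "features": np.zeros((2, 3), dtype=np.float64),
        "ids": np.arange(2, dtype=np.int64),
    }
    casted = cast_if_floating_dtype(data, dtype="float32")
    # casted["features"].dtype == tf.float32, casted["ids"].dtype == tf.int64
    return casted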
def cast_to_model_input_dtypes(x, model):
"""Casts the given data tensors to the dtypes of the model inputs.
Args:
x: tensor or list/tuple of tensors.
model: The model.
Returns:
Converted input. Each tensor is casted to the corresponding input in
`model.inputs`.
"""
input_dtypes = tf.nest.map_structure(lambda t: t.dtype, model.inputs)
return tf.nest.map_structure(tf.cast, x, input_dtypes)
def prepare_sample_weight_modes(training_endpoints, sample_weight_mode):
"""Prepares sample weight modes for the model.
Args:
training_endpoints: List of model _TrainingEndpoints.
sample_weight_mode: sample weight mode user input passed from compile API.
Raises:
ValueError: In case of invalid `sample_weight_mode` input.
"""
if isinstance(sample_weight_mode, collections.abc.Mapping):
generic_utils.check_for_unexpected_keys(
"sample_weight_mode",
sample_weight_mode,
[e.output_name for e in training_endpoints],
)
for end_point in training_endpoints:
if not end_point.should_skip_target_weights():
if end_point.output_name not in sample_weight_mode:
raise ValueError(
"Output "
+ end_point.output_name
+ "missing from `_sample_weight_modes` dictionary"
)
else:
end_point.sample_weight_mode = sample_weight_mode.get(
end_point.output_name
)
elif isinstance(sample_weight_mode, (list, tuple)):
if len(sample_weight_mode) != len(training_endpoints):
raise ValueError(
"When passing a list as sample_weight_mode, "
"it should have one entry per model output. "
"The model has "
+ str(len(training_endpoints))
+ " outputs, but you passed "
+ str(len(sample_weight_mode))
+ "_sample_weight_modes."
)
for mode, endpoint in zip(sample_weight_mode, training_endpoints):
if not endpoint.should_skip_target_weights():
endpoint.sample_weight_mode = mode
else:
for endpoint in training_endpoints:
if not endpoint.should_skip_target_weights():
endpoint.sample_weight_mode = sample_weight_mode
def prepare_loss_functions(loss, output_names):
"""Converts loss to a list of loss functions.
Args:
loss: String (name of objective function), objective function or
`tf.keras.losses.Loss` instance. See `tf.keras.losses`.
If the model has multiple
outputs, you can use a different loss on each output by passing a
dictionary or a list of losses. The loss value that will be minimized
by the model will then be the sum of all individual losses.
output_names: List of model output names.
Returns:
A list of loss objective functions.
Raises:
ValueError: If loss is a dict with keys not in model output names,
or if loss is a list with len not equal to model outputs.
"""
if isinstance(loss, collections.abc.Mapping):
generic_utils.check_for_unexpected_keys("loss", loss, output_names)
loss_functions = []
for name in output_names:
if name not in loss:
logging.warning(
"Output {0} missing from loss dictionary. We assume "
"this was done on purpose. The fit and evaluate APIs will "
f"not be expecting any data to be passed to {name}."
)
loss_functions.append(get_loss_function(loss.get(name, None)))
elif isinstance(loss, str):
loss_functions = [get_loss_function(loss) for _ in output_names]
elif isinstance(loss, collections.abc.Sequence):
if len(loss) != len(output_names):
raise ValueError(
"When passing a list as loss, it should have one entry "
"per model outputs. The model has {} outputs, but you "
"passed loss={}".format(len(output_names), loss)
)
loss_functions = tf.nest.map_structure(get_loss_function, loss)
else:
loss_functions = [
get_loss_function(loss) for _ in range(len(output_names))
]
return loss_functions
def prepare_loss_weights(training_endpoints, loss_weights=None):
"""Converts loss weights to a list of loss weights.
The result loss weights will be populated on the training endpoint.
Args:
training_endpoints: List of model training endpoints.
loss_weights: Optional list or dictionary specifying scalar coefficients
(Python floats) to weight the loss contributions of different model
outputs. The loss value that will be minimized by the model will then
be the *weighted sum* of all individual losses, weighted by the
`loss_weights` coefficients. If a list, it is expected to have a 1:1
mapping to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
Raises:
ValueError: If loss weight is a dict with key not in model output names,
or if loss is a list with len not equal to model outputs.
"""
if loss_weights is None:
for e in training_endpoints:
e.loss_weight = 1.0
elif isinstance(loss_weights, collections.abc.Mapping):
generic_utils.check_for_unexpected_keys(
"loss_weights",
loss_weights,
[e.output_name for e in training_endpoints],
)
for e in training_endpoints:
e.loss_weight = loss_weights.get(e.output_name, 1.0)
elif isinstance(loss_weights, list):
if len(loss_weights) != len(training_endpoints):
raise ValueError(
"When passing a list as loss_weights, "
"it should have one entry per model output. "
"The model has "
+ str(len(training_endpoints))
+ " outputs, but you passed loss_weights="
+ str(loss_weights)
)
for w, e in zip(loss_weights, training_endpoints):
e.loss_weight = w
else:
raise TypeError(
"Could not interpret loss_weights argument: "
+ str(loss_weights)
+ " - expected a list of dicts."
)
# TODO(rohanj): This is a hack to get around not depending on feature_column and
# create a cyclical dependency. Figure out a cleaner solution
def is_feature_layer(layer):
"""Returns whether `layer` is a FeatureLayer or not."""
return getattr(layer, "_is_feature_layer", False)
def is_eager_dataset_or_iterator(data):
return tf.executing_eagerly() and isinstance(
data, (tf.compat.v1.data.Dataset, tf.data.Dataset, tf.data.Iterator)
)
def get_dataset_graph_def(dataset):
if tf.executing_eagerly():
graph_def_str = dataset._as_serialized_graph().numpy()
else:
graph_def_str = backend.get_value(dataset._as_serialized_graph())
return tf.compat.v1.GraphDef().FromString(graph_def_str)
def verify_dataset_shuffled(x):
"""Verifies that the dataset is shuffled.
Args:
x: Dataset passed as an input to the model.
Returns:
boolean, whether the input dataset is shuffled or not.
"""
assert isinstance(x, tf.data.Dataset)
graph_def = get_dataset_graph_def(x)
for node in graph_def.node:
if node.op.startswith("ShuffleDataset"):
return True
# Also check graph_def.library.function for ds.interleave or ds.flat_map
for function in graph_def.library.function:
for node in function.node_def:
if node.op.startswith("ShuffleDataset"):
return True
logging.warning(
"Expected a shuffled dataset but input dataset `x` is "
"not shuffled. Please invoke `shuffle()` on input dataset."
)
return False
def is_dataset_or_iterator(data):
return isinstance(
data,
(
tf.compat.v1.data.Dataset,
tf.data.Dataset,
tf.compat.v1.data.Iterator,
tf.data.Iterator,
),
)
def get_iterator(dataset):
"""Create and initialize an iterator from a dataset."""
if tf.executing_eagerly():
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
else:
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
initialize_iterator(iterator)
return iterator
def initialize_iterator(iterator):
if not tf.executing_eagerly():
init_op = iterator.initializer
backend.get_session((init_op,)).run(init_op)
def extract_tensors_from_dataset(dataset):
"""Extract tuple of tensors `inputs, targets, sample_weight` from a dataset.
Args:
dataset: Dataset instance.
Returns:
Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
"""
iterator = get_iterator(dataset)
inputs, targets, sample_weight = unpack_iterator_input(iterator)
return inputs, targets, sample_weight
def unpack_iterator_input(iterator):
"""Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.
Args:
iterator: Instance of a dataset iterator.
Returns:
Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
"""
try:
next_element = iterator.get_next()
except tf.errors.OutOfRangeError:
raise RuntimeError(
"Your dataset iterator ran out of data; "
"Make sure that your dataset can generate "
"required number of samples."
)
if isinstance(next_element, (list, tuple)):
if len(next_element) not in [2, 3]:
raise ValueError(
"Please provide model inputs as a list or tuple of 2 or 3 "
"elements: (input, target) or (input, target, sample_weights) "
"Received %s" % next_element
)
if len(next_element) == 2:
x, y = next_element
weights = None
else:
x, y, weights = next_element
else:
x = next_element
y = None
weights = None
return x, y, weights
def infer_steps_for_dataset(
model, dataset, steps, epochs=1, steps_name="steps"
):
"""Infers steps_per_epoch needed to loop through a dataset.
Args:
model: TF-Keras model instance.
dataset: Input data of type tf.data.Dataset.
steps: Number of steps to draw from the dataset (may be None if
unknown).
epochs: Number of times to iterate over the dataset.
steps_name: The string name of the steps argument, either `steps`,
`validation_steps`, or `steps_per_epoch`. Only used for error message
formatting.
Returns:
Integer or `None`. Inferred number of steps to loop through the dataset.
`None` is returned if 1) the size of the dataset is unknown and `steps`
was not specified, or 2) this is multi-worker training and auto sharding
is enabled.
Raises:
ValueError: In case of invalid argument values.
"""
assert isinstance(dataset, tf.data.Dataset)
if model._in_multi_worker_mode() and (
dataset.options().experimental_distribute.auto_shard_policy
!= tf.data.experimental.AutoShardPolicy.OFF
):
# If the dataset would be auto-sharded, we should not infer a local
# steps_per_epoch due to the possible imbalanced sharding between
# workers.
return None
size = backend.get_value(tf.data.experimental.cardinality(dataset))
if size == tf.data.experimental.INFINITE_CARDINALITY and steps is None:
raise ValueError(
"When passing an infinitely repeating dataset, you "
"must specify the `%s` argument." % (steps_name,)
)
if size >= 0:
if steps is not None and steps * epochs > size:
if epochs > 1:
raise ValueError(
"The dataset you passed contains %s batches, but you "
"passed `epochs=%s` and `%s=%s`, which is a total of "
"%s steps. We cannot draw that many steps from this "
"dataset. We suggest to set `%s=%s`."
% (
size,
epochs,
steps_name,
steps,
steps * epochs,
steps_name,
size // epochs,
)
)
else:
raise ValueError(
"The dataset you passed contains %s batches, but you "
"passed `%s=%s`. We cannot draw that many steps from "
"this dataset. We suggest to set `%s=%s`."
% (size, steps_name, steps, steps_name, size)
)
if steps is None:
if size >= 0:
return size
return None
return steps
class ModelInputs:
"""Encapsulates model inputs.
Allows for transforming model inputs while keeping the same structure.
"""
def __init__(self, inputs):
self._inputs = inputs
self._is_dict = isinstance(self._inputs, dict)
self._is_single_input = not isinstance(
self._inputs, (list, tuple, dict)
)
self._flattened_inputs = []
self._input_names = []
if self._is_dict:
for k in sorted(self._inputs.keys()):
self._flattened_inputs.append(self._inputs[k])
self._input_names.append(k)
else:
self._flattened_inputs = tf.nest.flatten(self._inputs)
self._input_names = [
"input_%d" % (i + 1) for i in range(len(self._flattened_inputs))
]
def get_input_names(self):
"""Returns keys to name inputs by.
In case inputs provided were a list, tuple or single entry, we make up a
key 'input_%d'. For dictionary case, we return a sorted list of keys.
"""
return self._input_names
def get_symbolic_inputs(self, return_single_as_list=False):
"""Returns inputs to be set as self.inputs for a model."""
# TODO(karmel): There is a side-effect here where what you get
# with as_list and as_dict depends on whether you have called this
# method first, since it modifies in place.
for i, (k, v) in enumerate(
zip(self._input_names, self._flattened_inputs)
):
if isinstance(v, (list, float, int)):
v = np.asarray(v)
if v.ndim == 1:
v = np.expand_dims(v, 1)
if isinstance(v, np.ndarray):
# We fix the placeholder shape except the batch size.
# This is suboptimal, but it is the best we can do with the info
# we have. The user should call
# `model._set_inputs(placeholders)` to specify custom
# placeholders if the need arises.
shape = (None,) + tuple(v.shape[1:])
if shape == (None,):
shape = (None, 1)
dtype = tf.as_dtype(v.dtype)
if dtype.is_floating:
dtype = backend.floatx()
v = backend.placeholder(shape=shape, name=k, dtype=dtype)
elif isinstance(v, tf.TensorSpec):
shape = (None,) + tuple(v.shape.as_list()[1:])
if shape == (None,):
shape = (None, 1)
v = backend.placeholder(shape=shape, name=k, dtype=v.dtype)
self._flattened_inputs[i] = v
if self._is_dict:
return dict(zip(self._input_names, self._flattened_inputs))
if self._is_single_input and not return_single_as_list:
return self._flattened_inputs[0]
return self._flattened_inputs
def as_dict(self):
"""An iterable over a dictionary version of inputs."""
for k, v in zip(self._input_names, self._flattened_inputs):
yield k, v
def as_list(self):
"""Returning the inputs as a list."""
return self._flattened_inputs
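# Illustrative sketch (not part of the original module; the key names are
# hypothetical): ModelInputs sorts dictionary keys and invents positional
# names ("input_1", "input_2", ...) for list or single inputs.
def _example_model_inputs():  # pragma: no cover - illustrative only
    inputs = ModelInputs({"b": np.zeros((2, 3)), "a": np.ones((2, 1))})
    names = inputs.get_input_names()  # -> ["a", "b"] (sorted keys)
    flat = inputs.as_list()  # values re-ordered to match `names`
    return names, flat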
# Allow use of methods not exposed to the user.
def generic_output_names(outputs_list):
return ["output_%d" % (i + 1) for i in range(len(outputs_list))]
def should_run_validation(validation_freq, epoch):
"""Checks if validation should be run this epoch.
Args:
validation_freq: Integer or list. If an integer, specifies how many
training epochs to run before a new validation run is performed. If a
list, specifies the epochs on which to run validation.
epoch: Integer, the number of the training epoch just completed.
Returns:
Bool, True if validation should be run.
Raises:
ValueError: if `validation_freq` is an Integer and less than 1, or if
it is neither an Integer nor a Sequence.
"""
# `epoch` is 0-indexed internally but 1-indexed in the public API.
one_indexed_epoch = epoch + 1
if isinstance(validation_freq, int):
if validation_freq < 1:
raise ValueError("`validation_freq` can not be less than 1.")
return one_indexed_epoch % validation_freq == 0
if not isinstance(validation_freq, collections.abc.Container):
raise ValueError(
"`validation_freq` must be an Integer or "
"`collections.abc.Container` (e.g. list, tuple, etc.)"
)
return one_indexed_epoch in validation_freq
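# Illustrative sketch (not part of the original module): `epoch` is the
# 0-indexed epoch that just finished, while `validation_freq` counts from 1.
def _example_should_run_validation():  # pragma: no cover - illustrative only
    every_second_epoch = [should_run_validation(2, e) for e in range(4)]
    # -> [False, True, False, True], i.e. validate after epochs 2 and 4.
    on_listed_epochs = should_run_validation([1, 4], 3)
    # -> True, because the just-completed epoch is the 4th one.
    return every_second_epoch, on_listed_epochs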
def split_training_and_validation_data(x, y, sample_weights, validation_split):
"""Split input data into train/eval section based on validation_split."""
if has_symbolic_tensors(x):
raise ValueError(
"If your data is in the form of symbolic tensors, "
"you cannot use `validation_split`."
)
if hasattr(x[0], "shape"):
split_at = int(x[0].shape[0] * (1.0 - validation_split))
else:
split_at = int(len(x[0]) * (1.0 - validation_split))
x, val_x = (
generic_utils.slice_arrays(x, 0, split_at),
generic_utils.slice_arrays(x, split_at),
)
y, val_y = (
generic_utils.slice_arrays(y, 0, split_at),
generic_utils.slice_arrays(y, split_at),
)
if sample_weights:
sample_weights, val_sample_weights = (
generic_utils.slice_arrays(sample_weights, 0, split_at),
generic_utils.slice_arrays(sample_weights, split_at),
)
else:
val_sample_weights = None
return x, y, sample_weights, val_x, val_y, val_sample_weights
def unpack_validation_data(validation_data, raise_if_ambiguous=True):
"""Unpack validation data based input type.
The validation data is not touched if its dataset or dataset iterator.
For other type of input (Numpy or tensor), it will be unpacked into tuple of
3 which is x, y and sample weights.
Args:
validation_data: dataset, dataset iterator, or numpy, tensor tuple.
raise_if_ambiguous: boolean on whether to fail if validation_data cannot
be parsed. Otherwise simply return validation_data, None, None and defer
the decision to the caller.
Returns:
tuple of 3, (x, y, sample_weights) for numpy and tensor input.
"""
if isinstance(
validation_data,
(
tf.compat.v1.data.Iterator,
tf.data.Iterator,
tf.data.Dataset,
data_utils.Sequence,
),
) or not hasattr(validation_data, "__len__"):
val_x = validation_data
val_y = None
val_sample_weight = None
elif len(validation_data) == 2:
try:
(
val_x,
val_y,
) = validation_data
val_sample_weight = None
except ValueError:
val_x, val_y, val_sample_weight = validation_data, None, None
elif len(validation_data) == 3:
try:
(
val_x,
val_y,
val_sample_weight,
) = validation_data
except ValueError:
val_x, val_y, val_sample_weight = validation_data, None, None
else:
if raise_if_ambiguous:
raise ValueError(
"When passing a `validation_data` argument, "
"it must contain either 2 items (x_val, y_val), "
"or 3 items (x_val, y_val, val_sample_weights), "
"or alternatively it could be a dataset or a "
"dataset or a dataset iterator. "
"However we received `validation_data=%s`" % validation_data
)
val_x, val_y, val_sample_weight = validation_data, None, None
return val_x, val_y, val_sample_weight
class TrainingLoop:
"""TrainingLoop is a wrapper class around the training logic.
    This class encapsulates the fit/eval/predict logic for different kinds of
    data input and model configuration.
    Note that TrainingLoop is stateless, which means it doesn't contain any
    internal fields and can be reused with different models and inputs.
"""
def fit(
self,
model,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
**kwargs,
):
"""Train the model with the inputs and targets."""
raise NotImplementedError()
def evaluate(
self,
model,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
**kwargs,
):
"""Returns the loss value & metrics values for the model in test
mode."""
raise NotImplementedError()
def predict(
self,
model,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
**kwargs,
):
raise NotImplementedError()
| tf-keras/tf_keras/engine/training_utils_v1.py/0 | {
"file_path": "tf-keras/tf_keras/engine/training_utils_v1.py",
"repo_id": "tf-keras",
"token_count": 36707
} | 204 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn for sequential input.
NOTE: This API is a work in progress and will likely be changing frequently.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.feature_column import base_feature_layer as kfc
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.experimental.SequenceFeatures")
class SequenceFeatures(kfc._BaseFeaturesLayer):
"""A layer for sequence input.
All `feature_columns` must be sequence dense columns with the same
`sequence_length`. The output of this method can be fed into sequence
networks, such as RNN.
The output of this method is a 3D `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ from
batch to batch.
If multiple `feature_columns` are given with `Di` `num_elements` each, their
outputs are concatenated. So, the final `Tensor` has shape
`[batch_size, T, D0 + D1 + ... + Dn]`.
Example:
```python
import tensorflow as tf
# Behavior of some cells or feature columns may depend on whether we are in
# training or inference mode, e.g. applying dropout.
training = True
rating = tf.feature_column.sequence_numeric_column('rating')
watches = tf.feature_column.sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = tf.feature_column.embedding_column(watches,
dimension=10)
columns = [rating, watches_embedding]
features = {
'rating': tf.sparse.from_dense([[1.0,1.1, 0, 0, 0],
[2.0,2.1,2.2, 2.3, 2.5]]),
'watches': tf.sparse.from_dense([[2, 85, 0, 0, 0],[33,78, 2, 73, 1]])
}
sequence_input_layer = tf.keras.experimental.SequenceFeatures(columns)
sequence_input, sequence_length = sequence_input_layer(
features, training=training)
sequence_length_mask = tf.sequence_mask(sequence_length)
hidden_size = 32
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
"""
    def __init__(self, feature_columns, trainable=True, name=None, **kwargs):
        """Constructs a SequenceFeatures layer.
Args:
feature_columns: An iterable of dense sequence columns. Valid columns
are
- `embedding_column` that wraps a
`sequence_categorical_column_with_*`
- `sequence_numeric_column`.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the SequenceFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: If any of the `feature_columns` is not a
`SequenceDenseColumn`.
"""
super().__init__(
feature_columns=feature_columns,
trainable=trainable,
name=name,
expected_column_type=tf.__internal__.feature_column.SequenceDenseColumn, # noqa: E501
**kwargs
)
@property
def _is_feature_layer(self):
return True
def _target_shape(self, input_shape, total_elements):
return (input_shape[0], input_shape[1], total_elements)
def call(self, features, training=None):
"""Returns sequence input corresponding to the `feature_columns`.
Args:
features: A dict mapping keys to tensors.
      training: Python boolean or None, indicating whether the layer is
being run in training mode. This argument is passed to the call
method of any `FeatureColumn` that takes a `training` argument. For
example, if a `FeatureColumn` performed dropout, the column could
expose a `training` argument to control whether the dropout should
be applied. If `None`, becomes `tf.keras.backend.learning_phase()`.
Defaults to `None`.
Returns:
An `(input_layer, sequence_length)` tuple where:
- input_layer: A float `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could
differ from batch to batch. `D` is the sum of `num_elements` for
all `feature_columns`.
- sequence_length: An int `Tensor` of shape `[batch_size]`. The
sequence length for each example.
Raises:
ValueError: If features are not a dictionary.
"""
if not isinstance(features, dict):
            raise ValueError(
                f"We expected a dictionary here. Instead we got: {features}"
            )
if training is None:
training = backend.learning_phase()
transformation_cache = (
tf.__internal__.feature_column.FeatureTransformationCache(features)
)
output_tensors = []
sequence_lengths = []
for column in self._feature_columns:
with backend.name_scope(column.name):
try:
(
dense_tensor,
sequence_length,
) = column.get_sequence_dense_tensor(
transformation_cache,
self._state_manager,
training=training,
)
except TypeError:
(
dense_tensor,
sequence_length,
) = column.get_sequence_dense_tensor(
transformation_cache, self._state_manager
)
# Flattens the final dimension to produce a 3D Tensor.
output_tensors.append(
self._process_dense_tensor(column, dense_tensor)
)
sequence_lengths.append(sequence_length)
# Check and process sequence lengths.
kfc._verify_static_batch_size_equality(
sequence_lengths, self._feature_columns
)
sequence_length = _assert_all_equal_and_return(sequence_lengths)
return self._verify_and_concat_tensors(output_tensors), sequence_length
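# Each sequence column reports its own per-example sequence length; their dense
# outputs can only be concatenated along the feature axis when those lengths
# agree, which `_assert_all_equal_and_return` (defined below) enforces with
# runtime assert ops.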
def _assert_all_equal_and_return(tensors, name=None):
"""Asserts that all tensors are equal and returns the first one."""
with backend.name_scope(name or "assert_all_equal"):
if len(tensors) == 1:
return tensors[0]
assert_equal_ops = []
for t in tensors[1:]:
assert_equal_ops.append(tf.compat.v1.assert_equal(tensors[0], t))
with tf.control_dependencies(assert_equal_ops):
return tf.identity(tensors[0])
| tf-keras/tf_keras/feature_column/sequence_feature_column.py/0 | {
"file_path": "tf-keras/tf_keras/feature_column/sequence_feature_column.py",
"repo_id": "tf-keras",
"token_count": 3204
} | 205 |
"""Test Model.fit across a diverse range of models."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.integration_test.models import bert
from tf_keras.integration_test.models import dcgan
from tf_keras.integration_test.models import edge_case_model
from tf_keras.integration_test.models import efficientnet_v2
from tf_keras.integration_test.models import input_spec
from tf_keras.integration_test.models import low_level_model
from tf_keras.integration_test.models import mini_unet
from tf_keras.integration_test.models import mini_xception
from tf_keras.integration_test.models import retinanet
from tf_keras.integration_test.models import structured_data_classification
from tf_keras.integration_test.models import text_classification
from tf_keras.integration_test.models import timeseries_forecasting
from tf_keras.integration_test.models import vae
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# from tf_keras.integration_test.models import ctc_speech_rnn
# from tf_keras.integration_test.models import translation
def get_dataset(data_specs, batch_size):
values = tf.nest.map_structure(input_spec.spec_to_value, data_specs)
dataset = (
tf.data.Dataset.from_tensor_slices(values)
.prefetch(batch_size * 2)
.batch(batch_size)
)
return dataset
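# Illustrative example: with batch_size=4 the tests below request specs for 12
# examples, `input_spec.spec_to_value` materializes random tensors matching
# each spec, and the resulting dataset yields three batches of four per epoch.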
@test_utils.run_v2_only
class FitTest(test_combinations.TestCase):
@parameterized.named_parameters(
("bert", bert),
# ("ctc_speech_rnn", ctc_speech_rnn), # Buggy?
("dcgan", dcgan),
("edge_case_model", edge_case_model),
("efficientnet_v2", efficientnet_v2),
("low_level_model", low_level_model),
("mini_unet", mini_unet),
("mini_xception", mini_xception),
("retinanet", retinanet),
("structured_data_classification", structured_data_classification),
("text_classification", text_classification),
("timeseries_forecasting", timeseries_forecasting),
# ("translation", translation), # Buggy?
("vae", vae),
)
def test_fit_on_all_models_with_sync_preprocessing(self, module):
batch_size = 4
data_specs = module.get_data_spec(batch_size * 3)
dataset = get_dataset(data_specs, batch_size)
model = module.get_model(
build=True,
compile=True,
jit_compile=False,
include_preprocessing=True,
)
model.fit(dataset, epochs=1)
@parameterized.named_parameters(
("bert", bert),
# ("ctc_speech_rnn", ctc_speech_rnn), # Buggy?
("dcgan", dcgan),
("edge_case_model", edge_case_model),
("efficientnet_v2", efficientnet_v2),
("low_level_model", low_level_model),
# ("mini_unet", mini_unet), # Not XLA compatible b/c of UpSampling2D
("mini_xception", mini_xception),
# ("retinanet", retinanet), # Not XLA compatible b/c of UpSampling2D
("structured_data_classification", structured_data_classification),
("text_classification", text_classification),
("timeseries_forecasting", timeseries_forecasting),
# ("translation", translation), # Buggy?
("vae", vae),
)
def test_fit_on_all_models_with_async_preprocessing_and_xla(self, module):
batch_size = 4
data_specs = module.get_data_spec(batch_size * 3)
dataset = get_dataset(data_specs, batch_size)
preprocessor = module.get_input_preprocessor()
if preprocessor is not None:
dataset = dataset.map(lambda x, y: (preprocessor(x), y))
model = module.get_model(
build=True,
compile=True,
jit_compile=True,
include_preprocessing=False,
)
model.fit(dataset, epochs=1)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/integration_test/fit_test.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/fit_test.py",
"repo_id": "tf-keras",
"token_count": 1629
} | 206 |
"""Mini-Xception classification model.
Adapted from https://keras.io/examples/vision/image_classification_from_scratch/
"""
from tensorflow import keras
from tf_keras.integration_test.models.input_spec import InputSpec
IMG_SIZE = (120, 120)
def get_data_spec(batch_size):
return (
InputSpec((batch_size,) + IMG_SIZE + (3,)),
InputSpec((batch_size, 1), dtype="int32", range=[0, 2]),
)
def get_input_preprocessor():
return keras.Sequential(
[
keras.layers.RandomFlip(),
keras.layers.RandomRotation(0.2),
keras.layers.RandomZoom(0.2),
keras.layers.Rescaling(1.0 / 255),
]
)
def get_model(
build=False, compile=False, jit_compile=False, include_preprocessing=True
):
inputs = keras.Input(shape=IMG_SIZE + (3,))
if include_preprocessing:
x = get_input_preprocessor()(inputs)
else:
x = inputs
x = keras.layers.Conv2D(32, 3, strides=2, padding="same")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation("relu")(x)
x = keras.layers.Conv2D(64, 3, padding="same")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation("relu")(x)
previous_block_activation = x
for size in [128, 256, 512, 728]:
x = keras.layers.Activation("relu")(x)
x = keras.layers.SeparableConv2D(size, 3, padding="same")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation("relu")(x)
x = keras.layers.SeparableConv2D(size, 3, padding="same")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.MaxPooling2D(3, strides=2, padding="same")(x)
residual = keras.layers.Conv2D(size, 1, strides=2, padding="same")(
previous_block_activation
)
x = keras.layers.add([x, residual])
previous_block_activation = x
x = keras.layers.SeparableConv2D(1024, 3, padding="same")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation("relu")(x)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.5)(x)
outputs = keras.layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
if compile:
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"],
jit_compile=jit_compile,
)
return model
def get_custom_objects():
return {}
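# A minimal smoke-test sketch, assuming a local TensorFlow install; the random
# inputs below are illustrative and not part of the integration-test pipeline.
if __name__ == "__main__":
    import numpy as np

    model = get_model(build=True, compile=True, include_preprocessing=True)
    x = np.random.random((8,) + IMG_SIZE + (3,)).astype("float32")
    y = np.random.randint(0, 2, size=(8, 1))
    model.fit(x, y, epochs=1, verbose=0)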
| tf-keras/tf_keras/integration_test/models/mini_xception.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/models/mini_xception.py",
"repo_id": "tf-keras",
"token_count": 1164
} | 207 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities for our TF-Keras preprocessing integration tests."""
import os
import tensorflow.compat.v2 as tf
preprocessing = tf.keras.layers
BATCH_SIZE = 64
DS_SIZE = BATCH_SIZE * 16
STEPS = DS_SIZE / BATCH_SIZE
VOCAB_SIZE = 100
def make_dataset():
"""Make a simple structured dataset.
The dataset contains three feature columns.
- float_col: an unnormalized numeric column.
    - int_col: a column of integer IDs.
- string_col: a column of fixed vocabulary terms.
Returns:
The dataset.
"""
tf.random.set_seed(197011)
floats = tf.random.uniform((DS_SIZE, 1), maxval=10, dtype="float32")
    # Generate 100 unique integer values, but over a wide range to showcase a
# common use case for IntegerLookup.
ints = tf.random.uniform((DS_SIZE, 1), maxval=VOCAB_SIZE, dtype="int64")
ints = ints * 1000
# Use a fixed vocabulary of strings from 0 to 99, to showcase loading a
# vocabulary from a file.
strings = tf.random.uniform((DS_SIZE, 1), maxval=VOCAB_SIZE, dtype="int64")
strings = tf.strings.as_string(strings)
features = {"float_col": floats, "int_col": ints, "string_col": strings}
# Random binary label.
labels = tf.random.uniform((DS_SIZE, 1), maxval=2, dtype="int64")
ds = tf.data.Dataset.from_tensor_slices((features, labels))
return ds
def make_preprocessing_model(file_dir):
"""Make a standalone preprocessing model."""
# The name of our keras.Input should match the column name in the dataset.
float_in = tf.keras.Input(shape=(1,), dtype="float32", name="float_col")
int_in = tf.keras.Input(shape=(1,), dtype="int64", name="int_col")
string_in = tf.keras.Input(shape=(1,), dtype="string", name="string_col")
# We need to batch a dataset before adapting.
ds = make_dataset().batch(BATCH_SIZE)
# Normalize floats by adapting the mean and variance of the input.
normalization = preprocessing.Normalization()
normalization.adapt(ds.map(lambda features, labels: features["float_col"]))
float_out = normalization(float_in)
# Lookup ints by adapting a vocab of integer IDs.
int_lookup = preprocessing.IntegerLookup()
int_lookup.adapt(ds.map(lambda features, labels: features["int_col"]))
int_out = int_lookup(int_in)
# Lookup strings from a fixed file based vocabulary.
string_vocab = list(str(i) for i in range(VOCAB_SIZE))
vocab_file = os.path.join(file_dir, "vocab_file.txt")
with open(vocab_file, "w") as f:
f.write("\n".join(string_vocab))
string_lookup = preprocessing.StringLookup(vocabulary=vocab_file)
string_out = string_lookup(string_in)
return tf.keras.Model(
inputs=(float_in, int_in, string_in),
outputs=(float_out, int_out, string_out),
)
def make_training_model():
"""Make a trainable model for the preprocessed inputs."""
float_in = tf.keras.Input(shape=(1,), dtype="float32", name="float_col")
# After preprocessing, both the string and int column are integer ready for
# embedding.
int_in = tf.keras.Input(shape=(1,), dtype="int64", name="int_col")
string_in = tf.keras.Input(shape=(1,), dtype="int64", name="string_col")
# Feed the lookup layers into an embedding.
int_embedding = tf.keras.layers.Embedding(VOCAB_SIZE + 1, 8, input_length=1)
int_out = int_embedding(int_in)
int_out = tf.keras.layers.Flatten()(int_out)
string_embedding = tf.keras.layers.Embedding(
VOCAB_SIZE + 1, 8, input_length=1
)
string_out = string_embedding(string_in)
string_out = tf.keras.layers.Flatten()(string_out)
# Concatenate outputs.
    concatenate = tf.keras.layers.Concatenate()
    # Feed our preprocessed inputs into a simple MLP.
    x = concatenate((float_in, int_out, string_out))
x = tf.keras.layers.Dense(32, activation="relu")(x)
x = tf.keras.layers.Dense(32, activation="relu")(x)
    outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)
return tf.keras.Model(inputs=(float_in, int_in, string_in), outputs=outputs)
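# Illustrative sketch of how these helpers fit together (the integration tests
# that actually consume them live elsewhere; `file_dir` below is hypothetical):
#
#   ds = make_dataset().batch(BATCH_SIZE)
#   preprocessing_model = make_preprocessing_model(file_dir)
#   training_model = make_training_model()
#   training_model.compile(optimizer="adam", loss="binary_crossentropy")
#   # Apply preprocessing inside the tf.data pipeline, then train on the
#   # already-preprocessed features.
#   preprocessed_ds = ds.map(lambda x, y: (preprocessing_model(x), y))
#   training_model.fit(preprocessed_ds)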
| tf-keras/tf_keras/integration_test/preprocessing_test_utils.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/preprocessing_test_utils.py",
"repo_id": "tf-keras",
"token_count": 1711
} | 208 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests AdditiveAttention layer."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.mixed_precision import policy
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
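# The hand-computed expectations in the tests below follow the additive
# (Bahdanau-style) attention score: a sum over the feature axis of
# scale * tanh(query + key), followed by a softmax over the keys with masked
# positions zeroed out.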
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class AdditiveAttentionTest(tf.test.TestCase, parameterized.TestCase):
def test_calculate_scores_one_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = keras.layers.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
expected = np.array([[[0.49550372683]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_multi_dim(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32
)
# Key tensor of shape [1, 3, 4]
k = np.array(
[
[
[1.5, 1.6, 1.7, 1.8],
[2.5, 2.6, 2.7, 2.8],
[3.5, 3.6, 3.7, 3.8],
]
],
dtype=np.float32,
)
attention_layer = keras.layers.AdditiveAttention()
attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
# Scale tensor of shape [4]
attention_layer.scale = np.array(
[[[0.5, 0.6, 0.7, 0.8]]], dtype=np.float32
)
actual = attention_layer._calculate_scores(query=q, key=k)
# expected000 = 0.5*tanh(1.+1.5) + 0.6*tanh(1.1+1.6) + \
# 0.7*tanh(1.2+1.7) + 0.8*tanh(1.3+1.8) = 2.58044532581
# expected001 = 0.5*tanh(1.+2.5) + 0.6*tanh(1.1+2.6) + \
# 0.7*tanh(1.2+2.7) + 0.8*tanh(1.3+2.8) = 2.59734317449
# expected002 = 0.5*tanh(1.+3.5) + 0.6*tanh(1.1+3.6) + \
# 0.7*tanh(1.2+3.7) + 0.8*tanh(1.3+3.8) = 2.59964024652
# expected010 = 0.5*tanh(2.+1.5) + 0.6*tanh(2.1+1.6) + \
# 0.7*tanh(2.2+1.7) + 0.8*tanh(2.3+1.8) = 2.59734317449
# expected011 = 0.5*tanh(2.+2.5) + 0.6*tanh(2.1+2.6) + \
# 0.7*tanh(2.2+2.7) + 0.8*tanh(2.3+2.8) = 2.59964024652
# expected012 = 0.5*tanh(2.+3.5) + 0.6*tanh(2.1+3.6) + \
# 0.7*tanh(2.2+3.7) + 0.8*tanh(2.3+3.8) = 2.59995130916
expected = np.array(
[
[
[2.58044532581, 2.59734317449, 2.59964024652],
[2.59734317449, 2.59964024652, 2.59995130916],
]
],
dtype=np.float32,
)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_batch_size_two(self):
# Query tensor of shape [2, 1, 1]
q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Key tensor of shape [2, 1, 1]
k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
attention_layer = keras.layers.AdditiveAttention()
attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [2, 1, 1].
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
# expected100 = 0.5 * tanh(2.1 + 2.6) = 0.49991728277
expected = np.array(
[[[0.49550372683]], [[0.49991728277]]], dtype=np.float32
)
self.assertAllClose(expected, actual)
def test_shape(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32
)
# Value tensor of shape [1, 3, 4]
v = np.array(
[
[
[1.5, 1.6, 1.7, 1.8],
[2.5, 2.6, 2.7, 2.8],
[3.5, 3.6, 3.7, 3.8],
]
],
dtype=np.float32,
)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = keras.layers.AdditiveAttention()
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, tf.shape(actual))
def test_shape_no_scale(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32
)
# Value tensor of shape [1, 3, 4]
v = np.array(
[
[
[1.5, 1.6, 1.7, 1.8],
[2.5, 2.6, 2.7, 2.8],
[3.5, 3.6, 3.7, 3.8],
]
],
dtype=np.float32,
)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = keras.layers.AdditiveAttention(use_scale=False)
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, tf.shape(actual))
def test_shape_with_key(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1.0, 1.1, 1.2, 1.3], [2.0, 2.1, 2.2, 2.3]]], dtype=np.float32
)
# Value tensor of shape [1, 3, 4]
v = np.array(
[
[
[1.5, 1.6, 1.7, 1.8],
[2.5, 2.6, 2.7, 2.8],
[3.5, 3.6, 3.7, 3.8],
]
],
dtype=np.float32,
)
# Key tensor of shape [1, 3, 4]
k = np.array(
[
[
[1.5, 1.6, 1.7, 1.8],
[2.5, 2.6, 2.7, 2.8],
[3.5, 3.6, 3.7, 3.8],
]
],
dtype=np.float32,
)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = keras.layers.AdditiveAttention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, tf.shape(actual))
def test_multi_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = keras.layers.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v], mask=[None, v_mask])
# Expected scores of shape [1, 1, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6),
# 0.5 * tanh(1.1 + 0.7),
# 0.5 * tanh(1.1 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
# = 1.15497245968
expected = np.array([[[1.15497245968]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_key(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 1]
k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = keras.layers.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v, k], mask=[None, v_mask])
# Expected scores of shape [1, 1, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6),
# 0.5 * tanh(1.1 + 0.7),
# 0.5 * tanh(1.1 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.50552495521 * 0.5 + 0.49447504478 * 0.8 - 0 * 0.3
# = 0.64834251342
expected = np.array([[[0.64834251342]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_query_mask(self):
# Query tensor of shape [1, 2, 1]
q = np.array([[[1.1], [-0.5]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Query mask tensor of shape [1, 2]
q_mask = np.array([[True, False]], dtype=np.bool_)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = keras.layers.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v], mask=[q_mask, v_mask])
# Expected scores of shape [1, 2, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6),
# 0.5 * tanh(1.1 + 0.7),
# 0.5 * tanh(1.1 - 0.8)],
# [0.5 * tanh(-0.5 + 1.6),
# 0.5 * tanh(-0.5 + 0.7),
# 0.5 * tanh(-0.5 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622],
# [0.40024951088, 0.09868766011, -0.43086157965]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
# => attention_distribution010
# = exp(0.40024951088)/(exp(0.40024951088) + exp(0.09868766011))
# = 0.57482427975
# attention_distribution011
# = exp(0.09868766011)/(exp(0.40024951088) + exp(0.09868766011))
# = 0.42517572025
# attention_distribution012 = 0
#
# Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
# expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
# = 1.15497245968
        # expected010 = 0
expected = np.array([[[1.15497245968], [0.0]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_serialization(self):
# Test serialization with use_scale
layer = keras.layers.AdditiveAttention(use_scale=True)
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.use_scale, True)
config = layer.get_config()
new_layer = keras.layers.AdditiveAttention.from_config(config)
self.assertEqual(new_layer.use_scale, True)
@test_utils.enable_v2_dtype_behavior
def test_mixed_float16_policy(self):
# Test case for GitHub issue:
# https://github.com/tensorflow/tensorflow/issues/46064
with policy.policy_scope("mixed_float16"):
q = tf.cast(tf.random.uniform((2, 3, 4), seed=1), "float16")
v = tf.cast(tf.random.uniform((2, 3, 4), seed=2), "float16")
k = tf.cast(tf.random.uniform((2, 3, 4), seed=3), "float16")
layer = keras.layers.AdditiveAttention()
_ = layer([q, v, k], use_causal_mask=True)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/attention/additive_attention_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/attention/additive_attention_test.py",
"repo_id": "tf-keras",
"token_count": 7680
} | 209 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras 3D convolution layer."""
from tf_keras import activations
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.dtensor import utils
from tf_keras.layers.convolutional.base_conv import Conv
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Conv3D", "keras.layers.Convolution3D")
class Conv3D(Conv):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel,
in `data_format="channels_last"`.
Examples:
>>> # The inputs are 28x28x28 volumes with a single channel, and the
>>> # batch size is 4
>>> input_shape =(4, 28, 28, 28, 1)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv3D(
... 2, 3, activation='relu', input_shape=input_shape[1:])(x)
>>> print(y.shape)
(4, 26, 26, 26, 2)
>>> # With extended batch shape [4, 7], e.g. a batch of 4 videos of
>>> # 3D frames, with 7 frames per video.
>>> input_shape = (4, 7, 28, 28, 28, 1)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.Conv3D(
... 2, 3, activation='relu', input_shape=input_shape[2:])(x)
>>> print(y.shape)
(4, 7, 26, 26, 26, 2)
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the depth,
height and width of the 3D convolution window. Can be a single integer
to specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 3 integers, specifying the strides of
the convolution along each spatial dimension. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
value != 1 is incompatible with specifying any `dilation_rate` value !=
1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros
evenly to the left/right or up/down of the input such that output has
the same height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `batch_shape +
(spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`channels_first` corresponds to inputs with shape `batch_shape +
(channels, spatial_dim1, spatial_dim2, spatial_dim3)`. When unspecified,
uses `image_data_format` value found in your TF-Keras config file at
`~/.keras/keras.json` (if exists) else 'channels_last'. Note that the
`channels_first` format is currently not supported by TensorFlow on CPU.
Defaults to 'channels_last'.
dilation_rate: an integer or tuple/list of 3 integers, specifying the
dilation rate to use for dilated convolution. Can be a single integer to
specify the same value for all spatial dimensions. Currently, specifying
any `dilation_rate` value != 1 is incompatible with specifying any
stride value != 1.
groups: A positive integer specifying the number of groups in which the
input is split along the channel axis. Each group is convolved
separately with `filters / groups` filters. The output is the
concatenation of all the `groups` results along the channel axis. Input
channels and `filters` must both be divisible by `groups`.
activation: Activation function to use. If you don't specify anything, no
activation is applied (see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (see
`keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector (see
`keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (see
`keras.regularizers`).
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (see
`keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (see
`keras.constraints`).
Input shape:
5+D tensor with shape: `batch_shape + (channels, conv_dim1, conv_dim2,
conv_dim3)` if data_format='channels_first'
or 5+D tensor with shape: `batch_shape + (conv_dim1, conv_dim2, conv_dim3,
channels)` if data_format='channels_last'.
Output shape:
5+D tensor with shape: `batch_shape + (filters, new_conv_dim1,
new_conv_dim2, new_conv_dim3)` if data_format='channels_first'
or 5+D tensor with shape: `batch_shape + (new_conv_dim1, new_conv_dim2,
new_conv_dim3, filters)` if data_format='channels_last'.
`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
changed due to padding.
Returns:
A tensor of rank 5+ representing
`activation(conv3d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides > 1` and `dilation_rate > 1`.
"""
@utils.allow_initializer_layout
def __init__(
self,
filters,
kernel_size,
strides=(1, 1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1, 1),
groups=1,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs
):
super().__init__(
rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs
)
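
    # Note on output sizes: with "valid" padding each spatial dimension becomes
    # floor((input_dim - dilation_rate * (kernel_size - 1) - 1) / stride) + 1,
    # and with "same" padding it becomes ceil(input_dim / stride); e.g. the
    # docstring example above gives (28 - 3) // 1 + 1 = 26.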
# Alias
Convolution3D = Conv3D
| tf-keras/tf_keras/layers/convolutional/conv3d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/convolutional/conv3d.py",
"repo_id": "tf-keras",
"token_count": 3080
} | 210 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras-based einsum dense layer."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.layers.core import einsum_dense
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
{
"testcase_name": "_1d_end_weight",
"equation": "ab,b->a",
"bias_axes": None,
"input_shape": (None, 32),
"output_shape": [],
"expected_weight_shape": [32],
"expected_bias_shape": None,
"expected_output_shape": (None,),
},
{
"testcase_name": "_2d_middle_weight",
"equation": "ab,bc->ac",
"bias_axes": None,
"input_shape": (None, 32),
"output_shape": (64),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, 64),
},
{
"testcase_name": "_3d_bert",
"equation": "abc,cde->abde",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (1, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 3, 4),
},
{
"testcase_name": "_3d_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "e",
"input_shape": (None, 1, 2),
"output_shape": (1, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [4],
"expected_output_shape": (None, 1, 3, 4),
},
{
"testcase_name": "_3d_2_bias",
"equation": "abc,cde->abde",
"bias_axes": "d",
"input_shape": (None, 1, 2),
"output_shape": (1, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [3, 1],
"expected_output_shape": (None, 1, 3, 4),
},
{
"testcase_name": "_3d_1_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "be",
"input_shape": (None, 7, 2),
"output_shape": (7, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [7, 1, 4],
"expected_output_shape": (None, 7, 3, 4),
},
{
"testcase_name": "_3d_bert_projection",
"equation": "BFNH,NHD->BFD",
"bias_axes": None,
"input_shape": (None, 1, 2, 3),
"output_shape": (1, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 4),
},
{
"testcase_name": "_2d_bert",
"equation": "abc,cd->abd",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (1, 4),
"expected_weight_shape": [2, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 4),
},
{
"testcase_name": "_embedding_1d",
"equation": "i,d->id",
"bias_axes": None,
"input_shape": (None,),
"output_shape": (2),
"expected_weight_shape": [2],
"expected_bias_shape": None,
"expected_output_shape": (None, 2),
},
{
"testcase_name": "_xlnet_lm",
"equation": "ibd,nd->ibn",
"bias_axes": None,
"input_shape": (None, None, 1),
"output_shape": (None, 2),
"expected_weight_shape": [2, 1],
"expected_bias_shape": None,
"expected_output_shape": (None, None, 2),
},
{
"testcase_name": "_2d_precast",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (None, 32),
"output_shape": (64),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, 64),
},
{
"testcase_name": "_2d_precast_elided_input_used_in_output",
"equation": "...bc,bc->...b",
"bias_axes": None,
"input_shape": (None, 32, 64),
"output_shape": (32),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, 32),
},
{
"testcase_name": "_2d_precast_multiple_elided_dims",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (None, None, 32),
"output_shape": (64),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, None, 64),
},
{
"testcase_name": "_3d_precast",
"equation": "...c,cde->...de",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 3, 4),
},
{
"testcase_name": "_3d_precast_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "e",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [4],
"expected_output_shape": (None, 1, 3, 4),
},
{
"testcase_name": "_3d_precast_2_bias",
"equation": "...c,cde->...de",
"bias_axes": "d",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [3, 1],
"expected_output_shape": (None, 1, 3, 4),
},
{
"testcase_name": "_3d_precast_2_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "de",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [3, 4],
"expected_output_shape": (None, 1, 3, 4),
},
{
"testcase_name": "_2d_postcast",
"equation": "bc...,cd->bd...",
"bias_axes": None,
"input_shape": (None, 1, 2, 3),
"output_shape": (4),
"expected_weight_shape": [1, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 4, 2, 3),
},
{
"testcase_name": "_3d_postcast",
"equation": "bc...,cde->bde...",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 3, 4, 2),
},
{
"testcase_name": "_3d_postcast_1_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "d",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": [3, 1, 1],
"expected_output_shape": (None, 3, 4, 2),
},
{
"testcase_name": "_3d_postcast_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "e",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": [4, 1],
"expected_output_shape": (None, 3, 4, 2),
},
{
"testcase_name": "_3d_postcast_1_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "de",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": [3, 4, 1],
"expected_output_shape": (None, 3, 4, 2),
},
)
class TestEinsumDenseLayer(test_combinations.TestCase):
def test_weight_shapes(
self,
equation,
bias_axes,
input_shape,
output_shape,
expected_weight_shape,
expected_bias_shape,
expected_output_shape,
):
del expected_output_shape # Not used in this test.
weight_shape, bias_shape, _ = einsum_dense._analyze_einsum_string(
equation, bias_axes, input_shape, output_shape
)
self.assertAllEqual(expected_weight_shape, weight_shape)
self.assertAllEqual(expected_bias_shape, bias_shape)
def test_layer_creation(
self,
equation,
bias_axes,
input_shape,
output_shape,
expected_weight_shape,
expected_bias_shape,
expected_output_shape,
):
# TF-Keras elides the 0-dimension of the input shape when constructing
# inputs.
non_batch_input_shape = list(input_shape)[1:]
input_tensor = keras.Input(shape=non_batch_input_shape)
layer = einsum_dense.EinsumDense(
equation=equation, output_shape=output_shape, bias_axes=bias_axes
)
output_tensor = layer(input_tensor)
self.assertAllEqual(expected_weight_shape, layer.kernel.shape.as_list())
if expected_bias_shape is None:
self.assertIsNone(layer.bias)
else:
self.assertAllEqual(expected_bias_shape, layer.bias.shape.as_list())
self.assertAllEqual(
expected_output_shape, output_tensor.shape.as_list()
)
@test_combinations.run_all_keras_modes
class TestEinsumLayerAPI(test_combinations.TestCase):
def test_layer_api(self):
input_data = np.array([[1.0, 2.0], [3.0, 4.0]])
kwargs = {
"equation": "...b,bc->...c",
"bias_axes": "c",
"output_shape": 4,
"bias_initializer": keras.initializers.constant(0.03),
"kernel_initializer": keras.initializers.constant(0.5),
"dtype": input_data.dtype,
}
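        # With every kernel entry initialized to 0.5 and the bias to 0.03,
        # each output unit equals 0.5 * (x1 + x2) + 0.03, i.e.
        # 0.5 * (1 + 2) + 0.03 = 1.53 and 0.5 * (3 + 4) + 0.03 = 3.53 below.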
expected_output = np.array(
[[1.53, 1.53, 1.53, 1.53], [3.53, 3.53, 3.53, 3.53]]
)
output_data = test_utils.layer_test(
einsum_dense.EinsumDense,
kwargs=kwargs,
input_shape=(None, 2),
input_data=input_data,
)
self.assertAllClose(expected_output, output_data)
def test_unspecified_bias_dim_fails(self):
input_tensor = keras.Input(shape=(32,))
layer = einsum_dense.EinsumDense(
equation="ab,bc->ac", output_shape=64, bias_axes="y"
)
with self.assertRaisesRegex(
ValueError, ".*is not part of the output spec.*"
):
_ = layer(input_tensor)
def test_incompatible_input_output_shape_fails(self):
input_tensor = keras.Input(shape=(32, 64))
layer = einsum_dense.EinsumDense(
equation="abc,cd->abd", output_shape=(10, 96)
)
with self.assertRaisesRegex(
ValueError,
".*Input shape and output shape do not match at shared "
"dimension 'b'.*",
):
_ = layer(input_tensor)
def test_unspecified_output_dim_fails(self):
input_tensor = keras.Input(shape=(32,))
layer = einsum_dense.EinsumDense(equation="ab,bc->cd", output_shape=64)
with self.assertRaisesRegex(
ValueError,
".*Dimension 'd' was specified in the output 'cd' but has "
"no corresponding dim.*",
):
_ = layer(input_tensor)
def test_unspecified_weight_dim_fails(self):
input_tensor = keras.Input(shape=(32,))
layer = einsum_dense.EinsumDense(equation="ab,zd->ad", output_shape=64)
with self.assertRaisesRegex(
ValueError, ".*Weight dimension 'z' did not have a match "
):
_ = layer(input_tensor)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/core/einsum_dense_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/core/einsum_dense_test.py",
"repo_id": "tf-keras",
"token_count": 6058
} | 211 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kernelized.py."""
import functools
import math
import os
import shutil
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import backend as keras_backend
from tf_keras import initializers
from tf_keras.engine import base_layer_utils
from tf_keras.engine import input_layer
from tf_keras.engine import training
from tf_keras.layers import kernelized as kernel_layers
from tf_keras.saving.legacy import save
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import kernelized_utils
# isort: off
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
def _exact_gaussian(stddev):
return functools.partial(
kernelized_utils.exact_gaussian_kernel, stddev=stddev
)
def _exact_laplacian(stddev):
return functools.partial(
kernelized_utils.exact_laplacian_kernel, stddev=stddev
)
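# Background for the approximation checks below: RandomFourierFeatures maps an
# input x to cos(W x + b) features such that, after scaling by
# sqrt(2 / output_dim), inner products of mapped inputs approximate the target
# shift-invariant kernel K(x, y), with the error shrinking as output_dim grows.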
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class RandomFourierFeaturesTest(tf.test.TestCase, parameterized.TestCase):
def _assert_all_close(self, expected, actual, atol=0.001):
if not tf.executing_eagerly():
with self.cached_session() as sess:
keras_backend._initialize_variables(sess)
self.assertAllClose(expected, actual, atol=atol)
else:
self.assertAllClose(expected, actual, atol=atol)
@test_utils.run_v2_only
def test_state_saving_and_loading(self):
with self.cached_session():
input_data = np.random.random((1, 2))
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10, scale=3.0
)
inputs = input_layer.Input((2,))
outputs = rff_layer(inputs)
model = training.Model(inputs, outputs)
output_data = model.predict(input_data)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
saved_model_dir = os.path.join(temp_dir, "rff_model")
model.save(saved_model_dir)
new_model = save.load_model(saved_model_dir)
new_output_data = new_model.predict(input_data)
self.assertAllClose(output_data, new_output_data, atol=1e-4)
def test_invalid_output_dim(self):
with self.assertRaisesRegex(
ValueError, "`output_dim` should be a positive integer"
):
_ = kernel_layers.RandomFourierFeatures(output_dim=-3, scale=2.0)
def test_unsupported_kernel_type(self):
with self.assertRaisesRegex(
ValueError, "Unsupported `kernel_initializer`"
):
_ = kernel_layers.RandomFourierFeatures(
3, "unsupported_kernel", stddev=2.0
)
def test_invalid_scale(self):
with self.assertRaisesRegex(
ValueError, "When provided, `scale` should be a positive float"
):
_ = kernel_layers.RandomFourierFeatures(output_dim=10, scale=0.0)
def test_invalid_input_shape(self):
inputs = tf.random.uniform((3, 2, 4), seed=1)
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10, scale=3.0
)
with self.assertRaisesRegex(
ValueError, "The rank of the input tensor should be 2"
):
_ = rff_layer(inputs)
@parameterized.named_parameters(
("gaussian", "gaussian", 10.0, False),
("random", tf.compat.v1.random_uniform_initializer, 1.0, True),
)
def test_random_features_properties(self, initializer, scale, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10,
kernel_initializer=initializer,
scale=scale,
trainable=trainable,
)
self.assertEqual(rff_layer.output_dim, 10)
self.assertEqual(rff_layer.kernel_initializer, initializer)
self.assertEqual(rff_layer.scale, scale)
self.assertEqual(rff_layer.trainable, trainable)
@parameterized.named_parameters(
("gaussian", "gaussian", False),
("laplacian", "laplacian", True),
("other", tf.compat.v1.ones_initializer, True),
)
def test_call(self, initializer, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10,
kernel_initializer=initializer,
scale=1.0,
trainable=trainable,
name="random_fourier_features",
)
inputs = tf.random.uniform((3, 2), seed=1)
outputs = rff_layer(inputs)
self.assertListEqual([3, 10], outputs.shape.as_list())
num_trainable_vars = 1 if trainable else 0
self.assertLen(
rff_layer.non_trainable_variables, 3 - num_trainable_vars
)
@tf_test_utils.assert_no_new_pyobjects_executing_eagerly()
    def test_no_eager_leak(self):
# Tests that repeatedly constructing and building a Layer does not leak
# Python objects.
inputs = tf.random.uniform((5, 4), seed=1)
kernel_layers.RandomFourierFeatures(output_dim=4, name="rff")(inputs)
kernel_layers.RandomFourierFeatures(output_dim=10, scale=2.0)(inputs)
def test_output_shape(self):
inputs = tf.random.uniform((3, 2), seed=1)
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=7, name="random_fourier_features", trainable=True
)
outputs = rff_layer(inputs)
self.assertEqual([3, 7], outputs.shape.as_list())
@parameterized.named_parameters(
("gaussian", "gaussian"),
("laplacian", "laplacian"),
("other", tf.compat.v1.random_uniform_initializer),
)
def test_call_on_placeholder(self, initializer):
with tf.Graph().as_default():
inputs = tf.compat.v1.placeholder(
dtype=tf.float32, shape=[None, None]
)
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=5,
kernel_initializer=initializer,
name="random_fourier_features",
)
with self.assertRaisesRegex(
ValueError,
"The last dimension of the input tensor should be defined",
):
rff_layer(inputs)
inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, None])
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=5,
kernel_initializer=initializer,
name="random_fourier_features",
)
with self.assertRaisesRegex(
ValueError,
"The last dimension of the input tensor should be defined",
):
rff_layer(inputs)
inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 3])
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=5, name="random_fourier_features"
)
rff_layer(inputs)
@parameterized.named_parameters(
("gaussian", 10, "gaussian", 2.0),
("laplacian", 5, "laplacian", None),
("other", 10, tf.compat.v1.ones_initializer, 1.0),
)
def test_compute_output_shape(self, output_dim, initializer, scale):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim, initializer, scale=scale, name="rff"
)
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tf.TensorShape(None))
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tf.TensorShape([]))
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tf.TensorShape([3]))
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tf.TensorShape([3, 2, 3]))
with self.assertRaisesRegex(
ValueError,
"The last dimension of the input tensor should be defined",
):
rff_layer.compute_output_shape(tf.TensorShape([3, None]))
self.assertEqual(
[None, output_dim],
rff_layer.compute_output_shape((None, 3)).as_list(),
)
self.assertEqual(
[None, output_dim],
rff_layer.compute_output_shape(tf.TensorShape([None, 2])).as_list(),
)
self.assertEqual(
[4, output_dim], rff_layer.compute_output_shape((4, 1)).as_list()
)
@parameterized.named_parameters(
("gaussian", 10, "gaussian", 3.0, False),
("laplacian", 5, "laplacian", 5.5, True),
("other", 7, tf.compat.v1.random_uniform_initializer(), None, True),
)
def test_get_config(self, output_dim, initializer, scale, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim,
initializer,
scale=scale,
trainable=trainable,
name="random_fourier_features",
)
expected_initializer = initializer
if not isinstance(initializer, str):
expected_initializer = initializers.serialize(initializer)
expected_dtype = (
"float32" if base_layer_utils.v2_dtype_behavior_enabled() else None
)
expected_config = {
"output_dim": output_dim,
"kernel_initializer": expected_initializer,
"scale": scale,
"name": "random_fourier_features",
"trainable": trainable,
"dtype": expected_dtype,
}
self.assertLen(expected_config, len(rff_layer.get_config()))
self.assertSameElements(
list(expected_config.items()), list(rff_layer.get_config().items())
)
@parameterized.named_parameters(
("gaussian", 5, "gaussian", None, True),
("laplacian", 5, "laplacian", 5.5, False),
("other", 7, tf.compat.v1.ones_initializer(), 2.0, True),
)
def test_from_config(self, output_dim, initializer, scale, trainable):
model_config = {
"output_dim": output_dim,
"kernel_initializer": initializer,
"scale": scale,
"trainable": trainable,
"name": "random_fourier_features",
}
rff_layer = kernel_layers.RandomFourierFeatures.from_config(
model_config
)
self.assertEqual(rff_layer.output_dim, output_dim)
self.assertEqual(rff_layer.kernel_initializer, initializer)
self.assertEqual(rff_layer.scale, scale)
self.assertEqual(rff_layer.trainable, trainable)
inputs = tf.random.uniform((3, 2), seed=1)
outputs = rff_layer(inputs)
self.assertListEqual([3, output_dim], outputs.shape.as_list())
num_trainable_vars = 1 if trainable else 0
self.assertLen(rff_layer.trainable_variables, num_trainable_vars)
if trainable:
self.assertEqual(
"random_fourier_features/kernel_scale:0",
rff_layer.trainable_variables[0].name,
)
self.assertLen(
rff_layer.non_trainable_variables, 3 - num_trainable_vars
)
@parameterized.named_parameters(
("gaussian", 10, "gaussian", 3.0, True),
("laplacian", 5, "laplacian", 5.5, False),
("other", 10, tf.compat.v1.random_uniform_initializer(), None, True),
)
def test_same_random_features_params_reused(
self, output_dim, initializer, scale, trainable
):
"""Applying the layer on the same input twice gives the same output."""
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=output_dim,
kernel_initializer=initializer,
scale=scale,
trainable=trainable,
name="random_fourier_features",
)
inputs = tf.constant(np.random.uniform(low=-1.0, high=1.0, size=(2, 4)))
output1 = rff_layer(inputs)
output2 = rff_layer(inputs)
self._assert_all_close(output1, output2)
@parameterized.named_parameters(
("gaussian", "gaussian", 5.0),
("laplacian", "laplacian", 3.0),
("other", tf.compat.v1.random_uniform_initializer(), 5.0),
)
def test_different_params_similar_approximation(self, initializer, scale):
tf.compat.v1.set_random_seed(12345)
rff_layer1 = kernel_layers.RandomFourierFeatures(
output_dim=3000,
kernel_initializer=initializer,
scale=scale,
name="rff1",
)
rff_layer2 = kernel_layers.RandomFourierFeatures(
output_dim=2000,
kernel_initializer=initializer,
scale=scale,
name="rff2",
)
# Two distinct inputs.
x = tf.constant([[1.0, -1.0, 0.5]])
y = tf.constant([[-1.0, 1.0, 1.0]])
# Apply both layers to both inputs.
output_x1 = math.sqrt(2.0 / 3000.0) * rff_layer1(x)
output_y1 = math.sqrt(2.0 / 3000.0) * rff_layer1(y)
output_x2 = math.sqrt(2.0 / 2000.0) * rff_layer2(x)
output_y2 = math.sqrt(2.0 / 2000.0) * rff_layer2(y)
# Compute the inner products of the outputs (on inputs x and y) for both
# layers. For any fixed random features layer rff_layer, and inputs x,
# y, rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization
# factor.
approx_kernel1 = kernelized_utils.inner_product(output_x1, output_y1)
approx_kernel2 = kernelized_utils.inner_product(output_x2, output_y2)
self._assert_all_close(approx_kernel1, approx_kernel2, atol=0.08)
@parameterized.named_parameters(
("gaussian", "gaussian", 5.0, _exact_gaussian(stddev=5.0)),
("laplacian", "laplacian", 20.0, _exact_laplacian(stddev=20.0)),
)
def test_bad_kernel_approximation(
self, initializer, scale, exact_kernel_fn
):
"""Approximation is bad when output dimension is small."""
# Two distinct inputs.
x = tf.constant([[1.0, -1.0, 0.5]])
y = tf.constant([[-1.0, 1.0, 1.0]])
small_output_dim = 10
tf.compat.v1.set_random_seed(1234)
# Initialize layer.
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=small_output_dim,
kernel_initializer=initializer,
scale=scale,
name="random_fourier_features",
)
# Apply layer to both inputs.
output_x = math.sqrt(2.0 / small_output_dim) * rff_layer(x)
output_y = math.sqrt(2.0 / small_output_dim) * rff_layer(y)
# The inner products of the outputs (on inputs x and y) approximates the
# real value of the RBF kernel but poorly since the output dimension of
# the layer is small.
exact_kernel_value = exact_kernel_fn(x, y)
approx_kernel_value = kernelized_utils.inner_product(output_x, output_y)
abs_error = tf.abs(exact_kernel_value - approx_kernel_value)
if not tf.executing_eagerly():
with self.cached_session() as sess:
keras_backend._initialize_variables(sess)
abs_error_eval = sess.run([abs_error])
self.assertGreater(abs_error_eval[0][0], 0.01)
self.assertLess(abs_error_eval[0][0], 0.5)
else:
self.assertGreater(abs_error, 0.01)
self.assertLess(abs_error, 0.5)
@parameterized.named_parameters(
("gaussian", "gaussian", 5.0, _exact_gaussian(stddev=5.0)),
("laplacian", "laplacian", 10.0, _exact_laplacian(stddev=10.0)),
)
def test_good_kernel_approximation_multiple_inputs(
self, initializer, scale, exact_kernel_fn
):
# Parameters.
input_dim = 5
output_dim = 2000
x_rows = 20
y_rows = 30
x = tf.constant(
np.random.uniform(size=(x_rows, input_dim)), dtype=tf.float32
)
y = tf.constant(
np.random.uniform(size=(y_rows, input_dim)), dtype=tf.float32
)
tf.compat.v1.set_random_seed(1234)
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=output_dim,
kernel_initializer=initializer,
scale=scale,
name="random_fourier_features",
)
# The shapes of output_x and output_y are (x_rows, output_dim) and
# (y_rows, output_dim) respectively.
output_x = math.sqrt(2.0 / output_dim) * rff_layer(x)
output_y = math.sqrt(2.0 / output_dim) * rff_layer(y)
approx_kernel_matrix = kernelized_utils.inner_product(
output_x, output_y
)
exact_kernel_matrix = exact_kernel_fn(x, y)
self._assert_all_close(
approx_kernel_matrix, exact_kernel_matrix, atol=0.05
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/kernelized_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/kernelized_test.py",
"repo_id": "tf-keras",
"token_count": 8254
} | 212 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for merging layers."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import backend
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import tf_inspect
@test_combinations.run_all_keras_modes
class MergingLayersTest(test_combinations.TestCase):
def test_add(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
i3 = keras.layers.Input(shape=(4, 5))
add_layer = keras.layers.Add()
o = add_layer([i1, i2, i3])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2, i3], o)
model.run_eagerly = test_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
x3 = np.random.random((2, 4, 5))
out = model.predict([x1, x2, x3])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1 + x2 + x3, atol=1e-4)
self.assertIsNone(
add_layer.compute_mask([i1, i2, i3], [None, None, None])
)
self.assertTrue(
np.all(
backend.eval(
add_layer.compute_mask(
[i1, i2], [backend.variable(x1), backend.variable(x2)]
)
)
)
)
with self.assertRaisesRegex(ValueError, "`mask` should be a list."):
add_layer.compute_mask([i1, i2, i3], x1)
with self.assertRaisesRegex(ValueError, "`inputs` should be a list."):
add_layer.compute_mask(i1, [None, None, None])
with self.assertRaisesRegex(
ValueError, " should have the same length."
):
add_layer.compute_mask([i1, i2, i3], [None, None])
def test_subtract(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
i3 = keras.layers.Input(shape=(4, 5))
subtract_layer = keras.layers.Subtract()
o = subtract_layer([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = test_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1 - x2, atol=1e-4)
self.assertIsNone(subtract_layer.compute_mask([i1, i2], [None, None]))
self.assertTrue(
np.all(
backend.eval(
subtract_layer.compute_mask(
[i1, i2], [backend.variable(x1), backend.variable(x2)]
)
)
)
)
with self.assertRaisesRegex(ValueError, "`mask` should be a list."):
subtract_layer.compute_mask([i1, i2], x1)
with self.assertRaisesRegex(ValueError, "`inputs` should be a list."):
subtract_layer.compute_mask(i1, [None, None])
with self.assertRaisesRegex(
ValueError, "layer should be called on exactly 2 inputs"
):
subtract_layer([i1, i2, i3])
with self.assertRaisesRegex(
ValueError, "layer should be called on exactly 2 inputs"
):
subtract_layer([i1])
def test_multiply(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
i3 = keras.layers.Input(shape=(4, 5))
o = keras.layers.multiply([i1, i2, i3])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2, i3], o)
model.run_eagerly = test_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
x3 = np.random.random((2, 4, 5))
out = model.predict([x1, x2, x3])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1 * x2 * x3, atol=1e-4)
def test_average(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.average([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = test_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, 0.5 * (x1 + x2), atol=1e-4)
def test_maximum(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.maximum([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = test_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, np.maximum(x1, x2), atol=1e-4)
def test_minimum(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.minimum([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = test_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, np.minimum(x1, x2), atol=1e-4)
def test_concatenate(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
concat_layer = keras.layers.Concatenate(axis=1)
o = concat_layer([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 8, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = test_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 8, 5))
self.assertAllClose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)
self.assertIsNone(concat_layer.compute_mask([i1, i2], [None, None]))
self.assertTrue(
np.all(
backend.eval(
concat_layer.compute_mask(
[i1, i2], [backend.variable(x1), backend.variable(x2)]
)
)
)
)
# Should work with unit-length input.
unit_length_o = concat_layer([i1])
self.assertListEqual(unit_length_o.shape.as_list(), i1.shape.as_list())
with self.assertRaisesRegex(ValueError, "`mask` should be a list."):
concat_layer.compute_mask([i1, i2], x1)
with self.assertRaisesRegex(ValueError, "`inputs` should be a list."):
concat_layer.compute_mask(i1, [None, None])
with self.assertRaisesRegex(ValueError, "should have the same length"):
concat_layer.compute_mask([i1, i2], [None])
with self.assertRaisesRegex(
ValueError, "layer should be called on a list of inputs"
):
concat_layer(i1)
def test_concatenate_numpy_inputs(self):
if tf.executing_eagerly():
layer = keras.layers.Concatenate()
x, y = np.ones((10, 10)), np.ones((10, 10))
self.assertAllEqual(np.ones((10, 20)), layer([x, y]))
def test_dot(self):
i1 = keras.layers.Input(shape=(4,))
i2 = keras.layers.Input(shape=(4,))
o = keras.layers.dot([i1, i2], axes=1)
self.assertListEqual(o.shape.as_list(), [None, 1])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = test_utils.should_run_eagerly()
_ = keras.layers.Dot(axes=1).get_config()
x1 = np.random.random((2, 4))
x2 = np.random.random((2, 4))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 1))
expected = np.zeros((2, 1))
expected[0, 0] = np.dot(x1[0], x2[0])
expected[1, 0] = np.dot(x1[1], x2[1])
self.assertAllClose(out, expected, atol=1e-4)
# Test with negative tuple of axes.
o = keras.layers.dot([i1, i2], axes=(-1, -1))
self.assertListEqual(o.shape.as_list(), [None, 1])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = test_utils.should_run_eagerly()
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 1))
self.assertAllClose(out, expected, atol=1e-4)
# test compute_output_shape
layer = keras.layers.Dot(axes=-1)
self.assertEqual(layer.compute_output_shape([(4, 5), (4, 5)]), (4, 1))
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
layer=[
keras.layers.Add,
keras.layers.Subtract,
keras.layers.Multiply,
keras.layers.Minimum,
keras.layers.Maximum,
keras.layers.Average,
]
)
)
def test_merging_with_ragged_input(self, layer):
ragged_data = tf.ragged.constant(
[[1.0, 1.0, 1.0], [1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], ragged_rank=1
)
dense_data = ragged_data.to_tensor()
input1 = keras.Input(shape=(None,), ragged=True)
input2 = keras.Input(shape=(None,), ragged=True)
out = layer()([input1, input2])
model = keras.models.Model(inputs=[input1, input2], outputs=out)
out_ragged = model.predict([ragged_data, ragged_data], steps=1)
out_ragged = convert_ragged_tensor_value(out_ragged).to_tensor()
input1 = keras.Input(shape=(None,))
input2 = keras.Input(shape=(None,))
out = layer()([input1, input2])
model = keras.models.Model(inputs=[input1, input2], outputs=out)
out_dense = model.predict([dense_data, dense_data], steps=1)
self.assertAllEqual(out_dense, out_ragged)
def test_concatenate_with_ragged_input(self):
ragged1 = tf.ragged.constant(
[[1.0, 1.0], [1.0], [1.0, 1.0, 1.0]], ragged_rank=1
)
ragged2 = tf.ragged.constant(
[[2.0, 2.0, 2.0], [2.0], [2.0, 2.0]], ragged_rank=1
)
expected_concatenated_ragged = tf.ragged.constant(
[[1.0, 1.0, 2.0, 2.0, 2.0], [1.0, 2.0], [1.0, 1.0, 1.0, 2.0, 2.0]],
ragged_rank=1,
)
input1 = keras.Input(shape=(None,), ragged=True)
input2 = keras.Input(shape=(None,), ragged=True)
out = keras.layers.Concatenate(axis=1)([input1, input2])
model = keras.models.Model(inputs=[input1, input2], outputs=out)
out_ragged = model.predict([ragged1, ragged2], steps=1)
self.assertAllEqual(out_ragged, expected_concatenated_ragged)
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
layer=[
keras.layers.Add,
keras.layers.Subtract,
keras.layers.Multiply,
keras.layers.Minimum,
keras.layers.Maximum,
keras.layers.Average,
]
)
)
def test_merging_with_scalar_input(self, layer):
x1 = np.array((1))
x2 = np.array((2))
out = layer()([x1, x2])
self.assertEqual(out.shape, ())
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
layer=[
keras.layers.Add,
keras.layers.add,
keras.layers.Average,
keras.layers.average,
keras.layers.Concatenate,
keras.layers.concatenate,
keras.layers.Maximum,
keras.layers.maximum,
keras.layers.Minimum,
keras.layers.minimum,
keras.layers.Multiply,
keras.layers.multiply,
]
)
)
def test_single_element(self, layer):
# Instantiate the Layer subclasses
if tf_inspect.isclass(layer) and issubclass(layer, keras.layers.Layer):
layer = layer()
# Processing a single element list should behave as identity.
i1 = keras.layers.Input(shape=(4, 5))
o = layer([i1])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model(i1, o)
model.run_eagerly = test_utils.should_run_eagerly()
x1 = np.random.random((2, 4, 5))
out = model.predict(x1)
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1)
# A single element must be passed as a list, not by itself.
with self.assertRaisesRegex(ValueError, "called on a list"):
layer(i1)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MergingLayersTestNoExecution(tf.test.TestCase):
def test_add_elementwise_errors(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 6))
with self.assertRaises(ValueError):
keras.layers.add([i1, i2])
with self.assertRaises(ValueError):
keras.layers.add(i1)
def test_concatenate_errors(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(3, 5))
with self.assertRaisesRegex(ValueError, "inputs with matching shapes"):
keras.layers.concatenate([i1, i2], axis=-1)
with self.assertRaisesRegex(ValueError, "called on a list"):
keras.layers.concatenate(i1, axis=-1)
def test_concatenate_with_partial_shape(self):
i1 = keras.layers.Input(shape=(5,), batch_size=32)
i2 = keras.layers.Input(shape=(5,))
i3 = keras.layers.Input(shape=(4, 5), batch_size=32)
i4 = keras.layers.Input(shape=(None,), batch_size=64)
i5 = keras.layers.Input(shape=(7,))
        # Valid case since i2 has a dynamic batch size.
keras.layers.concatenate([i1, i2], axis=-1)
# Different rank
with self.assertRaisesRegex(ValueError, "inputs with matching shapes"):
keras.layers.concatenate([i1, i3], axis=-1)
# Valid case with partial dimension information
keras.layers.concatenate([i1, i4], axis=0)
keras.layers.concatenate([i2, i4], axis=0)
keras.layers.concatenate([i2, i4], axis=1)
keras.layers.concatenate([i1, i2, i4], axis=0)
keras.layers.concatenate([i1, i5], axis=1)
# Mismatch in batch dimension.
with self.assertRaisesRegex(ValueError, "inputs with matching shapes"):
keras.layers.concatenate([i1, i4], axis=-1)
with self.assertRaisesRegex(ValueError, "inputs with matching shapes"):
keras.layers.concatenate([i1, i2, i4], axis=-1)
def test_dot_errors(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 6))
i3 = keras.layers.Input(shape=(4, 6))
with self.assertRaises(ValueError):
keras.layers.dot([i1, i2], axes=-1)
with self.assertRaises(ValueError):
keras.layers.dot(i1, axes=-1)
with self.assertRaises(ValueError):
keras.layers.dot([i1], axes=-1)
with self.assertRaises(ValueError):
keras.layers.dot([i1, i2, i3], axes=-1)
with self.assertRaises(ValueError):
dot = keras.layers.Dot(1)
dot.compute_output_shape(1)
def test_subtract(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
y = keras.layers.subtract([i1, i2])
self.assertEqual(y.shape.as_list(), [None, 4, 5])
# Test invalid use cases
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(3, 5))
with self.assertRaises(ValueError):
keras.layers.subtract([i1, i2])
with self.assertRaises(ValueError):
keras.layers.subtract([i1, i1, i1])
def test_add_masking(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
m1 = keras.layers.Masking()(i1)
layer = keras.layers.Add()
o = layer([m1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
mask = layer.output_mask
self.assertListEqual(mask.shape.as_list(), [None, 4])
def test_add_dynamic_shape(self):
i1 = keras.Input(batch_shape=(4, None), dtype="float32")
i2 = keras.Input(batch_shape=(4, 5), dtype="float32")
layer = keras.layers.Add()
o = layer([i1, i2])
self.assertListEqual(o.shape.as_list(), [4, 5])
def test_concatenate_masking(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
m1 = keras.layers.Masking()(i1)
layer = keras.layers.Concatenate()
o = layer([m1, i2])
self.assertListEqual(o.shape.as_list(), [None, 4, 10])
mask = layer.output_mask
self.assertListEqual(mask.shape.as_list(), [None, 4])
def test_concatenate_sparse_shape(self):
i1 = keras.layers.Input(shape=(1,), batch_size=2, sparse=True)
i2 = keras.layers.Input(shape=(2,), batch_size=2, sparse=True)
layer = keras.layers.Concatenate(axis=1)
o = layer([i1, i2])
self.assertListEqual(o.shape.as_list(), [2, 3])
        # Make sure it also respects None as the batch size.
i1 = keras.layers.Input(shape=(1,), sparse=True)
i2 = keras.layers.Input(shape=(2,), sparse=True)
layer = keras.layers.Concatenate(axis=1)
o = layer([i1, i2])
self.assertListEqual(o.shape.as_list(), [None, 3])
def test_concatenate_user_changes_to_input_structure(self):
a = keras.layers.Input(shape=(4, 5))
struct = [a, a]
concat1 = keras.layers.Concatenate(1)
b = concat1(struct)
struct.append(b)
concat2 = keras.layers.Concatenate(1)
c = concat2(struct)
        # Checks that the append to `struct` doesn't affect `concat1`'s
        # node data.
self.assertLen(concat1.inbound_nodes[0].input_tensors, 2)
self.assertLen(concat2.inbound_nodes[0].input_tensors, 3)
keras.Model(a, c) # Ensure model can be built.
def convert_ragged_tensor_value(inputs):
if isinstance(inputs, tf.compat.v1.ragged.RaggedTensorValue):
flat_values = tf.convert_to_tensor(
value=inputs.flat_values, name="flat_values"
)
return tf.RaggedTensor.from_nested_row_splits(
flat_values, inputs.nested_row_splits, validate=False
)
return inputs
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/merging/merging_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/merging/merging_test.py",
"repo_id": "tf-keras",
"token_count": 9965
} | 213 |
# Copyright 2023 The TF-Keras Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
class SpectralNormalizationTest(test_combinations.TestCase):
@test_combinations.run_all_keras_modes
def test_basic_spectralnorm(self):
test_utils.layer_test(
keras.layers.SpectralNormalization,
kwargs={"layer": keras.layers.Dense(2), "input_shape": (3, 4)},
input_data=tf.random.uniform((10, 3, 4)),
)
@test_combinations.run_all_keras_modes
def test_from_to_config(self):
base_layer = keras.layers.Dense(1)
sn = keras.layers.SpectralNormalization(base_layer)
config = sn.get_config()
new_sn = keras.layers.SpectralNormalization.from_config(config)
self.assertEqual(sn.power_iterations, new_sn.power_iterations)
@test_combinations.run_all_keras_modes
def test_save_load_model(self):
base_layer = keras.layers.Dense(1)
input_shape = [1]
inputs = keras.layers.Input(shape=input_shape)
sn_layer = keras.layers.SpectralNormalization(base_layer)
model = keras.models.Sequential(layers=[inputs, sn_layer])
# initialize model
model.predict(tf.random.uniform((2, 1)))
with self.subTest("h5"):
model.save("test.h5")
new_model = keras.models.load_model("test.h5")
self.assertEqual(
model.layers[0].get_config(), new_model.layers[0].get_config()
)
with self.subTest("savedmodel"):
model.save("test")
new_model = keras.models.load_model("test")
self.assertEqual(
model.layers[0].get_config(), new_model.layers[0].get_config()
)
with self.subTest("keras_v3"):
model.save("test.keras")
new_model = keras.models.load_model("test.keras")
self.assertEqual(
model.layers[0].get_config(), new_model.layers[0].get_config()
)
@test_combinations.run_all_keras_modes
def test_normalization(self):
inputs = keras.layers.Input(shape=[2, 2, 1])
base_layer = keras.layers.Conv2D(
1, (2, 2), kernel_initializer=tf.constant_initializer(value=2)
)
sn_layer = keras.layers.SpectralNormalization(base_layer)
model = keras.models.Sequential(layers=[inputs, sn_layer])
weights = tf.squeeze(model.layers[0].w.numpy())
# This wrapper normalizes weights by the maximum eigen value
eigen_val, _ = tf.linalg.eig(weights)
weights_normalized = weights / tf.reduce_max(eigen_val)
for training in [False, True]:
_ = model(
tf.constant(tf.ones((1, 2, 2, 1), dtype=tf.float32)),
training=training,
)
if training:
w = weights_normalized
else:
w = weights
self.assertAllClose(w, tf.squeeze(model.layers[0].w.numpy()))
@test_combinations.run_all_keras_modes
def test_apply_layer(self):
images = tf.ones((1, 2, 2, 1))
sn_wrapper = keras.layers.SpectralNormalization(
keras.layers.Conv2D(
1, [2, 2], kernel_initializer=tf.constant_initializer(value=1)
),
input_shape=(2, 2, 1),
)
result = sn_wrapper(images, training=False)
result_train = sn_wrapper(images, training=True)
expected_output = tf.constant([[[[4.0]]]], dtype=tf.float32)
self.assertAllClose(result, expected_output)
        # The maximum eigenvalue of a 2x2 matrix of ones is 2.
self.assertAllClose(result_train, expected_output / 2)
self.assertTrue(hasattr(sn_wrapper, "u"))
@test_combinations.run_all_keras_modes
def test_no_layer(self):
images = tf.random.uniform((2, 4, 43))
with self.assertRaises(AssertionError):
keras.layers.SpectralNormalization(images)
@test_combinations.run_all_keras_modes
def test_no_kernel(self):
with self.assertRaises(AttributeError):
keras.layers.SpectralNormalization(
keras.layers.MaxPooling2D(2, 2)
).build((2, 2))
@parameterized.parameters(
[
(lambda: keras.layers.Dense(2), [3, 2]),
(
lambda: keras.layers.Conv2D(3, (2, 2), padding="same"),
[4, 4, 3],
),
(lambda: keras.layers.Embedding(2, 10), [2]),
],
)
@test_combinations.run_all_keras_modes
def test_model_build(self, base_layer_fn, input_shape):
inputs = keras.layers.Input(shape=input_shape)
base_layer = base_layer_fn()
sn_layer = keras.layers.SpectralNormalization(base_layer)
model = keras.models.Sequential(layers=[inputs, sn_layer])
model.build()
self.assertTrue(hasattr(model.layers[0], "vector_u"))
@parameterized.parameters(
[
(lambda: keras.layers.Dense(2), [3, 2], [3, 2]),
(
lambda: keras.layers.Conv2D(3, (2, 2), padding="same"),
[4, 4, 3],
[4, 4, 3],
),
(lambda: keras.layers.Embedding(2, 10), [2], [2, 10]),
],
)
@test_combinations.run_all_keras_modes
def test_model_fit(self, base_layer_fn, input_shape, output_shape):
inputs = keras.layers.Input(shape=input_shape)
base_layer = base_layer_fn()
sn_layer = keras.layers.SpectralNormalization(base_layer)
model = keras.models.Sequential(layers=[inputs, sn_layer])
model.add(keras.layers.Activation("relu"))
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=0.001),
loss="mse",
)
model.fit(
tf.random.uniform((2, *input_shape)),
tf.random.uniform((2, *output_shape)),
epochs=3,
batch_size=10,
verbose=0,
)
self.assertTrue(hasattr(model.layers[0], "vector_u"))
| tf-keras/tf_keras/layers/normalization/spectral_normalization_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/normalization/spectral_normalization_test.py",
"repo_id": "tf-keras",
"token_count": 3198
} | 214 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global average pooling 2D layer."""
from tf_keras import backend
from tf_keras.layers.pooling.base_global_pooling2d import GlobalPooling2D
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.layers.GlobalAveragePooling2D", "keras.layers.GlobalAvgPool2D"
)
class GlobalAveragePooling2D(GlobalPooling2D):
"""Global average pooling operation for spatial data.
Examples:
>>> input_shape = (2, 4, 5, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.GlobalAveragePooling2D()(x)
>>> print(y.shape)
(2, 3)
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
When unspecified, uses `image_data_format` value found
in your TF-Keras config file at `~/.keras/keras.json`
(if exists) else 'channels_last'. Defaults to 'channels_last'.
keepdims: A boolean, whether to keep the spatial dimensions or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the spatial dimensions are retained with
length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, 1, 1, channels)`
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, 1, 1)`
"""
def call(self, inputs):
if self.data_format == "channels_last":
return backend.mean(inputs, axis=[1, 2], keepdims=self.keepdims)
else:
return backend.mean(inputs, axis=[2, 3], keepdims=self.keepdims)
# Alias
GlobalAvgPool2D = GlobalAveragePooling2D
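# Illustrative usage sketch (not part of the original module), assuming the
# public `tf.keras` API: with `keepdims=True` the pooled output keeps the
# spatial axes with length 1, so it broadcasts cleanly against the original
# 4D feature map (e.g. for squeeze-and-excitation style rescaling).
def _example_keepdims_usage():
    import tensorflow as tf

    x = tf.random.normal((2, 4, 5, 3))
    pooled = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(x)
    # pooled.shape == (2, 1, 1, 3); broadcasting against `x` is valid.
    return x * pooled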
| tf-keras/tf_keras/layers/pooling/global_average_pooling2d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/pooling/global_average_pooling2d.py",
"repo_id": "tf-keras",
"token_count": 1171
} | 215 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for TF-Keras category_encoding preprocessing layer."""
import time
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.preprocessing import category_encoding
class BenchmarkLayer(tf.test.Benchmark):
"""Benchmark the layer forward pass."""
def run_dataset_implementation(
self, output_mode, batch_size, sequence_length, max_tokens
):
input_t = keras.Input(shape=(sequence_length,), dtype=tf.int32)
layer = category_encoding.CategoryEncoding(
max_tokens=max_tokens, output_mode=output_mode
)
_ = layer(input_t)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = tf.data.Dataset.from_tensor_slices(
tf.random.uniform(
[batch_size * 10, sequence_length],
minval=0,
maxval=max_tokens - 1,
dtype=tf.int32,
)
)
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = layer(i)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
name = "category_encoding|batch_%s|seq_length_%s|%s_max_tokens" % (
batch_size,
sequence_length,
max_tokens,
)
self.report_benchmark(iters=num_repeats, wall_time=avg_time, name=name)
def benchmark_vocab_size_by_batch(self):
for batch in [32, 256, 2048]:
for sequence_length in [10, 1000]:
for num_tokens in [100, 1000, 20000]:
self.run_dataset_implementation(
output_mode="count",
batch_size=batch,
sequence_length=sequence_length,
max_tokens=num_tokens,
)
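# A minimal sketch (not part of the original benchmark) of what the
# benchmarked "count" output mode produces, assuming the public
# `tf.keras.layers.CategoryEncoding` API (which takes `num_tokens` rather
# than the `max_tokens` argument used by the internal layer above).
def _example_count_mode():
    layer = keras.layers.CategoryEncoding(num_tokens=4, output_mode="count")
    # Each row counts how many times each token id in [0, num_tokens) occurs,
    # e.g. [[1., 2., 1., 0.], [1., 0., 0., 3.]] for the input below.
    return layer([[0, 1, 1, 2], [3, 3, 3, 0]])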
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/benchmarks/category_encoding_benchmark.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/category_encoding_benchmark.py",
"repo_id": "tf-keras",
"token_count": 1364
} | 216 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for TF-Keras text vectorization preprocessing layer's adapt method.
"""
import collections
import itertools
import random
import string
import time
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.preprocessing import index_lookup
tf.compat.v1.enable_v2_behavior()
# word_gen creates random sequences of ASCII letters (both lowercase and uppercase).
# The number of unique strings is ~2,700.
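# (52 ASCII letters and length-2 strings give 52 ** 2 == 2704 possible values.)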
def word_gen():
for _ in itertools.count(1):
yield "".join(random.choice(string.ascii_letters) for i in range(2))
def get_top_k(dataset, k):
"""Python implementation of vocabulary building using a defaultdict."""
counts = collections.defaultdict(int)
for tensor in dataset:
data = tensor.numpy()
for element in data:
counts[element] += 1
sorted_vocab = [
k
for k, _ in sorted(
counts.items(), key=lambda item: item[1], reverse=True
)
]
if len(sorted_vocab) > k:
sorted_vocab = sorted_vocab[:k]
return sorted_vocab
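# A small illustrative check (not part of the original benchmark): on a tiny
# batched dataset, `get_top_k` returns the most frequent tokens first,
# truncated to `k`.
def _example_get_top_k():
    ds = tf.data.Dataset.from_tensor_slices(
        tf.constant(["aa", "bb", "aa", "cc", "aa", "bb"])
    ).batch(2)
    return get_top_k(ds, k=2)  # -> [b"aa", b"bb"]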
class BenchmarkAdapt(tf.test.Benchmark):
"""Benchmark adapt."""
def run_numpy_implementation(self, num_elements, batch_size, k):
"""Test the python implementation."""
ds = tf.data.Dataset.from_generator(
word_gen, tf.string, tf.TensorShape([])
)
batched_ds = ds.take(num_elements).batch(batch_size)
input_t = keras.Input(shape=(), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=k,
num_oov_indices=0,
mask_token=None,
oov_token="OOV",
dtype=tf.string,
)
_ = layer(input_t)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
starts.append(time.time())
vocab = get_top_k(batched_ds, k)
layer.set_vocabulary(vocab)
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts))
return avg_time
def bm_adapt_implementation(self, num_elements, batch_size, k):
"""Test the KPL adapt implementation."""
ds = tf.data.Dataset.from_generator(
word_gen, tf.string, tf.TensorShape([])
)
batched_ds = ds.take(num_elements).batch(batch_size)
input_t = keras.Input(shape=(), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=k,
num_oov_indices=0,
mask_token=None,
oov_token="OOV",
dtype=tf.string,
)
_ = layer(input_t)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
starts.append(time.time())
layer.adapt(batched_ds)
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts))
name = "index_lookup_adapt|%s_elements|vocab_size_%s|batch_%s" % (
num_elements,
k,
batch_size,
)
baseline = self.run_numpy_implementation(num_elements, batch_size, k)
extras = {
"numpy implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100,
}
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name
)
def benchmark_vocab_size_by_batch(self):
for vocab_size in [100, 1000, 10000, 100000, 1000000]:
for batch in [1, 16, 2048]:
self.bm_adapt_implementation(
vocab_size, batch, int(vocab_size / 10)
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/benchmarks/index_lookup_adapt_benchmark.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/index_lookup_adapt_benchmark.py",
"repo_id": "tf-keras",
"token_count": 1980
} | 217 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.image_preprocessing."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.distribute import strategy_combinations
from tf_keras.layers.preprocessing import image_preprocessing
from tf_keras.layers.preprocessing import preprocessing_test_utils
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_utils.run_v2_only
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
strategy=strategy_combinations.all_strategies
+ strategy_combinations.multi_worker_mirrored_strategies,
mode=["eager"],
)
)
class ImagePreprocessingDistributionTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_distribution(self, strategy):
if "CentralStorage" in type(strategy).__name__:
self.skipTest("Does not work with CentralStorageStrategy yet.")
# TODO(b/159738418): large image input causes OOM in ubuntu multi gpu.
np_images = np.random.random((32, 32, 32, 3)).astype(np.float32)
image_dataset = tf.data.Dataset.from_tensor_slices(np_images).batch(
16, drop_remainder=True
)
with strategy.scope():
input_data = keras.Input(shape=(32, 32, 3), dtype=tf.float32)
image_preprocessor = keras.Sequential(
[
image_preprocessing.Resizing(height=256, width=256),
image_preprocessing.RandomCrop(height=224, width=224),
image_preprocessing.RandomTranslation(0.1, 0.1),
image_preprocessing.RandomBrightness(
0.1, value_range=(0, 1)
),
image_preprocessing.RandomRotation(0.2),
image_preprocessing.RandomFlip(),
image_preprocessing.RandomZoom(0.2, 0.2),
]
)
preprocessed_image = image_preprocessor(input_data)
flatten_layer = keras.layers.Flatten(data_format="channels_last")
output = flatten_layer(preprocessed_image)
cls_layer = keras.layers.Dense(units=1, activation="sigmoid")
output = cls_layer(output)
model = keras.Model(inputs=input_data, outputs=output)
_ = model.predict(image_dataset)
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main()
| tf-keras/tf_keras/layers/preprocessing/image_preprocessing_distribution_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/image_preprocessing_distribution_test.py",
"repo_id": "tf-keras",
"token_count": 1283
} | 218 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras string lookup preprocessing layer."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.layers.preprocessing import index_lookup
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.layers.StringLookup",
"keras.layers.experimental.preprocessing.StringLookup",
v1=[],
)
class StringLookup(index_lookup.IndexLookup):
"""A preprocessing layer which maps string features to integer indices.
This layer translates a set of arbitrary strings into integer output via a
table-based vocabulary lookup. This layer will perform no splitting or
transformation of input strings. For a layer that can split and tokenize
natural language, see the `tf.keras.layers.TextVectorization` layer.
The vocabulary for the layer must be either supplied on construction or
learned via `adapt()`. During `adapt()`, the layer will analyze a data set,
determine the frequency of individual strings tokens, and create a
vocabulary from them. If the vocabulary is capped in size, the most frequent
tokens will be used to create the vocabulary and all others will be treated
as out-of-vocabulary (OOV).
There are two possible output modes for the layer.
When `output_mode` is `"int"`,
input strings are converted to their index in the vocabulary (an integer).
When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`, input strings
are encoded into an array where each dimension corresponds to an element in
the vocabulary.
The vocabulary can optionally contain a mask token as well as an OOV token
(which can optionally occupy multiple indices in the vocabulary, as set
by `num_oov_indices`).
The position of these tokens in the vocabulary is fixed. When `output_mode`
is `"int"`, the vocabulary will begin with the mask token (if set), followed
by OOV indices, followed by the rest of the vocabulary. When `output_mode`
is `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will begin with
OOV indices and instances of the mask token will be dropped.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
max_tokens: Maximum size of the vocabulary for this layer. This should
only be specified when adapting the vocabulary or when setting
`pad_to_max_tokens=True`. If None, there is no cap on the size of the
vocabulary. Note that this size includes the OOV and mask tokens.
Defaults to `None`.
num_oov_indices: The number of out-of-vocabulary tokens to use. If this
value is more than 1, OOV inputs are hashed to determine their OOV
value. If this value is 0, OOV inputs will cause an error when calling
the layer. Defaults to `1`.
mask_token: A token that represents masked inputs. When `output_mode` is
`"int"`, the token is included in vocabulary and mapped to index 0. In
other output modes, the token will not appear in the vocabulary and
instances of the mask token in the input will be dropped. If set to
None, no mask term will be added. Defaults to `None`.
oov_token: Only used when `invert` is True. The token to return for OOV
indices. Defaults to `"[UNK]"`.
vocabulary: Optional. Either an array of strings or a string path to a
text file. If passing an array, can pass a tuple, list, 1D numpy array,
or 1D tensor containing the string vocabulary terms. If passing a file
path, the file should contain one line per term in the vocabulary. If
this argument is set, there is no need to `adapt()` the layer.
idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list,
1D numpy array, or 1D tensor or the same length as the vocabulary,
containing the floating point inverse document frequency weights, which
will be multiplied by per sample term counts for the final `tf_idf`
weight. If the `vocabulary` argument is set, and `output_mode` is
`"tf_idf"`, this argument must be supplied.
invert: Only valid when `output_mode` is `"int"`. If True, this layer will
map indices to vocabulary items instead of mapping vocabulary items to
indices. Defaults to `False`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or `"tf_idf"`
configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1 at the element
index. If the last dimension is size 1, will encode on that
dimension. If the last dimension is not size 1, will append a new
dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single array
the same size as the vocabulary, containing a 1 for each vocabulary
term present in the sample. Treats the last dimension as the sample
dimension, if input shape is (..., sample_length), output shape will
be (..., num_tokens).
- `"count"`: As `"multi_hot"`, but the int array contains a count of
the number of times the token at that index appeared in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
find the value in each token slot.
For `"int"` output, any shape of input and output is supported. For all
other output modes, currently only output up to rank 2 is supported.
Defaults to `"int"`.
pad_to_max_tokens: Only applicable when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If True, the output will have its feature axis
padded to `max_tokens` even if the number of unique tokens in the
vocabulary is less than max_tokens, resulting in a tensor of shape
[batch_size, max_tokens] regardless of vocabulary size. Defaults to
`False`.
sparse: Boolean. Only applicable when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If True, returns a `SparseTensor` instead of a
dense `Tensor`. Defaults to `False`.
encoding: Optional. The text encoding to use to interpret the input
strings. Defaults to `"utf-8"`.
Examples:
**Creating a lookup layer with a known vocabulary**
This example creates a lookup layer with a pre-existing vocabulary.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup(vocabulary=vocab)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[1, 3, 4],
[4, 0, 2]])>
**Creating a lookup layer with an adapted vocabulary**
This example creates a lookup layer and generates the vocabulary by
analyzing the dataset.
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup()
>>> layer.adapt(data)
>>> layer.get_vocabulary()
['[UNK]', 'd', 'z', 'c', 'b', 'a']
Note that the OOV token `"[UNK]"` has been added to the vocabulary.
The remaining tokens are sorted by frequency
(`"d"`, which has 2 occurrences, is first) then by inverse sort order.
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup()
>>> layer.adapt(data)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[5, 3, 1],
[1, 2, 4]])>
**Lookups with multiple OOV indices**
This example demonstrates how to use a lookup layer with multiple OOV
indices. When a layer is created with more than one OOV index, any OOV
values are hashed into the number of OOV buckets, distributing OOV values in
a deterministic fashion across the set.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["m", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup(vocabulary=vocab,
... num_oov_indices=2)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[2, 4, 5],
[0, 1, 3]])>
Note that the output for OOV value 'm' is 0, while the output for OOV value
'z' is 1. The in-vocab terms have their output index increased by 1 from
earlier examples (a maps to 2, etc) in order to make space for the extra OOV
value.
**One-hot output**
Configure the layer with `output_mode='one_hot'`. Note that the first
`num_oov_indices` dimensions in the one_hot encoding represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant(["a", "b", "c", "d", "z"])
>>> layer = tf.keras.layers.StringLookup(
... vocabulary=vocab, output_mode='one_hot')
>>> layer(data)
<tf.Tensor: shape=(5, 5), dtype=float32, numpy=
array([[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0.]], dtype=float32)>
**Multi-hot output**
Configure the layer with `output_mode='multi_hot'`. Note that the first
`num_oov_indices` dimensions in the multi_hot encoding represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = tf.keras.layers.StringLookup(
... vocabulary=vocab, output_mode='multi_hot')
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]], dtype=float32)>
**Token count output**
Configure the layer with `output_mode='count'`. As with multi_hot output,
the first `num_oov_indices` dimensions in the output represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = tf.keras.layers.StringLookup(
... vocabulary=vocab, output_mode='count')
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0., 1., 0., 1., 2.],
[2., 0., 1., 0., 1.]], dtype=float32)>
**TF-IDF output**
Configure the layer with `output_mode="tf_idf"`. As with multi_hot output,
the first `num_oov_indices` dimensions in the output represent OOV values.
Each token bin will output `token_count * idf_weight`, where the idf weights
are the inverse document frequency weights per token. These should be
provided along with the vocabulary. Note that the `idf_weight` for OOV
values will default to the average of all idf weights passed in.
>>> vocab = ["a", "b", "c", "d"]
>>> idf_weights = [0.25, 0.75, 0.6, 0.4]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = tf.keras.layers.StringLookup(output_mode="tf_idf")
>>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.0 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)>
    To specify the idf weights for OOV values, you will need to pass the entire
    vocabulary, including the leading OOV token.
>>> vocab = ["[UNK]", "a", "b", "c", "d"]
>>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]
>>> data = tf.constant([["a", "c", "d", "d"], ["d", "z", "b", "z"]])
>>> layer = tf.keras.layers.StringLookup(output_mode="tf_idf")
>>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
>>> layer(data)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.8 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)>
When adapting the layer in `"tf_idf"` mode, each input sample will be
considered a document, and IDF weight per token will be calculated as
`log(1 + num_documents / (1 + token_document_count))`.
**Inverse lookup**
This example demonstrates how to map indices to strings using this layer.
    (You can also use `adapt()` with `invert=True`, but for simplicity we'll
pass the vocab in this example.)
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([[1, 3, 4], [4, 0, 2]])
>>> layer = tf.keras.layers.StringLookup(vocabulary=vocab, invert=True)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=string, numpy=
array([[b'a', b'c', b'd'],
[b'd', b'[UNK]', b'b']], dtype=object)>
    Note that the first index corresponds to the OOV token by default.
**Forward and inverse lookup pairs**
This example demonstrates how to use the vocabulary of a standard lookup
layer to create an inverse lookup layer.
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = tf.keras.layers.StringLookup(vocabulary=vocab)
>>> i_layer = tf.keras.layers.StringLookup(vocabulary=vocab, invert=True)
>>> int_data = layer(data)
>>> i_layer(int_data)
<tf.Tensor: shape=(2, 3), dtype=string, numpy=
array([[b'a', b'c', b'd'],
[b'd', b'[UNK]', b'b']], dtype=object)>
    In this example, the input value `"z"` resulted in an output of `"[UNK]"`,
    since `"z"` was not in the vocabulary - it got represented as an OOV, and
    all OOV values are returned as `"[UNK]"` in the inverse layer. Also, note that
for the inverse to work, you must have already set the forward layer
vocabulary either directly or via `adapt()` before calling
`get_vocabulary()`.
"""
def __init__(
self,
max_tokens=None,
num_oov_indices=1,
mask_token=None,
oov_token="[UNK]",
vocabulary=None,
idf_weights=None,
encoding="utf-8",
invert=False,
output_mode="int",
sparse=False,
pad_to_max_tokens=False,
**kwargs
):
# Legacy versions of the StringLookup layer set layer dtype to string,
# instead of the output type. If we see this, clear it.
if "dtype" in kwargs and (
kwargs["dtype"] == tf.string or kwargs["dtype"] == "string"
):
del kwargs["dtype"]
self.encoding = encoding
super().__init__(
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
mask_token=mask_token,
oov_token=oov_token,
vocabulary=vocabulary,
vocabulary_dtype=tf.string,
idf_weights=idf_weights,
invert=invert,
output_mode=output_mode,
sparse=sparse,
pad_to_max_tokens=pad_to_max_tokens,
**kwargs
)
def get_config(self):
config = {"encoding": self.encoding}
base_config = super().get_config()
# There is only one valid dtype for strings, so we don't expose this.
del base_config["vocabulary_dtype"]
return dict(list(base_config.items()) + list(config.items()))
# We override this method solely to generate a docstring.
def adapt(self, data, batch_size=None, steps=None):
"""Computes a vocabulary of string terms from tokens in a dataset.
Calling `adapt()` on a `StringLookup` layer is an alternative to passing
in a precomputed vocabulary on construction via the `vocabulary`
argument. A `StringLookup` layer should always be either adapted over a
dataset or supplied with a vocabulary.
During `adapt()`, the layer will build a vocabulary of all string tokens
seen in the dataset, sorted by occurrence count, with ties broken by
sort order of the tokens (high to low). At the end of `adapt()`, if
        `max_tokens` is set, the vocabulary will be truncated to `max_tokens`
size. For example, adapting a layer with `max_tokens=1000` will compute
the 1000 most frequent tokens occurring in the input dataset. If
        `output_mode='tf_idf'`, `adapt()` will also learn the document
frequencies of each token in the input dataset.
In order to make `StringLookup` efficient in any distribution context,
the vocabulary is kept static with respect to any compiled `tf.Graph`s
that call the layer. As a consequence, if the layer is adapted a second
time, any models using the layer should be re-compiled. For more
information see
`tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.
`adapt()` is meant only as a single machine utility to compute layer
state. To analyze a dataset that cannot fit on a single machine, see
[Tensorflow Transform](
https://www.tensorflow.org/tfx/transform/get_started) for a
multi-machine, map-reduce solution.
Arguments:
data: The data to train on. It can be passed either as a
`tf.data.Dataset`, or as a numpy array.
batch_size: Integer or `None`.
Number of samples per state update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of datasets, generators, or `keras.utils.Sequence` instances
(since they generate batches).
steps: Integer or `None`.
            Total number of steps (batches of samples).
When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined. If x is a
`tf.data` dataset, and 'steps' is None, the epoch will run until
the input dataset is exhausted. When passing an infinitely
repeating dataset, you must specify the `steps` argument. This
argument is not supported with array inputs.
"""
super().adapt(data, batch_size=batch_size, steps=steps)
# Overridden methods from IndexLookup.
def _tensor_vocab_to_numpy(self, vocabulary):
vocabulary = vocabulary.numpy()
return np.array(
[tf.compat.as_text(x, self.encoding) for x in vocabulary]
)
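# A brief sketch (not part of the original module) of the common
# adapt-then-freeze pattern: adapt one layer, read its vocabulary back, and
# construct a fresh layer from it so the lookup table is fixed at build time.
def _example_adapt_then_freeze():
    data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
    adapted = StringLookup()
    adapted.adapt(data)
    vocab = adapted.get_vocabulary(include_special_tokens=False)
    frozen = StringLookup(vocabulary=vocab)
    return frozen(data)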
| tf-keras/tf_keras/layers/preprocessing/string_lookup.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/string_lookup.py",
"repo_id": "tf-keras",
"token_count": 7292
} | 219 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for recurrent layers backed by cuDNN."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.rnn.base_rnn import RNN
class _CuDNNRNN(RNN):
"""Private base class for CuDNNGRU and CuDNNLSTM layers.
Args:
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
time_major: Boolean (default False). If true, the inputs and outputs will
be in shape `(timesteps, batch, ...)`, whereas in the False case, it
will be `(batch, timesteps, ...)`.
"""
def __init__(
self,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
time_major=False,
**kwargs
):
# We invoke the base layer's initializer directly here because we do not
        # want to create an RNN cell instance.
super(RNN, self).__init__(**kwargs)
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.time_major = time_major
self.supports_masking = False
self.input_spec = [InputSpec(ndim=3)]
if hasattr(self.cell.state_size, "__len__"):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
self.state_spec = [InputSpec(shape=(None, dim)) for dim in state_size]
self.constants_spec = None
self._states = None
self._num_constants = 0
self._vector_shape = tf.constant([-1])
def call(self, inputs, mask=None, training=None, initial_state=None):
if isinstance(mask, list):
mask = mask[0]
if mask is not None:
raise ValueError("Masking is not supported for CuDNN RNNs.")
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
initial_state = inputs[1:]
inputs = inputs[0]
elif initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if len(initial_state) != len(self.states):
raise ValueError(
"Layer has "
+ str(len(self.states))
+ " states but was passed "
+ str(len(initial_state))
+ " initial states."
)
if self.go_backwards:
# Reverse time axis.
inputs = backend.reverse(inputs, 1)
output, states = self._process_batch(inputs, initial_state)
if self.stateful:
updates = [
tf.compat.v1.assign(self_state, state)
for self_state, state in zip(self.states, states)
]
self.add_update(updates)
if self.return_state:
return [output] + states
else:
return output
def get_config(self):
config = {
"return_sequences": self.return_sequences,
"return_state": self.return_state,
"go_backwards": self.go_backwards,
"stateful": self.stateful,
"time_major": self.time_major,
}
base_config = super(RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def trainable_weights(self):
if self.trainable and self.built:
return [self.kernel, self.recurrent_kernel, self.bias]
return []
@property
def non_trainable_weights(self):
if not self.trainable and self.built:
return [self.kernel, self.recurrent_kernel, self.bias]
return []
@property
def losses(self):
return super(RNN, self).losses
def get_losses_for(self, inputs=None):
return super(RNN, self).get_losses_for(inputs=inputs)
| tf-keras/tf_keras/layers/rnn/base_cudnn_rnn.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/base_cudnn_rnn.py",
"repo_id": "tf-keras",
"token_count": 2258
} | 220 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mixin holding dropout fields for RNN cells."""
import tensorflow.compat.v2 as tf
from tensorflow.tools.docs import doc_controls
from tf_keras import backend
@doc_controls.do_not_generate_docs
class DropoutRNNCellMixin:
"""Object that hold dropout related fields for RNN Cell.
This class is not a standalone RNN cell. It suppose to be used with a RNN
cell by multiple inheritance. Any cell that mix with class should have
following fields:
dropout: a float number within range [0, 1). The ratio that the input
tensor need to dropout.
recurrent_dropout: a float number within range [0, 1). The ratio that the
recurrent state weights need to dropout.
_random_generator: A backend.RandomGenerator instance, which will be used
to produce outputs based on the inputs and dropout rate.
This object will create and cache created dropout masks, and reuse them for
the incoming data, so that the same mask is used for every batch input.
"""
def __init__(self, *args, **kwargs):
self._create_non_trackable_mask_cache()
super().__init__(*args, **kwargs)
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _create_non_trackable_mask_cache(self):
"""Create the cache for dropout and recurrent dropout mask.
Note that the following two masks will be used in "graph function" mode,
e.g. these masks are symbolic tensors. In eager mode, the `eager_*_mask`
tensors will be generated differently than in the "graph function" case,
and they will be cached.
Also note that in graph mode, we still cache those masks only because
the RNN could be created with `unroll=True`. In that case, the
`cell.call()` function will be invoked multiple times, and we want to
ensure same mask is used every time.
Also the caches are created without tracking. Since they are not
pickleable by python when deepcopy, we don't want
`layer._obj_reference_counts_dict` to track it by default.
"""
self._dropout_mask_cache = backend.ContextValueCache(
self._create_dropout_mask
)
self._recurrent_dropout_mask_cache = backend.ContextValueCache(
self._create_recurrent_dropout_mask
)
def reset_dropout_mask(self):
"""Reset the cached dropout masks if any.
This is important for the RNN layer to invoke this in it `call()` method
so that the cached mask is cleared before calling the `cell.call()`. The
mask should be cached across the timestep within the same batch, but
shouldn't be cached between batches. Otherwise it will introduce
unreasonable bias against certain index of data within the batch.
"""
self._dropout_mask_cache.clear()
def reset_recurrent_dropout_mask(self):
"""Reset the cached recurrent dropout masks if any.
This is important for the RNN layer to invoke this in it call() method
so that the cached mask is cleared before calling the cell.call(). The
mask should be cached across the timestep within the same batch, but
shouldn't be cached between batches. Otherwise it will introduce
unreasonable bias against certain index of data within the batch.
"""
self._recurrent_dropout_mask_cache.clear()
def _create_dropout_mask(self, inputs, training, count=1):
return _generate_dropout_mask(
self._random_generator,
tf.ones_like(inputs),
self.dropout,
training=training,
count=count,
)
def _create_recurrent_dropout_mask(self, inputs, training, count=1):
return _generate_dropout_mask(
self._random_generator,
tf.ones_like(inputs),
self.recurrent_dropout,
training=training,
count=count,
)
def get_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the dropout mask for RNN cell's input.
It will create mask based on context if there isn't any existing cached
mask. If a new mask is generated, it will update the cache in the cell.
Args:
inputs: The input tensor whose shape will be used to generate dropout
mask.
training: Boolean tensor, whether its in training mode, dropout will
be ignored in non-training mode.
count: Int, how many dropout mask will be generated. It is useful for
cell that has internal weights fused together.
Returns:
List of mask tensor, generated or cached mask based on context.
"""
if self.dropout == 0:
return None
init_kwargs = dict(inputs=inputs, training=training, count=count)
return self._dropout_mask_cache.setdefault(kwargs=init_kwargs)
def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the recurrent dropout mask for RNN cell.
It will create mask based on context if there isn't any existing cached
mask. If a new mask is generated, it will update the cache in the cell.
Args:
inputs: The input tensor whose shape will be used to generate dropout
mask.
training: Boolean tensor, whether its in training mode, dropout will
be ignored in non-training mode.
count: Int, how many dropout mask will be generated. It is useful for
cell that has internal weights fused together.
Returns:
List of mask tensor, generated or cached mask based on context.
"""
if self.recurrent_dropout == 0:
return None
init_kwargs = dict(inputs=inputs, training=training, count=count)
return self._recurrent_dropout_mask_cache.setdefault(kwargs=init_kwargs)
def __getstate__(self):
# Used for deepcopy. The caching can't be pickled by python, since it
# will contain tensor and graph.
state = super().__getstate__()
state.pop("_dropout_mask_cache", None)
state.pop("_recurrent_dropout_mask_cache", None)
return state
def __setstate__(self, state):
state["_dropout_mask_cache"] = backend.ContextValueCache(
self._create_dropout_mask
)
state["_recurrent_dropout_mask_cache"] = backend.ContextValueCache(
self._create_recurrent_dropout_mask
)
super().__setstate__(state)
def _generate_dropout_mask(generator, ones, rate, training=None, count=1):
def dropped_inputs():
return generator.dropout(ones, rate)
if count > 1:
return [
backend.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(count)
]
return backend.in_train_phase(dropped_inputs, ones, training=training)
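# A minimal usage sketch (illustrative only; `MyCell` is hypothetical, but
# the pattern mirrors how built-in cells such as SimpleRNNCell consume the
# cached masks inside `call`):
#
#   class MyCell(DropoutRNNCellMixin, base_layer.Layer):
#       def call(self, inputs, states, training=None):
#           dp_mask = self.get_dropout_mask_for_cell(inputs, training)
#           rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
#               states[0], training
#           )
#           if dp_mask is not None:
#               inputs = inputs * dp_mask
#           if rec_dp_mask is not None:
#               states = [states[0] * rec_dp_mask]
#           ...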
| tf-keras/tf_keras/layers/rnn/dropout_rnn_cell_mixin.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/dropout_rnn_cell_mixin.py",
"repo_id": "tf-keras",
"token_count": 2767
} | 221 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SimpleRNN layer."""
import copy
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.generate(test_combinations.keras_mode_combinations())
class SimpleRNNLayerTest(tf.test.TestCase, parameterized.TestCase):
def test_return_sequences_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={"units": units, "return_sequences": True},
input_shape=(num_samples, timesteps, embedding_dim),
)
@test_utils.run_v2_only
def test_float64_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={
"units": units,
"return_sequences": True,
"dtype": "float64",
},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype="float64",
)
def test_dynamic_behavior_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile("rmsprop", "mse")
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={"units": units, "dropout": 0.1, "recurrent_dropout": 0.1},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_implementation_mode_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
for mode in [0, 1, 2]:
test_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={"units": units, "implementation": mode},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_constraints_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint,
)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_with_masking_layer_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_deep_copy_SimpleRNN(self):
cell = keras.layers.SimpleRNNCell(5)
copied_cell = copy.deepcopy(cell)
self.assertEqual(copied_cell.units, 5)
self.assertEqual(cell.get_config(), copied_cell.get_config())
def test_regularizers_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer="l2",
activity_regularizer="l1",
)
layer.build((None, None, 2))
self.assertLen(layer.losses, 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if tf.executing_eagerly():
self.assertLen(layer.losses, 4)
else:
self.assertLen(layer.get_losses_for(x), 1)
def test_statefulness_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.SimpleRNN
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps),
)
)
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None
)
model.add(layer)
model.compile(
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units))
)
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
def test_get_initial_states(self):
batch_size = 4
cell = keras.layers.SimpleRNNCell(20)
initial_state = cell.get_initial_state(
batch_size=batch_size, dtype=tf.float32
)
_, state = cell(
np.ones((batch_size, 20), dtype=np.float32), initial_state
)
self.assertEqual(state.shape, initial_state.shape)
@test_utils.run_v2_only
def test_cloned_weight_names(self):
inp = keras.Input([None, 3])
rnn = keras.layers.SimpleRNN(units=3)
model = keras.Model(inp, rnn(inp))
clone = keras.models.clone_model(model)
model_names = [x.name for x in model.weights]
clone_names = [x.name for x in clone.weights]
self.assertEqual(model_names, clone_names)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/rnn/simple_rnn_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/simple_rnn_test.py",
"repo_id": "tf-keras",
"token_count": 4295
} | 222 |
"""The DetermisticRandomTestTool.
(from www.tensorflow.org/guide/migrate/validate_correctness) is a tool used to
make random number generation semantics match between TF1.x graphs/sessions and
eager execution.
"""
import sys
import tensorflow.compat.v2 as tf
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=["keras.utils.DeterministicRandomTestTool"])
class DeterministicRandomTestTool(object):
"""DeterministicRandomTestTool is a testing tool.
This tool is used to validate random number generation semantics match
between TF1.x graphs/sessions and eager execution.
This is useful when you are migrating from TF 1.x to TF2 and need to make
sure your computation is still happening correctly along the way. See the
validating correctness migration guide for more info:
https://www.tensorflow.org/guide/migrate/validate_correctness
    The DeterministicRandomTestTool object provides a context manager,
    scope(), that can make stateful random operations use the same seed
    across both TF1 graphs/sessions and eager execution. The tool provides
    two testing modes:
    - constant, which uses the same seed for every single operation no matter
      how many times it has been called, and
    - num_random_ops, which uses the number of previously-observed stateful
      random operations as the operation seed.
    The num_random_ops mode serves as a more sensitive validation check than
    the constant mode. It ensures that random-number initializations do not
    get accidentally reused (for example, if several weights take on the same
    initialization); you can use the num_random_ops mode to avoid this. In
    the num_random_ops mode, the generated random numbers will depend on the
    ordering of random ops in the program.
This applies both to the stateful random operations used for creating and
initializing variables, and to the stateful random operations used in
computation (such as for dropout layers).
Args:
mode: Set mode to 'constant' or 'num_random_ops'. Defaults to
'constant'.
seed: The random seed to use.
"""
def __init__(self, seed: int = 42, mode="constant"):
if mode not in {"constant", "num_random_ops"}:
raise ValueError(
"Mode arg must be 'constant' or 'num_random_ops'. "
+ f"Got: {mode}"
)
self.seed_implementation = sys.modules[tf.compat.v1.get_seed.__module__]
self._mode = mode
self._seed = seed
self.operation_seed = 0
self._observed_seeds = set()
@property
def operation_seed(self):
return self._operation_seed
@operation_seed.setter
def operation_seed(self, value):
self._operation_seed = value
def scope(self):
"""set random seed."""
tf.random.set_seed(self._seed)
def _get_seed(_):
"""Wraps TF get_seed to make deterministic random generation easier.
This makes a variable's initialization (and calls that involve
random number generation) depend only on how many random number
generations were used in the scope so far, rather than on how many
unrelated operations the graph contains.
Returns:
Random seed tuple.
"""
op_seed = self._operation_seed
if self._mode == "constant":
tf.random.set_seed(op_seed)
else:
if op_seed in self._observed_seeds:
raise ValueError(
"This `DeterministicRandomTestTool` "
"object is trying to re-use the "
+ f"already-used operation seed {op_seed}. "
+ "It cannot guarantee random numbers will match "
+ "between eager and sessions when an operation seed "
+ "is reused. You most likely set "
+ "`operation_seed` explicitly but used a value that "
+ "caused the naturally-incrementing operation seed "
+ "sequences to overlap with an already-used seed."
)
self._observed_seeds.add(op_seed)
self._operation_seed += 1
return (self._seed, op_seed)
# mock.patch internal symbols to modify the behavior of TF APIs relying
# on them
return tf.compat.v1.test.mock.patch.object(
self.seed_implementation, "get_seed", wraps=_get_seed
)
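# A minimal usage sketch (illustrative only, following the migration guide
# referenced above). `scope()` returns a mock-patch context manager, so it
# must be entered with `with` for the deterministic seeding to take effect:
#
#   random_tool = DeterministicRandomTestTool(mode="num_random_ops")
#   with random_tool.scope():
#       a = tf.random.uniform(shape=(3, 1))
#       b = tf.random.uniform(shape=(3, 3))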
| tf-keras/tf_keras/legacy_tf_layers/migration_utils.py/0 | {
"file_path": "tf-keras/tf_keras/legacy_tf_layers/migration_utils.py",
"repo_id": "tf-keras",
"token_count": 1778
} | 223 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Description:
# Contains the TF-Keras Mixed Precision API (TensorFlow version).
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test")
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test") # buildifier: disable=same-origin-load
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = [
# TODO(scottzhu): Remove these two deps and convert the test to integration test.
"//third_party/tensorflow/python/distribute:__pkg__", # For collective_all_reduce_strategy_test
"//tf_keras:friends",
"//third_party/tensorflow/tools/pip_package:__pkg__",
],
licenses = ["notice"],
)
py_library(
name = "mixed_precision_experimental",
srcs = ["__init__.py"],
srcs_version = "PY3",
deps = [
":loss_scale_optimizer",
":policy",
],
)
py_library(
name = "policy",
srcs = [
"policy.py",
],
srcs_version = "PY3",
deps = [
":device_compatibility_check",
"//:expect_tensorflow_installed",
],
)
tf_py_test(
name = "policy_test",
size = "medium",
srcs = [
"policy_test.py",
],
python_version = "PY3",
srcs_version = "PY3",
tags = ["no_rocm"],
deps = [
":policy",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/optimizers/legacy:optimizers",
"//tf_keras/testing_infra:test_combinations",
],
)
py_library(
name = "device_compatibility_check",
srcs = ["device_compatibility_check.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
],
)
cuda_py_test(
name = "device_compatibility_check_test",
srcs = ["device_compatibility_check_test.py"],
srcs_version = "PY3",
deps = [
":device_compatibility_check",
"//:expect_tensorflow_installed",
"//tf_keras/testing_infra:test_combinations",
],
)
py_library(
name = "autocast_variable",
srcs = [
"autocast_variable.py",
],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/distribute",
],
)
tf_py_test(
name = "autocast_variable_test",
size = "medium",
srcs = ["autocast_variable_test.py"],
python_version = "PY3",
deps = [
":autocast_variable",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/layers",
"//tf_keras/optimizers/legacy:optimizers",
],
)
py_library(
name = "loss_scale_optimizer",
srcs = ["loss_scale_optimizer.py"],
srcs_version = "PY3",
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras/optimizers/legacy:optimizers",
"//tf_keras/utils:generic_utils",
],
)
cuda_py_test(
name = "loss_scale_optimizer_test",
size = "medium",
srcs = ["loss_scale_optimizer_test.py"],
python_version = "PY3",
shard_count = 4,
deps = [
":loss_scale_optimizer",
":test_util",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
cuda_py_test(
name = "mixed_precision_graph_rewrite_test",
size = "small",
srcs = ["mixed_precision_graph_rewrite_test.py"],
python_version = "PY3",
deps = [
":loss_scale_optimizer",
":policy",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/optimizers/legacy:optimizers",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
],
)
py_library(
name = "test_util",
srcs = ["test_util.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
],
)
cuda_py_test(
name = "layer_test",
size = "medium",
srcs = ["layer_test.py"],
python_version = "PY3",
tags = [
"no_pip",
"no_windows", # b/139083295: bfloat16 tests fail on Windows
],
deps = [
":test_util",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
cuda_py_test(
name = "model_test",
size = "medium",
srcs = ["model_test.py"],
data = [
"//tf_keras/mixed_precision/testdata:lso_ckpt_tf2.2",
"//tf_keras/mixed_precision/testdata:lso_savedmodel_tf2.2",
],
python_version = "PY3",
shard_count = 5,
tags = [
"no_pip",
"no_windows", # b/139083295: bfloat16 tests fail on Windows
],
deps = [
":test_util",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
cuda_py_test(
name = "layer_correctness_test",
size = "medium",
srcs = ["layer_correctness_test.py"],
python_version = "PY3",
shard_count = 10,
tags = [
"no_rocm",
],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
| tf-keras/tf_keras/mixed_precision/BUILD/0 | {
"file_path": "tf-keras/tf_keras/mixed_precision/BUILD",
"repo_id": "tf-keras",
"token_count": 2820
} | 224 |
model_checkpoint_path: "ckpt"
all_model_checkpoint_paths: "ckpt"
| tf-keras/tf_keras/mixed_precision/testdata/lso_ckpt_tf2.2/checkpoint/0 | {
"file_path": "tf-keras/tf_keras/mixed_precision/testdata/lso_ckpt_tf2.2/checkpoint",
"repo_id": "tf-keras",
"token_count": 27
} | 225 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in optimizer classes.
For more examples see the base class `tf.keras.optimizers.Optimizer`.
"""
# Imports needed for deserialization.
import platform
import warnings
import tensorflow.compat.v2 as tf
from absl import logging
from tf_keras import backend
from tf_keras.optimizers import adadelta
from tf_keras.optimizers import adafactor
from tf_keras.optimizers import adagrad
from tf_keras.optimizers import adam
from tf_keras.optimizers import adamax
from tf_keras.optimizers import adamw
from tf_keras.optimizers import ftrl
from tf_keras.optimizers import lion
from tf_keras.optimizers import nadam
from tf_keras.optimizers import optimizer as base_optimizer
from tf_keras.optimizers import rmsprop
from tf_keras.optimizers import sgd
from tf_keras.optimizers.legacy import adadelta as adadelta_legacy
from tf_keras.optimizers.legacy import adagrad as adagrad_legacy
from tf_keras.optimizers.legacy import adam as adam_legacy
from tf_keras.optimizers.legacy import adamax as adamax_legacy
from tf_keras.optimizers.legacy import ftrl as ftrl_legacy
from tf_keras.optimizers.legacy import (
gradient_descent as gradient_descent_legacy,
)
from tf_keras.optimizers.legacy import nadam as nadam_legacy
from tf_keras.optimizers.legacy import optimizer_v2 as base_optimizer_legacy
from tf_keras.optimizers.legacy import rmsprop as rmsprop_legacy
from tf_keras.optimizers.legacy.adadelta import Adadelta
from tf_keras.optimizers.legacy.adagrad import Adagrad
from tf_keras.optimizers.legacy.adam import Adam
from tf_keras.optimizers.legacy.adamax import Adamax
from tf_keras.optimizers.legacy.ftrl import Ftrl
# Symbols to be accessed under keras.optimizers. To be replaced with
# optimizers v2022 when they graduate out of experimental.
from tf_keras.optimizers.legacy.gradient_descent import SGD
from tf_keras.optimizers.legacy.nadam import Nadam
from tf_keras.optimizers.legacy.rmsprop import RMSprop
from tf_keras.optimizers.optimizer_v1 import Optimizer
from tf_keras.optimizers.optimizer_v1 import TFOptimizer
from tf_keras.optimizers.schedules import learning_rate_schedule
from tf_keras.saving.legacy import serialization as legacy_serialization
from tf_keras.saving.serialization_lib import deserialize_keras_object
from tf_keras.saving.serialization_lib import serialize_keras_object
# isort: off
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=line-too-long
@keras_export("keras.optimizers.serialize")
def serialize(optimizer, use_legacy_format=False):
"""Serialize the optimizer configuration to JSON compatible python dict.
The configuration can be used for persistence and reconstruct the
`Optimizer` instance again.
>>> tf.keras.optimizers.serialize(tf.keras.optimizers.legacy.SGD())
{'module': 'keras.optimizers.legacy', 'class_name': 'SGD', 'config': {'name': 'SGD', 'learning_rate': 0.01, 'decay': 0.0, 'momentum': 0.0, 'nesterov': False}, 'registered_name': None}
""" # noqa: E501
"""
Args:
optimizer: An `Optimizer` instance to serialize.
Returns:
Python dict which contains the configuration of the input optimizer.
"""
if optimizer is None:
return None
if not isinstance(
optimizer,
(
base_optimizer.Optimizer,
Optimizer,
base_optimizer_legacy.OptimizerV2,
),
):
warnings.warn(
"The `keras.optimizers.serialize()` API should only be used for "
"objects of type `keras.optimizers.Optimizer`. Found an instance "
f"of type {type(optimizer)}, which may lead to improper "
"serialization."
)
if use_legacy_format:
return legacy_serialization.serialize_keras_object(optimizer)
return serialize_keras_object(optimizer)
def is_arm_mac():
return platform.system() == "Darwin" and platform.processor() == "arm"
@keras_export("keras.optimizers.deserialize")
def deserialize(config, custom_objects=None, use_legacy_format=False, **kwargs):
"""Inverse of the `serialize` function.
Args:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A TF-Keras Optimizer instance.
"""
# loss_scale_optimizer has a direct dependency of optimizer, import here
# rather than top to avoid the cyclic dependency.
from tf_keras.mixed_precision import (
loss_scale_optimizer,
)
use_legacy_optimizer = kwargs.pop("use_legacy_optimizer", False)
if kwargs:
raise TypeError(f"Invalid keyword arguments: {kwargs}")
if len(config["config"]) > 0:
# If the optimizer config is not empty, then we use the value of
# `is_legacy_optimizer` to override `use_legacy_optimizer`. If
# `is_legacy_optimizer` does not exist in config, it means we are
        # using the legacy optimizer.
use_legacy_optimizer = config["config"].get("is_legacy_optimizer", True)
if (
tf.__internal__.tf2.enabled()
and tf.executing_eagerly()
and not is_arm_mac()
and not use_legacy_optimizer
):
# We observed a slowdown of optimizer on M1 Mac, so we fall back to the
# legacy optimizer for M1 users now, see b/263339144 for more context.
all_classes = {
"adadelta": adadelta.Adadelta,
"adagrad": adagrad.Adagrad,
"adam": adam.Adam,
"adamw": adamw.AdamW,
"adamax": adamax.Adamax,
"experimentaladadelta": adadelta.Adadelta,
"experimentaladagrad": adagrad.Adagrad,
"experimentaladam": adam.Adam,
"experimentalsgd": sgd.SGD,
"nadam": nadam.Nadam,
"rmsprop": rmsprop.RMSprop,
"sgd": sgd.SGD,
"ftrl": ftrl.Ftrl,
"lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizerV3,
"lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3,
# LossScaleOptimizerV1 was an old version of LSO that was removed.
# Deserializing it turns it into a LossScaleOptimizer
"lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer,
}
else:
all_classes = {
"adadelta": adadelta_legacy.Adadelta,
"adagrad": adagrad_legacy.Adagrad,
"adam": adam_legacy.Adam,
"adamax": adamax_legacy.Adamax,
"experimentaladadelta": adadelta.Adadelta,
"experimentaladagrad": adagrad.Adagrad,
"experimentaladam": adam.Adam,
"experimentalsgd": sgd.SGD,
"nadam": nadam_legacy.Nadam,
"rmsprop": rmsprop_legacy.RMSprop,
"sgd": gradient_descent_legacy.SGD,
"ftrl": ftrl_legacy.Ftrl,
"lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizer,
"lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3,
# LossScaleOptimizerV1 was an old version of LSO that was removed.
# Deserializing it turns it into a LossScaleOptimizer
"lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer,
}
# Make deserialization case-insensitive for built-in optimizers.
if config["class_name"].lower() in all_classes:
config["class_name"] = config["class_name"].lower()
if use_legacy_format:
return legacy_serialization.deserialize_keras_object(
config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name="optimizer",
)
return deserialize_keras_object(
config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name="optimizer",
)
@keras_export(
"keras.__internal__.optimizers.convert_to_legacy_optimizer", v1=[]
)
def convert_to_legacy_optimizer(optimizer):
"""Convert experimental optimizer to legacy optimizer.
This function takes in a `keras.optimizers.Optimizer`
instance and converts it to the corresponding
`keras.optimizers.legacy.Optimizer` instance.
For example, `keras.optimizers.Adam(...)` to
`keras.optimizers.legacy.Adam(...)`.
Args:
optimizer: An instance of `keras.optimizers.Optimizer`.
"""
# loss_scale_optimizer has a direct dependency of optimizer, import here
# rather than top to avoid the cyclic dependency.
from tf_keras.mixed_precision import (
loss_scale_optimizer,
)
if not isinstance(optimizer, base_optimizer.Optimizer):
raise ValueError(
"`convert_to_legacy_optimizer` should only be called "
"on instances of `tf.keras.optimizers.Optimizer`, but "
f"received {optimizer} of type {type(optimizer)}."
)
optimizer_name = optimizer.__class__.__name__.lower()
config = optimizer.get_config()
# Remove fields that only exist in experimental optimizer.
keys_to_remove = [
"weight_decay",
"use_ema",
"ema_momentum",
"ema_overwrite_frequency",
"jit_compile",
"is_legacy_optimizer",
]
for key in keys_to_remove:
config.pop(key, None)
if isinstance(optimizer, loss_scale_optimizer.LossScaleOptimizerV3):
# For LossScaleOptimizers, recursively convert the inner optimizer
config["inner_optimizer"] = convert_to_legacy_optimizer(
optimizer.inner_optimizer
)
if optimizer_name == "lossscaleoptimizerv3":
optimizer_name = "lossscaleoptimizer"
# Learning rate can be a custom LearningRateSchedule, which is stored as
# a dict in config, and cannot be deserialized.
if hasattr(optimizer, "_learning_rate") and isinstance(
optimizer._learning_rate, learning_rate_schedule.LearningRateSchedule
):
config["learning_rate"] = optimizer._learning_rate
legacy_optimizer_config = {
"class_name": optimizer_name,
"config": config,
}
return deserialize(
legacy_optimizer_config,
use_legacy_optimizer=True,
use_legacy_format=True,
)
@keras_export("keras.optimizers.get")
def get(identifier, **kwargs):
"""Retrieves a TF-Keras Optimizer instance.
Args:
identifier: Optimizer identifier, one of - String: name of an optimizer
- Dictionary: configuration dictionary. - TF-Keras Optimizer instance
(it will be returned unchanged). - TensorFlow Optimizer instance (it
will be wrapped as a TF-Keras Optimizer).
Returns:
A TF-Keras Optimizer instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
use_legacy_optimizer = kwargs.pop("use_legacy_optimizer", False)
if kwargs:
raise TypeError(f"Invalid keyword arguments: {kwargs}")
if isinstance(
identifier,
(
Optimizer,
base_optimizer_legacy.OptimizerV2,
),
):
return identifier
elif isinstance(identifier, base_optimizer.Optimizer):
if tf.__internal__.tf2.enabled():
return identifier
else:
# If TF2 is disabled, we convert to the legacy
# optimizer.
return convert_to_legacy_optimizer(identifier)
# Wrap legacy TF optimizer instances
elif isinstance(identifier, tf.compat.v1.train.Optimizer):
opt = TFOptimizer(identifier)
backend.track_tf_optimizer(opt)
return opt
elif isinstance(identifier, dict):
use_legacy_format = "module" not in identifier
return deserialize(
identifier,
use_legacy_optimizer=use_legacy_optimizer,
use_legacy_format=use_legacy_format,
)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
return get(
config,
use_legacy_optimizer=use_legacy_optimizer,
)
else:
raise ValueError(
f"Could not interpret optimizer identifier: {identifier}"
)
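# A minimal round-trip sketch (illustrative only): `get` accepts a string
# name, a config dict, or an optimizer instance, while `serialize` and
# `deserialize` convert between instances and JSON-compatible dicts.
#
#   opt = get("adam")              # resolves the built-in Adam optimizer
#   config = serialize(opt)        # JSON-compatible python dict
#   restored = deserialize(config)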
| tf-keras/tf_keras/optimizers/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/__init__.py",
"repo_id": "tf-keras",
"token_count": 5195
} | 226 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adamax optimizer implementation."""
import tensorflow.compat.v2 as tf
from tf_keras import backend_config
from tf_keras.optimizers.legacy import optimizer_v2
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.optimizers.legacy.Adamax",
v1=["keras.optimizers.Adamax", "keras.optimizers.legacy.Adamax"],
)
class Adamax(optimizer_v2.OptimizerV2):
"""Optimizer that implements the Adamax algorithm.
It is a variant of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
    Adamax is sometimes superior to Adam, especially in models with embeddings.
Initialization:
```python
m = 0 # Initialize initial 1st moment vector
v = 0 # Initialize the exponentially weighted infinity norm
t = 0 # Initialize timestep
```
The update rule for parameter `w` with gradient `g` is
described at the end of section 7.1 of the paper:
```python
t += 1
    m = beta1 * m + (1 - beta1) * g
v = max(beta2 * v, abs(g))
current_lr = learning_rate / (1 - beta1 ** t)
w = w - current_lr * m / (v + epsilon)
```
Similarly to `Adam`, the epsilon is added for numerical stability
(especially to get rid of division by zero when `v_t == 0`).
In contrast to `Adam`, the sparse implementation of this algorithm
(used when the gradient is an IndexedSlices object, typically because of
`tf.gather` or an embedding lookup in the forward pass) only updates
variable slices and corresponding `m_t`, `v_t` terms when that part of
    the variable was used in the forward pass. This means that the sparse
    behavior is in contrast to the dense behavior (similar to some momentum
implementations which ignore momentum unless a variable slice was actually
used).
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
name: Optional name for the operations created when applying gradients.
Defaults to `"Adamax"`.
**kwargs: keyword arguments. Allowed arguments are `clipvalue`,
`clipnorm`, `global_clipnorm`.
If `clipvalue` (float) is set, the gradient of each weight
is clipped to be no higher than this value.
If `clipnorm` (float) is set, the gradient of each weight
is individually clipped so that its norm is no higher than this value.
If `global_clipnorm` (float) is set the gradient of all weights is
clipped so that their global norm is no higher than this value.
Reference:
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
"""
_HAS_AGGREGATE_GRAD = True
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
name="Adamax",
**kwargs
):
super().__init__(name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("decay", self._initial_decay)
self._set_hyper("beta_1", beta_1)
self._set_hyper("beta_2", beta_2)
self.epsilon = epsilon or backend_config.epsilon()
def _create_slots(self, var_list):
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
self.add_slot(var, "m") # Create slots for the first moments.
for var in var_list:
self.add_slot(var, "v") # Create slots for the second moments.
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
local_step = tf.cast(self.iterations + 1, var_dtype)
beta_1_t = tf.identity(self._get_hyper("beta_1", var_dtype))
beta_2_t = tf.identity(self._get_hyper("beta_2", var_dtype))
beta_1_power = tf.pow(beta_1_t, local_step)
lr_t = apply_state[(var_device, var_dtype)]["lr_t"]
apply_state[(var_device, var_dtype)].update(
dict(
neg_scaled_lr=-lr_t / (1 - beta_1_power),
epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,
beta_2_t=beta_2_t,
zero=tf.zeros((), dtype=tf.int64),
)
)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
return tf.raw_ops.ResourceApplyAdaMax(
var=var.handle,
m=m.handle,
v=v.handle,
beta1_power=coefficients["beta_1_power"],
lr=coefficients["lr_t"],
beta1=coefficients["beta_1_t"],
beta2=coefficients["beta_2_t"],
epsilon=coefficients["epsilon"],
grad=grad,
use_locking=self._use_locking,
)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_slice = tf.gather(m, indices, axis=coefficients["zero"])
m_t_slice = (
m_slice * coefficients["beta_1_t"]
+ grad * coefficients["one_minus_beta_1_t"]
)
with tf.control_dependencies([m_t_slice]):
m_t = self._resource_scatter_update(m, indices, m_t_slice)
# u_t = max(beta2 * u, abs(g_t))
v = self.get_slot(var, "v")
v_slice = tf.gather(v, indices, axis=coefficients["zero"])
v_t_slice = tf.maximum(v_slice * coefficients["beta_2_t"], tf.abs(grad))
with tf.control_dependencies([v_t_slice]):
v_t = self._resource_scatter_update(v, indices, v_t_slice)
# theta_t = theta - lr / (1 - beta1^t) * m_t / u_t
var_slice = coefficients["neg_scaled_lr"] * (
m_t_slice / (v_t_slice + coefficients["epsilon"])
)
with tf.control_dependencies([var_slice]):
var_update = self._resource_scatter_add(var, indices, var_slice)
return tf.group(*[var_update, m_t, v_t])
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
"learning_rate"
),
"decay": self._initial_decay,
"beta_1": self._serialize_hyperparameter("beta_1"),
"beta_2": self._serialize_hyperparameter("beta_2"),
"epsilon": self.epsilon,
}
)
return config
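# A minimal usage sketch (illustrative only; any differentiable scalar loss
# works here):
#
#   opt = Adamax(learning_rate=0.002)
#   var = tf.Variable(10.0)
#   loss = lambda: (var ** 2) / 2.0
#   opt.minimize(loss, var_list=[var])  # applies a single update step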
| tf-keras/tf_keras/optimizers/legacy/adamax.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/adamax.py",
"repo_id": "tf-keras",
"token_count": 3441
} | 227 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Nadam optimizer implementation."""
import tensorflow.compat.v2 as tf
from tf_keras.optimizers import optimizer
from tf_keras.saving.object_registration import register_keras_serializable
# isort: off
from tensorflow.python.util.tf_export import keras_export
@register_keras_serializable()
@keras_export(
"keras.optimizers.Nadam", "keras.optimizers.experimental.Nadam", v1=[]
)
class Nadam(optimizer.Optimizer):
r"""Optimizer that implements the Nadam algorithm.
Much like Adam is essentially RMSprop with momentum, Nadam is Adam with
Nesterov momentum.
Args:
learning_rate: A `tf.Tensor`, floating point value, a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use. The
learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
Defaults to `1e-7`.
{{base_optimizer_keyword_args}}
Reference:
- [Dozat, 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
jit_compile=True,
name="Nadam",
**kwargs
):
super().__init__(
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
jit_compile=jit_compile,
**kwargs
)
self._learning_rate = self._build_learning_rate(learning_rate)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def build(self, var_list):
"""Initialize optimizer variables.
Nadam optimizer has 2 types of variables: momentums and velocities.
Args:
var_list: list of model variables to build Nadam variables on.
"""
super().build(var_list)
if getattr(self, "_built", False):
return
self._built = True
self._momentums = []
self._velocities = []
self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype)
        # Keep a counter of how many times `_u_product` has been computed to
        # avoid duplicated computations.
self._u_product_counter = 1
for var in var_list:
self._momentums.append(
self.add_variable_from_reference(
model_variable=var, variable_name="m"
)
)
self._velocities.append(
self.add_variable_from_reference(
model_variable=var, variable_name="v"
)
)
def update_step(self, gradient, variable):
"""Update step given gradient and the associated model variable."""
var_dtype = variable.dtype
lr = tf.cast(self.learning_rate, var_dtype)
local_step = tf.cast(self.iterations + 1, var_dtype)
next_step = tf.cast(self.iterations + 2, var_dtype)
decay = tf.cast(0.96, var_dtype)
beta_1 = tf.cast(self.beta_1, var_dtype)
beta_2 = tf.cast(self.beta_2, var_dtype)
u_t = beta_1 * (1.0 - 0.5 * (tf.pow(decay, local_step)))
u_t_1 = beta_1 * (1.0 - 0.5 * (tf.pow(decay, next_step)))
def get_cached_u_product():
return self._u_product
def compute_new_u_product():
u_product_t = self._u_product * u_t
self._u_product.assign(u_product_t)
self._u_product_counter += 1
return u_product_t
u_product_t = tf.cond(
self._u_product_counter == (self.iterations + 2),
true_fn=get_cached_u_product,
false_fn=compute_new_u_product,
)
u_product_t_1 = u_product_t * u_t_1
beta_2_power = tf.pow(beta_2, local_step)
var_key = self._var_key(variable)
m = self._momentums[self._index_dict[var_key]]
v = self._velocities[self._index_dict[var_key]]
if isinstance(gradient, tf.IndexedSlices):
# Sparse gradients.
m.assign_add(-m * (1 - beta_1))
m.scatter_add(
tf.IndexedSlices(
gradient.values * (1 - beta_1), gradient.indices
)
)
v.assign_add(-v * (1 - beta_2))
v.scatter_add(
tf.IndexedSlices(
tf.square(gradient.values) * (1 - beta_2), gradient.indices
)
)
m_hat = u_t_1 * m / (1 - u_product_t_1) + (1 - u_t) * gradient / (
1 - u_product_t
)
v_hat = v / (1 - beta_2_power)
variable.assign_sub((m_hat * lr) / (tf.sqrt(v_hat) + self.epsilon))
else:
# Dense gradients.
m.assign_add((gradient - m) * (1 - beta_1))
v.assign_add((tf.square(gradient) - v) * (1 - beta_2))
m_hat = u_t_1 * m / (1 - u_product_t_1) + (1 - u_t) * gradient / (
1 - u_product_t
)
v_hat = v / (1 - beta_2_power)
variable.assign_sub((m_hat * lr) / (tf.sqrt(v_hat) + self.epsilon))
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
self._learning_rate
),
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
}
)
return config
Nadam.__doc__ = Nadam.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
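# A minimal usage sketch (illustrative only): gradients are computed with a
# `tf.GradientTape` and applied via `apply_gradients`.
#
#   opt = Nadam(learning_rate=0.001)
#   var = tf.Variable(10.0)
#   with tf.GradientTape() as tape:
#       loss = (var ** 2) / 2.0
#   grads = tape.gradient(loss, [var])
#   opt.apply_gradients(zip(grads, [var]))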
| tf-keras/tf_keras/optimizers/nadam.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/nadam.py",
"repo_id": "tf-keras",
"token_count": 3559
} | 228 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras Premade Linear models."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import losses
from tf_keras.engine import input_layer
from tf_keras.engine import sequential
from tf_keras.engine import training
from tf_keras.feature_column import dense_features_v2
from tf_keras.layers import core
from tf_keras.optimizers.legacy import gradient_descent
from tf_keras.premade_models import linear
from tf_keras.testing_infra import test_combinations
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class LinearModelTest(test_combinations.TestCase):
def test_linear_model_with_single_input(self):
model = linear.LinearModel()
inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 2))
output = 0.3 * inp[:, 0] + 0.2 * inp[:, 1]
model.compile("sgd", "mse", [])
model.fit(inp, output, epochs=5)
self.assertTrue(model.built)
def test_linear_model_with_list_input(self):
model = linear.LinearModel()
input_a = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
input_b = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
output = 0.3 * input_a + 0.2 * input_b
model.compile("sgd", "mse", [])
model.fit([input_a, input_b], output, epochs=5)
def test_linear_model_with_mismatched_dict_inputs(self):
model = linear.LinearModel()
input_a = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
input_b = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
output = 0.3 * input_a + 0.2 * input_b
model.compile("sgd", "mse", [])
model.build(
{"a": tf.TensorShape([None, 1]), "b": tf.TensorShape([None, 1])}
)
with self.assertRaisesRegex(ValueError, "Missing keys"):
model.fit({"c": input_a, "b": input_b}, output, epochs=5)
def test_linear_model_with_dict_input(self):
model = linear.LinearModel()
input_a = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
input_b = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
output = 0.3 * input_a + 0.2 * input_b
model.compile("sgd", "mse", [])
model.fit({"a": input_a, "b": input_b}, output, epochs=5)
def test_linear_model_as_layer(self):
input_a = input_layer.Input(shape=(1,), name="a")
output_a = linear.LinearModel()(input_a)
input_b = input_layer.Input(shape=(1,), name="b")
output_b = core.Dense(units=1)(input_b)
output = output_a + output_b
model = training.Model(inputs=[input_a, input_b], outputs=[output])
input_a_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
input_b_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 1))
output_np = 0.3 * input_a_np + 0.2 * input_b_np
model.compile("sgd", "mse", [])
model.fit([input_a_np, input_b_np], output_np, epochs=5)
def test_linear_model_with_sparse_input(self):
indices = tf.constant([[0, 0], [0, 2], [1, 0], [1, 1]], dtype=tf.int64)
values = tf.constant([0.4, 0.6, 0.8, 0.5])
shape = tf.constant([2, 3], dtype=tf.int64)
model = linear.LinearModel()
inp = tf.SparseTensor(indices, values, shape)
output = model(inp)
self.evaluate(tf.compat.v1.global_variables_initializer())
if tf.executing_eagerly():
weights = model.get_weights()
weights[0] = np.ones((3, 1))
model.set_weights(weights)
output = model(inp)
self.assertAllClose([[1.0], [1.3]], self.evaluate(output))
def test_linear_model_with_sparse_input_and_custom_training(self):
batch_size = 64
indices = []
values = []
target = np.zeros((batch_size, 1))
for i in range(64):
rand_int = np.random.randint(3)
if rand_int == 0:
indices.append((i, 0))
val = np.random.uniform(low=-5.0, high=5.0)
values.append(val)
target[i] = 0.3 * val
elif rand_int == 1:
indices.append((i, 1))
val = np.random.uniform(low=-5.0, high=5.0)
values.append(val)
target[i] = 0.2 * val
else:
indices.append((i, 0))
indices.append((i, 1))
val_1 = np.random.uniform(low=-5.0, high=5.0)
val_2 = np.random.uniform(low=-5.0, high=5.0)
values.append(val_1)
values.append(val_2)
target[i] = 0.3 * val_1 + 0.2 * val_2
indices = np.asarray(indices)
values = np.asarray(values)
shape = tf.constant([batch_size, 2], dtype=tf.int64)
inp = tf.SparseTensor(indices, values, shape)
model = linear.LinearModel(use_bias=False)
opt = gradient_descent.SGD()
for _ in range(20):
with tf.GradientTape() as t:
output = model(inp)
loss = backend.mean(losses.mean_squared_error(target, output))
grads = t.gradient(loss, model.trainable_variables)
grads_and_vars = zip(grads, model.trainable_variables)
opt.apply_gradients(grads_and_vars)
    # This test is an example of a regression on categorical inputs, i.e.,
    # the output is 0.4, 0.6, 0.9 when the input is 'alpha', 'beta', 'gamma',
    # respectively.
def test_linear_model_with_feature_column(self):
vocab_list = ["alpha", "beta", "gamma"]
vocab_val = [0.4, 0.6, 0.9]
data = np.random.choice(vocab_list, size=256)
y = np.zeros_like(data, dtype=np.float32)
for vocab, val in zip(vocab_list, vocab_val):
indices = np.where(data == vocab)
y[indices] = val + np.random.uniform(
low=-0.01, high=0.01, size=indices[0].shape
)
cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
key="symbol", vocabulary_list=vocab_list
)
ind_column = tf.feature_column.indicator_column(cat_column)
dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
linear_model = linear.LinearModel(
use_bias=False, kernel_initializer="zeros"
)
combined = sequential.Sequential([dense_feature_layer, linear_model])
opt = gradient_descent.SGD(learning_rate=0.1)
combined.compile(opt, "mse", [])
combined.fit(x={"symbol": data}, y=y, batch_size=32, epochs=10)
self.assertAllClose(
[[0.4], [0.6], [0.9]],
combined.layers[1].dense_layers[0].kernel.numpy(),
atol=0.01,
)
def test_config(self):
linear_model = linear.LinearModel(units=3, use_bias=True)
config = linear_model.get_config()
cloned_linear_model = linear.LinearModel.from_config(config)
self.assertEqual(linear_model.units, cloned_linear_model.units)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/premade_models/linear_test.py/0 | {
"file_path": "tf-keras/tf_keras/premade_models/linear_test.py",
"repo_id": "tf-keras",
"token_count": 3597
} | 229 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras regularizers."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import regularizers
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import np_utils
DATA_DIM = 5
NUM_CLASSES = 2
class KerasRegularizersTest(test_combinations.TestCase, parameterized.TestCase):
def create_model(
self,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
):
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
input_shape=(DATA_DIM,),
)
)
return model
def regularizer_fn_tensor(x):
return tf.constant(0.0)
def regularizer_fn_scalar(x):
return 0.0
class RegularizerTensor(regularizers.Regularizer):
def __call__(self, x):
return tf.constant(0.0)
class RegularizerScalar(regularizers.Regularizer):
def __call__(self, x):
return 0.0
def get_data(self):
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(DATA_DIM,),
num_classes=NUM_CLASSES,
)
y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
return (x_train, y_train), (x_test, y_test)
def create_multi_input_model_from(self, layer1, layer2):
input_1 = keras.layers.Input(shape=(DATA_DIM,))
input_2 = keras.layers.Input(shape=(DATA_DIM,))
out1 = layer1(input_1)
out2 = layer2(input_2)
out = keras.layers.Average()([out1, out2])
model = keras.models.Model([input_1, input_2], out)
model.add_loss(keras.backend.mean(out2))
model.add_loss(tf.reduce_sum(input_1))
return model
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
("l1", regularizers.l1()),
("l2", regularizers.l2()),
("l1_l2", regularizers.l1_l2()),
("l2_zero", keras.regularizers.l2(0.0)),
("function_tensor", regularizer_fn_tensor),
("function_scalar", regularizer_fn_scalar),
("lambda_tensor", lambda x: tf.constant(0.0)),
("lambda_scalar", lambda x: 0.0),
("regularizer_base_class", regularizers.Regularizer()),
("regularizer_custom_class_tensor", RegularizerTensor()),
("regularizer_custom_class_scalar", RegularizerScalar()),
]
)
def test_kernel_regularization(self, regularizer):
(x_train, y_train), _ = self.get_data()
model = self.create_model(kernel_regularizer=regularizer)
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.assertEqual(len(model.losses), 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
("l1", regularizers.l1()),
("l2", regularizers.l2()),
("l1_l2", regularizers.l1_l2()),
("l2_zero", keras.regularizers.l2(0.0)),
("function_tensor", regularizer_fn_tensor),
("function_scalar", regularizer_fn_scalar),
("lambda_tensor", lambda x: tf.constant(0.0)),
("lambda_scalar", lambda x: 0.0),
("regularizer_base_class", regularizers.Regularizer()),
("regularizer_custom_class_tensor", RegularizerTensor()),
("regularizer_custom_class_scalar", RegularizerScalar()),
]
)
def test_bias_regularization(self, regularizer):
(x_train, y_train), _ = self.get_data()
model = self.create_model(bias_regularizer=regularizer)
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.assertEqual(len(model.losses), 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
("l1", regularizers.l1()),
("l2", regularizers.l2()),
("l1_l2", regularizers.l1_l2()),
("l2_zero", keras.regularizers.l2(0.0)),
("function_tensor", regularizer_fn_tensor),
("function_scalar", regularizer_fn_scalar),
("lambda_tensor", lambda x: tf.constant(0.0)),
("lambda_scalar", lambda x: 0.0),
("regularizer_base_class", regularizers.Regularizer()),
("regularizer_custom_class_tensor", RegularizerTensor()),
("regularizer_custom_class_scalar", RegularizerScalar()),
]
)
def test_activity_regularization(self, regularizer):
(x_train, y_train), _ = self.get_data()
model = self.create_model(activity_regularizer=regularizer)
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
        self.assertEqual(len(model.losses), 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@test_combinations.run_all_keras_modes
@test_combinations.run_with_all_model_types
def test_zero_regularization(self):
# Verifies that training with zero regularization works.
x, y = np.ones((10, 10)), np.ones((10, 3))
model = test_utils.get_model_from_layers(
[
keras.layers.Dense(
3, kernel_regularizer=keras.regularizers.l2(0)
)
],
input_shape=(10,),
)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
model.fit(x, y, batch_size=5, epochs=1)
def test_custom_regularizer_saving(self):
def my_regularizer(weights):
return tf.reduce_sum(tf.abs(weights))
inputs = keras.Input((10,))
outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(
inputs
)
model = keras.Model(inputs, outputs)
model2 = model.from_config(
model.get_config(),
custom_objects={"my_regularizer": my_regularizer},
)
self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
("l1", regularizers.l1()),
("l2", regularizers.l2()),
("l1_l2", regularizers.l1_l2()),
]
)
def test_regularization_shared_layer(self, regularizer):
dense_layer = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer,
)
model = self.create_multi_input_model_from(dense_layer, dense_layer)
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.assertLen(model.losses, 5)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
("l1", regularizers.l1()),
("l2", regularizers.l2()),
("l1_l2", regularizers.l1_l2()),
]
)
def test_regularization_shared_model(self, regularizer):
dense_layer = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer,
)
input_tensor = keras.layers.Input(shape=(DATA_DIM,))
dummy_model = keras.models.Model(
input_tensor, dense_layer(input_tensor)
)
model = self.create_multi_input_model_from(dummy_model, dummy_model)
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.assertLen(model.losses, 6)
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
[
("l1", regularizers.l1()),
("l2", regularizers.l2()),
("l1_l2", regularizers.l1_l2()),
]
)
def test_regularization_shared_layer_in_different_models(self, regularizer):
shared_dense = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer,
)
models = []
for _ in range(2):
input_tensor = keras.layers.Input(shape=(DATA_DIM,))
unshared_dense = keras.layers.Dense(
NUM_CLASSES, kernel_regularizer=regularizer
)
out = unshared_dense(shared_dense(input_tensor))
models.append(keras.models.Model(input_tensor, out))
model = self.create_multi_input_model_from(
layer1=models[0], layer2=models[1]
)
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
# We expect to see 9 losses on the model:
# - 2 from the 2 add_loss calls on the outer model.
# - 3 from the weight regularizers on the shared_dense layer,
# unshared_dense in inner model 1, unshared_dense in inner model 2.
# - 4 from activity regularizers on the shared_dense layer.
self.assertLen(model.losses, 9)
def test_deserialization_error(self):
with self.assertRaisesRegex(
ValueError, "Could not interpret regularizer"
):
keras.regularizers.get(0)
@parameterized.named_parameters(
[
("l1", regularizers.l1(l1=None), 0.01),
("l2", regularizers.l2(l2=None), 0.01),
("l1_l2", regularizers.l1_l2(l1=None, l2=None), 0.0),
]
)
def test_default_value_when_init_with_none(
self, regularizer, expected_value
):
expected_value = np.asarray(expected_value)
if hasattr(regularizer, "l1"):
self.assertAllClose(regularizer.l1, expected_value)
if hasattr(regularizer, "l2"):
self.assertAllClose(regularizer.l2, expected_value)
@test_utils.run_v2_only
def test_orthogonal_regularizer(self):
# Test correctness.
factor = 0.1
reg_rows = regularizers.OrthogonalRegularizer(
factor=factor, mode="rows"
)
reg_cols = regularizers.OrthogonalRegularizer(
factor=factor, mode="columns"
)
# Test with square matrix
inputs = tf.constant(
[[1, 1, 1, 1], [2, 0, 0, 0], [0, 0, 3, 1]], dtype="float32"
)
normalized_rows = tf.math.l2_normalize(inputs, axis=1)
normalized_cols = tf.math.l2_normalize(inputs, axis=0)
rows_pairs = [
tf.reduce_sum(normalized_rows[0] * normalized_rows[1]),
tf.reduce_sum(normalized_rows[0] * normalized_rows[2]),
tf.reduce_sum(normalized_rows[1] * normalized_rows[2]),
]
col_pairs = [
tf.reduce_sum(normalized_cols[:, 0] * normalized_cols[:, 1]),
tf.reduce_sum(normalized_cols[:, 0] * normalized_cols[:, 2]),
tf.reduce_sum(normalized_cols[:, 0] * normalized_cols[:, 3]),
tf.reduce_sum(normalized_cols[:, 1] * normalized_cols[:, 2]),
tf.reduce_sum(normalized_cols[:, 1] * normalized_cols[:, 3]),
tf.reduce_sum(normalized_cols[:, 2] * normalized_cols[:, 3]),
]
num_row_pairs = 3
num_col_pairs = 6
# Expected: factor * sum(pairwise_dot_products_of_rows) / num_row_pairs
self.assertAllClose(
reg_rows(inputs), factor * sum(rows_pairs) / num_row_pairs
)
# Expected: factor * sum(pairwise_dot_products_of_columns) /
# num_col_pairs
self.assertAllClose(
reg_cols(inputs), factor * sum(col_pairs) / num_col_pairs
)
# Test incorrect usage.
with self.assertRaisesRegex(ValueError, "must have rank 2"):
reg_rows(tf.constant([1, 1], dtype="float32"))
# Test serialization
self.assertDictEqual(
reg_cols.get_config(), {"factor": factor, "mode": "columns"}
)
# Test usage in model.
model_inputs = keras.Input((3,))
model_outputs = keras.layers.Dense(4, kernel_regularizer=reg_rows)(
model_inputs
)
model = keras.Model(model_inputs, model_outputs)
model.compile(optimizer="rmsprop", loss="mse")
model.fit(
np.random.random((16, 3)), np.random.random((16, 4)), epochs=1
)
# Test serialization and deserialiation as part of model.
inputs = tf.constant([[1, 1, 1], [2, 0, 0], [0, 0, 3]], dtype="float32")
outputs = model(inputs)
config = model.get_config()
weights = model.get_weights()
model = keras.Model.from_config(config)
model.set_weights(weights)
self.assertAllClose(model(inputs), outputs, atol=1e-5)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/regularizers_test.py/0 | {
"file_path": "tf-keras/tf_keras/regularizers_test.py",
"repo_id": "tf-keras",
"token_count": 6897
} | 230 |
"""A binary that creates a serialized SavedModel from a keras model.
This is used in tests to ensure that model serialization is deterministic across
different processes.
"""
import tensorflow.compat.v2 as tf
from absl import app
from absl import flags
from tf_keras import regularizers
from tf_keras.testing_infra import test_utils
flags.DEFINE_string("output_path", "", "The path to write the SavedModel at.")
FLAGS = flags.FLAGS
def main(_) -> None:
with test_utils.model_type_scope("functional"):
model = test_utils.get_small_mlp(1, 4, input_dim=3)
model.layers[-1].activity_regularizer = regularizers.get("l2")
model.activity_regularizer = regularizers.get("l2")
model.compile(loss="mse", optimizer="rmsprop")
def callable_loss():
return tf.reduce_sum(model.weights[0])
model.add_loss(callable_loss)
print(f"_____Writing saved model to: {FLAGS.output_path}")
model.save(FLAGS.output_path)
if __name__ == "__main__":
app.run(main)
| tf-keras/tf_keras/saving/legacy/saved_model/create_test_saved_model.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/create_test_saved_model.py",
"repo_id": "tf-keras",
"token_count": 389
} | 231 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils related to keras model saving."""
import copy
import os
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras import backend
from tf_keras import losses
from tf_keras import optimizers
from tf_keras.engine import base_layer_utils
from tf_keras.optimizers import optimizer_v1
from tf_keras.saving.legacy import serialization
from tf_keras.utils import version_utils
from tf_keras.utils.io_utils import ask_to_proceed_with_overwrite
# isort: off
from tensorflow.python.platform import tf_logging as logging
def extract_model_metrics(model):
"""Convert metrics from a TF-Keras model `compile` API to dictionary.
This is used for converting TF-Keras models to Estimators and SavedModels.
Args:
model: A `tf.keras.Model` object.
Returns:
Dictionary mapping metric names to metric instances. May return `None` if
the model does not contain any metrics.
"""
if getattr(model, "_compile_metrics", None):
# TODO(psv/kathywu): use this implementation in model to estimator flow.
# We are not using model.metrics here because we want to exclude the
# metrics added using `add_metric` API.
return {m.name: m for m in model._compile_metric_functions}
return None
def model_call_inputs(model, keep_original_batch_size=False):
"""Inspect model to get its input signature.
The model's input signature is a list with a single (possibly-nested)
object. This is due to the Keras-enforced restriction that tensor inputs
must be passed in as the first argument.
For example, a model with input {'feature1': <Tensor>, 'feature2': <Tensor>}
will have input signature:
[{'feature1': TensorSpec, 'feature2': TensorSpec}]
Args:
model: TF-Keras Model object.
keep_original_batch_size: A boolean indicating whether we want to keep
using the original batch size or set it to None. Default is `False`,
which means that the batch dim of the returned input signature will
always be set to `None`.
Returns:
A tuple containing `(args, kwargs)` TensorSpecs of the model call function
inputs.
`kwargs` does not contain the `training` argument.
"""
input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)
if input_specs is None:
return None, None
input_specs = _enforce_names_consistency(input_specs)
return input_specs
def raise_model_input_error(model):
if isinstance(model, keras.models.Sequential):
raise ValueError(
f"Model {model} cannot be saved because the input shape is not "
"available. Please specify an input shape either by calling "
"`build(input_shape)` directly, or by calling the model on actual "
"data using `Model()`, `Model.fit()`, or `Model.predict()`."
)
# If the model is not a `Sequential`, it is intended to be a subclassed
# model.
raise ValueError(
f"Model {model} cannot be saved either because the input shape is not "
"available or because the forward pass of the model is not defined."
"To define a forward pass, please override `Model.call()`. To specify "
"an input shape, either call `build(input_shape)` directly, or call "
"the model on actual data using `Model()`, `Model.fit()`, or "
"`Model.predict()`. If you have a custom training step, please make "
"sure to invoke the forward pass in train step through "
"`Model.__call__`, i.e. `model(inputs)`, as opposed to `model.call()`."
)
def trace_model_call(model, input_signature=None):
"""Trace the model call to create a tf.function for exporting a TF-Keras
model.
Args:
model: A TF-Keras model.
input_signature: optional, a list of tf.TensorSpec objects specifying the
inputs to the model.
Returns:
A tf.function wrapping the model's call function with input signatures
set.
Raises:
ValueError: if input signature cannot be inferred from the model.
"""
if input_signature is None:
if isinstance(model.call, tf.__internal__.function.Function):
input_signature = model.call.input_signature
if input_signature:
model_args = input_signature
model_kwargs = {}
else:
model_args, model_kwargs = model_call_inputs(model)
if model_args is None:
raise_model_input_error(model)
@tf.function
def _wrapped_model(*args, **kwargs):
"""A concrete tf.function that wraps the model's call function."""
(args, kwargs,) = model._call_spec.set_arg_value(
"training", False, args, kwargs, inputs_in_args=True
)
with base_layer_utils.call_context().enter(
model, inputs=None, build_graph=False, training=False, saving=True
):
outputs = model(*args, **kwargs)
# Outputs always has to be a flat dict.
output_names = model.output_names # Functional Model.
if output_names is None: # Subclassed Model.
from tf_keras.engine import compile_utils
output_names = compile_utils.create_pseudo_output_names(outputs)
outputs = tf.nest.flatten(outputs)
return {name: output for name, output in zip(output_names, outputs)}
return _wrapped_model.get_concrete_function(*model_args, **model_kwargs)
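# Illustrative sketch only (hypothetical model): how `trace_model_call` can be
# used to obtain a concrete function with a fixed input signature. Not called
# anywhere in the library.
def _example_trace_model_call():
    model = keras.Sequential([keras.layers.Dense(2, input_shape=(3,))])
    input_signature = [tf.TensorSpec(shape=(None, 3), dtype=tf.float32)]
    return trace_model_call(model, input_signature)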
def model_metadata(model, include_optimizer=True, require_config=True):
"""Returns a dictionary containing the model metadata."""
from tf_keras import __version__ as keras_version
from tf_keras.optimizers.legacy import optimizer_v2
model_config = {"class_name": model.__class__.__name__}
try:
model_config["config"] = model.get_config()
except NotImplementedError as e:
if require_config:
raise e
metadata = dict(
keras_version=str(keras_version),
backend=backend.backend(),
model_config=model_config,
)
if model.optimizer and include_optimizer:
if isinstance(model.optimizer, optimizer_v1.TFOptimizer):
logging.warning(
"TensorFlow optimizers do not "
"make it possible to access "
"optimizer attributes or optimizer state "
"after instantiation. "
"As a result, we cannot save the optimizer "
"as part of the model save file. "
"You will have to compile your model again after loading it. "
"Prefer using a TF-Keras optimizer instead "
"(see keras.io/optimizers)."
)
elif model._compile_was_called:
training_config = model._get_compile_args(user_metrics=False)
training_config.pop("optimizer", None) # Handled separately.
metadata["training_config"] = _serialize_nested_config(
training_config
)
if isinstance(model.optimizer, optimizer_v2.RestoredOptimizer):
raise NotImplementedError(
"Optimizers loaded from a SavedModel cannot be saved. "
"If you are calling `model.save` or "
"`tf.keras.models.save_model`, "
"please set the `include_optimizer` option to `False`. For "
"`tf.saved_model.save`, "
"delete the optimizer from the model."
)
else:
optimizer_config = {
"class_name": keras.utils.get_registered_name(
model.optimizer.__class__
),
"config": model.optimizer.get_config(),
}
metadata["training_config"]["optimizer_config"] = optimizer_config
return metadata
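# Illustrative sketch only (hypothetical model): the top-level keys produced
# by `model_metadata`. `training_config` appears only because the model below
# is compiled. Not called anywhere in the library.
def _example_model_metadata_keys():
    model = keras.Sequential([keras.layers.Dense(1, input_shape=(2,))])
    model.compile(optimizer="sgd", loss="mse")
    # Expected: ["backend", "keras_version", "model_config", "training_config"]
    return sorted(model_metadata(model).keys())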
def should_overwrite(filepath, overwrite):
"""Returns whether the filepath should be overwritten."""
# If file exists and should not be overwritten.
if not overwrite and os.path.isfile(filepath):
return ask_to_proceed_with_overwrite(filepath)
return True
def compile_args_from_training_config(training_config, custom_objects=None):
"""Return model.compile arguments from training config."""
if custom_objects is None:
custom_objects = {}
with keras.utils.CustomObjectScope(custom_objects):
optimizer_config = training_config["optimizer_config"]
optimizer = optimizers.deserialize(optimizer_config)
# Recover losses.
loss = None
loss_config = training_config.get("loss", None)
if loss_config is not None:
loss = _deserialize_nested_config(losses.deserialize, loss_config)
# Recover metrics.
metrics = None
metrics_config = training_config.get("metrics", None)
if metrics_config is not None:
metrics = _deserialize_nested_config(
_deserialize_metric, metrics_config
)
# Recover weighted metrics.
weighted_metrics = None
weighted_metrics_config = training_config.get("weighted_metrics", None)
if weighted_metrics_config is not None:
weighted_metrics = _deserialize_nested_config(
_deserialize_metric, weighted_metrics_config
)
sample_weight_mode = (
training_config["sample_weight_mode"]
if hasattr(training_config, "sample_weight_mode")
else None
)
loss_weights = training_config["loss_weights"]
return dict(
optimizer=optimizer,
loss=loss,
metrics=metrics,
weighted_metrics=weighted_metrics,
loss_weights=loss_weights,
sample_weight_mode=sample_weight_mode,
)
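# Illustrative sketch only: a minimal, hypothetical `training_config` dict of
# the shape produced by `model_metadata`, round-tripped back into
# `Model.compile` keyword arguments. Not called anywhere in the library.
def _example_compile_args_roundtrip():
    training_config = {
        "optimizer_config": {"class_name": "SGD", "config": {}},
        "loss": "mse",
        "metrics": ["mae"],
        "weighted_metrics": None,
        "sample_weight_mode": None,
        "loss_weights": None,
    }
    compile_args = compile_args_from_training_config(training_config)
    # `compile_args` can now be passed as `model.compile(**compile_args)`.
    return compile_args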
def _deserialize_nested_config(deserialize_fn, config):
"""Deserializes arbitrary TF-Keras `config` using `deserialize_fn`."""
def _is_single_object(obj):
if isinstance(obj, dict) and "class_name" in obj:
return True # Serialized TF-Keras object.
if isinstance(obj, str):
return True # Serialized function or string.
return False
if config is None:
return None
if _is_single_object(config):
return deserialize_fn(config)
elif isinstance(config, dict):
return {
k: _deserialize_nested_config(deserialize_fn, v)
for k, v in config.items()
}
elif isinstance(config, (tuple, list)):
return [
_deserialize_nested_config(deserialize_fn, obj) for obj in config
]
raise ValueError(
"Saved configuration not understood. Configuration should be a "
f"dictionary, string, tuple or list. Received: config={config}."
)
def _serialize_nested_config(config):
"""Serialized a nested structure of TF-Keras objects."""
def _serialize_fn(obj):
if callable(obj):
return serialization.serialize_keras_object(obj)
return obj
return tf.nest.map_structure(_serialize_fn, config)
def _deserialize_metric(metric_config):
"""Deserialize metrics, leaving special strings untouched."""
from tf_keras import metrics as metrics_module
if metric_config in ["accuracy", "acc", "crossentropy", "ce"]:
# Do not deserialize accuracy and cross-entropy strings as we have
# special case handling for these in compile, based on model output
# shape.
return metric_config
return metrics_module.deserialize(metric_config)
def _enforce_names_consistency(specs):
"""Enforces that either all specs have names or none do."""
def _has_name(spec):
return spec is None or (hasattr(spec, "name") and spec.name is not None)
def _clear_name(spec):
spec = copy.deepcopy(spec)
if hasattr(spec, "name"):
spec._name = None
return spec
flat_specs = tf.nest.flatten(specs)
name_inconsistency = any(_has_name(s) for s in flat_specs) and not all(
_has_name(s) for s in flat_specs
)
if name_inconsistency:
specs = tf.nest.map_structure(_clear_name, specs)
return specs
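# Illustrative sketch only (hypothetical specs): one spec is named and the
# other is not, so `_enforce_names_consistency` clears the names from both.
# Not called anywhere in the library.
def _example_enforce_names_consistency():
    specs = [
        tf.TensorSpec(shape=(None, 3), dtype=tf.float32, name="feature"),
        tf.TensorSpec(shape=(None, 2), dtype=tf.float32),
    ]
    return _enforce_names_consistency(specs)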
def try_build_compiled_arguments(model):
if (
not version_utils.is_v1_layer_or_model(model)
and model.outputs is not None
):
try:
if not model.compiled_loss.built:
model.compiled_loss.build(model.outputs)
if not model.compiled_metrics.built:
model.compiled_metrics.build(model.outputs, model.outputs)
except: # noqa: E722
logging.warning(
"Compiled the loaded model, but the compiled metrics have "
"yet to be built. `model.compile_metrics` will be empty "
"until you train or evaluate the model."
)
def is_hdf5_filepath(filepath):
return (
filepath.endswith(".h5")
or filepath.endswith(".keras")
or filepath.endswith(".hdf5")
)
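# Illustrative sketch only: `is_hdf5_filepath` is a purely extension-based
# check, so a TF SavedModel directory path is not treated as HDF5. The paths
# below are hypothetical. Not called anywhere in the library.
def _example_is_hdf5_filepath():
    assert is_hdf5_filepath("weights.h5")
    assert is_hdf5_filepath("model.keras")
    assert not is_hdf5_filepath("/tmp/saved_model_dir")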
| tf-keras/tf_keras/saving/legacy/saving_utils.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saving_utils.py",
"repo_id": "tf-keras",
"token_count": 5500
} | 232 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing TF-Keras."""
import collections
import functools
import itertools
import unittest
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.testing_infra import test_utils
try:
import h5py
except ImportError:
h5py = None
KERAS_MODEL_TYPES = ["functional", "subclass", "sequential"]
class TestCase(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
keras.backend.clear_session()
super().tearDown()
def run_with_all_saved_model_formats(test_or_class=None, exclude_formats=None):
"""Execute the decorated test with all TF-Keras saved model formats).
This decorator is intended to be applied either to individual test methods
in a `test_combinations.TestCase` class, or directly to a test class that
extends it. Doing so will cause the contents of the individual test method
(or all test methods in the class) to be executed multiple times - once for
each TF-Keras saved model format.
The TF-Keras saved model formats include:
1. HDF5: 'h5'
2. SavedModel: 'tf'
Note: if stacking this decorator with absl.testing's parameterized
decorators, those should be at the bottom of the stack.
    Various methods in `testing_utils` that build file paths for saved models
    will auto-generate a path for the currently active saved model format.
    This allows unittests to confirm the equivalence between the TF-Keras
    saved model formats.
For example, consider the following unittest:
```python
class MyTests(test_utils.KerasTestCase):
@test_utils.run_with_all_saved_model_formats
def test_foo(self):
save_format = test_utils.get_save_format()
saved_model_dir = '/tmp/saved_model/'
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
if __name__ == "__main__":
tf.test.main()
```
    This test tries to save the model in each of the supported formats:
    'h5', 'tf', and 'tf_no_traces'.
We can also annotate the whole class if we want this to apply to all tests
in the class:
```python
@test_utils.run_with_all_saved_model_formats
class MyTests(test_utils.KerasTestCase):
def test_foo(self):
save_format = test_utils.get_save_format()
saved_model_dir = '/tmp/saved_model/'
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = tf.keras.models.load_model(saved_model_dir)
if __name__ == "__main__":
tf.test.main()
```
Args:
test_or_class: test method or class to be annotated. If None,
this method returns a decorator that can be applied to a test method or
test class. If it is not None this returns the decorator applied to the
test or class.
exclude_formats: A collection of TF-Keras saved model formats to not run.
(May also be a single format not wrapped in a collection).
Defaults to `None`.
Returns:
Returns a decorator that will run the decorated test method multiple
times: once for each desired TF-Keras saved model format.
Raises:
ImportError: If abseil parameterized is not installed or not included as
a target dependency.
"""
    # Exclude h5 save format if H5py isn't available.
    exclude_formats = exclude_formats or []
    if h5py is None:
        exclude_formats.append("h5")
saved_model_formats = ["h5", "tf", "tf_no_traces"]
params = [
(f"_{saved_format}", saved_format)
for saved_format in saved_model_formats
if saved_format not in tf.nest.flatten(exclude_formats)
]
def single_method_decorator(f):
"""Decorator that constructs the test cases."""
# Use named_parameters so it can be individually run from the command
# line
@parameterized.named_parameters(*params)
@functools.wraps(f)
def decorated(self, saved_format, *args, **kwargs):
"""A run of a single test case w/ the specified model type."""
if saved_format == "h5":
_test_h5_saved_model_format(f, self, *args, **kwargs)
elif saved_format == "tf":
_test_tf_saved_model_format(f, self, *args, **kwargs)
elif saved_format == "tf_no_traces":
_test_tf_saved_model_format_no_traces(f, self, *args, **kwargs)
else:
raise ValueError(f"Unknown model type: {saved_format}")
return decorated
return _test_or_class_decorator(test_or_class, single_method_decorator)
def _test_h5_saved_model_format(f, test_or_class, *args, **kwargs):
with test_utils.saved_model_format_scope("h5"):
f(test_or_class, *args, **kwargs)
def _test_tf_saved_model_format(f, test_or_class, *args, **kwargs):
with test_utils.saved_model_format_scope("tf"):
f(test_or_class, *args, **kwargs)
def _test_tf_saved_model_format_no_traces(f, test_or_class, *args, **kwargs):
with test_utils.saved_model_format_scope("tf", save_traces=False):
f(test_or_class, *args, **kwargs)
def run_with_all_weight_formats(test_or_class=None, exclude_formats=None):
"""Runs all tests with the supported formats for saving weights."""
exclude_formats = exclude_formats or []
exclude_formats.append("tf_no_traces") # Only applies to saving models
return run_with_all_saved_model_formats(test_or_class, exclude_formats)
# TODO(kaftan): Possibly enable 'subclass_custom_build' when tests begin to pass
# it. Or perhaps make 'subclass' always use a custom build method.
def run_with_all_model_types(test_or_class=None, exclude_models=None):
"""Execute the decorated test with all TF-Keras model types.
This decorator is intended to be applied either to individual test methods
in a `test_combinations.TestCase` class, or directly to a test class that
extends it. Doing so will cause the contents of the individual test method
(or all test methods in the class) to be executed multiple times - once for
each TF-Keras model type.
The TF-Keras model types are: ['functional', 'subclass', 'sequential']
Note: if stacking this decorator with absl.testing's parameterized
decorators, those should be at the bottom of the stack.
Various methods in `testing_utils` to get models will auto-generate a model
of the currently active TF-Keras model type. This allows unittests to
confirm the equivalence between different TF-Keras models.
For example, consider the following unittest:
```python
class MyTests(test_utils.KerasTestCase):
@test_utils.run_with_all_model_types(
exclude_models = ['sequential'])
def test_foo(self):
model = test_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
if __name__ == "__main__":
tf.test.main()
```
This test tries building a small mlp as both a functional model and as a
subclass model.
We can also annotate the whole class if we want this to apply to all tests
in the class:
```python
@test_utils.run_with_all_model_types(exclude_models = ['sequential'])
class MyTests(test_utils.KerasTestCase):
def test_foo(self):
model = test_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
if __name__ == "__main__":
tf.test.main()
```
Args:
test_or_class: test method or class to be annotated. If None,
this method returns a decorator that can be applied to a test method or
test class. If it is not None this returns the decorator applied to the
test or class.
exclude_models: A collection of TF-Keras model types to not run.
(May also be a single model type not wrapped in a collection).
Defaults to `None`.
Returns:
Returns a decorator that will run the decorated test method multiple
times: once for each desired TF-Keras model type.
Raises:
ImportError: If abseil parameterized is not installed or not included as
a target dependency.
"""
model_types = ["functional", "subclass", "sequential"]
params = [
(f"_{model}", model)
for model in model_types
if model not in tf.nest.flatten(exclude_models)
]
def single_method_decorator(f):
"""Decorator that constructs the test cases."""
# Use named_parameters so it can be individually run from the command
# line
@parameterized.named_parameters(*params)
@functools.wraps(f)
def decorated(self, model_type, *args, **kwargs):
"""A run of a single test case w/ the specified model type."""
if model_type == "functional":
_test_functional_model_type(f, self, *args, **kwargs)
elif model_type == "subclass":
_test_subclass_model_type(f, self, *args, **kwargs)
elif model_type == "sequential":
_test_sequential_model_type(f, self, *args, **kwargs)
else:
raise ValueError(f"Unknown model type: {model_type}")
return decorated
return _test_or_class_decorator(test_or_class, single_method_decorator)
def _test_functional_model_type(f, test_or_class, *args, **kwargs):
with test_utils.model_type_scope("functional"):
f(test_or_class, *args, **kwargs)
def _test_subclass_model_type(f, test_or_class, *args, **kwargs):
with test_utils.model_type_scope("subclass"):
f(test_or_class, *args, **kwargs)
def _test_sequential_model_type(f, test_or_class, *args, **kwargs):
with test_utils.model_type_scope("sequential"):
f(test_or_class, *args, **kwargs)
def run_all_keras_modes(
test_or_class=None,
config=None,
always_skip_v1=False,
always_skip_eager=False,
**kwargs,
):
"""Execute the decorated test with all keras execution modes.
This decorator is intended to be applied either to individual test methods
in a `test_combinations.TestCase` class, or directly to a test class that
extends it. Doing so will cause the contents of the individual test method
(or all test methods in the class) to be executed multiple times - once
executing in legacy graph mode, once running eagerly and with
`should_run_eagerly` returning True, and once running eagerly with
`should_run_eagerly` returning False.
If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and
the test will only run twice.
Note: if stacking this decorator with absl.testing's parameterized
decorators, those should be at the bottom of the stack.
For example, consider the following unittest:
```python
class MyTests(test_utils.KerasTestCase):
@test_utils.run_all_keras_modes
def test_foo(self):
model = test_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer, loss, metrics=metrics,
run_eagerly=test_utils.should_run_eagerly())
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
if __name__ == "__main__":
tf.test.main()
```
This test will try compiling & fitting the small functional mlp using all
three TF-Keras execution modes.
Args:
test_or_class: test method or class to be annotated. If None,
this method returns a decorator that can be applied to a test method or
test class. If it is not None this returns the decorator applied to the
test or class.
config: An optional config_pb2.ConfigProto to use to configure the
session when executing graphs.
always_skip_v1: If True, does not try running the legacy graph mode even
when Tensorflow v2 behavior is not enabled.
always_skip_eager: If True, does not execute the decorated test
with eager execution modes.
**kwargs: Additional kwargs for configuring tests for
in-progress TF-Keras behaviors/ refactorings that we haven't fully
rolled out yet
Returns:
Returns a decorator that will run the decorated test method multiple
times.
Raises:
ImportError: If abseil parameterized is not installed or not included as
a target dependency.
"""
if kwargs:
raise ValueError(f"Unrecognized keyword args: {kwargs}")
params = [("_v2_function", "v2_function")]
if not always_skip_eager:
params.append(("_v2_eager", "v2_eager"))
if not (always_skip_v1 or tf.__internal__.tf2.enabled()):
params.append(("_v1_session", "v1_session"))
def single_method_decorator(f):
"""Decorator that constructs the test cases."""
# Use named_parameters so it can be individually run from the command
# line
@parameterized.named_parameters(*params)
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
"""A run of a single test case w/ specified run mode."""
if run_mode == "v1_session":
_v1_session_test(f, self, config, *args, **kwargs)
elif run_mode == "v2_eager":
_v2_eager_test(f, self, *args, **kwargs)
elif run_mode == "v2_function":
_v2_function_test(f, self, *args, **kwargs)
else:
                raise ValueError(f"Unknown run mode {run_mode}")
return decorated
return _test_or_class_decorator(test_or_class, single_method_decorator)
def _v1_session_test(f, test_or_class, config, *args, **kwargs):
with tf.compat.v1.get_default_graph().as_default():
with test_utils.run_eagerly_scope(False):
with test_or_class.test_session(config=config):
f(test_or_class, *args, **kwargs)
def _v2_eager_test(f, test_or_class, *args, **kwargs):
with tf.__internal__.eager_context.eager_mode():
with test_utils.run_eagerly_scope(True):
f(test_or_class, *args, **kwargs)
def _v2_function_test(f, test_or_class, *args, **kwargs):
with tf.__internal__.eager_context.eager_mode():
with test_utils.run_eagerly_scope(False):
f(test_or_class, *args, **kwargs)
def _test_or_class_decorator(test_or_class, single_method_decorator):
"""Decorate a test or class with a decorator intended for one method.
If the test_or_class is a class:
This will apply the decorator to all test methods in the class.
If the test_or_class is an iterable of already-parameterized test cases:
This will apply the decorator to all the cases, and then flatten the
resulting cross-product of test cases. This allows stacking the Keras
parameterized decorators w/ each other, and to apply them to test methods
that have already been marked with an absl parameterized decorator.
Otherwise, treat the obj as a single method and apply the decorator
directly.
Args:
test_or_class: A test method (that may have already been decorated with a
parameterized decorator, or a test class that extends
test_combinations.TestCase
single_method_decorator:
A parameterized decorator intended for a single test method.
Returns:
The decorated result.
"""
def _decorate_test_or_class(obj):
if isinstance(obj, collections.abc.Iterable):
return itertools.chain.from_iterable(
single_method_decorator(method) for method in obj
)
if isinstance(obj, type):
cls = obj
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
unittest.TestLoader.testMethodPrefix
):
setattr(cls, name, single_method_decorator(value))
cls = type(cls).__new__(
type(cls), cls.__name__, cls.__bases__, cls.__dict__.copy()
)
return cls
return single_method_decorator(obj)
if test_or_class is not None:
return _decorate_test_or_class(test_or_class)
return _decorate_test_or_class
def keras_mode_combinations(mode=None, run_eagerly=None):
"""Returns the default test combinations for tf.keras tests.
Note that if tf2 is enabled, then v1 session test will be skipped.
Args:
mode: List of modes to run the tests. The valid options are 'graph' and
'eager'. If None, uses ['graph', 'eager']. If an empty
list is provided, then the test will run under the context based on
tensorflow's version, e.g., graph for v1 and eager for v2. Defaults to
`None`.
run_eagerly: List of `run_eagerly` value to be run with the tests.
When None, uses [True, False]. Note that for `graph` mode,
run_eagerly value will only be False. Defaults to `None`.
Returns:
A list contains all the combinations to be used to generate test cases.
"""
if mode is None:
mode = (
["eager"] if tf.__internal__.tf2.enabled() else ["graph", "eager"]
)
if run_eagerly is None:
run_eagerly = [True, False]
result = []
if "eager" in mode:
result += tf.__internal__.test.combinations.combine(
mode=["eager"], run_eagerly=run_eagerly
)
if "graph" in mode:
result += tf.__internal__.test.combinations.combine(
mode=["graph"], run_eagerly=[False]
)
return result
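# Illustrative sketch only: with TF2 behavior enabled, the call below yields
# combinations equivalent to
#   [{"mode": "eager", "run_eagerly": True},
#    {"mode": "eager", "run_eagerly": False}]
# Not called anywhere in the framework.
def _example_keras_mode_combinations():
    return keras_mode_combinations(mode=["eager"], run_eagerly=[True, False])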
def keras_model_type_combinations():
return tf.__internal__.test.combinations.combine(
model_type=KERAS_MODEL_TYPES
)
class KerasModeCombination(tf.__internal__.test.combinations.TestCombination):
"""Combination for TF-Keras test mode.
It by default includes v1_session, v2_eager and v2_tf_function.
"""
def context_managers(self, kwargs):
run_eagerly = kwargs.pop("run_eagerly", None)
if run_eagerly is not None:
return [test_utils.run_eagerly_scope(run_eagerly)]
else:
return []
def parameter_modifiers(self):
return [
tf.__internal__.test.combinations.OptionalParameter("run_eagerly")
]
class KerasModelTypeCombination(
tf.__internal__.test.combinations.TestCombination
):
"""Combination for TF-Keras model types when doing model test.
It by default includes 'functional', 'subclass', 'sequential'.
Various methods in `testing_utils` to get models will auto-generate a model
of the currently active TF-Keras model type. This allows unittests to
confirm the equivalence between different TF-Keras models.
"""
def context_managers(self, kwargs):
model_type = kwargs.pop("model_type", None)
if model_type in KERAS_MODEL_TYPES:
return [test_utils.model_type_scope(model_type)]
else:
return []
def parameter_modifiers(self):
return [
tf.__internal__.test.combinations.OptionalParameter("model_type")
]
_defaults = tf.__internal__.test.combinations.generate.keywords[
"test_combinations"
]
generate = functools.partial(
tf.__internal__.test.combinations.generate,
test_combinations=_defaults
+ (KerasModeCombination(), KerasModelTypeCombination()),
)
combine = tf.__internal__.test.combinations.combine
times = tf.__internal__.test.combinations.times
NamedObject = tf.__internal__.test.combinations.NamedObject
| tf-keras/tf_keras/testing_infra/test_combinations.py/0 | {
"file_path": "tf-keras/tf_keras/testing_infra/test_combinations.py",
"repo_id": "tf-keras",
"token_count": 8580
} | 233 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for saving/loading function for keras Model."""
import collections
import tf_keras as keras
# Named tuple bundling a model with its input and target shapes.
ModelFn = collections.namedtuple(
"ModelFn", ["model", "input_shape", "target_shape"]
)
def basic_sequential():
"""Basic sequential model."""
model = keras.Sequential(
[
keras.layers.Dense(3, activation="relu", input_shape=(3,)),
keras.layers.Dense(2, activation="softmax"),
]
)
return ModelFn(model, (None, 3), (None, 2))
def basic_sequential_deferred():
"""Sequential model with deferred input shape."""
model = keras.Sequential(
[
keras.layers.Dense(3, activation="relu"),
keras.layers.Dense(2, activation="softmax"),
]
)
return ModelFn(model, (None, 3), (None, 2))
def stacked_rnn():
"""Stacked RNN model."""
inputs = keras.Input((None, 3))
layer = keras.layers.RNN([keras.layers.LSTMCell(2) for _ in range(3)])
x = layer(inputs)
outputs = keras.layers.Dense(2)(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 4, 3), (None, 2))
def lstm():
"""LSTM model."""
inputs = keras.Input((None, 3))
x = keras.layers.LSTM(4, return_sequences=True)(inputs)
x = keras.layers.LSTM(3, return_sequences=True)(x)
x = keras.layers.LSTM(2, return_sequences=False)(x)
outputs = keras.layers.Dense(2)(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 4, 3), (None, 2))
def multi_input_multi_output():
"""Multi-input Multi-output model."""
body_input = keras.Input(shape=(None,), name="body")
tags_input = keras.Input(shape=(2,), name="tags")
x = keras.layers.Embedding(10, 4)(body_input)
body_features = keras.layers.LSTM(5)(x)
x = keras.layers.concatenate([body_features, tags_input])
pred_1 = keras.layers.Dense(2, activation="sigmoid", name="priority")(x)
pred_2 = keras.layers.Dense(3, activation="softmax", name="department")(x)
model = keras.Model(
inputs=[body_input, tags_input], outputs=[pred_1, pred_2]
)
return ModelFn(model, [(None, 1), (None, 2)], [(None, 2), (None, 3)])
def nested_sequential_in_functional():
"""A sequential model nested in a functional model."""
inner_model = keras.Sequential(
[
keras.layers.Dense(3, activation="relu", input_shape=(3,)),
keras.layers.Dense(2, activation="relu"),
]
)
inputs = keras.Input(shape=(3,))
x = inner_model(inputs)
outputs = keras.layers.Dense(2, activation="softmax")(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 3), (None, 2))
def seq_to_seq():
"""Sequence to sequence model."""
num_encoder_tokens = 3
num_decoder_tokens = 3
latent_dim = 2
encoder_inputs = keras.Input(shape=(None, num_encoder_tokens))
encoder = keras.layers.LSTM(latent_dim, return_state=True)
_, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
decoder_inputs = keras.Input(shape=(None, num_decoder_tokens))
decoder_lstm = keras.layers.LSTM(
latent_dim, return_sequences=True, return_state=True
)
decoder_outputs, _, _ = decoder_lstm(
decoder_inputs, initial_state=encoder_states
)
decoder_dense = keras.layers.Dense(num_decoder_tokens, activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)
model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
return ModelFn(
model,
[(None, 2, num_encoder_tokens), (None, 2, num_decoder_tokens)],
(None, 2, num_decoder_tokens),
)
def shared_layer_functional():
"""Shared layer in a functional model."""
main_input = keras.Input(shape=(10,), dtype="int32", name="main_input")
x = keras.layers.Embedding(output_dim=5, input_dim=4, input_length=10)(
main_input
)
lstm_out = keras.layers.LSTM(3)(x)
auxiliary_output = keras.layers.Dense(
1, activation="sigmoid", name="aux_output"
)(lstm_out)
auxiliary_input = keras.Input(shape=(5,), name="aux_input")
x = keras.layers.concatenate([lstm_out, auxiliary_input])
x = keras.layers.Dense(2, activation="relu")(x)
main_output = keras.layers.Dense(
1, activation="sigmoid", name="main_output"
)(x)
model = keras.Model(
inputs=[main_input, auxiliary_input],
outputs=[main_output, auxiliary_output],
)
return ModelFn(model, [(None, 10), (None, 5)], [(None, 1), (None, 1)])
def shared_sequential():
"""Shared sequential model in a functional model."""
inner_model = keras.Sequential(
[
keras.layers.Conv2D(2, 3, activation="relu"),
keras.layers.Conv2D(2, 3, activation="relu"),
]
)
inputs_1 = keras.Input((5, 5, 3))
inputs_2 = keras.Input((5, 5, 3))
x1 = inner_model(inputs_1)
x2 = inner_model(inputs_2)
x = keras.layers.concatenate([x1, x2])
outputs = keras.layers.GlobalAveragePooling2D()(x)
model = keras.Model([inputs_1, inputs_2], outputs)
return ModelFn(model, [(None, 5, 5, 3), (None, 5, 5, 3)], (None, 4))
class MySubclassModel(keras.Model):
"""A subclass model."""
def __init__(self, input_dim=3):
super().__init__(name="my_subclass_model")
self._config = {"input_dim": input_dim}
self.dense1 = keras.layers.Dense(8, activation="relu")
self.dense2 = keras.layers.Dense(2, activation="softmax")
self.bn = keras.layers.BatchNormalization()
self.dp = keras.layers.Dropout(0.5)
def call(self, inputs, **kwargs):
x = self.dense1(inputs)
x = self.dp(x)
x = self.bn(x)
return self.dense2(x)
def get_config(self):
return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
def nested_subclassed_model():
"""A subclass model nested in another subclass model."""
class NestedSubclassModel(keras.Model):
"""A nested subclass model."""
def __init__(self):
super().__init__()
self.dense1 = keras.layers.Dense(4, activation="relu")
self.dense2 = keras.layers.Dense(2, activation="relu")
self.bn = keras.layers.BatchNormalization()
self.inner_subclass_model = MySubclassModel()
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.inner_subclass_model(x)
return self.dense2(x)
return ModelFn(NestedSubclassModel(), (None, 3), (None, 2))
def nested_subclassed_in_functional_model():
"""A subclass model nested in a functional model."""
inner_subclass_model = MySubclassModel()
inputs = keras.Input(shape=(3,))
x = inner_subclass_model(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(2, activation="softmax")(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 3), (None, 2))
def nested_functional_in_subclassed_model():
"""A functional model nested in a subclass model."""
def get_functional_model():
inputs = keras.Input(shape=(4,))
x = keras.layers.Dense(4, activation="relu")(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(2)(x)
return keras.Model(inputs, outputs)
class NestedFunctionalInSubclassModel(keras.Model):
"""A functional nested in subclass model."""
def __init__(self):
super().__init__(name="nested_functional_in_subclassed_model")
self.dense1 = keras.layers.Dense(4, activation="relu")
self.dense2 = keras.layers.Dense(2, activation="relu")
self.inner_functional_model = get_functional_model()
def call(self, inputs):
x = self.dense1(inputs)
x = self.inner_functional_model(x)
return self.dense2(x)
return ModelFn(NestedFunctionalInSubclassModel(), (None, 3), (None, 2))
def shared_layer_subclassed_model():
"""Shared layer in a subclass model."""
class SharedLayerSubclassModel(keras.Model):
"""A subclass model with shared layers."""
def __init__(self):
super().__init__(name="shared_layer_subclass_model")
self.dense = keras.layers.Dense(3, activation="relu")
self.dp = keras.layers.Dropout(0.5)
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense(inputs)
x = self.dp(x)
x = self.bn(x)
return self.dense(x)
return ModelFn(SharedLayerSubclassModel(), (None, 3), (None, 3))
def functional_with_keyword_args():
"""A functional model with keyword args."""
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(4)(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(2)(x)
model = keras.Model(inputs, outputs, name="m", trainable=False)
return ModelFn(model, (None, 3), (None, 2))
ALL_MODELS = [
("basic_sequential", basic_sequential),
("basic_sequential_deferred", basic_sequential_deferred),
("stacked_rnn", stacked_rnn),
("lstm", lstm),
("multi_input_multi_output", multi_input_multi_output),
("nested_sequential_in_functional", nested_sequential_in_functional),
("seq_to_seq", seq_to_seq),
("shared_layer_functional", shared_layer_functional),
("shared_sequential", shared_sequential),
("nested_subclassed_model", nested_subclassed_model),
(
"nested_subclassed_in_functional_model",
nested_subclassed_in_functional_model,
),
(
"nested_functional_in_subclassed_model",
nested_functional_in_subclassed_model,
),
("shared_layer_subclassed_model", shared_layer_subclassed_model),
("functional_with_keyword_args", functional_with_keyword_args),
]
def get_models(exclude_models=None):
    """Get all models excluding the specified ones."""
    exclude_models = exclude_models or []
    models = [model for model in ALL_MODELS if model[0] not in exclude_models]
    return models
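# Illustrative sketch only: retrieve every architecture except the
# (arbitrarily chosen) LSTM and sequence-to-sequence ones. Not used by the
# tests directly.
def _example_get_models():
    return get_models(exclude_models=["lstm", "seq_to_seq"])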
| tf-keras/tf_keras/tests/model_architectures.py/0 | {
"file_path": "tf-keras/tf_keras/tests/model_architectures.py",
"repo_id": "tf-keras",
"token_count": 4595
} | 234 |
#!/usr/bin/env bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
#
# A script to run multiple GPU tests in parallel controlled with an environment
# variable.
#
# Required environment variables:
# TF_GPU_COUNT = Number of GPUs available.
TF_GPU_COUNT=${TF_GPU_COUNT:-4}
TF_TESTS_PER_GPU=${TF_TESTS_PER_GPU:-8}
export TF_PER_DEVICE_MEMORY_LIMIT_MB=${TF_PER_DEVICE_MEMORY_LIMIT_MB:-1024}
# *******************************************************************
# This section of the script is needed to
# make things work on windows under msys.
# *******************************************************************
# Helper used by rlocation() below; defined here so the script is
# self-contained (assumed to match the upstream TensorFlow build scripts).
function is_absolute {
  [[ "$1" = /* ]] || [[ "$1" =~ ^[a-zA-Z]:[/\\].* ]]
}
RUNFILES_MANIFEST_FILE="${TEST_SRCDIR}/MANIFEST"
function rlocation() {
  if is_absolute "$1" ; then
# If the file path is already fully specified, simply return it.
echo "$1"
elif [[ -e "$TEST_SRCDIR/$1" ]]; then
# If the file exists in the $TEST_SRCDIR then just use it.
echo "$TEST_SRCDIR/$1"
elif [[ -e "$RUNFILES_MANIFEST_FILE" ]]; then
# If a runfiles manifest file exists then use it.
echo "$(grep "^$1 " "$RUNFILES_MANIFEST_FILE" | sed 's/[^ ]* //')"
fi
}
TEST_BINARY="$(rlocation $TEST_WORKSPACE/${1#./})"
shift
# *******************************************************************
mkdir -p /var/lock
# Try to acquire any of the TF_GPU_COUNT * TF_TESTS_PER_GPU
# slots to run a test at.
#
# Prefer to allocate 1 test per GPU over 4 tests on 1 GPU.
# So, we iterate over TF_TESTS_PER_GPU first.
for j in `seq 0 $((TF_TESTS_PER_GPU-1))`; do
for i in `seq 0 $((TF_GPU_COUNT-1))`; do
exec {lock_fd}>/var/lock/gpulock${i}_${j} || exit 1
if flock -n "$lock_fd";
then
(
      # These exports only apply within the parentheses (a subshell), so they
      # are isolated to this single command.
export CUDA_VISIBLE_DEVICES=$i
export HIP_VISIBLE_DEVICES=$i
echo "Running test $TEST_BINARY $* on GPU $CUDA_VISIBLE_DEVICES"
"$TEST_BINARY" $@
)
return_code=$?
flock -u "$lock_fd"
exit $return_code
fi
done
done
echo "Cannot find a free GPU to run the test $* on, exiting with failure..."
exit 1
| tf-keras/tf_keras/tools/gpu_build/parallel_gpu_execute.sh/0 | {
"file_path": "tf-keras/tf_keras/tools/gpu_build/parallel_gpu_execute.sh",
"repo_id": "tf-keras",
"token_count": 973
} | 235 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input dataset creator for `model.fit`."""
import tensorflow.compat.v2 as tf
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.utils.experimental.DatasetCreator", v1=[])
class DatasetCreator:
"""Object that returns a `tf.data.Dataset` upon invoking.
`tf.keras.utils.experimental.DatasetCreator` is designated as a supported
type for `x`, or the input, in `tf.keras.Model.fit`. Pass an instance of
this class to `fit` when using a callable (with a `input_context` argument)
that returns a `tf.data.Dataset`.
```python
model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
model.compile(tf.keras.optimizers.SGD(), loss="mse")
def dataset_fn(input_context):
global_batch_size = 64
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat()
dataset = dataset.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(2)
return dataset
input_options = tf.distribute.InputOptions(
experimental_fetch_to_device=True,
experimental_per_replica_buffer_size=2)
model.fit(tf.keras.utils.experimental.DatasetCreator(
dataset_fn, input_options=input_options), epochs=10, steps_per_epoch=10)
```
`Model.fit` usage with `DatasetCreator` is intended to work across all
`tf.distribute.Strategy`s, as long as `Strategy.scope` is used at model
creation:
```python
strategy = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with strategy.scope():
model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
model.compile(tf.keras.optimizers.SGD(), loss="mse")
def dataset_fn(input_context):
...
input_options = ...
model.fit(tf.keras.utils.experimental.DatasetCreator(
dataset_fn, input_options=input_options), epochs=10, steps_per_epoch=10)
```
Note: When using `DatasetCreator`, `steps_per_epoch` argument in `Model.fit`
must be provided as the cardinality of such input cannot be inferred.
Args:
dataset_fn: A callable that takes a single argument of type
`tf.distribute.InputContext`, which is used for batch size calculation
and cross-worker input pipeline sharding (if neither is needed, the
`InputContext` parameter can be ignored in the `dataset_fn`), and
returns a `tf.data.Dataset`.
input_options: Optional `tf.distribute.InputOptions`, used for specific
options when used with distribution, for example, whether to prefetch
dataset elements to accelerator device memory or host device memory, and
prefetch buffer size in the replica device memory. No effect if not used
with distributed training. See `tf.distribute.InputOptions` for more
information.
"""
def __init__(self, dataset_fn, input_options=None):
if not callable(dataset_fn):
raise TypeError(
"`dataset_fn` for `DatasetCreator` must be a `callable`. "
f"Received: {dataset_fn}"
)
if input_options and (
not isinstance(input_options, tf.distribute.InputOptions)
):
raise TypeError(
"`input_options` for `DatasetCreator` must be a "
f"`tf.distribute.InputOptions`. Received: {input_options}"
)
self.dataset_fn = dataset_fn
self.input_options = input_options
def __call__(self, *args, **kwargs):
# When a `DatasetCreator` is invoked, it forwards args/kwargs straight
# to the callable.
dataset = self.dataset_fn(*args, **kwargs)
if not isinstance(dataset, tf.data.Dataset):
raise TypeError(
"The `callable` provided to `DatasetCreator` must return "
f'a Dataset. It returns "{dataset}"'
)
return dataset
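# --- Illustrative usage sketch (not part of the original module). ---
# A `DatasetCreator` simply forwards its call arguments to `dataset_fn`, so
# calling it with a `tf.distribute.InputContext` yields the same dataset that
# `Model.fit` would build internally. The helper below is hypothetical and
# exists only to demonstrate `__call__`.
def _demo_dataset_creator_call():
    def dataset_fn(input_context):
        batch_size = input_context.get_per_replica_batch_size(64)
        dataset = tf.data.Dataset.from_tensors(([1.0], [1.0])).repeat()
        dataset = dataset.shard(
            input_context.num_input_pipelines, input_context.input_pipeline_id
        )
        return dataset.batch(batch_size)

    creator = DatasetCreator(dataset_fn)
    # Outside of `Model.fit`, an `InputContext` can be constructed directly.
    input_context = tf.distribute.InputContext(
        num_input_pipelines=1, input_pipeline_id=0, num_replicas_in_sync=1
    )
    return creator(input_context)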
| tf-keras/tf_keras/utils/dataset_creator.py/0 | {
"file_path": "tf-keras/tf_keras/utils/dataset_creator.py",
"repo_id": "tf-keras",
"token_count": 1802
} | 236 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kernelized_utils.py."""
import functools
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.utils import kernelized_utils
def _exact_gaussian(stddev):
return functools.partial(
kernelized_utils.exact_gaussian_kernel, stddev=stddev
)
def _exact_laplacian(stddev):
return functools.partial(
kernelized_utils.exact_laplacian_kernel, stddev=stddev
)
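# --- Illustrative reference (not part of the original test module). ---
# The expected values in the tests below are consistent with the standard
# closed forms, assuming that for rank-2 inputs x (shape [n, d]) and
# y (shape [m, d]) the utilities compute, pairwise,
#   gaussian:  K(x, y) = exp(-||x - y||_2^2 / (2 * stddev^2))
#   laplacian: K(x, y) = exp(-||x - y||_1 / stddev)
# e.g. exp(-0.015 / 2) ~= 0.99 for the first pair in `test_similar_matrices`.
def _reference_gaussian_kernel(x, y, stddev):
    diff = tf.expand_dims(x, 1) - tf.expand_dims(y, 0)
    squared_distances = tf.reduce_sum(tf.square(diff), axis=-1)
    return tf.exp(-squared_distances / (2.0 * stddev**2))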
class KernelizedUtilsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("gaussian", _exact_gaussian(stddev=10.0), [[1.0]]),
("laplacian", _exact_laplacian(stddev=50.0), [[1.0]]),
)
def test_equal_vectors(self, exact_kernel_fn, expected_values):
"""Identical vectors give exactly the identity kernel value."""
x = tf.constant([0.5, -0.5, -0.5, 0.5])
y = tf.constant([0.5, -0.5, -0.5, 0.5])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
# x and y are identical and therefore K(x, y) will be precisely equal to
# the identity value of the kernel.
self.assertAllClose(expected_values, exact_kernel, atol=1e-6)
@parameterized.named_parameters(
("gaussian", _exact_gaussian(stddev=10.0), [[1.0]]),
("laplacian", _exact_laplacian(stddev=50.0), [[1.0]]),
)
def test_almost_identical_vectors(self, exact_kernel_fn, expected_values):
"""Almost identical vectors give the identity kernel value."""
x = tf.constant([1.0, 0.4, -2.1, -1.1])
y = tf.constant([1.01, 0.39, -2.099, -1.101])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
# x and y are almost identical and therefore K(x, y) will be almost
# equal to the identity value of the kernel.
self.assertAllClose(expected_values, exact_kernel, atol=1e-3)
@parameterized.named_parameters(
("gaussian", _exact_gaussian(stddev=1.0), [[0.99], [0.977]]),
("laplacian", _exact_laplacian(stddev=5.0), [[0.96], [0.94]]),
)
def test_similar_matrices(self, exact_kernel_fn, expected_values):
"""Pairwise "close" vectors give high kernel values (similarity
scores)."""
x = tf.constant([1.0, 3.4, -2.1, 0.9, 3.3, -2.0], shape=[2, 3])
y = tf.constant([1.1, 3.35, -2.05])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
# The 2 rows of x are close to y. The pairwise kernel values (similarity
# scores) are somewhat close to the identity value of the kernel.
self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
@parameterized.named_parameters(
(
"gaussian",
_exact_gaussian(stddev=2.0),
[[0.997, 0.279], [0.251, 1.0], [0.164, 0.019]],
),
(
"laplacian",
_exact_laplacian(stddev=2.0),
[[0.904, 0.128], [0.116, 1.0], [0.07, 0.027]],
),
)
def test_matrices_varying_similarity(
self, exact_kernel_fn, expected_values
):
"""Test matrices with row vectors of varying pairwise similarity."""
x = tf.constant([1.0, 2.0, -2.0, 0.9, 3.3, -1.0], shape=[3, 2])
y = tf.constant([1.1, 2.1, -2.0, 0.9], shape=[2, 2])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
@parameterized.named_parameters(
("gaussian", _exact_gaussian(stddev=1.0), [[0.0]]),
("laplacian", _exact_laplacian(stddev=1.0), [[0.0]]),
)
def test_completely_dissimilar_vectors(
self, exact_kernel_fn, expected_values
):
"""Very dissimilar vectors give very low similarity scores."""
x = tf.constant([1.0, 3.4, -2.1, -5.1])
y = tf.constant([0.5, 2.1, 1.0, 3.0])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
# x and y are very "far" from each other and so the corresponding kernel
# value will be very low.
self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/kernelized_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/kernelized_utils_test.py",
"repo_id": "tf-keras",
"token_count": 2223
} | 237 |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test steps_per_execution_tuning."""
import time
import tensorflow.compat.v2 as tf
from tf_keras import Input
from tf_keras import Model
from tf_keras import losses
from tf_keras import optimizers
from tf_keras.layers import Dense
from tf_keras.testing_infra import test_combinations
from tf_keras.utils import steps_per_execution_tuning
class mockOptimizer:
def __init__(self, iterations):
self.iterations = tf.Variable(iterations)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class StepsPerExecutionTuningTest(test_combinations.TestCase):
def test_variables(self):
spe_variable = tf.Variable(1)
tuner = steps_per_execution_tuning.StepsPerExecutionTuner(
mockOptimizer(5), spe_variable, 5, 50, 0.5
)
assert tuner.optimizer.iterations.numpy() == 5
assert tuner._steps_per_execution.numpy().item() == 1
assert tuner.interval == 5
assert tuner.change_spe_interval == 50
assert tuner.spe_change_threshold == 0.5
assert not tuner.steps_per_execution_stop_event.is_set()
def test_start_stop(self):
spe_variable = tf.Variable(1)
tuner = steps_per_execution_tuning.StepsPerExecutionTuner(
mockOptimizer(5), spe_variable, interval=0.2
)
tuner.start()
assert not tuner.steps_per_execution_stop_event.is_set()
assert tuner.start_time > 0
time.sleep(0.5) # should be enough time for 2 measurements
tuner.stop()
assert tuner.steps_per_execution_stop_event.is_set()
assert tuner.spe_measurement_count > 0
def test_settable_steps_per_execution(self):
spe_variable = tf.Variable(1)
tuner = steps_per_execution_tuning.StepsPerExecutionTuner(
mockOptimizer(5), spe_variable, interval=0.2
)
tuner.start()
tuner.stop()
assert tuner.init_spe == 1
tuner.steps_per_execution = 5
assert spe_variable.numpy().item() == 5
assert tuner.init_spe == 5
def test_custom_training_loop(self):
dataset = _get_dataset()
iterator = iter(dataset)
inputs = Input(shape=(784,), name="digits")
x = Dense(64, activation="relu", name="dense_1")(inputs)
x = Dense(64, activation="relu", name="dense_2")(x)
outputs = Dense(10, name="predictions")(x)
model = Model(inputs=inputs, outputs=outputs)
optimizer = optimizers.SGD(learning_rate=1e-3)
loss_fn = losses.SparseCategoricalCrossentropy(from_logits=True)
# Create our steps per execution variable
steps_per_execution = tf.Variable(
1,
dtype="int64",
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
)
# Create the tuner
tuner = steps_per_execution_tuning.StepsPerExecutionTuner(
optimizer, steps_per_execution
)
# Create a step function that runs a single training step
@tf.function
def step_fn(iterator):
batch_data, labels = next(iterator)
print(batch_data.shape, labels.shape)
with tf.GradientTape() as tape:
logits = model(batch_data, training=True)
loss_value = loss_fn(labels, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# We can now pack multiple execution steps into one call
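        # Tracing the loop below as a single tf.function means one host-side
        # call launches `steps_per_execution` training steps; that per-step
        # host overhead is what the tuner tries to amortize when it adjusts
        # the `steps_per_execution` variable in the background.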
@tf.function
def multi_step_train_fn(iterator, steps_per_execution):
for _ in tf.range(steps_per_execution):
step_fn(iterator)
return
steps_per_epoch = 10
epochs = 2
# Start the tuner before training
tuner.start()
for _ in range(epochs):
for _ in range(steps_per_epoch):
multi_step_train_fn(iterator, steps_per_execution)
# End the tuner after training
tuner.stop()
def _get_dataset():
inputs = tf.zeros((1000, 784), dtype=tf.float32)
targets = tf.zeros((1000,), dtype=tf.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.batch(10)
return dataset
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/steps_per_execution_tuning_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/steps_per_execution_tuning_test.py",
"repo_id": "tf-keras",
"token_count": 2098
} | 238 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras Vis utils."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.applications import efficientnet
from tf_keras.utils import layer_utils
from tf_keras.utils import vis_utils
class ModelToDotFormatTest(tf.test.TestCase, parameterized.TestCase):
def test_plot_model_cnn(self):
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
filters=2,
kernel_size=(2, 3),
input_shape=(3, 5, 5),
name="conv",
)
)
model.add(keras.layers.Flatten(name="flat"))
model.add(keras.layers.Dense(5, name="dense"))
dot_img_file = "model_1.png"
try:
vis_utils.plot_model(
model, to_file=dot_img_file, show_shapes=True, show_dtype=True
)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
def test_plot_model_with_wrapped_layers_and_models(self):
inputs = keras.Input(shape=(None, 3))
lstm = keras.layers.LSTM(6, return_sequences=True, name="lstm")
x = lstm(inputs)
# Add layer inside a Wrapper
bilstm = keras.layers.Bidirectional(
keras.layers.LSTM(16, return_sequences=True, name="bilstm")
)
x = bilstm(x)
# Add model inside a Wrapper
submodel = keras.Sequential(
[keras.layers.Dense(32, name="dense", input_shape=(None, 32))]
)
wrapped_dense = keras.layers.TimeDistributed(submodel)
x = wrapped_dense(x)
# Add shared submodel
outputs = submodel(x)
model = keras.Model(inputs, outputs)
dot_img_file = "model_2.png"
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True,
)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
def test_plot_model_with_add_loss(self):
inputs = keras.Input(shape=(None, 3))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.add_loss(tf.reduce_mean(outputs))
dot_img_file = "model_3.png"
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True,
)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
model = keras.Sequential(
[keras.Input(shape=(None, 3)), keras.layers.Dense(1)]
)
model.add_loss(tf.reduce_mean(model.output))
dot_img_file = "model_4.png"
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
expand_nested=True,
)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
@parameterized.parameters(
{"show_shapes": False, "show_dtype": False},
{"show_shapes": False, "show_dtype": True},
{"show_shapes": True, "show_dtype": False},
{"show_shapes": True, "show_dtype": True},
)
def test_plot_model_cnn_with_activations(self, show_shapes, show_dtype):
model = keras.Sequential()
model.add(
keras.layers.Conv2D(
filters=2,
kernel_size=2,
input_shape=(9, 9, 3),
activation="relu",
)
)
model.add(
keras.layers.Conv2D(
filters=4, kernel_size=2, strides=(2, 2), activation="relu"
)
)
model.add(keras.layers.Flatten(name="flat"))
model.add(keras.layers.Dense(5, name="head", activation="softmax"))
dot_img_file = "model_5.png"
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=show_shapes,
show_dtype=show_dtype,
show_layer_activations=True,
)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
@parameterized.parameters(
{"layer_range": ["block1a_project_conv", "block1a_activation"]},
{"layer_range": ["block1a_activation", "block1a_project_conv"]},
{"layer_range": [r"block*", "block2a_se_excite"]},
{"layer_range": [r"block\da_activation", r"block\da_project_bn"]},
)
def test_dot_layer_range(self, layer_range):
model = efficientnet.EfficientNetB0(weights=None)
layer_ids_from_model = get_layer_ids_from_model(model, layer_range)
try:
dot = vis_utils.model_to_dot(model, layer_range=layer_range)
dot_edges = dot.get_edges()
layer_ids_from_dot = get_layer_ids_from_dot(dot_edges)
self.assertAllEqual(
sorted(layer_ids_from_model), sorted(layer_ids_from_dot)
)
except ImportError:
pass
@parameterized.parameters(
{"layer_range": ["block1a_project_conv", "block1a_activation"]},
{"layer_range": ["block1a_activation", "block1a_project_conv"]},
{"layer_range": [r"block*", "block2a_se_excite"]},
{"layer_range": [r"block\da_activation", r"block\da_project_bn"]},
)
def test_plot_layer_range(self, layer_range):
model = efficientnet.EfficientNetB0(weights=None)
effnet_subplot = "model_effnet.png"
try:
vis_utils.plot_model(
model, to_file=effnet_subplot, layer_range=layer_range
)
self.assertTrue(tf.io.gfile.exists(effnet_subplot))
except ImportError:
pass
finally:
if tf.io.gfile.exists(effnet_subplot):
tf.io.gfile.remove(effnet_subplot)
@parameterized.parameters(
{"layer_range": ["block1a_se_squeeze", "block2a_project_conv"]},
{"layer_range": [r"block\da_se_reshape", r"block*"]},
)
def test_layer_range_assertion_fail(self, layer_range):
model = efficientnet.EfficientNetB0(weights=None)
try:
with self.assertRaises(AssertionError):
vis_utils.model_to_dot(model, layer_range=layer_range)
with self.assertRaises(AssertionError):
vis_utils.plot_model(model, layer_range=layer_range)
except ImportError:
pass
@parameterized.parameters(
{"layer_range": ["block1a_activation"]},
{"layer_range": []},
{
"layer_range": [
"input",
"block1a_activation",
"block1a_project_conv",
]
},
{"layer_range": [9, "block1a_activation"]},
{"layer_range": [29, 9]},
{"layer_range": ["block8a_se_reshape", "block*"]},
)
def test_layer_range_value_fail(self, layer_range):
model = efficientnet.EfficientNetB0(weights=None)
try:
with self.assertRaises(ValueError):
vis_utils.model_to_dot(model, layer_range=layer_range)
with self.assertRaises(ValueError):
vis_utils.plot_model(model, layer_range=layer_range)
except ImportError:
pass
def test_model_with_tf_op(self):
# Test fix for a bug in which inputs to a TFOp layer past the 1st one
# were not connected in the TF-Keras model plot.
a = keras.Input((2,))
b = keras.Input((2,))
model = keras.Model(inputs=[a, b], outputs=a + b)
try:
dot = vis_utils.model_to_dot(model)
self.assertLen(dot.get_edges(), 2) # This model has 2 edges.
except ImportError:
pass
def test_model_with_brackets_in_shape(self):
# Test fix for a bug in which plotting the model shapes fails if
# any labels contain brackets
class DictLayer(keras.layers.Layer):
def call(self, inputs) -> tf.Tensor:
tensor_input, dict_input = inputs
return tf.concat(list(dict_input.values()), axis=1)
inputs = {
"a": keras.Input(name="a", shape=(1), dtype=tf.float32),
"b": keras.Input(name="b", shape=(1), dtype=tf.float32),
}
outputs = DictLayer()((inputs["a"], inputs))
model = keras.Model(
inputs=inputs,
outputs=outputs,
)
try:
vis_utils.model_to_dot(
model, show_shapes=True, show_dtype=True, show_layer_names=True
)
except ImportError:
pass
def test_plot_model_with_show_trainable(self):
model = keras.Sequential(name="trainable")
untrained = keras.layers.Conv2D(
filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name="conv"
)
model.add(untrained)
model.add(keras.layers.Flatten(name="flat"))
model.add(keras.layers.Dense(5, name="dense"))
# Should display as Non Trainable
untrained.trainable = False
dot_img_file = "model_trainable.png"
try:
vis_utils.plot_model(
model,
to_file=dot_img_file,
show_shapes=True,
show_dtype=True,
show_trainable=True,
)
self.assertTrue(tf.io.gfile.exists(dot_img_file))
tf.io.gfile.remove(dot_img_file)
except ImportError:
pass
def get_layer_ids_from_model(model, layer_range):
layer_range = layer_utils.get_layer_index_bound_by_layer_name(
model, layer_range
)
layer_ids_from_model = [
str(id(layer))
for layer in model.layers[layer_range[0] : layer_range[1]]
]
return layer_ids_from_model
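# The dot graph produced by `model_to_dot` names its nodes by the Python
# `id()` of each layer, so the edge endpoints collected below can be compared
# directly against the ids gathered from the model above.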
def get_layer_ids_from_dot(dot_edges):
layer_ids_from_dot = []
for edge in dot_edges:
for pt in edge.obj_dict["points"]:
if pt not in layer_ids_from_dot:
layer_ids_from_dot.append(pt)
return layer_ids_from_dot
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/vis_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/vis_utils_test.py",
"repo_id": "tf-keras",
"token_count": 5725
} | 239 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import keras_tuner
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow import nest
import autokeras as ak
from autokeras import hyper_preprocessors
from autokeras import nodes as input_module
from autokeras import preprocessors
from autokeras import test_utils
from autokeras.blocks import heads as head_module
def test_two_classes_infer_binary_crossentropy():
dataset = np.array(["a", "a", "a", "b"])
head = head_module.ClassificationHead(name="a", shape=(1,))
adapter = head.get_adapter()
dataset = adapter.adapt(dataset, batch_size=32)
analyser = head.get_analyser()
for data in dataset:
analyser.update(data)
analyser.finalize()
head.config_from_analyser(analyser)
head.build(
keras_tuner.HyperParameters(),
input_module.Input(shape=(32,)).build_node(
keras_tuner.HyperParameters()
),
)
assert head.loss.name == "binary_crossentropy"
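# The test above and the one below illustrate how ClassificationHead infers
# its loss from the labels seen by the analyser: two distinct classes yield
# binary_crossentropy, while three or more yield categorical_crossentropy.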
def test_three_classes_infer_categorical_crossentropy():
dataset = np.array(["a", "a", "c", "b"])
head = head_module.ClassificationHead(name="a", shape=(1,))
adapter = head.get_adapter()
dataset = adapter.adapt(dataset, batch_size=32)
analyser = head.get_analyser()
for data in dataset:
analyser.update(data)
analyser.finalize()
head.config_from_analyser(analyser)
head.build(
keras_tuner.HyperParameters(),
input_module.Input(shape=(32,)).build_node(
keras_tuner.HyperParameters()
),
)
assert head.loss.name == "categorical_crossentropy"
def test_multi_label_loss():
head = head_module.ClassificationHead(
name="a", multi_label=True, num_classes=8, shape=(8,)
)
input_node = keras.Input(shape=(5,))
output_node = head.build(keras_tuner.HyperParameters(), input_node)
model = keras.Model(input_node, output_node)
assert model.layers[-1].activation.__name__ == "sigmoid"
assert head.loss.name == "binary_crossentropy"
def test_clf_head_get_sigmoid_postprocessor():
head = head_module.ClassificationHead(name="a", multi_label=True)
head._encoded = True
head._encoded_for_sigmoid = True
assert isinstance(
head.get_hyper_preprocessors()[0].preprocessor,
preprocessors.SigmoidPostprocessor,
)
def test_clf_head_with_2_clases_get_label_encoder():
head = head_module.ClassificationHead(name="a", num_classes=2)
head._encoded = False
head._labels = ["a", "b"]
assert isinstance(
head.get_hyper_preprocessors()[-1].preprocessor,
preprocessors.LabelEncoder,
)
def test_clf_head_build_with_zero_dropout_return_tensor():
block = head_module.ClassificationHead(dropout=0, shape=(8,))
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(5,), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_clf_head_hpps_with_uint8_contain_cast_to_int32():
dataset = test_utils.generate_one_hot_labels(100, 10, "dataset")
dataset = dataset.map(lambda x: tf.cast(x, tf.uint8))
head = head_module.ClassificationHead(shape=(8,))
analyser = head.get_analyser()
for data in dataset:
analyser.update(data)
analyser.finalize()
head.config_from_analyser(analyser)
assert any(
[
isinstance(hpp, hyper_preprocessors.DefaultHyperPreprocessor)
and isinstance(hpp.preprocessor, preprocessors.CastToInt32)
for hpp in head.get_hyper_preprocessors()
]
)
def test_reg_head_build_with_zero_dropout_return_tensor():
block = head_module.RegressionHead(dropout=0, shape=(8,))
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(5,), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_segmentation():
dataset = np.array(["a", "a", "c", "b"])
head = head_module.SegmentationHead(name="a", shape=(1,))
adapter = head.get_adapter()
dataset = adapter.adapt(dataset, batch_size=32)
analyser = head.get_analyser()
for data in dataset:
analyser.update(data)
analyser.finalize()
head.config_from_analyser(analyser)
head.build(
keras_tuner.HyperParameters(),
ak.Input(shape=(32,)).build_node(keras_tuner.HyperParameters()),
)
| autokeras/autokeras/blocks/heads_test.py/0 | {
"file_path": "autokeras/autokeras/blocks/heads_test.py",
"repo_id": "autokeras",
"token_count": 1943
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import autokeras as ak
from autokeras import test_utils
NUM_INSTANCES = 3
BATCH_SIZE = 2
def test_image_classifier(tmp_path):
train_x = test_utils.generate_data(
num_instances=NUM_INSTANCES, shape=(32, 32)
)
train_y = test_utils.generate_one_hot_labels(
num_instances=NUM_INSTANCES, num_classes=10
)
clf = ak.ImageClassifier(
directory=tmp_path,
max_trials=2,
seed=test_utils.SEED,
distribution_strategy=tf.distribute.MirroredStrategy(),
)
clf.fit(
train_x, train_y, epochs=1, validation_split=0.2, batch_size=BATCH_SIZE
)
keras_model = clf.export_model()
clf.evaluate(train_x, train_y)
assert clf.predict(train_x).shape == (len(train_x), 10)
assert isinstance(keras_model, keras.Model)
def test_image_regressor(tmp_path):
train_x = test_utils.generate_data(
num_instances=NUM_INSTANCES, shape=(32, 32, 3)
)
train_y = test_utils.generate_data(num_instances=NUM_INSTANCES, shape=(1,))
clf = ak.ImageRegressor(
directory=tmp_path, max_trials=2, seed=test_utils.SEED
)
clf.fit(
train_x, train_y, epochs=1, validation_split=0.2, batch_size=BATCH_SIZE
)
clf.export_model()
assert clf.predict(train_x).shape == (len(train_x), 1)
def test_text_classifier(tmp_path):
train_x = test_utils.generate_text_data(num_instances=NUM_INSTANCES)
train_y = np.array([0, 1] * ((NUM_INSTANCES + 1) // 2))[:NUM_INSTANCES]
test_x = train_x
test_y = train_y
clf = ak.TextClassifier(
directory=tmp_path,
max_trials=2,
seed=test_utils.SEED,
metrics=["accuracy"],
objective="accuracy",
)
clf.fit(
train_x,
train_y,
epochs=2,
validation_data=(test_x, test_y),
batch_size=BATCH_SIZE,
)
clf.export_model()
assert clf.predict(test_x).shape == (len(test_x), 1)
assert clf.tuner._get_best_trial_epochs() <= 2
def test_text_regressor(tmp_path):
train_x = test_utils.generate_text_data(num_instances=NUM_INSTANCES)
test_x = train_x
train_y = test_utils.generate_data(num_instances=NUM_INSTANCES, shape=(1,))
test_y = train_y
clf = ak.TextRegressor(
directory=tmp_path, max_trials=2, seed=test_utils.SEED
)
clf.fit(
train_x,
train_y,
epochs=1,
validation_data=(test_x, test_y),
batch_size=BATCH_SIZE,
)
clf.export_model()
assert clf.predict(test_x).shape == (len(test_x), 1)
def test_structured_data_regressor(tmp_path):
num_data = NUM_INSTANCES * 2
num_train = NUM_INSTANCES
data = (
pd.read_csv(test_utils.TRAIN_CSV_PATH).to_numpy().astype(str)[:num_data]
)
x_train, x_test = data[:num_train], data[num_train:]
y = test_utils.generate_data(num_instances=num_data, shape=tuple())
y_train, y_test = y[:num_train], y[num_train:]
clf = ak.StructuredDataRegressor(
directory=tmp_path, max_trials=2, seed=test_utils.SEED
)
clf.fit(
x_train,
y_train,
epochs=11,
validation_data=(x_train, y_train),
batch_size=BATCH_SIZE,
)
clf.export_model()
assert clf.predict(x_test).shape == (len(y_test), 1)
def test_structured_data_classifier(tmp_path):
num_data = NUM_INSTANCES * 2
num_train = NUM_INSTANCES
data = (
pd.read_csv(test_utils.TRAIN_CSV_PATH).to_numpy().astype(str)[:num_data]
)
x_train, x_test = data[:num_train], data[num_train:]
y = test_utils.generate_one_hot_labels(
num_instances=num_data, num_classes=3
)
y_train, y_test = y[:num_train], y[num_train:]
clf = ak.StructuredDataClassifier(
directory=tmp_path, max_trials=1, seed=test_utils.SEED
)
clf.fit(
x_train,
y_train,
epochs=2,
validation_data=(x_train, y_train),
batch_size=BATCH_SIZE,
)
clf.export_model()
assert clf.predict(x_test).shape == (len(y_test), 3)
def test_timeseries_forecaster(tmp_path):
lookback = 2
predict_from = 1
predict_until = 10
train_x = test_utils.generate_data_with_categorical(num_instances=100)
train_y = test_utils.generate_data(num_instances=80, shape=(1,))
clf = ak.TimeseriesForecaster(
lookback=lookback,
directory=tmp_path,
predict_from=predict_from,
predict_until=predict_until,
max_trials=2,
seed=test_utils.SEED,
)
clf.fit(train_x, train_y, epochs=1, validation_data=(train_x, train_y))
keras_model = clf.export_model()
clf.evaluate(train_x, train_y)
assert clf.predict(train_x).shape == (predict_until - predict_from + 1, 1)
assert clf.fit_and_predict(
train_x, train_y, epochs=1, validation_split=0.2
).shape == (predict_until - predict_from + 1, 1)
assert isinstance(keras_model, keras.Model)
| autokeras/autokeras/integration_tests/task_api_test.py/0 | {
"file_path": "autokeras/autokeras/integration_tests/task_api_test.py",
"repo_id": "autokeras",
"token_count": 2503
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import Union
import pandas as pd
from autokeras import blocks
from autokeras import nodes as input_module
from autokeras.engine import tuner
from autokeras.tasks import structured_data
from autokeras.tuners import greedy
from autokeras.utils import types
class SupervisedTimeseriesDataPipeline(
structured_data.BaseStructuredDataPipeline
):
def __init__(
self,
outputs,
column_names=None,
column_types=None,
lookback=None,
predict_from=1,
predict_until=None,
**kwargs
):
inputs = input_module.TimeseriesInput(
lookback=lookback,
column_names=column_names,
column_types=column_types,
)
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
self.predict_from = predict_from
self.predict_until = predict_until
self._target_col_name = None
self.train_len = 0
@staticmethod
def _read_from_csv(x, y):
df = pd.read_csv(x)
target = df.pop(y).dropna().to_numpy()
return df, target
def fit(
self,
x=None,
y=None,
epochs=None,
callbacks=None,
validation_split=0.2,
validation_data=None,
**kwargs
):
# x is file path of training data
if isinstance(x, str):
self._target_col_name = y
x, y = self._read_from_csv(x, y)
if validation_data:
x_val, y_val = validation_data
if isinstance(x_val, str):
validation_data = self._read_from_csv(x_val, y_val)
self.check_in_fit(x)
self.train_len = len(y)
if validation_data:
x_val, y_val = validation_data
train_len = len(y_val)
x_val = x_val[:train_len]
y_val = y_val[self.lookback - 1 :]
validation_data = x_val, y_val
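        # Align features with targets for supervised forecasting: x is cut to
        # the labeled range (`train_len` rows), and the first `lookback - 1`
        # targets are dropped because they lack a full history window.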
history = super().fit(
x=x[: self.train_len],
y=y[self.lookback - 1 :],
epochs=epochs,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
**kwargs
)
return history
def predict(self, x, **kwargs):
x = self.read_for_predict(x)
if len(x) < self.train_len:
raise ValueError(
"The prediction data requires the original training"
" data to make predictions on subsequent data points"
)
y_pred = super().predict(x=x, **kwargs)
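        # Keep only the requested forecast horizon: predictions overlapping
        # the original training range are dropped, and the slice from
        # `train_len + predict_from` up to `train_len + predict_until` is
        # returned.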
lower_bound = self.train_len + self.predict_from
if self.predict_until is None:
self.predict_until = len(y_pred)
upper_bound = min(self.train_len + self.predict_until + 1, len(y_pred))
return y_pred[lower_bound:upper_bound]
def evaluate(self, x, y=None, **kwargs):
"""Evaluate the best model for the given data.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Testing data x. If the data is from a csv file, it should be a
string specifying the path of the csv file of the testing data.
y: String, numpy.ndarray, or tensorflow.Dataset. Testing data y.
If the data is from a csv file, it should be a string
corresponding to the label column.
**kwargs: Any arguments supported by keras.Model.evaluate.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs and/or
metrics). The attribute model.metrics_names will give you the
display labels for the scalar outputs.
"""
if isinstance(x, str):
x, y = self._read_from_csv(x, y)
return super().evaluate(
x=x[: len(y)], y=y[self.lookback - 1 :], **kwargs
)
class TimeseriesForecaster(SupervisedTimeseriesDataPipeline):
"""AutoKeras time series data forecast class.
# Arguments
column_names: A list of strings specifying the names of the columns. The
length of the list should be equal to the number of columns of the
data. Defaults to None. If None, it will be obtained from the
header of the csv file or the pandas.DataFrame.
column_types: Dict. The keys are the column names. The values should
either be 'numerical' or 'categorical', indicating the type of that
column. Defaults to None. If not None, the column_names need to be
specified. If None, it will be inferred from the data.
lookback: Int. The range of history steps to consider for each
prediction. For example, if lookback=n, the data in the range of [i
- n, i - 1] is used to predict the value of step i. If unspecified,
it will be tuned automatically.
predict_from: Int. The starting point of the forecast for each sample
(in number of steps) after the last time step in the input. If N is
the last step in the input, then the first step of the predicted
output will be N + predict_from. Defaults to 1 (which corresponds to
starting the forecast immediately after the last step in the input).
predict_until: Int. The end point of the forecast for each sample (in
number of steps) after the last time step in the input. If N is the
last step in the input, then the last step of the predicted output
will be N + predict_until. If unspecified, it will predict till end
of dataset. Defaults to None.
loss: A Keras loss function. Defaults to use 'mean_squared_error'.
metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.
project_name: String. The name of the AutoModel. Defaults to
'time_series_forecaster'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to
100.
directory: String. The path to a directory for storing the search
outputs. Defaults to None, which would create a folder with the
name of the AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
tuner: String or subclass of AutoTuner. If string, it should be one of
'greedy', 'bayesian', 'hyperband' or 'random'. It can also be a
subclass of AutoTuner. If left unspecified, it uses a task specific
tuner, which first evaluates the most commonly used models for the
task before exploring other models.
overwrite: Boolean. Defaults to `False`. If `False`, reloads an existing
project of the same name if one is found. Otherwise, overwrites the
project.
seed: Int. Random seed.
max_model_size: Int. Maximum number of scalars in the parameters of a
model. Models larger than this are rejected.
**kwargs: Any arguments supported by AutoModel.
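    # Example
    ```python
    # A minimal sketch following the usage in the AutoKeras tests; the data
    # and trial counts below are illustrative only.
    import numpy as np
    import autokeras as ak

    x_train = np.random.rand(100, 5)  # 100 time steps, 5 features each.
    y_train = np.random.rand(80, 1)   # Targets for the first 80 steps.

    clf = ak.TimeseriesForecaster(
        lookback=2, predict_from=1, predict_until=10, max_trials=2
    )
    clf.fit(x_train, y_train, epochs=1, validation_split=0.2)
    # Returns the forecast for steps 81 to 90, i.e.
    # predict_until - predict_from + 1 values per output dimension.
    predictions = clf.predict(x_train)
    ```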
"""
def __init__(
self,
output_dim: Optional[int] = None,
column_names: Optional[List[str]] = None,
column_types: Optional[Dict[str, str]] = None,
lookback: Optional[int] = None,
predict_from: int = 1,
predict_until: Optional[int] = None,
loss: types.LossType = "mean_squared_error",
metrics: Optional[types.MetricsType] = None,
project_name: str = "time_series_forecaster",
max_trials: int = 100,
directory: Union[str, Path, None] = None,
objective: str = "val_loss",
tuner: Union[str, Type[tuner.AutoTuner]] = None,
overwrite: bool = False,
seed: Optional[int] = None,
max_model_size: Optional[int] = None,
**kwargs
):
if tuner is None:
tuner = greedy.Greedy
super().__init__(
outputs=blocks.RegressionHead(
output_dim=output_dim, loss=loss, metrics=metrics
),
column_names=column_names,
column_types=column_types,
lookback=lookback,
predict_from=predict_from,
predict_until=predict_until,
project_name=project_name,
max_trials=max_trials,
directory=directory,
objective=objective,
tuner=tuner,
overwrite=overwrite,
seed=seed,
max_model_size=max_model_size,
**kwargs
)
self.lookback = lookback
self.predict_from = predict_from
self.predict_until = predict_until
def fit(
self,
x=None,
y=None,
validation_split=0.2,
validation_data=None,
**kwargs
):
"""Search for the best model and hyperparameters for the AutoModel.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Training data x. If the data is from a csv file, it should be a
string specifying the path of the csv file of the training data.
y: String, a list of string(s), numpy.ndarray, pandas.DataFrame or
tensorflow.Dataset. Training data y.
If the data is from a csv file, it should be a list of string(s)
specifying the name(s) of the column(s) need to be forecasted.
If it is multivariate forecasting, y should be a list of more
than one column names. If it is univariate forecasting, y should
be a string or a list of one string.
validation_split: Float between 0 and 1. Defaults to 0.2. Fraction
of the training data to be used as validation data. The model
will set apart this fraction of the training data, will not
train on it, and will evaluate the loss and any model metrics on
this data at the end of each epoch. The validation data is
selected from the last samples in the `x` and `y` data provided,
before shuffling. This argument is not supported when `x` is a
dataset. The best model found would be fit on the entire
dataset including the validation data.
validation_data: Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained
on this data. `validation_data` will override
`validation_split`. The type of the validation data should be
the same as the training data. The best model found would be
fit on the training dataset without the validation data.
**kwargs: Any arguments supported by
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit).
"""
super().fit(
x=x,
y=y,
validation_split=validation_split,
validation_data=validation_data,
**kwargs
)
def predict(self, x=None, **kwargs):
"""Predict the output for a given testing data.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Testing data x. If the data is from a csv file, it should be a
string specifying the path of the csv file of the testing data.
**kwargs: Any arguments supported by keras.Model.predict.
# Returns
A list of numpy.ndarray objects or a single numpy.ndarray.
The predicted results.
"""
return super().predict(x=x, **kwargs)
def fit_and_predict(
self,
x=None,
y=None,
validation_split=0.2,
validation_data=None,
**kwargs
):
"""Search for the best model and then predict for remaining data points.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Training data x. If the data is from a csv file, it should be a
string specifying the path of the csv file of the training data.
y: String, a list of string(s), numpy.ndarray, pandas.DataFrame or
tensorflow.Dataset. Training data y. If the data is from a csv
file, it should be a list of string(s) specifying the name(s) of
the column(s) need to be forecasted. If it is multivariate
forecasting, y should be a list of more than one column names.
If it is univariate forecasting, y should be a string or a list
of one string.
validation_split: Float between 0 and 1. Defaults to 0.2. Fraction
of the training data to be used as validation data. The model
will set apart this fraction of the training data, will not
train on it, and will evaluate the loss and any model metrics on
this data at the end of each epoch. The validation data is
selected from the last samples in the `x` and `y` data provided,
before shuffling. This argument is not supported when `x` is a
dataset. The best model found would be fit on the entire
dataset including the validation data.
validation_data: Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained
on this data. `validation_data` will override
`validation_split`. The type of the validation data should be
the same as the training data. The best model found would be
fit on the training dataset without the validation data.
**kwargs: Any arguments supported by
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit).
"""
self.fit(
x=x,
y=y,
validation_split=validation_split,
validation_data=validation_data,
**kwargs
)
return self.predict(x=x)
class TimeseriesClassifier(SupervisedTimeseriesDataPipeline):
    """AutoKeras time series data classification class.
# Arguments
column_names: A list of strings specifying the names of the columns. The
length of the list should be equal to the number of columns of the
data. Defaults to None. If None, it will be obtained from the
header of the csv file or the pandas.DataFrame.
column_types: Dict. The keys are the column names. The values should
either be 'numerical' or 'categorical', indicating the type of that
column. Defaults to None. If not None, the column_names need to be
specified. If None, it will be inferred from the data.
lookback: Int. The range of history steps to consider for each
prediction. For example, if lookback=n, the data in the range of [i
- n, i - 1] is used to predict the value of step i. If unspecified,
it will be tuned automatically.
predict_from: Int. The starting point of the forecast for each sample
(in number of steps) after the last time step in the input. If N is
the last step in the input, then the first step of the predicted
output will be N + predict_from. Defaults to 1 (which corresponds to
starting the forecast immediately after the last step in the input).
predict_until: Int. The end point of the forecast for each sample (in
number of steps) after the last time step in the input. If N is the
last step in the input, then the last step of the predicted output
will be N + predict_until. If unspecified, it will predict till end
of dataset. Defaults to None.
loss: A Keras loss function. Defaults to use 'mean_squared_error'.
metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.
project_name: String. The name of the AutoModel. Defaults to
'time_series_forecaster'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to
100.
directory: String. The path to a directory for storing the search
outputs. Defaults to None, which would create a folder with the
name of the AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
overwrite: Boolean. Defaults to `False`. If `False`, reloads an existing
project of the same name if one is found. Otherwise, overwrites the
project.
seed: Int. Random seed.
max_model_size: Int. Maximum number of scalars in the parameters of a
model. Models larger than this are rejected.
**kwargs: Any arguments supported by AutoModel.
"""
def __init__(
self,
output_dim=None,
column_names=None,
column_types=None,
lookback=None,
predict_from=1,
predict_until=None,
loss="mean_squared_error",
metrics=None,
project_name="time_series_classifier",
max_trials=100,
directory=None,
objective="val_loss",
overwrite=False,
seed=None,
max_model_size: Optional[int] = None,
**kwargs
):
raise NotImplementedError
def fit(
self,
x=None,
y=None,
validation_split=0.2,
validation_data=None,
**kwargs
):
"""Search for the best model and hyperparameters for the AutoModel.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Training data x. If the data is from a csv file, it should be a
string specifying the path of the csv file of the training data.
y: String, a list of string(s), numpy.ndarray, pandas.DataFrame or
tensorflow.Dataset. Training data y. If the data is from a csv
file, it should be a list of string(s) specifying the name(s) of
the column(s) need to be forecasted. If it is multivariate
forecasting, y should be a list of more than one column names.
If it is univariate forecasting, y should be a string or a list
of one string.
validation_split: Float between 0 and 1. Defaults to 0.2. Fraction
of the training data to be used as validation data. The model
will set apart this fraction of the training data, will not
train on it, and will evaluate the loss and any model metrics on
this data at the end of each epoch. The validation data is
selected from the last samples in the `x` and `y` data provided,
before shuffling. This argument is not supported when `x` is a
dataset. The best model found would be fit on the entire
dataset including the validation data.
validation_data: Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained
on this data. `validation_data` will override
`validation_split`. The type of the validation data should be
the same as the training data. The best model found would be
fit on the training dataset without the validation data.
**kwargs: Any arguments supported by
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit).
"""
raise NotImplementedError
def predict(self, x=None, **kwargs):
"""Predict the output for a given testing data.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
                Testing data x; it should also contain the training data used,
                as subsequent predictions depend on them. If the data is from a
csv file, it should be a string specifying the path of the csv
file of the testing data.
**kwargs: Any arguments supported by keras.Model.predict.
# Returns
A list of numpy.ndarray objects or a single numpy.ndarray.
The predicted results.
"""
raise NotImplementedError
def fit_and_predict(
self,
x=None,
y=None,
validation_split=0.2,
validation_data=None,
**kwargs
):
"""Search for the best model and then predict for remaining data points.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Training and Test data x. If the data is from a csv file, it
should be a string specifying the path of the csv file of the
training data.
y: String, a list of string(s), numpy.ndarray, pandas.DataFrame or
tensorflow.Dataset. Training data y. If the data is from a csv
file, it should be a list of string(s) specifying the name(s) of
the column(s) need to be forecasted. If it is multivariate
forecasting, y should be a list of more than one column names.
If it is univariate forecasting, y should be a string or a list
of one string.
validation_split: Float between 0 and 1. Defaults to 0.2. Fraction
of the training data to be used as validation data. The model
will set apart this fraction of the training data, will not
train on it, and will evaluate the loss and any model metrics on
this data at the end of each epoch. The validation data is
selected from the last samples in the `x` and `y` data provided,
before shuffling. This argument is not supported when `x` is a
dataset. The best model found would be fit on the entire
dataset including the validation data.
validation_data: Data on which to evaluate the loss and any model
metrics at the end of each epoch. The model will not be trained
on this data. `validation_data` will override
`validation_split`. The type of the validation data should be
the same as the training data. The best model found would be
fit on the training dataset without the validation data.
**kwargs: Any arguments supported by
[keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit).
"""
raise NotImplementedError
| autokeras/autokeras/tasks/time_series_forecaster.py/0 | {
"file_path": "autokeras/autokeras/tasks/time_series_forecaster.py",
"repo_id": "autokeras",
"token_count": 10018
} | 2 |
coverage:
status:
project:
default:
target: 100%
patch:
default:
target: 100%
| autokeras/codecov.yml/0 | {
"file_path": "autokeras/codecov.yml",
"repo_id": "autokeras",
"token_count": 60
} | 3 |
<jupyter_start><jupyter_code>!pip install autokeras
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
import autokeras as ak<jupyter_output><empty_output><jupyter_text>A Simple Example: The first step is to prepare your data. Here we use the [California housing dataset](https://scikit-learn.org/stable/datasets/real_world.html#california-housing-dataset) as an example.<jupyter_code>house_dataset = fetch_california_housing()
df = pd.DataFrame(
np.concatenate(
(house_dataset.data, house_dataset.target.reshape(-1, 1)), axis=1
),
columns=house_dataset.feature_names + ["Price"],
)
train_size = int(df.shape[0] * 0.9)
df[:train_size].to_csv("train.csv", index=False)
df[train_size:].to_csv("eval.csv", index=False)
train_file_path = "train.csv"
test_file_path = "eval.csv"<jupyter_output><empty_output><jupyter_text>The second step is to run the[StructuredDataRegressor](/structured_data_regressor).As a quick demo, we set epochs to 10.You can also leave the epochs unspecified for an adaptive number of epochs.<jupyter_code># Initialize the structured data regressor.
reg = ak.StructuredDataRegressor(
overwrite=True, max_trials=3
) # It tries 3 different models.
# Feed the structured data regressor with training data.
reg.fit(
# The path to the train.csv file.
train_file_path,
# The name of the label column.
"Price",
epochs=10,
)
# Predict with the best model.
predicted_y = reg.predict(test_file_path)
# Evaluate the best model with testing data.
print(reg.evaluate(test_file_path, "Price"))<jupyter_output><empty_output><jupyter_text>Data Format: The AutoKeras StructuredDataRegressor is quite flexible for the data format. The example above shows how to use the CSV files directly. Besides CSV files, it also supports numpy.ndarray, pandas.DataFrame or [tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable). The data should be two-dimensional with numerical or categorical values. For the regression targets, it should be a vector of numerical values. AutoKeras accepts numpy.ndarray, pandas.DataFrame, or pandas.Series. The following examples show how the data can be prepared with numpy.ndarray, pandas.DataFrame, and tensorflow.data.Dataset.<jupyter_code># x_train as pandas.DataFrame, y_train as pandas.Series
x_train = pd.read_csv(train_file_path)
print(type(x_train)) # pandas.DataFrame
y_train = x_train.pop("Price")
print(type(y_train)) # pandas.Series
# You can also use pandas.DataFrame for y_train.
y_train = pd.DataFrame(y_train)
print(type(y_train)) # pandas.DataFrame
# You can also use numpy.ndarray for x_train and y_train.
x_train = x_train.to_numpy()
y_train = y_train.to_numpy()
print(type(x_train)) # numpy.ndarray
print(type(y_train)) # numpy.ndarray
# Preparing testing data.
x_test = pd.read_csv(test_file_path)
y_test = x_test.pop("Price")
# It tries 10 different models.
reg = ak.StructuredDataRegressor(max_trials=3, overwrite=True)
# Feed the structured data regressor with training data.
reg.fit(x_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = reg.predict(x_test)
# Evaluate the best model with testing data.
print(reg.evaluate(x_test, y_test))<jupyter_output><empty_output><jupyter_text>The following code shows how to convert numpy.ndarray to tf.data.Dataset.<jupyter_code>train_set = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_set = tf.data.Dataset.from_tensor_slices((x_test, y_test))
reg = ak.StructuredDataRegressor(max_trials=3, overwrite=True)
# Feed the tensorflow Dataset to the regressor.
reg.fit(train_set, epochs=10)
# Predict with the best model.
predicted_y = reg.predict(test_set)
# Evaluate the best model with testing data.
print(reg.evaluate(test_set))<jupyter_output><empty_output><jupyter_text>You can also specify the column names and types for the data as follows. The `column_names` is optional if the training data already have the column names, e.g. pandas.DataFrame, CSV file. Any column whose type is not specified will be inferred from the training data.<jupyter_code># Initialize the structured data regressor.
reg = ak.StructuredDataRegressor(
column_names=[
"MedInc",
"HouseAge",
"AveRooms",
"AveBedrms",
"Population",
"AveOccup",
"Latitude",
"Longitude",
],
column_types={"MedInc": "numerical", "Latitude": "numerical"},
max_trials=10, # It tries 10 different models.
overwrite=True,
)<jupyter_output><empty_output><jupyter_text>Validation Data: By default, AutoKeras uses the last 20% of training data as validation data. As shown in the example below, you can use `validation_split` to specify the percentage.<jupyter_code>reg.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=10,
)<jupyter_output><empty_output><jupyter_text>You can also use your own validation set instead of splitting it from the training data with `validation_data`.<jupyter_code>split = 500
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
reg.fit(
x_train,
y_train,
# Use your own validation set.
validation_data=(x_val, y_val),
epochs=10,
)<jupyter_output><empty_output><jupyter_text>Customized Search Space: For advanced users, you may customize your search space by using [AutoModel](/auto_model/automodel-class) instead of [StructuredDataRegressor](/structured_data_regressor). You can configure the [StructuredDataBlock](/block/structureddatablock-class) for some high-level configurations, e.g., `categorical_encoding` for whether to use the [CategoricalToNumerical](/block/categoricaltonumerical-class). You can also choose not to specify these arguments, which would leave the different choices to be tuned automatically. See the following example for detail.<jupyter_code>input_node = ak.StructuredDataInput()
output_node = ak.StructuredDataBlock(categorical_encoding=True)(input_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=3
)
reg.fit(x_train, y_train, epochs=10)<jupyter_output><empty_output><jupyter_text>The usage of [AutoModel](/auto_model/automodel-class) is similar to the [functional API](https://www.tensorflow.org/guide/keras/functional) of Keras. Basically, you are building a graph whose edges are blocks and whose nodes are intermediate outputs of blocks. To add an edge from `input_node` to `output_node`, use `output_node = ak.[some_block]([block_args])(input_node)`. You can also use more fine-grained blocks to customize the search space even further. See the following example.<jupyter_code>input_node = ak.StructuredDataInput()
output_node = ak.CategoricalToNumerical()(input_node)
output_node = ak.DenseBlock()(output_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, max_trials=3, overwrite=True
)
reg.fit(x_train, y_train, epochs=10)<jupyter_output><empty_output><jupyter_text>You can also export the best model found by AutoKeras as a Keras Model.<jupyter_code>model = reg.export_model()
model.summary()
# numpy array in object (mixed type) is not supported.
# you need convert it to unicode or float first.
model.predict(x_train)<jupyter_output><empty_output> | autokeras/docs/ipynb/structured_data_regression.ipynb/0 | {
"file_path": "autokeras/docs/ipynb/structured_data_regression.ipynb",
"repo_id": "autokeras",
"token_count": 2565
} | 4 |
"""shell
pip install autokeras
"""
import os
import shutil
import numpy as np
import tensorflow as tf
import autokeras as ak
"""
## Load Images from Disk
If the data is too large to put in memory all at once, we can load it batch by
batch into memory from disk with tf.data.Dataset. This
[function](
https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/
image_dataset_from_directory)
can help you build such a tf.data.Dataset for image data.
First, we download the data and extract the files.
"""
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz" # noqa: E501
local_file_path = tf.keras.utils.get_file(
origin=dataset_url, fname="image_data", extract=True
)
# The file is extracted in the same directory as the downloaded file.
local_dir_path = os.path.dirname(local_file_path)
# After checking manually, we know the extracted data is in 'flower_photos'.
data_dir = os.path.join(local_dir_path, "flower_photos")
print(data_dir)
"""
The directory should look like this. Each folder contains the images in the
same class.
```
flower_photos/
daisy/
dandelion/
roses/
sunflowers/
tulips/
```
We can split the data into training and testing as we load them.
"""
batch_size = 32
img_height = 180
img_width = 180
train_data = ak.image_dataset_from_directory(
data_dir,
# Use 20% data as testing data.
validation_split=0.2,
subset="training",
# Set seed to ensure the same split when loading testing data.
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
test_data = ak.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
"""
Then we just do one quick demo of AutoKeras to make sure the dataset works.
"""
clf = ak.ImageClassifier(overwrite=True, max_trials=1)
clf.fit(train_data, epochs=1)
print(clf.evaluate(test_data))
"""
## Load Texts from Disk
You can also load text datasets in the same way.
"""
dataset_url = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
local_file_path = tf.keras.utils.get_file(
fname="text_data",
origin=dataset_url,
extract=True,
)
# The file is extracted in the same directory as the downloaded file.
local_dir_path = os.path.dirname(local_file_path)
# After checking manually, we know the extracted data is in 'aclImdb'.
data_dir = os.path.join(local_dir_path, "aclImdb")
# Remove the unused data folder.
shutil.rmtree(os.path.join(data_dir, "train/unsup"))
"""
For this dataset, the data is already split into train and test.
We just load them separately.
"""
print(data_dir)
train_data = ak.text_dataset_from_directory(
os.path.join(data_dir, "train"), batch_size=batch_size
)
test_data = ak.text_dataset_from_directory(
os.path.join(data_dir, "test"), shuffle=False, batch_size=batch_size
)
clf = ak.TextClassifier(overwrite=True, max_trials=1)
clf.fit(train_data, epochs=2)
print(clf.evaluate(test_data))
"""
## Load Data with Python Generators
If you want to use generators, you can refer to the following code.
"""
N_BATCHES = 30
BATCH_SIZE = 100
N_FEATURES = 10
def get_data_generator(n_batches, batch_size, n_features):
"""Get a generator returning n_batches random data.
The shape of the data is (batch_size, n_features).
"""
def data_generator():
for _ in range(n_batches * batch_size):
x = np.random.randn(n_features)
y = x.sum(axis=0) / n_features > 0.5
yield x, y
return data_generator
dataset = tf.data.Dataset.from_generator(
get_data_generator(N_BATCHES, BATCH_SIZE, N_FEATURES),
output_types=(tf.float32, tf.float32),
output_shapes=((N_FEATURES,), tuple()),
).batch(BATCH_SIZE)
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=1, seed=5)
clf.fit(x=dataset, validation_data=dataset, batch_size=BATCH_SIZE)
print(clf.evaluate(dataset))
"""
## Reference
[image_dataset_from_directory](utils/#image_dataset_from_directory-function)
[text_dataset_from_directory](utils/#text_dataset_from_directory-function)
"""
| autokeras/docs/py/load.py/0 | {
"file_path": "autokeras/docs/py/load.py",
"repo_id": "autokeras",
"token_count": 1554
} | 5 |
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg id="logo" width="100" height="100" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g fill="#d00000">
<g transform="translate(-45, -20) scale(0.62)">
<path d="M150 50 L150 174.8528137423857 L130 154.8528137423857 L130 70 "/>
<path d="M131 69 L73.4314575050762 126.5685424949238 L73.4314575050762 154.8528137423857 L131 97.2842712474619 "/>
<path d="M131 155.8528137423857 L105.04906208587143 129.90187582825715 L119.19119770960238 115.75974020452618 L131 127.5685424949238 "/>
<path d="M73.4314575050762 98.2842712474619 L73.4314575050762 70 L98.38239541920477 94.95093791412857 L84.24025979547382 109.09307353785952 "/>
<path d="M154.71404520791032 50 L154.71404520791032 174.8528137423857 L174.71404520791032 154.8528137423857 L174.71404520791032 70 "/>
<path d="M173.71404520791032 127.5685424949238 L231.28258770283412 70 L202.99831645537222 70 L173.71404520791032 99.2842712474619 "/>
<path d="M206.33164978870556 101.61760458079523 L231.28258770283412 126.5685424949238 L231.28258770283412 154.8528137423857 L192.1895141649746 115.75974020452618 "/>
</g>
</g>
</svg>
| autokeras/docs/templates/img/logo_red.svg/0 | {
"file_path": "autokeras/docs/templates/img/logo_red.svg",
"repo_id": "autokeras",
"token_count": 585
} | 6 |
"""shell
!pip install -q -U pip
!pip install -q -U autokeras==1.0.8
!pip install -q git+https://github.com/keras-team/[email protected]
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import reuters
import autokeras as ak
"""
Search for a good model for the
[Reuters](https://keras.io/ja/datasets/#_5) dataset.
"""
# Prepare the dataset.
def reuters_raw(max_features=20000):
index_offset = 3 # word index offset
(x_train, y_train), (x_test, y_test) = reuters.load_data(
num_words=max_features, index_from=index_offset
)
x_train = x_train
y_train = y_train.reshape(-1, 1)
x_test = x_test
y_test = y_test.reshape(-1, 1)
word_to_id = reuters.get_word_index()
word_to_id = {k: (v + index_offset) for k, v in word_to_id.items()}
word_to_id["<PAD>"] = 0
word_to_id["<START>"] = 1
word_to_id["<UNK>"] = 2
id_to_word = {value: key for key, value in word_to_id.items()}
x_train = list(
map(lambda sentence: " ".join(id_to_word[i] for i in sentence), x_train)
)
x_test = list(
map(lambda sentence: " ".join(id_to_word[i] for i in sentence), x_test)
)
    x_train = np.array(x_train, dtype=str)
    x_test = np.array(x_test, dtype=str)
return (x_train, y_train), (x_test, y_test)
# Prepare the data.
(x_train, y_train), (x_test, y_test) = reuters_raw()
print(x_train.shape) # (8982,)
print(y_train.shape) # (8982, 1)
print(x_train[0][:50]) # <START> <UNK> <UNK> said as a result of its decemb
# Initialize the TextClassifier
clf = ak.TextClassifier(
max_trials=5,
overwrite=True,
)
# Callback to avoid overfitting with the EarlyStopping.
cbs = [
tf.keras.callbacks.EarlyStopping(patience=3),
]
# Search for the best model.
clf.fit(x_train, y_train, epochs=10, callbacks=cbs)
# Evaluate on the testing data.
print("Accuracy: {accuracy}".format(accuracy=clf.evaluate(x_test, y_test)))
| autokeras/examples/reuters.py/0 | {
"file_path": "autokeras/examples/reuters.py",
"repo_id": "autokeras",
"token_count": 846
} | 7 |
#!/usr/bin/env bash
set -e
export DOCKER_BUILDKIT=1
docker build -t autokeras_formatting -f docker/pre-commit.Dockerfile .
docker run --rm -t -v "$(pwd -P):/autokeras" autokeras_formatting
| autokeras/shell/pre-commit.sh/0 | {
"file_path": "autokeras/shell/pre-commit.sh",
"repo_id": "autokeras",
"token_count": 77
} | 8 |
# Keras Preprocessing API
| Status | Proposed |
:-------------- |:---------------------------------------------------- |
| **Author(s)** | Francois Chollet ([email protected]), Frederic Branchaud-Charron ([email protected])|
| **Updated** | 2019-08-21 |
## Context
`tf.data.Dataset` is the main API for data loading and preprocessing in TensorFlow. It has two advantages:
- It supports GPU prefetching
- It supports distribution via the Distribution Strategies API
Meanwhile, `keras.preprocessing` is a major API for data loading and preprocessing in Keras. It is based
on Numpy and Scipy, and it produces instances of the `keras.utils.Sequence` class, which are finite-length,
resettable Python generators that yield batches of data.
Some features of `keras.preprocessing` are highly useful and don't have straightforward equivalents in `tf.data`
(in particular image data augmentation and dynamic time series iteration).
Ideally, the utilities in `keras.preprocessing` should be made compatible with `tf.data`.
This presents the opportunity to improve on the existing API. In particular we don't have good support
for image segmentation use cases today.
Some features are also being supplanted by [preprocessing layers](https://github.com/keras-team/governance/blob/master/rfcs/20190502-preprocessing-layers.md), in particular text processing.
As a result we may want to move the current API to an API similar to Layers.
## Goals
- Unify "keras.preprocessing" and the recently-introduced [Preprocessing Layers API](https://github.com/keras-team/governance/blob/master/rfcs/20190502-preprocessing-layers.md).
- Make all features of `keras.preprocessing` compatible with `tf.data`.
- As a by-product, add required ops to TensorFlow (`tf.image`).
## Proposed changes at a high-level
- Deprecate `ImageDataGenerator` in favor of new `ImagePipeline` class similar to a `Sequential` model.
- Inherits from `keras.layers.PreprocessingLayer` for all image transformations.
- Deprecate `Tokenizer` class in favor of `TextVectorization` preprocessing layer (see the sketch after this list).
- Replace `TimeseriesGenerator` with a function-based API.
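
For the `Tokenizer` deprecation above, a rough sketch of what the migration to the
`TextVectorization` layer might look like is shown below. The exact constructor
arguments (`max_tokens`, `output_mode`) and the import path are assumptions for
illustration; the layer itself is specified in the preprocessing layers RFC.

```python
import tensorflow as tf
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization

texts = ["the cat sat on the mat", "the dog ate my homework"]

# Before (current API):
#   tokenizer = keras.preprocessing.text.Tokenizer(num_words=100)
#   tokenizer.fit_on_texts(texts)
#   sequences = tokenizer.texts_to_sequences(texts)

# After (proposed direction): a preprocessing layer that is adapted to the data
# and can then be used inside a model or a tf.data pipeline.
vectorizer = TextVectorization(max_tokens=100, output_mode='int')
vectorizer.adapt(tf.constant(texts))             # analogous to fit_on_texts
int_sequences = vectorizer(tf.constant(texts))   # analogous to texts_to_sequences
```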
## Detailed API changes
### ImagePipeline
#### Constructor
`ImagePipeline` inherits from `PreprocessingLayer` (or alternatively `keras.models.Sequential`, whose behavior is similar) and takes a list of layers as inputs. In the future it will inherit from `PreprocessingStage`.
`ImagePipeline` is a preprocessing layer that encapsulates a series of image transformations. Since some of these transformations may be trained (featurewise normalization), it exposes the method `adapt`, like all other preprocessing layers.
```python
class ImagePipeline(Sequential):
def __init__(self, layers:List[Layer]):
...
```
#### Example usage
```python
preprocessor = ImagePipeline([
RandomFlip(horizontal=True),
RandomRotation(0.2, fill_mode='constant'),
RandomZoom(0.2, fill_mode='constant'),
RandomTranslation(0.2, fill_mode='constant'),
Normalization(), # This is the same Normalization introduced in preprocessing layers
])
preprocessor.adapt(sample_data) # optional step in case the object needs to be trained
dataset = preprocessor.from_directory(dir_name, image_size=(512, 512))
model.fit(dataset, epochs=10)
```
#### Methods
```python
def from_directory(
self,
directory,
targets='inferred',
target_mode='categorical',
class_names='inferred',
color_mode='rgb',
batch_size=32,
image_size=(255, 255),
shuffle=True,
seed=None,
follow_links=False,
validation_split=None,
        subset=None):
"""Generates a Dataset from files in a directory.
# Arguments:
directory: Directory where the data is located.
If `targets` is "inferred", it should contain
subdirectories, each containing images for a class.
Otherwise, the directory structure is ignored.
targets: Either
"inferred" (targets are generated from the directory structure),
None (no targets),
or a list of integer labels of the same size as the number of image
files found in the directory.
target_mode:
- 'categorical' means that the inferred labels are
encoded as a categorical vector (e.g. for categorical_crossentropy).
- 'binary' means that the inferred labels (there can be only 2)
are encoded as binary scalars (e.g. for binary_crossentropy).
        class_names: Only valid if "targets" is "inferred". This is the explicit
list of class names (must match names of subdirectories). Used
to control the order of the classes (otherwise alphanumerical order is used).
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
batch_size: Size of the batches of data (default: 32).
image_size: Size to resize images to after they are read from disk.
Since the pipeline processes batches of images that must all have the same size,
this must be provided.
shuffle: Whether to shuffle the data (default: True)
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
follow_links: Whether to follow links inside
subdirectories (default: False).
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: One of "training" or "validation". Only used if `validation_split` is set.
"""
def from_dataframe(
self,
dataframe,
directory=None,
data_column='filename',
target_column='class',
target_mode='categorical',
weight_column=None,
color_mode='rgb',
batch_size=32,
image_size=(255, 255),
shuffle=True,
seed=None,
validation_split=None,
subset=None):
"""Generates a Dataset from a Pandas dataframe.
# Arguments:
dataframe: Pandas dataframe instance.
directory: The directory that image paths refer to.
data_column: Name of column with the paths for the input images.
target_column: Name of column with the class information.
target_mode:
- 'categorical' means that the inferred labels are
encoded as a categorical vector (e.g. for categorical_crossentropy).
- 'binary' means that the inferred labels (there can be only 2)
are encoded as binary scalars (e.g. for binary_crossentropy).
weight_column: Name of column with sample weight information.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
batch_size: Size of the batches of data (default: 32).
image_size: Size to resize images to after they are read from disk.
Since the pipeline processes batches of images that must all have the same size,
this must be provided.
shuffle: Whether to shuffle the data (default: True)
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: One of "training" or "validation". Only used if `validation_split` is set.
"""
def preview(self, data, save_to_directory=None, save_prefix=None, save_format='png'):
"""Enables users to preview the image augmentation configuration.
# Arguments
data: Image data. Could be strings (a list of image paths), a list of PIL image instances,
a list of arrays, or a list of eager tensors.
save_to_directory: Directory to save transformed images. Mandatory if not in a notebook.
If in a notebook and this is not specified, images are displayed in-line.
save_prefix: String, filename prefix for saved images.
save_format: String, extension for saved images.
"""
```
**Note:** `from_arrays` is not included since it is possible to transform Numpy data simply by calling the `ImagePipeline` object (like a layer).
### Layers
The new data augmentation layers will inherit from `keras.layers.Layer` and work in a similar way.
```python
Resizing(height, width) # Resize while distorting aspect ratio
CenterCrop(height, width) # Resize without distorting aspect ratio
RandomCrop(height, width, seed=None) # Return a (height, width) crop from a random location
Rescaling(value) # Divide by `value`
RandomFlip(horizontal=False, vertical=False, seed=None)
RandomTranslation(amplitude=0., fill_mode='constant', fill_value=0., seed=None)
RandomRotation(amplitude=0., fill_mode='constant', fill_value=0., seed=None)
RandomZoom(amplitude=0., fill_mode='constant', fill_value=0., seed=None)
RandomBrightness(amplitude=0., seed=None)
RandomContrast(amplitude=0., seed=None)
RandomSaturation(amplitude=0., seed=None)
RandomWidth(amplitude=0., seed=None) # Expand / shrink width while distorting aspect ratio
RandomHeight(amplitude=0., seed=None) # Expand / shrink height while distorting aspect ratio
```
The `amplitude` argument may be:
- a positive float: it is understood as "fraction of total" (total is the current width, or height, or 180 degrees in the case `RandomRotation`). E.g. `0.2` results in variations in the [-20%, +20%] range. If larger than 1, it is rounded to one for the lower boundary (but not the higher boundary).
- a tuple of 2 positive floats: understood as a fractional range, e.g. `(0.2, 0.4)` is interpreted as the [-20%, +40%] range. The first float may not be larger than 1.
To do a random center crop that zooms in and discards part of the image, you would do:
```python
preprocessor = ImagePipeline([
RandomZoom([0., 0.2]),
CenterCrop(height, width),
])
```
#### Notes
- We are dropping support for ZCA whitening as it is no longer popular in the computer vision community.
- We don't have immediate support for random translations along only one axis.
- We only plan on implementing support for `data_format='channels_last'`. As such this argument does not appear in the API.
#### Example implementation
```python
class RandomFlip(PreprocessingLayer):
def __init__(self, horizontal=False, vertical=False, seed=None):
self.horizontal = horizontal
self.vertical = vertical
self.seed = seed or random_int()
self._rng = rng_from_seed(seed)
def call(self, inputs, training=None, seed=None):
seed = seed or self._rng.sample()
if training:
if self.horizontal:
inputs = tf.image.random_flip_left_right(inputs, seed=seed)
if self.vertical:
inputs = tf.image.random_flip_up_down(inputs, seed=seed)
return inputs
```
#### Question: how to support image segmentation in a simple way?
**Requirements:**
- Image loading and image augmentation should be synced across inputs and targets
- It should be possible to use different standardization preprocessing (outside of augmentation) across inputs and targets
**Proposal:**
```python
# Shared spatial transformations for inputs and targets
augmenter = ImagePipeline([
RandomRotation(0.5),
RandomFlip(vertical=True)
])
input_pipeline = ImagePipeline([
augmenter,
RandomBrightness(0.2),
RandomContrast(0.2),
RandomSaturation(0.2),
])
target_pipeline = ImagePipeline([
augmenter,
OneHot(num_classes)
])
input_ds = input_pipeline.from_directory(
input_dir, targets=None, image_size=(150, 150), batch_size=32,
    seed=123) # This seed supersedes the per-layer seed in all transformations
target_ds = target_pipeline.from_directory(
target_dir, # target_dir should have same structure as input_dir.
targets=None, image_size=(150, 150), batch_size=32, seed=123)
ds = tf.data.Dataset.zip((input_ds, target_ds))
model.fit(ds)
```
Note that the behavior whereby the `seed` argument in `from_directory` supersedes the per-layer seeds is achieved by using that seed
to sample new random ints (scalar tensors from `tf.random.experimental.Generator`) that serve as the `seed` argument to each underlying layer's `call`.
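
A minimal sketch of that mechanism is given below. The class skeleton and attribute names (`_layers`, `_call_with_seed`) are assumptions for illustration only, not part of the proposed API.

```python
import tensorflow as tf

class ImagePipeline:  # illustrative skeleton only, not the real implementation
    def __init__(self, layers):
        self._layers = layers

    def _call_with_seed(self, images, seed):
        # A single generator is created from the pipeline-level seed; each layer
        # call receives a fresh scalar int sampled from it, which supersedes the
        # layer's own `seed` argument.
        rng = tf.random.experimental.Generator.from_seed(seed)
        for layer in self._layers:
            per_call_seed = rng.uniform_full_int([], dtype=tf.int32)
            images = layer(images, training=True, seed=per_call_seed)
        return images
```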
### TimeseriesGenerator
- Deprecate existing `TimeSeriesGenerator` class
- Introduce functional replacement `timeseries_dataset`:
```python
def timeseries_dataset(
data, targets, length,
sampling_rate=1,
stride=1,
start_index=0,
end_index=None,
shuffle=False,
reverse=False,
batch_size=128):
"""Utility function for generating batches of temporal data.
This function takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
stride, length of history, etc., to produce batches for
training/validation.
# Arguments
data: Indexable generator (such as list or Numpy array)
containing consecutive data points (timesteps).
            The data should be at least 2D, and axis 0 is expected
to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
It should have same length as `data`.
length: Length of the output sequences (in number of timesteps).
sampling_rate: Period between successive individual timesteps
within sequences. For rate `r`, timesteps
`data[i]`, `data[i-r]`, ... `data[i - length]`
            are used to create a sample sequence.
stride: Period between successive output sequences.
For stride `s`, consecutive output samples would
be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
start_index: Data points earlier than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Data points later than `end_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
reverse: Boolean: if `true`, timesteps in each output sample will be
in reverse chronological order.
batch_size: Number of timeseries samples in each batch
(except maybe the last one).
# Returns
A Dataset instance.
"""
```
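
For example, usage against this proposed signature might look like the sketch below (`timeseries_dataset` does not exist yet, so this is illustrative only):

```python
import numpy as np

data = np.arange(100).reshape(100, 1).astype('float32')   # 100 timesteps, 1 feature
targets = np.arange(100).astype('float32')                 # target at each timestep

ds = timeseries_dataset(
    data, targets,
    length=10,          # each sample spans 10 timesteps
    sampling_rate=1,
    stride=1,
    batch_size=32)

for batch_x, batch_y in ds:
    # batch_x has shape (32, 10, 1); batch_y has shape (32,)
    break
```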
| governance/rfcs/20190729-keras-preprocessing-redesign.md/0 | {
"file_path": "governance/rfcs/20190729-keras-preprocessing-redesign.md",
"repo_id": "governance",
"token_count": 5002
} | 9 |
"""Inception V3 model for Keras.
Note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function is also different (same as Xception).
# Reference
- [Rethinking the Inception Architecture for Computer Vision](
http://arxiv.org/abs/1512.00567) (CVPR 2016)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
WEIGHTS_PATH = (
'https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.5/'
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = (
'https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.5/'
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
backend = None
layers = None
models = None
keras_utils = None
def conv2d_bn(x,
filters,
num_row,
num_col,
padding='same',
strides=(1, 1),
name=None):
"""Utility function to apply conv + BN.
# Arguments
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
# Returns
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
if backend.image_data_format() == 'channels_first':
bn_axis = 1
else:
bn_axis = 3
x = layers.Conv2D(
filters, (num_row, num_col),
strides=strides,
padding=padding,
use_bias=False,
name=conv_name)(x)
x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
x = layers.Activation('relu', name=name)(x)
return x
def InceptionV3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the Inception v3 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 75.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=75,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backend.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding='valid')
x = conv2d_bn(x, 192, 3, 3, padding='valid')
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed0')
# mixed 1: 35 x 35 x 288
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed1')
# mixed 2: 35 x 35 x 288
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed2')
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(
branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
strides=(2, 2), padding='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(
branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch7x7x3, branch_pool],
axis=channel_axis,
name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate(
[branch3x3_1, branch3x3_2],
axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate(
[branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(9 + i))
if include_top:
# Classification block
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = layers.Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='inception_v3')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = keras_utils.get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
weights_path = keras_utils.get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='bcbd6486424b2319ff4ef7d526e38f63')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
        x: a 4D numpy array consisting of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
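# --- Usage sketch (illustrative only, not part of this module) ---
# A minimal, hedged example of classifying one image with this model when it is
# consumed through `keras.applications.inception_v3`. The file name
# 'elephant.jpg' is a placeholder, and pillow must be installed for image loading.
if __name__ == '__main__':
    import numpy as np
    from keras.applications.inception_v3 import (
        InceptionV3, preprocess_input, decode_predictions)
    from keras.preprocessing import image

    model = InceptionV3(weights='imagenet')
    img = image.load_img('elephant.jpg', target_size=(299, 299))
    x = np.expand_dims(image.img_to_array(img), axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    print(decode_predictions(preds, top=3)[0])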
| keras-applications/keras_applications/inception_v3.py/0 | {
"file_path": "keras-applications/keras_applications/inception_v3.py",
"repo_id": "keras-applications",
"token_count": 7299
} | 10 |
# examples
# activations
keras_contrib/activations/squash.py @SriRangaTarun
# applications
keras_contrib/applications/densenet.py @titu1994
keras_contrib/applications/nasnet.py @titu1994
keras_contrib/applications/wide_resnet.py @titu1994
# backend
# callbacks
keras_contrib/callbacks/tensorboard.py @gabrieldemarmiesse
keras_contrib/callbacks/snapshot.py @titu1994
# constraints
# datasets
# initializers
# layers
keras_contrib/layers/advanced_activations/sinerelu.py @wilderrodrigues
keras_contrib/layers/advanced_activations/swish.py @gabrieldemarmiesse
keras_contrib/layers/convolutional/subpixelupscaling.py @titu1994
keras_contrib/layers/normalization/groupnormalization.py @titu1994
keras_contrib/layers/capsule.py @SriRangaTarun
# losses
# metrics
# optimizers
keras_contrib/optimizers/yogi.py @MarcoAndreaBuchmann
keras_contrib/optimizers/padam.py @MFreidank
# preprocessing
# regularizers
# utils
# wrappers
| keras-contrib/CODEOWNERS/0 | {
"file_path": "keras-contrib/CODEOWNERS",
"repo_id": "keras-contrib",
"token_count": 359
} | 11 |
"""An implementation of the improved WGAN described in https://arxiv.org/abs/1704.00028
The improved WGAN has a term in the loss function which penalizes the network if its
gradient norm moves away from 1. This is included because the Earth Mover (EM) distance
used in WGANs is only easy to calculate for 1-Lipschitz functions (i.e. functions where
the gradient norm has a constant upper bound of 1).
The original WGAN paper enforced this by clipping weights to very small values
[-0.01, 0.01]. However, this drastically reduced network capacity. Penalizing the
gradient norm is more natural, but this requires second-order gradients. These are not
supported for some tensorflow ops (particularly MaxPool and AveragePool) in the current
release (1.0.x), but they are supported in the current nightly builds
(1.1.0-rc1 and higher).
To avoid this, this model uses strided convolutions instead of Average/Maxpooling for
downsampling. If you wish to use pooling operations in your discriminator, please ensure
you update Tensorflow to 1.1.0-rc1 or higher. I haven't tested this with Theano at all.
The model saves images using pillow. If you don't have pillow, either install it or
remove the calls to generate_images.
"""
import argparse
import os
import numpy as np
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Reshape, Flatten
from keras.layers.merge import _Merge
from keras.layers.convolutional import Convolution2D, Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
from keras.datasets import mnist
from keras import backend as K
from functools import partial
try:
from PIL import Image
except ImportError:
print('This script depends on pillow! '
'Please install it (e.g. with pip install pillow)')
exit()
BATCH_SIZE = 64
# The training ratio is the number of discriminator updates
# per generator update. The paper uses 5.
TRAINING_RATIO = 5
GRADIENT_PENALTY_WEIGHT = 10 # As per the paper
def wasserstein_loss(y_true, y_pred):
"""Calculates the Wasserstein loss for a sample batch.
The Wasserstein loss function is very simple to calculate. In a standard GAN, the
discriminator has a sigmoid output, representing the probability that samples are
real or generated. In Wasserstein GANs, however, the output is linear with no
activation function! Instead of being constrained to [0, 1], the discriminator wants
to make the distance between its output for real and generated samples as
large as possible.
The most natural way to achieve this is to label generated samples -1 and real
samples 1, instead of the 0 and 1 used in normal GANs, so that multiplying the
outputs by the labels will give you the loss immediately.
Note that the nature of this loss means that it can be (and frequently will be)
less than 0."""
return K.mean(y_true * y_pred)
def gradient_penalty_loss(y_true, y_pred, averaged_samples,
gradient_penalty_weight):
"""Calculates the gradient penalty loss for a batch of "averaged" samples.
In Improved WGANs, the 1-Lipschitz constraint is enforced by adding a term to the
loss function that penalizes the network if the gradient norm moves away from 1.
However, it is impossible to evaluate this function at all points in the input
space. The compromise used in the paper is to choose random points on the lines
between real and generated samples, and check the gradients at these points. Note
that it is the gradient w.r.t. the input averaged samples, not the weights of the
discriminator, that we're penalizing!
In order to evaluate the gradients, we must first run samples through the generator
and evaluate the loss. Then we get the gradients of the discriminator w.r.t. the
input averaged samples. The l2 norm and penalty can then be calculated for this
gradient.
Note that this loss function requires the original averaged samples as input, but
Keras only supports passing y_true and y_pred to loss functions. To get around this,
we make a partial() of the function with the averaged_samples argument, and use that
for model training."""
# first get the gradients:
# assuming: - that y_pred has dimensions (batch_size, 1)
# - averaged_samples has dimensions (batch_size, nbr_features)
# gradients afterwards has dimension (batch_size, nbr_features), basically
# a list of nbr_features-dimensional gradient vectors
gradients = K.gradients(y_pred, averaged_samples)[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# ... and sqrt
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
# compute lambda * (1 - ||grad||)^2 still for each single sample
gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_l2_norm)
# return the mean as loss over all the batch samples
return K.mean(gradient_penalty)
def make_generator():
"""Creates a generator model that takes a 100-dimensional noise vector as a "seed",
and outputs images of size 28x28x1."""
model = Sequential()
model.add(Dense(1024, input_dim=100))
model.add(LeakyReLU())
model.add(Dense(128 * 7 * 7))
model.add(BatchNormalization())
model.add(LeakyReLU())
if K.image_data_format() == 'channels_first':
model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
bn_axis = 1
else:
model.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)))
bn_axis = -1
model.add(Conv2DTranspose(128, (5, 5), strides=2, padding='same'))
model.add(BatchNormalization(axis=bn_axis))
model.add(LeakyReLU())
model.add(Convolution2D(64, (5, 5), padding='same'))
model.add(BatchNormalization(axis=bn_axis))
model.add(LeakyReLU())
model.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
model.add(BatchNormalization(axis=bn_axis))
model.add(LeakyReLU())
# Because we normalized training inputs to lie in the range [-1, 1],
# the tanh function should be used for the output of the generator to ensure
# its output also lies in this range.
model.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))
return model
def make_discriminator():
"""Creates a discriminator model that takes an image as input and outputs a single
value, representing whether the input is real or generated. Unlike normal GANs, the
output is not sigmoid and does not represent a probability! Instead, the output
should be as large and negative as possible for generated inputs and as large and
positive as possible for real inputs.
Note that the improved WGAN paper suggests that BatchNormalization should not be
used in the discriminator."""
model = Sequential()
if K.image_data_format() == 'channels_first':
model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(1, 28, 28)))
else:
model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
model.add(LeakyReLU())
model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal',
strides=[2, 2]))
model.add(LeakyReLU())
model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', padding='same',
strides=[2, 2]))
model.add(LeakyReLU())
model.add(Flatten())
model.add(Dense(1024, kernel_initializer='he_normal'))
model.add(LeakyReLU())
model.add(Dense(1, kernel_initializer='he_normal'))
return model
def tile_images(image_stack):
"""Given a stacked tensor of images, reshapes them into a horizontal tiling for
display."""
assert len(image_stack.shape) == 3
image_list = [image_stack[i, :, :] for i in range(image_stack.shape[0])]
tiled_images = np.concatenate(image_list, axis=1)
return tiled_images
class RandomWeightedAverage(_Merge):
"""Takes a randomly-weighted average of two tensors. In geometric terms, this
outputs a random point on the line between each pair of input points.
Inheriting from _Merge is a little messy but it was the quickest solution I could
think of. Improvements appreciated."""
def _merge_function(self, inputs):
weights = K.random_uniform((BATCH_SIZE, 1, 1, 1))
return (weights * inputs[0]) + ((1 - weights) * inputs[1])
def generate_images(generator_model, output_dir, epoch):
"""Feeds random seeds into the generator and tiles and saves the output to a PNG
file."""
test_image_stack = generator_model.predict(np.random.rand(10, 100))
test_image_stack = (test_image_stack * 127.5) + 127.5
test_image_stack = np.squeeze(np.round(test_image_stack).astype(np.uint8))
tiled_output = tile_images(test_image_stack)
tiled_output = Image.fromarray(tiled_output, mode='L') # L specifies greyscale
outfile = os.path.join(output_dir, 'epoch_{}.png'.format(epoch))
tiled_output.save(outfile)
parser = argparse.ArgumentParser(description="Improved Wasserstein GAN "
"implementation for Keras.")
parser.add_argument("--output_dir", "-o", required=True,
help="Directory to output generated files to")
args = parser.parse_args()
# First we load the image data, reshape it and normalize it to the range [-1, 1]
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = np.concatenate((X_train, X_test), axis=0)
if K.image_data_format() == 'channels_first':
X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1], X_train.shape[2]))
else:
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], X_train.shape[2], 1))
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
# Now we initialize the generator and discriminator.
generator = make_generator()
discriminator = make_discriminator()
# The generator_model is used when we want to train the generator layers.
# As such, we ensure that the discriminator layers are not trainable.
# Note that once we compile this model, updating .trainable will have no effect within
# it. As such, it won't cause problems if we later set discriminator.trainable = True
# for the discriminator_model, as long as we compile the generator_model first.
for layer in discriminator.layers:
layer.trainable = False
discriminator.trainable = False
generator_input = Input(shape=(100,))
generator_layers = generator(generator_input)
discriminator_layers_for_generator = discriminator(generator_layers)
generator_model = Model(inputs=[generator_input],
outputs=[discriminator_layers_for_generator])
# We use the Adam parameters from Gulrajani et al.
generator_model.compile(optimizer=Adam(0.0001, beta_1=0.5, beta_2=0.9),
loss=wasserstein_loss)
# Now that the generator_model is compiled, we can make the discriminator
# layers trainable.
for layer in discriminator.layers:
layer.trainable = True
for layer in generator.layers:
layer.trainable = False
discriminator.trainable = True
generator.trainable = False
# The discriminator_model is more complex. It takes both real image samples and random
# noise seeds as input. The noise seed is run through the generator model to get
# generated images. Both real and generated images are then run through the
# discriminator. Although we could concatenate the real and generated images into a
# single tensor, we don't (see model compilation for why).
real_samples = Input(shape=X_train.shape[1:])
generator_input_for_discriminator = Input(shape=(100,))
generated_samples_for_discriminator = generator(generator_input_for_discriminator)
discriminator_output_from_generator = discriminator(generated_samples_for_discriminator)
discriminator_output_from_real_samples = discriminator(real_samples)
# We also need to generate weighted-averages of real and generated samples,
# to use for the gradient norm penalty.
averaged_samples = RandomWeightedAverage()([real_samples,
generated_samples_for_discriminator])
# We then run these samples through the discriminator as well. Note that we never
# really use the discriminator output for these samples - we're only running them to
# get the gradient norm for the gradient penalty loss.
averaged_samples_out = discriminator(averaged_samples)
# The gradient penalty loss function requires the input averaged samples to get
# gradients. However, Keras loss functions can only have two arguments, y_true and
# y_pred. We get around this by making a partial() of the function with the averaged
# samples here.
partial_gp_loss = partial(gradient_penalty_loss,
averaged_samples=averaged_samples,
gradient_penalty_weight=GRADIENT_PENALTY_WEIGHT)
# Functions need names or Keras will throw an error
partial_gp_loss.__name__ = 'gradient_penalty'
# Keras requires that inputs and outputs have the same number of samples. This is why
# we didn't concatenate the real samples and generated samples before passing them to
# the discriminator: If we had, it would create an output with 2 * BATCH_SIZE samples,
# while the output of the "averaged" samples for gradient penalty
# would have only BATCH_SIZE samples.
# If we don't concatenate the real and generated samples, however, we get three
# outputs: One of the generated samples, one of the real samples, and one of the
# averaged samples, all of size BATCH_SIZE. This works neatly!
discriminator_model = Model(inputs=[real_samples,
generator_input_for_discriminator],
outputs=[discriminator_output_from_real_samples,
discriminator_output_from_generator,
averaged_samples_out])
# We use the Adam parameters from Gulrajani et al. We use the Wasserstein loss for both
# the real and generated samples, and the gradient penalty loss for the averaged samples
discriminator_model.compile(optimizer=Adam(0.0001, beta_1=0.5, beta_2=0.9),
loss=[wasserstein_loss,
wasserstein_loss,
partial_gp_loss])
# We make three label vectors for training. positive_y is the label vector for real
# samples, with value 1. negative_y is the label vector for generated samples, with
# value -1. The dummy_y vector is passed to the gradient_penalty loss function and
# is not used.
positive_y = np.ones((BATCH_SIZE, 1), dtype=np.float32)
negative_y = -positive_y
dummy_y = np.zeros((BATCH_SIZE, 1), dtype=np.float32)
for epoch in range(100):
np.random.shuffle(X_train)
print("Epoch: ", epoch)
print("Number of batches: ", int(X_train.shape[0] // BATCH_SIZE))
discriminator_loss = []
generator_loss = []
minibatches_size = BATCH_SIZE * TRAINING_RATIO
for i in range(int(X_train.shape[0] // (BATCH_SIZE * TRAINING_RATIO))):
discriminator_minibatches = X_train[i * minibatches_size:
(i + 1) * minibatches_size]
for j in range(TRAINING_RATIO):
image_batch = discriminator_minibatches[j * BATCH_SIZE:
(j + 1) * BATCH_SIZE]
noise = np.random.rand(BATCH_SIZE, 100).astype(np.float32)
discriminator_loss.append(discriminator_model.train_on_batch(
[image_batch, noise],
[positive_y, negative_y, dummy_y]))
generator_loss.append(generator_model.train_on_batch(np.random.rand(BATCH_SIZE,
100),
positive_y))
# Still needs some code to display losses from the generator and discriminator,
# progress bars, etc.
generate_images(generator, args.output_dir, epoch)
| keras-contrib/examples/improved_wgan.py/0 | {
"file_path": "keras-contrib/examples/improved_wgan.py",
"repo_id": "keras-contrib",
"token_count": 5747
} | 12 |
from keras.callbacks import Callback
from keras import backend as K
import numpy as np
class CyclicLR(Callback):
"""This callback implements a cyclical learning rate policy (CLR).
The method cycles the learning rate between two boundaries with
some constant frequency.
# Arguments
base_lr: initial learning rate which is the
lower boundary in the cycle.
max_lr: upper boundary in the cycle. Functionally,
it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr
and some scaling of the amplitude; therefore
max_lr may not actually be reached depending on
scaling function.
step_size: number of training iterations per
half cycle. Authors suggest setting step_size
            2-8 x training iterations per epoch.
mode: one of {triangular, triangular2, exp_range}.
Default 'triangular'.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
gamma: constant in 'exp_range' scaling function:
gamma**(cycle iterations)
scale_fn: Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
            mode parameter is ignored
scale_mode: {'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle). Default is 'cycle'.
The amplitude of the cycle can be scaled on a per-iteration or
per-cycle basis.
This class has three built-in policies, as put forth in the paper.
"triangular":
A basic triangular cycle w/ no amplitude scaling.
"triangular2":
A basic triangular cycle that scales initial amplitude by half each cycle.
"exp_range":
A cycle that scales initial amplitude by gamma**(cycle iterations) at each
cycle iteration.
For more detail, please see paper.
# Example for CIFAR-10 w/ batch size 100:
```python
clr = CyclicLR(base_lr=0.001, max_lr=0.006,
step_size=2000., mode='triangular')
model.fit(X_train, Y_train, callbacks=[clr])
```
Class also supports custom scaling functions:
```python
clr_fn = lambda x: 0.5*(1+np.sin(x*np.pi/2.))
clr = CyclicLR(base_lr=0.001, max_lr=0.006,
step_size=2000., scale_fn=clr_fn,
scale_mode='cycle')
model.fit(X_train, Y_train, callbacks=[clr])
```
# References
- [Cyclical Learning Rates for Training Neural Networks](
https://arxiv.org/abs/1506.01186)
"""
def __init__(
self,
base_lr=0.001,
max_lr=0.006,
step_size=2000.,
mode='triangular',
gamma=1.,
scale_fn=None,
scale_mode='cycle'):
super(CyclicLR, self).__init__()
if mode not in ['triangular', 'triangular2',
'exp_range']:
raise KeyError("mode must be one of 'triangular', "
"'triangular2', or 'exp_range'")
self.base_lr = base_lr
self.max_lr = max_lr
self.step_size = step_size
self.mode = mode
self.gamma = gamma
if scale_fn is None:
if self.mode == 'triangular':
self.scale_fn = lambda x: 1.
self.scale_mode = 'cycle'
elif self.mode == 'triangular2':
self.scale_fn = lambda x: 1 / (2.**(x - 1))
self.scale_mode = 'cycle'
elif self.mode == 'exp_range':
self.scale_fn = lambda x: gamma ** x
self.scale_mode = 'iterations'
else:
self.scale_fn = scale_fn
self.scale_mode = scale_mode
self.clr_iterations = 0.
self.trn_iterations = 0.
self.history = {}
self._reset()
def _reset(self, new_base_lr=None, new_max_lr=None,
new_step_size=None):
"""Resets cycle iterations.
Optional boundary/step size adjustment.
"""
if new_base_lr is not None:
self.base_lr = new_base_lr
if new_max_lr is not None:
self.max_lr = new_max_lr
if new_step_size is not None:
self.step_size = new_step_size
self.clr_iterations = 0.
def clr(self):
cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))
x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)
if self.scale_mode == 'cycle':
return self.base_lr + (self.max_lr - self.base_lr) * \
np.maximum(0, (1 - x)) * self.scale_fn(cycle)
else:
return self.base_lr + (self.max_lr - self.base_lr) * \
np.maximum(0, (1 - x)) * self.scale_fn(self.clr_iterations)
def on_train_begin(self, logs={}):
logs = logs or {}
if self.clr_iterations == 0:
K.set_value(self.model.optimizer.lr, self.base_lr)
else:
K.set_value(self.model.optimizer.lr, self.clr())
def on_batch_end(self, epoch, logs=None):
logs = logs or {}
self.trn_iterations += 1
self.clr_iterations += 1
K.set_value(self.model.optimizer.lr, self.clr())
self.history.setdefault(
'lr', []).append(
K.get_value(
self.model.optimizer.lr))
self.history.setdefault('iterations', []).append(self.trn_iterations)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
| keras-contrib/keras_contrib/callbacks/cyclical_learning_rate.py/0 | {
"file_path": "keras-contrib/keras_contrib/callbacks/cyclical_learning_rate.py",
"repo_id": "keras-contrib",
"token_count": 2807
} | 13 |
from keras.layers import Layer, InputSpec
from keras import initializers
import keras.backend as K
from keras_contrib.utils.test_utils import to_tuple
class SReLU(Layer):
"""S-shaped Rectified Linear Unit.
It follows:
`f(x) = t^r + a^r(x - t^r) for x >= t^r`,
`f(x) = x for t^r > x > t^l`,
`f(x) = t^l + a^l(x - t^l) for x <= t^l`.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
t_left_initializer: initializer function for the left part intercept
a_left_initializer: initializer function for the left part slope
t_right_initializer: initializer function for the right part intercept
a_right_initializer: initializer function for the right part slope
shared_axes: the axes along which to share learnable
parameters for the activation function.
For example, if the incoming feature maps
are from a 2D convolution
with output shape `(batch, height, width, channels)`,
and you wish to share parameters across space
so that each filter only has one set of parameters,
set `shared_axes=[1, 2]`.
# References
- [Deep Learning with S-shaped Rectified Linear Activation Units](
http://arxiv.org/abs/1512.07030)
"""
def __init__(self, t_left_initializer='zeros',
a_left_initializer=initializers.RandomUniform(minval=0, maxval=1),
t_right_initializer=initializers.RandomUniform(minval=0, maxval=5),
a_right_initializer='ones',
shared_axes=None,
**kwargs):
super(SReLU, self).__init__(**kwargs)
self.supports_masking = True
self.t_left_initializer = initializers.get(t_left_initializer)
self.a_left_initializer = initializers.get(a_left_initializer)
self.t_right_initializer = initializers.get(t_right_initializer)
self.a_right_initializer = initializers.get(a_right_initializer)
if shared_axes is None:
self.shared_axes = None
elif not isinstance(shared_axes, (list, tuple)):
self.shared_axes = [shared_axes]
else:
self.shared_axes = list(shared_axes)
def build(self, input_shape):
input_shape = to_tuple(input_shape)
param_shape = list(input_shape[1:])
self.param_broadcast = [False] * len(param_shape)
if self.shared_axes is not None:
for i in self.shared_axes:
param_shape[i - 1] = 1
self.param_broadcast[i - 1] = True
param_shape = tuple(param_shape)
self.t_left = self.add_weight(shape=param_shape,
name='t_left',
initializer=self.t_left_initializer)
self.a_left = self.add_weight(shape=param_shape,
name='a_left',
initializer=self.a_left_initializer)
self.t_right = self.add_weight(shape=param_shape,
name='t_right',
initializer=self.t_right_initializer)
self.a_right = self.add_weight(shape=param_shape,
name='a_right',
initializer=self.a_right_initializer)
# Set input spec
axes = {}
if self.shared_axes:
for i in range(1, len(input_shape)):
if i not in self.shared_axes:
axes[i] = input_shape[i]
self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
self.built = True
def call(self, x, mask=None):
        # ensure that the right part is always to the right of the left part
t_right_actual = self.t_left + K.abs(self.t_right)
if K.backend() == 'theano':
t_left = K.pattern_broadcast(self.t_left, self.param_broadcast)
a_left = K.pattern_broadcast(self.a_left, self.param_broadcast)
a_right = K.pattern_broadcast(self.a_right, self.param_broadcast)
t_right_actual = K.pattern_broadcast(t_right_actual,
self.param_broadcast)
else:
t_left = self.t_left
a_left = self.a_left
a_right = self.a_right
y_left_and_center = t_left + K.relu(x - t_left,
a_left,
t_right_actual - t_left)
y_right = K.relu(x - t_right_actual) * a_right
return y_left_and_center + y_right
def get_config(self):
config = {
't_left_initializer': self.t_left_initializer,
'a_left_initializer': self.a_left_initializer,
't_right_initializer': self.t_right_initializer,
'a_right_initializer': self.a_right_initializer,
'shared_axes': self.shared_axes
}
base_config = super(SReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
| keras-contrib/keras_contrib/layers/advanced_activations/srelu.py/0 | {
"file_path": "keras-contrib/keras_contrib/layers/advanced_activations/srelu.py",
"repo_id": "keras-contrib",
"token_count": 2664
} | 14 |
from keras import backend as K
def _get_accuracy(y_true, y_pred, mask, sparse_target=False):
y_pred = K.argmax(y_pred, -1)
if sparse_target:
y_true = K.cast(y_true[:, :, 0], K.dtype(y_pred))
else:
y_true = K.argmax(y_true, -1)
judge = K.cast(K.equal(y_pred, y_true), K.floatx())
if mask is None:
return K.mean(judge)
else:
mask = K.cast(mask, K.floatx())
return K.sum(judge * mask) / K.sum(mask)
def crf_viterbi_accuracy(y_true, y_pred):
'''Use Viterbi algorithm to get best path, and compute its accuracy.
`y_pred` must be an output from CRF.'''
crf, idx = y_pred._keras_history[:2]
X = crf._inbound_nodes[idx].input_tensors[0]
mask = crf._inbound_nodes[idx].input_masks[0]
y_pred = crf.viterbi_decoding(X, mask)
return _get_accuracy(y_true, y_pred, mask, crf.sparse_target)
def crf_marginal_accuracy(y_true, y_pred):
'''Use time-wise marginal argmax as prediction.
`y_pred` must be an output from CRF with `learn_mode="marginal"`.'''
crf, idx = y_pred._keras_history[:2]
X = crf._inbound_nodes[idx].input_tensors[0]
mask = crf._inbound_nodes[idx].input_masks[0]
y_pred = crf.get_marginal_prob(X, mask)
return _get_accuracy(y_true, y_pred, mask, crf.sparse_target)
def crf_accuracy(y_true, y_pred):
    '''Get the default accuracy based on CRF `test_mode`.'''
crf, idx = y_pred._keras_history[:2]
if crf.test_mode == 'viterbi':
return crf_viterbi_accuracy(y_true, y_pred)
else:
return crf_marginal_accuracy(y_true, y_pred)
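# Usage sketch (illustrative only): with a model whose output layer is a
# `keras_contrib.layers.CRF` instance, these metrics are passed to `compile`
# alongside the CRF loss, e.g.:
#
#     from keras_contrib.losses import crf_loss
#     model.compile(optimizer='adam', loss=crf_loss,
#                   metrics=[crf_viterbi_accuracy])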
| keras-contrib/keras_contrib/metrics/crf_accuracies.py/0 | {
"file_path": "keras-contrib/keras_contrib/metrics/crf_accuracies.py",
"repo_id": "keras-contrib",
"token_count": 738
} | 15 |
"""Utilities related to Keras unit tests."""
import sys
import numpy as np
from numpy.testing import assert_allclose
import inspect
import keras
from keras.layers import Input
from keras.models import Model
from keras import backend as K
def get_test_data(num_train=1000, num_test=500, input_shape=(10,),
output_shape=(2,),
classification=True, num_classes=2):
"""Generates test data to train a model on.
    `classification=True` overrides `output_shape`
    (i.e. the targets effectively have shape `(1,)`) and the output
    consists of integers in `[0, num_classes - 1]`.
    Otherwise: float output with shape `output_shape`.
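    Example (illustrative):
        (x_train, y_train), (x_test, y_test) = get_test_data(
            num_train=200, num_test=50, input_shape=(8,), num_classes=3)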
"""
samples = num_train + num_test
if classification:
y = np.random.randint(0, num_classes, size=(samples,))
X = np.zeros((samples,) + input_shape)
for i in range(samples):
X[i] = np.random.normal(loc=y[i], scale=0.7, size=input_shape)
else:
y_loc = np.random.random((samples,))
X = np.zeros((samples,) + input_shape)
y = np.zeros((samples,) + output_shape)
for i in range(samples):
X[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=input_shape)
y[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=output_shape)
return (X[:num_train], y[:num_train]), (X[num_train:], y[num_train:])
def layer_test(layer_cls, kwargs={}, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None, fixed_batch_size=False):
"""Test routine for a layer with a single input tensor
and single output tensor.
Copy of the function in keras-team/keras because it's not in the public API.
If we use the one from keras-team/keras it won't work with tf.keras.
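    Example (illustrative; `MyLayer` stands for any custom layer class):
        layer_test(MyLayer, kwargs={'units': 4}, input_shape=(2, 3))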
"""
# generate input data
if input_data is None:
assert input_shape
if not input_dtype:
input_dtype = K.floatx()
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = (10 * np.random.random(input_data_shape))
input_data = input_data.astype(input_dtype)
else:
if input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
layer = layer_cls(**kwargs)
    # test get_weights, set_weights at layer level
weights = layer.get_weights()
layer.set_weights(weights)
expected_output_shape = layer.compute_output_shape(input_shape)
# test in functional API
if fixed_batch_size:
x = Input(batch_shape=input_shape, dtype=input_dtype)
else:
x = Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
assert K.dtype(y) == expected_output_dtype
# check with the functional API
model = Model(x, y)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape,
actual_output_shape):
if expected_dim is not None:
assert expected_dim == actual_dim
if expected_output is not None:
assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
custom_objects = {layer.__class__.__name__: layer.__class__}
recovered_model = model.__class__.from_config(model_config, custom_objects)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
_output = recovered_model.predict(input_data)
assert_allclose(_output, actual_output, rtol=1e-3)
# test training mode (e.g. useful when the layer has a
# different behavior at training and testing time).
if has_arg(layer.call, 'training'):
model.compile('rmsprop', 'mse')
model.train_on_batch(input_data, actual_output)
# test instantiation from layer config
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
# for further checks in the caller function
return actual_output
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
For Python 2, checks if there is an argument with the given name.
For Python 3, checks if there is an argument with the given name, and
also whether this argument can be called with a keyword (i.e. if it is
not a positional-only argument).
This function is a copy of the one in keras-team/keras because it's not
in the public API.
# Arguments
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name`
but the function accepts a `**kwargs` argument.
# Returns
bool, whether `fn` accepts a `name` keyword argument.
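    # Example
        Illustrative only:
            def f(a, b=1, **kwargs):
                pass
            has_arg(f, 'b')                    # True
            has_arg(f, 'c')                    # False
            has_arg(f, 'c', accept_all=True)   # True, because of **kwargs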
"""
if sys.version_info < (3,):
arg_spec = inspect.getargspec(fn)
if accept_all and arg_spec.keywords is not None:
return True
return name in arg_spec.args
elif sys.version_info < (3, 3):
arg_spec = inspect.getfullargspec(fn)
if accept_all and arg_spec.varkw is not None:
return True
return (name in arg_spec.args or
name in arg_spec.kwonlyargs)
else:
signature = inspect.signature(fn)
parameter = signature.parameters.get(name)
if parameter is None:
if accept_all:
for param in signature.parameters.values():
if param.kind == inspect.Parameter.VAR_KEYWORD:
return True
return False
return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.KEYWORD_ONLY))
def to_list(x, allow_tuple=False):
if isinstance(x, list):
return x
if allow_tuple and isinstance(x, tuple):
return list(x)
return [x]
def unpack_singleton(x):
if len(x) == 1:
return x[0]
return x
if keras.__name__ == 'keras':
is_tf_keras = False
elif keras.__name__ == 'tensorflow.keras':
is_tf_keras = True
else:
raise KeyError('Cannot detect if using keras or tf.keras.')
def to_tuple(shape):
"""This functions is here to fix an inconsistency between keras and tf.keras.
In tf.keras, the input_shape argument is an tuple with `Dimensions` objects.
In keras, the input_shape is a simple tuple of ints or `None`.
We'll work with tuples of ints or `None` to be consistent
with keras-team/keras. So we must apply this function to
all input_shapes of the build methods in custom layers.
"""
if is_tf_keras:
import tensorflow as tf
return tuple(tf.TensorShape(shape).as_list())
else:
return shape
| keras-contrib/keras_contrib/utils/test_utils.py/0 | {
"file_path": "keras-contrib/keras_contrib/utils/test_utils.py",
"repo_id": "keras-contrib",
"token_count": 2958
} | 16 |
import pytest
from keras_contrib.utils.test_utils import layer_test
from keras_contrib.layers import SReLU
@pytest.mark.parametrize('kwargs', [{}, {'shared_axes': 1}])
def test_srelu(kwargs):
layer_test(SReLU, kwargs=kwargs, input_shape=(2, 3, 4))
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/layers/advanced_activations/test_srelu.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/layers/advanced_activations/test_srelu.py",
"repo_id": "keras-contrib",
"token_count": 129
} | 17 |
"""
Title: Fine-tuning a pre-trained TorchVision Model
Author: [Ayush Thakur](https://twitter.com/ayushthakur0), [Soumik Rakshit](https://twitter.com/soumikRakshit96)
Date created: 2023/09/18
Last modified: 2023/09/18
Description: Fine-tuning a pre-trained Torch model from TorchVision for image
classification using Keras.
"""
"""
## Introduction
[TorchVision](https://pytorch.org/vision/stable/index.html) is a library part of the
[PyTorch](http://pytorch.org/) project that consists of popular datasets, model
architectures, and common image transformations for computer vision. This example
demonstrates how we can perform transfer learning for image classification using a
pre-trained backbone model from TorchVision on the [Imagenette
dataset](https://github.com/fastai/imagenette) using KerasCore. We will also demonstrate
the compatibility of KerasCore with an input system consisting of [Torch Datasets and
Dataloaders](https://pytorch.org/tutorials/beginner/basics/data_tutorial.html).
### References:
- [Customizing what happens in `fit()` with
PyTorch](https://keras.io/keras_core/guides/custom_train_step_in_torch/)
- [PyTorch Datasets and
Dataloaders](https://pytorch.org/tutorials/beginner/basics/data_tutorial.html)
- [Transfer learning for Computer Vision using
PyTorch](https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html)
- [Fine-tuning a TorchVision Model using Keras
](https://wandb.ai/ml-colabs/keras-torch/reports/Fine-tuning-a-TorchVision-Model-using-Keras--Vmlldzo1NDE5NDE1)
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "torch"
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
import keras_core as keras
from keras_core.layers import TorchModuleWrapper
"""
## Define the Hyperparameters
"""
batch_size = 32
image_size = 224
initial_learning_rate = 1e-3
num_epochs = 5
"""
## Creating the Torch Datasets and Dataloaders
In this example, we will train an image classification model on the [Imagenette
dataset](https://github.com/fastai/imagenette). Imagenette is a subset of 10 easily
classified classes from [Imagenet](https://www.image-net.org/) (tench, English springer,
cassette player, chain saw, church, French horn, garbage truck, gas pump, golf ball,
parachute).
"""
# Fetch the imagenette dataset
data_dir = keras.utils.get_file(
fname="imagenette2-320.tgz",
origin="https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz",
extract=True,
)
data_dir = data_dir.replace(".tgz", "")
"""
Next, we define pre-processing and augmentation transforms from TorchVision for the train
and validation sets.
"""
data_transforms = {
"train": transforms.Compose(
[
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
),
"val": transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
),
}
"""
Finally, we will use TorchVision and the
[`torch.utils.data`](https://pytorch.org/docs/stable/data.html) packages for creating the
dataloaders for training and validation.
"""
# Define the train and validation datasets
image_datasets = {
x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
for x in ["train", "val"]
}
# Define the torch dataloaders corresponding to the train and validation dataset
dataloaders = {
x: torch.utils.data.DataLoader(
image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4
)
for x in ["train", "val"]
}
dataset_sizes = {x: len(image_datasets[x]) for x in ["train", "val"]}
class_names = image_datasets["train"].classes
# Specify the global device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
"""
Let us visualize a few samples from the training dataloader.
"""
plt.figure(figsize=(10, 10))
sample_images, sample_labels = next(iter(dataloaders["train"]))
sample_images = sample_images.numpy()
sample_labels = sample_labels.numpy()
for idx in range(9):
ax = plt.subplot(3, 3, idx + 1)
image = sample_images[idx].transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
image = np.clip(image, 0, 1)
plt.imshow(image)
plt.title("Ground Truth Label: " + class_names[int(sample_labels[idx])])
plt.axis("off")
"""
## The Image Classification Model
"""
"""
We typically define a model in PyTorch using
[`torch.nn.Module`s](https://pytorch.org/docs/stable/notes/modules.html) which act as the
building blocks of stateful computation. Let us define the ResNet18 model from the
TorchVision package as a `torch.nn.Module` pre-trained on the [Imagenet1K
dataset](https://huggingface.co/datasets/imagenet-1k).
"""
# Define the pre-trained resnet18 module from TorchVision
resnet_18 = models.resnet18(weights="IMAGENET1K_V1")
# We set the classification head of the pre-trained ResNet18
# module to an identity module
resnet_18.fc = nn.Identity()
"""
Although Keras supports PyTorch as a backend, this does not mean that we can nest torch
modules directly inside a [`keras_core.Model`](https://keras.io/keras_core/api/models/), because
trainable variables inside a Keras Model are tracked exclusively via [Keras
Layers](https://keras.io/keras_core/api/layers/).
KerasCore provides us with a feature called `TorchModuleWrapper` which enables us to do
exactly this. The `TorchModuleWrapper` is a Keras Layer that accepts a torch module and
tracks its trainable variables, essentially converting the torch module into a Keras
Layer. This enables us to put any torch modules inside a Keras Model and train them with
a single `model.fit()`!
"""
# We set the trainable ResNet18 backbone to be a Keras Layer
# using `TorchModuleWrapper`
backbone = TorchModuleWrapper(resnet_18)
# Set this to `False` if you want to freeze the backbone
backbone.trainable = True
"""
Now, we will build a Keras functional model with the backbone layer.
"""
inputs = keras.Input(shape=(3, image_size, image_size))
x = backbone(inputs)
x = keras.layers.Dropout(0.5)(x)
x = keras.layers.Dense(len(class_names))(x)
outputs = keras.activations.softmax(x, axis=1)
model = keras.Model(inputs, outputs, name="ResNet18_Classifier")
model.summary()
# Create exponential decay learning rate scheduler
decay_steps = num_epochs * len(dataloaders["train"]) // batch_size
lr_scheduler = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=initial_learning_rate,
decay_steps=decay_steps,
decay_rate=0.1,
)
# Compile the model
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.Adam(lr_scheduler),
metrics=["accuracy"],
)
# Define the training callbacks
callbacks = [
# Save best model checkpoints
keras.callbacks.ModelCheckpoint(
filepath="model.weights.h5",
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
)
]
# Train the model by calling model.fit
history = model.fit(
dataloaders["train"],
validation_data=dataloaders["val"],
epochs=num_epochs,
callbacks=callbacks,
)
def plot_history(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_history("loss")
plot_history("accuracy")
"""
## Evaluation and Inference
Now, let us load the best model weights checkpoint and evaluate the model.
"""
model.load_weights("model.weights.h5")
_, val_accuracy = model.evaluate(dataloaders["val"])
print("Best Validation Accuracy:", val_accuracy)
"""
Finally, let us visualize some predictions of the model.
"""
plt.figure(figsize=(10, 10))
sample_images, sample_labels = next(iter(dataloaders["train"]))
# We perform inference and detach the predicted probabilities from the Torch
# computation graph, obtaining a tensor that does not require gradient computation.
sample_pred_probas = model(sample_images.to("cuda")).detach()
sample_pred_logits = keras.ops.argmax(sample_pred_probas, axis=1)
sample_pred_logits = sample_pred_logits.to("cpu").numpy()
sample_images = sample_images.numpy()
sample_labels = sample_labels.numpy()
for idx in range(9):
ax = plt.subplot(3, 3, idx + 1)
image = sample_images[idx].transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
image = np.clip(image, 0, 1)
plt.imshow(image)
title = "Ground Truth Label: " + class_names[int(sample_labels[idx])]
title += "\nPredicted Label: " + class_names[int(sample_pred_logits[idx])]
plt.title(title)
plt.axis("off")
| keras-core/examples/keras_io/pytorch/torchvision_keras.py/0 | {
"file_path": "keras-core/examples/keras_io/pytorch/torchvision_keras.py",
"repo_id": "keras-core",
"token_count": 3302
} | 18 |
import numpy as np
import keras_core
from keras_core import layers
from keras_core import ops
keras_core.utils.set_random_seed(1337)
x = np.random.rand(100, 32, 32, 3)
y = np.random.randint(0, 2, size=(100, 1))
# Test sequential model.
model = keras_core.Sequential(
[
layers.Conv2D(filters=10, kernel_size=3),
layers.GlobalAveragePooling2D(),
layers.Dense(1, activation="sigmoid"),
]
)
model.compile(
loss="binary_crossentropy", optimizer="adam", metrics=["mae", "accuracy"]
)
history = model.fit(
x=x,
y=y,
epochs=10,
validation_data=(x, y),
verbose=0,
)
model.evaluate(x, y, verbose=0)
model.predict(x, verbose=0)
# Test on batch functions
model.train_on_batch(x, y)
model.test_on_batch(x, y)
model.predict_on_batch(x)
# Test functional model.
inputs = keras_core.Input(shape=(32, 32, 3))
outputs = layers.Conv2D(filters=10, kernel_size=3)(inputs)
outputs = layers.GlobalAveragePooling2D()(outputs)
outputs = layers.Dense(1, activation="sigmoid")(outputs)
model = keras_core.Model(inputs, outputs)
model.compile(
loss="binary_crossentropy", optimizer="adam", metrics=["mae", "accuracy"]
)
history = model.fit(
x=x,
y=y,
epochs=10,
validation_data=(x, y),
verbose=0,
)
model.evaluate(x, y, verbose=0)
model.predict(x, verbose=0)
# Test custom layer
class Linear(layers.Layer):
def __init__(self, units=32):
super().__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
inputs = keras_core.Input(shape=(32, 32, 3))
outputs = layers.Conv2D(filters=10, kernel_size=3)(inputs)
outputs = layers.GlobalAveragePooling2D()(outputs)
outputs = Linear(1)(outputs)
outputs = layers.Activation("sigmoid")(outputs)
model = keras_core.Model(inputs, outputs)
model.compile(
loss="binary_crossentropy", optimizer="adam", metrics=["mae", "accuracy"]
)
history = model.fit(
x=x,
y=y,
epochs=10,
validation_data=(x, y),
verbose=0,
)
model.evaluate(x, y, verbose=0)
model.predict(x, verbose=0)
| keras-core/integration_tests/torch_backend_keras_workflow.py/0 | {
"file_path": "keras-core/integration_tests/torch_backend_keras_workflow.py",
"repo_id": "keras-core",
"token_count": 1038
} | 19 |
import warnings
from keras_core import backend
from keras_core import layers
from keras_core.api_export import keras_core_export
from keras_core.applications import imagenet_utils
from keras_core.models import Functional
from keras_core.ops import operation_utils
from keras_core.utils import file_utils
BASE_WEIGHT_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/"
)
@keras_core_export(
[
"keras_core.applications.mobilenet.MobileNet",
"keras_core.applications.MobileNet",
]
)
def MobileNet(
input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights="imagenet",
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the MobileNet architecture.
Reference:
- [MobileNets: Efficient Convolutional Neural Networks
for Mobile Vision Applications](
https://arxiv.org/abs/1704.04861)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For MobileNet, call `keras_core.applications.mobilenet.preprocess_input`
on your inputs before passing them to the model.
`mobilenet.preprocess_input` will scale input pixels between -1 and 1.
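    Example (a minimal sketch; the random input and `top=3` choice are
    illustrative, not part of the original docstring):
        import numpy as np
        model = keras_core.applications.MobileNet(weights="imagenet")
        images = np.random.uniform(0, 255, size=(1, 224, 224, 3))
        images = keras_core.applications.mobilenet.preprocess_input(images)
        preds = model.predict(images)
        print(keras_core.applications.mobilenet.decode_predictions(preds, top=3))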
Args:
input_shape: Optional shape tuple, only to be specified if `include_top`
is `False` (otherwise the input shape has to be `(224, 224, 3)`
(with `"channels_last"` data format) or `(3, 224, 224)`
(with `"channels_first"` data format).
It should have exactly 3 inputs channels, and width and
height should be no smaller than 32. E.g. `(200, 200, 3)` would
be one valid value. Defaults to `None`.
`input_shape` will be ignored if the `input_tensor` is provided.
alpha: Controls the width of the network. This is known as the width
multiplier in the MobileNet paper.
- If `alpha < 1.0`, proportionally decreases the number
of filters in each layer.
- If `alpha > 1.0`, proportionally increases the number
of filters in each layer.
- If `alpha == 1`, default number of filters from the paper
are used at each layer. Defaults to `1.0`.
depth_multiplier: Depth multiplier for depthwise convolution.
This is called the resolution multiplier in the MobileNet paper.
            Defaults to `1`.
dropout: Dropout rate. Defaults to `0.001`.
include_top: Boolean, whether to include the fully-connected layer
at the top of the network. Defaults to `True`.
weights: One of `None` (random initialization), `"imagenet"`
(pre-training on ImageNet), or the path to the weights file
to be loaded. Defaults to `"imagenet"`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model. `input_tensor` is useful
for sharing inputs between multiple different networks.
Defaults to `None`.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: Optional number of classes to classify images into,
only to be specified if `include_top` is `True`, and if
no `weights` argument is specified. Defaults to `1000`.
classifier_activation: A `str` or callable. The activation function
to use on the "top" layer. Ignored unless `include_top=True`.
Set `classifier_activation=None` to return the logits of the "top"
layer. When loading pretrained weights, `classifier_activation`
can only be `None` or `"softmax"`.
Returns:
A model instance.
"""
if not (weights in {"imagenet", None} or file_utils.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), 'imagenet' "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded. "
f"Received weights={weights}"
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
"If using `weights='imagenet'` with `include_top=True`, "
"`classes` should be 1000. "
f"Received classes={classes}"
)
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if backend.image_data_format() == "channels_first":
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if backend.image_data_format() == "channels_last":
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == "imagenet":
if depth_multiplier != 1:
raise ValueError(
"If imagenet weights are being loaded, "
"depth multiplier must be 1. "
f"Received depth_multiplier={depth_multiplier}"
)
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError(
"If imagenet weights are being loaded, "
"alpha can be one of"
"`0.25`, `0.50`, `0.75` or `1.0` only. "
f"Received alpha={alpha}"
)
if rows != cols or rows not in [128, 160, 192, 224]:
rows = 224
warnings.warn(
"`input_shape` is undefined or non-square, "
"or `rows` is not in [128, 160, 192, 224]. "
"Weights for input shape (224, 224) will be "
"loaded as the default.",
stacklevel=2,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(
x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2
)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(
x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4
)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(
x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6
)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(
x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12
)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
x = layers.GlobalAveragePooling2D(keepdims=True)(x)
x = layers.Dropout(dropout, name="dropout")(x)
x = layers.Conv2D(classes, (1, 1), padding="same", name="conv_preds")(x)
x = layers.Reshape((classes,), name="reshape_2")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Activation(
activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = operation_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Functional(inputs, x, name=f"mobilenet_{alpha:0.2f}_{rows}")
# Load weights.
if weights == "imagenet":
if alpha == 1.0:
alpha_text = "1_0"
elif alpha == 0.75:
alpha_text = "7_5"
elif alpha == 0.50:
alpha_text = "5_0"
else:
alpha_text = "2_5"
if include_top:
model_name = "mobilenet_%s_%d_tf.h5" % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = file_utils.get_file(
model_name, weight_path, cache_subdir="models"
)
else:
model_name = "mobilenet_%s_%d_tf_no_top.h5" % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = file_utils.get_file(
model_name, weight_path, cache_subdir="models"
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
Args:
inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last`
data format) or (3, rows, cols) (with `channels_first` data format).
            It should have exactly 3 input channels, and width and height
should be no smaller than 32. E.g. `(224, 224, 3)` would be
one valid value.
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number of filters
in each layer.
- If `alpha` = 1, default number of filters from the paper are
used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the width
and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height.
Can be a single integer to specify the same value for all
spatial dimensions. Specifying any stride value != 1 is
incompatible with specifying any `dilation_rate`
            value != 1.
    # Input shape
4D tensor with shape: `(samples, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, rows, cols, channels)` if
        data_format='channels_last'.
    # Output shape
4D tensor with shape: `(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape: `(samples, new_rows, new_cols, filters)`
if data_format='channels_last'. `rows` and `cols` values
might have changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
filters = int(filters * alpha)
x = layers.Conv2D(
filters,
kernel,
padding="same",
use_bias=False,
strides=strides,
name="conv1",
)(inputs)
x = layers.BatchNormalization(axis=channel_axis, name="conv1_bn")(x)
return layers.ReLU(6.0, name="conv1_relu")(x)
def _depthwise_conv_block(
inputs,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
block_id=1,
):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Args:
inputs: Input tensor of shape `(rows, cols, channels)` (with
`channels_last` data format) or (channels, rows, cols) (with
`channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number of filters
in each layer.
- If `alpha` = 1, default number of filters from the paper are
used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions. Specifying any stride value != 1 is
incompatible with specifying any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape: `(batch, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, rows, cols, channels)` if
        data_format='channels_last'.
    # Output shape
4D tensor with shape: `(batch, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
if strides == (1, 1):
x = inputs
else:
x = layers.ZeroPadding2D(
((0, 1), (0, 1)), name="conv_pad_%d" % block_id
)(inputs)
x = layers.DepthwiseConv2D(
(3, 3),
padding="same" if strides == (1, 1) else "valid",
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name="conv_dw_%d" % block_id,
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="conv_dw_%d_bn" % block_id
)(x)
x = layers.ReLU(6.0, name="conv_dw_%d_relu" % block_id)(x)
x = layers.Conv2D(
pointwise_conv_filters,
(1, 1),
padding="same",
use_bias=False,
strides=(1, 1),
name="conv_pw_%d" % block_id,
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="conv_pw_%d_bn" % block_id
)(x)
return layers.ReLU(6.0, name="conv_pw_%d_relu" % block_id)(x)
@keras_core_export("keras_core.applications.mobilenet.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="tf"
)
@keras_core_export("keras_core.applications.mobilenet.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| keras-core/keras_core/applications/mobilenet.py/0 | {
"file_path": "keras-core/keras_core/applications/mobilenet.py",
"repo_id": "keras-core",
"token_count": 7412
} | 20 |
import tree
from keras_core.api_export import keras_core_export
from keras_core.utils.naming import auto_name
@keras_core_export("keras_core.KerasTensor")
class KerasTensor:
"""Symbolic tensor -- encapsulates a shape and a dtype.
You can use `KerasTensor` instances to build computation
graphs of Keras operations, such as `keras_core.Function`
objects or Functional `keras_core.models.Model` objects.
Example:
>>> x = keras_core.KerasTensor(shape=(3, 4), dtype="float32")
>>> x.shape
(3, 4)
>>> x.dtype
float32
Calling a Keras operation (including a layer or a model)
on a `KerasTensor` instance will return another `KerasTensor`
instance with the appropriate shape and dtype. This is
called a "symbolic call" (since there is no actual data
involved). The computation of the correct output shape and
dtype is called "static shape inference".
"""
def __init__(
self,
shape,
dtype="float32",
sparse=False,
record_history=True,
name=None,
):
from keras_core import backend
self.shape = backend.standardize_shape(shape)
self.dtype = backend.standardize_dtype(dtype)
self.sparse = sparse
self.name = name or auto_name(self.__class__.__name__)
self.record_history = record_history
@property
def ndim(self):
return len(self.shape)
def reshape(self, new_shape):
from keras_core import ops
return ops.Reshape(new_shape)(self)
def squeeze(self, axis=None):
from keras_core import ops
return ops.Squeeze(axis)(self)
def __array__(self):
raise ValueError(
"A KerasTensor is symbolic: it's a placeholder for a shape "
"an a dtype. It doesn't have any actual numerical value. "
"You cannot convert it to a NumPy array."
)
def __jax_array__(self):
raise ValueError(
"A KerasTensor cannot be used as input to a JAX function. "
"A KerasTensor is a symbolic placeholder for a shape and dtype, "
"used when constructing Keras Functional models "
"or Keras Functions. You can only use it as input to a Keras layer "
"or a Keras operation (from the namespaces `keras_core.layers` "
"and `keras_core.operations`). "
"You are likely doing something like:\n\n"
"```\n"
"x = Input(...)\n"
"...\n"
"jax_fn(x) # Invalid.\n"
"```\n\n"
"What you should do instead is wrap `jax_fn` in a layer:\n\n"
"```\n"
"class MyLayer(Layer):\n"
" def call(self, x):\n"
" return jax_fn(x)\n\n"
"x = MyLayer()(x)\n"
"```\n"
)
def __tf_tensor__(self, dtype=None, name=None):
raise ValueError(
"A KerasTensor cannot be used as input to a TensorFlow function. "
"A KerasTensor is a symbolic placeholder for a shape and dtype, "
"used when constructing Keras Functional models "
"or Keras Functions. You can only use it as input to a Keras layer "
"or a Keras operation (from the namespaces `keras_core.layers` "
"and `keras_core.operations`). "
"You are likely doing something like:\n\n"
"```\n"
"x = Input(...)\n"
"...\n"
"tf_fn(x) # Invalid.\n"
"```\n\n"
"What you should do instead is wrap `tf_fn` in a layer:\n\n"
"```\n"
"class MyLayer(Layer):\n"
" def call(self, x):\n"
" return tf_fn(x)\n\n"
"x = MyLayer()(x)\n"
"```\n"
)
def __repr__(self):
return (
f"<KerasTensor shape={self.shape}, dtype={self.dtype}, "
f"sparse={self.sparse}, name={self.name}>"
)
def __iter__(self):
raise NotImplementedError(
"Iterating over a symbolic KerasTensor is not supported."
)
def __bool__(self):
raise TypeError("A symbolic KerasTensor cannot be used as a boolean.")
def __add__(self, other):
from keras_core import ops
return ops.Add().symbolic_call(self, other)
def __radd__(self, other):
from keras_core import ops
return ops.Add().symbolic_call(other, self)
def __sub__(self, other):
from keras_core import ops
return ops.Subtract().symbolic_call(self, other)
def __rsub__(self, other):
from keras_core import ops
return ops.Subtract().symbolic_call(other, self)
def __mul__(self, other):
from keras_core import ops
return ops.Multiply().symbolic_call(self, other)
def __rmul__(self, other):
from keras_core import ops
return ops.Multiply().symbolic_call(other, self)
def __matmul__(self, other):
from keras_core import ops
return ops.Matmul().symbolic_call(self, other)
def __rmatmul__(self, other):
from keras_core import ops
return ops.Matmul().symbolic_call(other, self)
def __div__(self, other):
from keras_core import ops
return ops.Divide().symbolic_call(self, other)
def __rdiv__(self, other):
from keras_core import ops
return ops.Divide().symbolic_call(other, self)
def __truediv__(self, other):
from keras_core import ops
return ops.TrueDivide().symbolic_call(self, other)
def __rtruediv__(self, other):
from keras_core import ops
return ops.TrueDivide().symbolic_call(other, self)
def __neg__(self):
from keras_core import ops
return ops.Negative().symbolic_call(self)
def __abs__(self):
from keras_core import ops
return ops.Absolute().symbolic_call(self)
def __pow__(self, other):
from keras_core import ops
return ops.Power().symbolic_call(self, other)
def __rpow__(self, other):
from keras_core import ops
return ops.Power().symbolic_call(other, self)
def __floordiv__(self, other):
from keras_core import ops
return ops.FloorDiv().symbolic_call(self, other)
def __rfloordiv__(self, other):
from keras_core import ops
return ops.FloorDiv().symbolic_call(other, self)
def __mod__(self, other):
from keras_core import ops
return ops.Mod().symbolic_call(self, other)
def __rmod__(self, other):
from keras_core import ops
return ops.Mod().symbolic_call(other, self)
def __lt__(self, other):
from keras_core import ops
return ops.Less().symbolic_call(self, other)
def __le__(self, other):
from keras_core import ops
return ops.LessEqual().symbolic_call(self, other)
def __gt__(self, other):
from keras_core import ops
return ops.Greater().symbolic_call(self, other)
def __ge__(self, other):
from keras_core import ops
return ops.GreaterEqual().symbolic_call(self, other)
def __ne__(self, other):
from keras_core import ops
return ops.NotEqual().symbolic_call(self, other)
def __and__(self, other):
from keras_core import ops
return ops.LogicalAnd().symbolic_call(self, other)
def __rand__(self, other):
from keras_core import ops
return ops.LogicalAnd().symbolic_call(other, self)
def __or__(self, other):
from keras_core import ops
return ops.LogicalOr().symbolic_call(self, other)
def __ror__(self, other):
from keras_core import ops
return ops.LogicalOr().symbolic_call(other, self)
    def __invert__(self):
        from keras_core import ops
        return ops.LogicalNot().symbolic_call(self)
def __xor__(self, other):
from keras_core import ops
return ops.LogicalXor().symbolic_call(self, other)
def __rxor__(self, other):
from keras_core import ops
return ops.LogicalXor().symbolic_call(other, self)
def __getitem__(self, key):
from keras_core import ops
return ops.GetItem().symbolic_call(self, key)
def any_symbolic_tensors(args=None, kwargs=None):
args = args or ()
kwargs = kwargs or {}
for x in tree.flatten((args, kwargs)):
if isinstance(x, KerasTensor):
return True
return False
@keras_core_export(
["keras_core.utils.is_keras_tensor", "keras_core.backend.is_keras_tensor"]
)
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a *symbolic tensor*, such as a tensor
that was created via `Input()`. A "symbolic tensor"
can be understood as a placeholder -- it does not
contain any actual numerical data, only a shape and dtype.
It can be used for building Functional models, but it
cannot be used in actual computations.
"""
return isinstance(x, KerasTensor)
| keras-core/keras_core/backend/common/keras_tensor.py/0 | {
"file_path": "keras-core/keras_core/backend/common/keras_tensor.py",
"repo_id": "keras-core",
"token_count": 4024
} | 21 |
import jax
import jax.numpy as jnp
import numpy as np
from jax import lax
from jax import nn as jnn
from keras_core.backend import standardize_data_format
from keras_core.backend import standardize_dtype
from keras_core.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_jax,
)
from keras_core.backend.config import epsilon
from keras_core.backend.jax.core import cast
from keras_core.backend.jax.core import convert_to_tensor
def relu(x):
x = convert_to_tensor(x)
return jnn.relu(x)
def relu6(x):
x = convert_to_tensor(x)
return jnn.relu6(x)
def sigmoid(x):
x = convert_to_tensor(x)
return jnn.sigmoid(x)
def tanh(x):
x = convert_to_tensor(x)
return jnn.tanh(x)
def softplus(x):
x = convert_to_tensor(x)
return jnn.softplus(x)
def softsign(x):
x = convert_to_tensor(x)
return jnn.soft_sign(x)
def silu(x):
x = convert_to_tensor(x)
return jnn.silu(x)
def log_sigmoid(x):
x = convert_to_tensor(x)
return jnn.log_sigmoid(x)
def leaky_relu(x, negative_slope=0.2):
x = convert_to_tensor(x)
return jnn.leaky_relu(x, negative_slope=negative_slope)
def hard_sigmoid(x):
x = convert_to_tensor(x)
return jnn.hard_sigmoid(x)
def elu(x, alpha=1.0):
x = convert_to_tensor(x)
return jnn.elu(x, alpha=alpha)
def selu(x):
x = convert_to_tensor(x)
return jnn.selu(x)
def gelu(x, approximate=True):
x = convert_to_tensor(x)
return jnn.gelu(x, approximate)
def softmax(x, axis=-1):
x = convert_to_tensor(x)
return jnn.softmax(x, axis=axis)
def log_softmax(x, axis=-1):
x = convert_to_tensor(x)
return jnn.log_softmax(x, axis=axis)
def _convert_to_spatial_operand(
x,
num_spatial_dims,
data_format="channels_last",
include_batch_and_channels=True,
):
# Helper function that converts an operand to a spatial operand.
x = (x,) * num_spatial_dims if isinstance(x, int) else x
if not include_batch_and_channels:
return x
if data_format == "channels_last":
x = (1,) + x + (1,)
else:
x = (1,) + (1,) + x
return x
def _pool(
inputs,
initial_value,
reduce_fn,
pool_size,
strides=None,
padding="valid",
):
"""Helper function to define pooling functions.
Args:
inputs: input data of shape `N+2`.
initial_value: the initial value for the reduction.
reduce_fn: a reduce function of the form `(T, T) -> T`.
pool_size: a sequence of `N` integers, representing the window size to
reduce over.
strides: a sequence of `N` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: either the string `same` or `valid`.
Returns:
The output of the reduction for each window slice.
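    Example (illustrative; mirrors how `max_pool` below calls this helper):
        pooled = _pool(jnp.ones((1, 4, 4, 1)), -jnp.inf, lax.max,
                       (1, 2, 2, 1), (1, 2, 2, 1), "valid")
        # pooled.shape == (1, 2, 2, 1)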
"""
if padding not in ("same", "valid"):
raise ValueError(
f"Invalid padding '{padding}', must be 'same' or 'valid'."
)
padding = padding.upper()
return lax.reduce_window(
inputs,
initial_value,
reduce_fn,
pool_size,
strides,
padding,
)
def max_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
pool_size = _convert_to_spatial_operand(
pool_size, num_spatial_dims, data_format
)
strides = pool_size if strides is None else strides
strides = _convert_to_spatial_operand(
strides, num_spatial_dims, data_format
)
return _pool(inputs, -jnp.inf, lax.max, pool_size, strides, padding)
def average_pool(
inputs,
pool_size,
strides,
padding,
data_format=None,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
pool_size = _convert_to_spatial_operand(
pool_size, num_spatial_dims, data_format
)
strides = pool_size if strides is None else strides
strides = _convert_to_spatial_operand(
strides, num_spatial_dims, data_format
)
pooled = _pool(inputs, 0.0, lax.add, pool_size, strides, padding)
if padding == "valid":
# Avoid the extra reduce_window.
return pooled / np.prod(pool_size)
else:
# Count the number of valid entries at each input point, then use that
# for computing average. Assumes that any two arrays of same shape will
# be padded the same. Avoid broadcasting on axis where pooling is
# skipped.
shape = [
(a if b != 1 else 1) for (a, b) in zip(inputs.shape, pool_size)
]
window_counts = _pool(
jnp.ones(shape, inputs.dtype),
0.0,
lax.add,
pool_size,
strides,
padding,
)
return pooled / window_counts
def _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format="channels_last",
transpose=False,
):
"""Create a `lax.ConvDimensionNumbers` for the given inputs."""
num_dims = num_spatial_dims + 2
if data_format == "channels_last":
spatial_dims = tuple(range(1, num_dims - 1))
inputs_dn = (0, num_dims - 1) + spatial_dims
else:
spatial_dims = tuple(range(2, num_dims))
inputs_dn = (0, 1) + spatial_dims
if transpose:
kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2))
else:
kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2))
return lax.ConvDimensionNumbers(
lhs_spec=inputs_dn, rhs_spec=kernel_dn, out_spec=inputs_dn
)
def conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
dimension_numbers = _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format,
transpose=False,
)
strides = _convert_to_spatial_operand(
strides,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
dilation_rate = _convert_to_spatial_operand(
dilation_rate,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
if data_format == "channels_last":
channels = inputs.shape[-1]
else:
channels = inputs.shape[1]
kernel_in_channels = kernel.shape[-2]
if channels % kernel_in_channels > 0:
raise ValueError(
"The number of input channels must be evenly divisible by "
f"kernel's in_channels. Received input channels {channels} and "
f"kernel in_channels {kernel_in_channels}. "
)
feature_group_count = channels // kernel_in_channels
return jax.lax.conv_general_dilated(
convert_to_tensor(inputs),
convert_to_tensor(kernel),
strides,
padding,
rhs_dilation=dilation_rate,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
)
def depthwise_conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
dimension_numbers = _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format,
transpose=False,
)
strides = _convert_to_spatial_operand(
strides,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
dilation_rate = _convert_to_spatial_operand(
dilation_rate,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
feature_group_count = (
inputs.shape[-1] if data_format == "channels_last" else inputs.shape[1]
)
kernel = jnp.reshape(
kernel,
kernel.shape[:-2] + (1, feature_group_count * kernel.shape[-1]),
)
return jax.lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
rhs_dilation=dilation_rate,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
)
def separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
depthwise_conv_output = depthwise_conv(
inputs,
depthwise_kernel,
strides,
padding,
data_format,
dilation_rate,
)
return conv(
depthwise_conv_output,
pointwise_kernel,
strides=1,
padding="valid",
data_format=data_format,
dilation_rate=dilation_rate,
)
def conv_transpose(
inputs,
kernel,
strides=1,
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=1,
):
data_format = standardize_data_format(data_format)
num_spatial_dims = inputs.ndim - 2
padding_values = compute_conv_transpose_padding_args_for_jax(
input_shape=inputs.shape,
kernel_shape=kernel.shape,
strides=strides,
padding=padding,
output_padding=output_padding,
dilation_rate=dilation_rate,
)
dimension_numbers = _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format,
transpose=False,
)
strides = _convert_to_spatial_operand(
strides,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
dilation_rate = _convert_to_spatial_operand(
dilation_rate,
num_spatial_dims,
data_format,
include_batch_and_channels=False,
)
return jax.lax.conv_transpose(
inputs,
kernel,
strides,
padding=padding_values,
rhs_dilation=dilation_rate,
dimension_numbers=dimension_numbers,
transpose_kernel=True,
)
def one_hot(x, num_classes, axis=-1, dtype="float32"):
x = convert_to_tensor(x)
return jnn.one_hot(x, num_classes, axis=axis, dtype=dtype)
def multi_hot(x, num_classes, axis=-1, dtype="float32"):
reduction_axis = 1 if len(x.shape) > 1 else 0
outputs = jnp.max(
one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype),
axis=reduction_axis,
)
return outputs
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
target = jnp.array(target)
output = jnp.array(output)
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if len(target.shape) < 1:
raise ValueError(
"Arguments `target` and `output` must be at least rank 1. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_prob = jax.nn.log_softmax(output, axis=axis)
else:
output = output / jnp.sum(output, axis, keepdims=True)
output = jnp.clip(output, epsilon(), 1.0 - epsilon())
log_prob = jnp.log(output)
return -jnp.sum(target * log_prob, axis=axis)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
target = jnp.array(target, dtype="int32")
output = jnp.array(output)
if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
target = jnp.squeeze(target, axis=-1)
if len(output.shape) < 1:
raise ValueError(
"Argument `output` must be at least rank 1. "
"Received: "
f"output.shape={output.shape}"
)
if target.shape != output.shape[:-1]:
raise ValueError(
"Arguments `target` and `output` must have the same shape "
"up until the last dimension: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_prob = jax.nn.log_softmax(output, axis=axis)
else:
output = output / jnp.sum(output, axis, keepdims=True)
output = jnp.clip(output, epsilon(), 1.0 - epsilon())
log_prob = jnp.log(output)
target = jnn.one_hot(target, output.shape[axis], axis=axis)
return -jnp.sum(target * log_prob, axis=axis)
def binary_crossentropy(target, output, from_logits=False):
target = jnp.array(target)
output = jnp.array(output)
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_logits = jax.nn.log_sigmoid(output)
log_neg_logits = jax.nn.log_sigmoid(-output)
return -1.0 * target * log_logits - (1.0 - target) * log_neg_logits
output = jnp.clip(output, epsilon(), 1.0 - epsilon())
bce = target * jnp.log(output)
bce += (1.0 - target) * jnp.log(1.0 - output)
return -bce
def moments(x, axes, keepdims=False):
# The dynamic range of float16 is too limited for statistics. As a
# workaround, we simply perform the operations on float32 and convert back
# to float16
need_cast = False
ori_dtype = standardize_dtype(x.dtype)
if ori_dtype == "float16":
need_cast = True
x = cast(x, "float32")
mean = jnp.mean(x, axes, keepdims=True)
    # The variance is computed using $Var = E[|x|^2] - |E[x]|^2$. It is faster
    # but less numerically stable.
    # Note: stop_gradient does not change the gradient that flows back to the
    # mean, because that gradient is zero.
variance = jnp.mean(jnp.square(x), axis=axes, keepdims=True) - jnp.square(
jax.lax.stop_gradient(mean)
)
if not keepdims:
mean = jnp.squeeze(mean, axes)
variance = jnp.squeeze(variance, axes)
if need_cast:
        # avoid overflow and underflow when casting back from float32 to float16
mean = jnp.clip(
mean, jnp.finfo(jnp.float16).min, jnp.finfo(jnp.float16).max
)
variance = jnp.clip(
variance, jnp.finfo(jnp.float16).min, jnp.finfo(jnp.float16).max
)
mean = cast(mean, ori_dtype)
variance = cast(variance, ori_dtype)
return mean, variance
| keras-core/keras_core/backend/jax/nn.py/0 | {
"file_path": "keras-core/keras_core/backend/jax/nn.py",
"repo_id": "keras-core",
"token_count": 6611
} | 22 |
import types
import numpy as np
import tensorflow as tf
from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice
from keras_core.backend.common import KerasVariable
from keras_core.backend.common import global_state
from keras_core.backend.common import standardize_dtype
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.backend.common.name_scope import name_scope as base_name_scope
from keras_core.backend.common.stateless_scope import StatelessScope
from keras_core.utils.naming import auto_name
SUPPORTS_SPARSE_TENSORS = True
class Variable(
KerasVariable,
tf.__internal__.types.Tensor,
tf.__internal__.tracking.Trackable,
):
_should_act_as_resource_variable = True
@property
def handle(self):
return self.value.handle
def _initialize(self, value):
self._value = tf.Variable(
value, dtype=self._dtype, trainable=self.trainable, name=self.name
)
def _direct_assign(self, value):
self._value.assign(tf.cast(value, self._value.dtype))
def _convert_to_tensor(self, value, dtype=None):
return convert_to_tensor(value, dtype=dtype)
def numpy(self): # noqa: F811
return self.value.numpy()
@property
def shape(self):
return tf.TensorShape(super().shape)
# Overload native accessor.
def __tf_tensor__(self, dtype=None, name=None):
return tf.convert_to_tensor(self.value, dtype=dtype, name=name)
# Methods below are for SavedModel support
@property
def _shared_name(self):
return self.value._shared_name
def _serialize_to_tensors(self):
return self.value._serialize_to_tensors()
def _restore_from_tensors(self, restored_tensors):
return self.value._restore_from_tensors(restored_tensors)
def _export_to_saved_model_graph(
self, object_map, tensor_map, options, **kwargs
):
resource_list = self.value._export_to_saved_model_graph(
object_map, tensor_map, options, **kwargs
)
object_map[self] = tf.Variable(object_map[self.value])
return resource_list
def _write_object_proto(self, proto, options):
return self.value._write_object_proto(proto, options)
def convert_to_tensor(x, dtype=None, sparse=True):
"""Convert to a TensorFlow tensor.
`sparse=True` means that `tf.SparseTensor`s are returned as-is, which is the
default with the TensorFlow backend. An explicit `sparse=False` densifies
`tf.SparseTensor`s.
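    Example (illustrative):
        st = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
        convert_to_tensor(st)                # returned as-is (sparse)
        convert_to_tensor(st, sparse=False)  # densified via tf.sparse.to_dense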
"""
if isinstance(x, tf.SparseTensor) and not sparse:
x = tf.sparse.to_dense(x)
if dtype is not None:
dtype = standardize_dtype(dtype)
if not tf.is_tensor(x):
return tf.convert_to_tensor(x, dtype=dtype)
elif dtype is not None:
return tf.cast(x, dtype=dtype)
else:
return x
def convert_to_numpy(x):
if isinstance(x, tf.SparseTensor):
x = tf.sparse.to_dense(x)
return np.array(x)
def is_tensor(x):
return tf.is_tensor(x)
def shape(x):
"""Always return a tuple shape.
`tf.shape` will return a `tf.Tensor`, which differs from the tuple return
type on the torch and jax backends. We write our own method instead which
always returns a tuple, with integer values when the shape is known, and
tensor values when the shape is unknown (this is tf specific, as dynamic
shapes do not apply in other backends).
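    Example (illustrative):
        shape(tf.ones((2, 3)))  # -> (2, 3), a plain tuple of ints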
"""
if not tf.is_tensor(x):
x = tf.convert_to_tensor(x)
dynamic = tf.shape(x)
if x.shape == tf.TensorShape(None):
raise ValueError(
"All tensors passed to `ops.shape` must have a statically known "
f"rank. Received: x={x} with unknown rank."
)
static = x.shape.as_list()
return tuple(dynamic[i] if s is None else s for i, s in enumerate(static))
def cast(x, dtype):
dtype = standardize_dtype(dtype)
return tf.cast(x, dtype=dtype)
def compute_output_spec(fn, *args, **kwargs):
with StatelessScope():
graph_name = auto_name("scratch_graph")
with tf.__internal__.FuncGraph(graph_name).as_default():
def convert_keras_tensor_to_tf(x):
if isinstance(x, KerasTensor):
if x.sparse:
return tf.compat.v1.sparse_placeholder(
shape=x.shape, dtype=x.dtype
)
else:
return tf.compat.v1.placeholder(
shape=x.shape, dtype=x.dtype
)
if isinstance(x, types.FunctionType):
def _fn(*x_args, **x_kwargs):
out = x(*x_args, **x_kwargs)
out = convert_keras_tensor_to_tf(out)
return out
return _fn
return x
args, kwargs = tf.nest.map_structure(
convert_keras_tensor_to_tf, (args, kwargs)
)
tf_out = fn(*args, **kwargs)
def convert_tf_to_keras_tensor(x):
if tf.is_tensor(x):
return KerasTensor(
x.shape, x.dtype, sparse=isinstance(x, tf.SparseTensor)
)
return x
output_spec = tf.nest.map_structure(
convert_tf_to_keras_tensor, tf_out
)
return output_spec
def cond(pred, true_fn, false_fn):
return tf.cond(pred, true_fn=true_fn, false_fn=false_fn)
def vectorized_map(function, elements):
return tf.vectorized_map(function, elements)
def scatter(indices, values, shape):
return tf.scatter_nd(indices, values, shape)
def scatter_update(inputs, indices, updates):
return tf.tensor_scatter_nd_update(inputs, indices, updates)
def slice(inputs, start_indices, shape):
return tf.slice(inputs, start_indices, shape)
def slice_update(inputs, start_indices, updates):
return dynamic_update_slice(inputs, updates, start_indices)
def while_loop(
cond,
body,
loop_vars,
maximum_iterations=None,
):
return tf.while_loop(
cond,
body,
loop_vars,
maximum_iterations=maximum_iterations,
)
def fori_loop(lower, upper, body_fun, init_val):
return tf.while_loop(
lambda i, val: i < upper,
lambda i, val: (i + 1, body_fun(i, val)),
(lower, init_val),
)[1]
def stop_gradient(variable):
return tf.stop_gradient(variable)
def unstack(x, num=None, axis=0):
return tf.unstack(x, num=num, axis=axis)
class name_scope(base_name_scope):
def __init__(self, name, **kwargs):
super().__init__(name, **kwargs)
self._tf_name_scope = tf.name_scope(name)
def __enter__(self):
name_scope_stack = global_state.get_global_attribute(
"name_scope_stack", default=[], set_to_default=True
)
if self.deduplicate and name_scope_stack:
parent_caller = name_scope_stack[-1].caller
parent_name = name_scope_stack[-1].name
if (
self.caller is not None
and self.caller is parent_caller
and self.name == parent_name
):
return self
name_scope_stack.append(self)
self._pop_on_exit = True
self._tf_name_scope.__enter__()
return self
def __exit__(self, *args, **kwargs):
super().__exit__(*args, **kwargs)
if self._pop_on_exit:
self._tf_name_scope.__exit__(*args, **kwargs)
| keras-core/keras_core/backend/tensorflow/core.py/0 | {
"file_path": "keras-core/keras_core/backend/tensorflow/core.py",
"repo_id": "keras-core",
"token_count": 3506
} | 23 |
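As a quick illustration of the `fori_loop` helper defined above (it emulates `jax.lax.fori_loop` semantics on top of `tf.while_loop`), here is a small usage sketch, assuming TensorFlow is installed:

import tensorflow as tf

from keras_core.backend.tensorflow.core import fori_loop

# Sum the integers 0..4 by threading an accumulator through the loop body.
total = fori_loop(0, 5, lambda i, acc: acc + i, tf.constant(0))
print(int(total))  # 10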
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras_core.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras_core.backend.torch import core
from keras_core.backend.torch import image
from keras_core.backend.torch import math
from keras_core.backend.torch import nn
from keras_core.backend.torch import numpy
from keras_core.backend.torch import random
from keras_core.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras_core.backend.torch.core import Variable
from keras_core.backend.torch.core import cast
from keras_core.backend.torch.core import compute_output_spec
from keras_core.backend.torch.core import cond
from keras_core.backend.torch.core import convert_to_numpy
from keras_core.backend.torch.core import convert_to_tensor
from keras_core.backend.torch.core import is_tensor
from keras_core.backend.torch.core import scatter
from keras_core.backend.torch.core import shape
from keras_core.backend.torch.core import stop_gradient
from keras_core.backend.torch.core import to_torch_dtype
from keras_core.backend.torch.core import vectorized_map
from keras_core.backend.torch.rnn import cudnn_ok
from keras_core.backend.torch.rnn import gru
from keras_core.backend.torch.rnn import lstm
from keras_core.backend.torch.rnn import rnn
| keras-core/keras_core/backend/torch/__init__.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/__init__.py",
"repo_id": "keras-core",
"token_count": 587
} | 24 |
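A minimal sketch of the placement convention described in the docstring above, assuming the torch backend is active; on a CPU-only machine every tensor simply stays on the CPU:

from keras_core.backend.torch.core import convert_to_numpy, convert_to_tensor

x = convert_to_tensor([1.0, 2.0, 3.0])
print(x.device)             # cuda:0 when a GPU is visible, otherwise cpu
print(convert_to_numpy(x))  # copied back to the CPU before the NumPy conversion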
from keras_core.optimizers.base_optimizer import BaseOptimizer
class TorchParallelOptimizer(BaseOptimizer):
def _internal_apply_gradients(self, grads_and_vars):
grads, trainable_variables = zip(*grads_and_vars)
self._parallel_update_step(
grads,
trainable_variables,
self._get_current_learning_rate(),
)
self.iterations.assign(self.iterations + 1)
| keras-core/keras_core/backend/torch/optimizers/torch_parallel_optimizer.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/optimizers/torch_parallel_optimizer.py",
"repo_id": "keras-core",
"token_count": 188
} | 25 |
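Concrete optimizers plug into the class above by implementing `_parallel_update_step`, whose arguments mirror the call in `_internal_apply_gradients`. A hypothetical sketch of a plain SGD update; treating `variable.value` as the underlying torch tensor is an assumption of this sketch:

import torch

from keras_core.backend.torch.optimizers.torch_parallel_optimizer import (
    TorchParallelOptimizer,
)


class TorchSGD(TorchParallelOptimizer):
    def _parallel_update_step(self, grads, variables, learning_rate):
        with torch.no_grad():
            for grad, variable in zip(grads, variables):
                # In-place update: v <- v - lr * g
                variable.value.sub_(torch.as_tensor(grad) * learning_rate)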
import inspect
from keras_core.api_export import keras_core_export
from keras_core.constraints.constraints import Constraint
from keras_core.constraints.constraints import MaxNorm
from keras_core.constraints.constraints import MinMaxNorm
from keras_core.constraints.constraints import NonNeg
from keras_core.constraints.constraints import UnitNorm
from keras_core.saving import serialization_lib
from keras_core.utils.naming import to_snake_case
ALL_OBJECTS = {
Constraint,
MaxNorm,
MinMaxNorm,
NonNeg,
UnitNorm,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
@keras_core_export("keras_core.constraints.serialize")
def serialize(constraint):
return serialization_lib.serialize_keras_object(constraint)
@keras_core_export("keras_core.constraints.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras constraint object via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_core_export("keras_core.constraints.get")
def get(identifier):
"""Retrieve a Keras constraint object via an identifier."""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret constraint identifier: {identifier}"
)
| keras-core/keras_core/constraints/__init__.py/0 | {
"file_path": "keras-core/keras_core/constraints/__init__.py",
"repo_id": "keras-core",
"token_count": 715
} | 26 |
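The `get()` helper above accepts a class name, a snake_case alias, or a config dict. A short usage sketch (assuming the default `MaxNorm` constructor arguments and its `max_value` attribute):

from keras_core import constraints

c1 = constraints.get("MaxNorm")
c2 = constraints.get("max_norm")
c3 = constraints.get({"class_name": "MaxNorm", "config": {"max_value": 3.0}})
print(type(c1).__name__, type(c2).__name__, c3.max_value)  # MaxNorm MaxNorm 3.0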
from keras_core.export.export_lib import ExportArchive
| keras-core/keras_core/export/__init__.py/0 | {
"file_path": "keras-core/keras_core/export/__init__.py",
"repo_id": "keras-core",
"token_count": 16
} | 27 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import layers
from keras_core import testing
from keras_core.layers.convolutional.conv_test import np_conv1d
from keras_core.layers.convolutional.conv_test import np_conv2d
from keras_core.layers.convolutional.depthwise_conv_test import (
np_depthwise_conv1d,
)
from keras_core.layers.convolutional.depthwise_conv_test import (
np_depthwise_conv2d,
)
class SeparableConvBasicTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{
"depth_multiplier": 5,
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (3, 5, 4),
"output_shape": (3, 4, 5),
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2,),
"input_shape": (3, 4, 4),
"output_shape": (3, 4, 6),
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": 2,
"strides": (2,),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (3, 5, 4),
"output_shape": (3, 2, 6),
},
)
@pytest.mark.requires_trainable_backend
def test_separable_conv1d_basic(
self,
depth_multiplier,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
input_shape,
output_shape,
):
self.run_layer_test(
layers.SeparableConv1D,
init_kwargs={
"depth_multiplier": depth_multiplier,
"filters": filters,
"kernel_size": kernel_size,
"strides": strides,
"padding": padding,
"data_format": data_format,
"dilation_rate": dilation_rate,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
{
"depth_multiplier": 5,
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"input_shape": (3, 5, 5, 4),
"output_shape": (3, 4, 4, 5),
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2),
"input_shape": (3, 4, 4, 4),
"output_shape": (3, 4, 4, 6),
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": (2, 2),
"strides": (2, 2),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1),
"input_shape": (3, 5, 5, 4),
"output_shape": (3, 2, 2, 6),
},
)
@pytest.mark.requires_trainable_backend
def test_separable_conv2d_basic(
self,
depth_multiplier,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
input_shape,
output_shape,
):
self.run_layer_test(
layers.SeparableConv2D,
init_kwargs={
"depth_multiplier": depth_multiplier,
"filters": filters,
"kernel_size": kernel_size,
"strides": strides,
"padding": padding,
"data_format": data_format,
"dilation_rate": dilation_rate,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
def test_bad_init_args(self):
# `depth_multiplier` is not positive.
with self.assertRaises(ValueError):
layers.SeparableConv1D(depth_multiplier=0, filters=1, kernel_size=1)
# `filters` is not positive.
with self.assertRaises(ValueError):
layers.SeparableConv1D(depth_multiplier=1, filters=0, kernel_size=1)
# `kernel_size` has 0.
with self.assertRaises(ValueError):
layers.SeparableConv2D(
depth_multiplier=2, filters=2, kernel_size=(1, 0)
)
# `strides` has 0.
with self.assertRaises(ValueError):
layers.SeparableConv2D(
depth_multiplier=2,
filters=2,
kernel_size=(2, 2),
strides=(1, 0),
)
# `dilation_rate > 1` while `strides > 1`.
with self.assertRaises(ValueError):
layers.SeparableConv2D(
depth_multiplier=2,
filters=2,
kernel_size=(2, 2),
strides=2,
dilation_rate=(2, 1),
)
class SeparableConvCorrectnessTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{
"depth_multiplier": 5,
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2,),
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
)
def test_separable_conv1d(
self,
depth_multiplier,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
):
layer = layers.SeparableConv1D(
depth_multiplier=depth_multiplier,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 4])
layer.build(input_shape=inputs.shape)
depthwise_kernel_shape = layer.depthwise_kernel.shape
depthwise_kernel_weights = np.random.normal(size=depthwise_kernel_shape)
layer.depthwise_kernel.assign(depthwise_kernel_weights)
pointwise_kernel_shape = layer.pointwise_kernel.shape
pointwise_kernel_weights = np.random.normal(size=pointwise_kernel_shape)
layer.pointwise_kernel.assign(pointwise_kernel_weights)
bias_weights = np.random.normal(size=(filters,))
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected_depthwise = np_depthwise_conv1d(
inputs,
depthwise_kernel_weights,
np.zeros(4 * depth_multiplier),
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
expected = np_conv1d(
expected_depthwise,
pointwise_kernel_weights,
bias_weights,
strides=1,
padding=padding,
data_format=data_format,
dilation_rate=1,
groups=1,
)
self.assertAllClose(outputs.shape, expected.shape)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
@parameterized.parameters(
{
"depth_multiplier": 5,
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2),
},
{
"depth_multiplier": 6,
"filters": 6,
"kernel_size": (2, 2),
"strides": (2, 2),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1),
},
)
def test_separable_conv2d(
self,
depth_multiplier,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
):
layer = layers.SeparableConv2D(
depth_multiplier=depth_multiplier,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 8, 4])
layer.build(input_shape=inputs.shape)
depthwise_kernel_shape = layer.depthwise_kernel.shape
depthwise_kernel_weights = np.random.normal(size=depthwise_kernel_shape)
layer.depthwise_kernel.assign(depthwise_kernel_weights)
pointwise_kernel_shape = layer.pointwise_kernel.shape
pointwise_kernel_weights = np.random.normal(size=pointwise_kernel_shape)
layer.pointwise_kernel.assign(pointwise_kernel_weights)
bias_weights = np.random.normal(size=(filters,))
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected_depthwise = np_depthwise_conv2d(
inputs,
depthwise_kernel_weights,
np.zeros(4 * depth_multiplier),
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
expected = np_conv2d(
expected_depthwise,
pointwise_kernel_weights,
bias_weights,
strides=1,
padding=padding,
data_format=data_format,
dilation_rate=1,
groups=1,
)
self.assertAllClose(outputs.shape, expected.shape)
self.assertAllClose(outputs, expected, rtol=1e-5, atol=1e-5)
| keras-core/keras_core/layers/convolutional/separable_conv_test.py/0 | {
"file_path": "keras-core/keras_core/layers/convolutional/separable_conv_test.py",
"repo_id": "keras-core",
"token_count": 5964
} | 28 |
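The reference checks above assert that a separable convolution equals a depthwise stage followed by a 1x1 pointwise stage. A short sketch of the same decomposition at the parameter-count level (shapes chosen to match the tests; availability of `count_params` on the layer is assumed):

import numpy as np

from keras_core import layers

x = np.random.normal(size=(1, 8, 8, 4)).astype("float32")
layer = layers.SeparableConv2D(filters=6, kernel_size=3, depth_multiplier=2)
print(layer(x).shape)  # (1, 6, 6, 6) with the default "valid" padding

# depthwise kernel 3*3*4*2, pointwise kernel 1*1*(4*2)*6, bias 6
print(3 * 3 * 4 * 2 + (4 * 2) * 6 + 6)  # 126
print(layer.count_params())             # 126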
from keras_core.backend import image_data_format
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**kwargs)
self.data_format = (
image_data_format() if data_format is None else data_format
)
self.keepdims = keepdims
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
def call(self, inputs):
raise NotImplementedError
def compute_output_shape(self, input_shape):
num_spatial_dims = len(input_shape) - 2
if self.data_format == "channels_last":
if self.keepdims:
return (
(input_shape[0],)
+ (1,) * num_spatial_dims
+ (input_shape[-1],)
)
else:
return (input_shape[0],) + (input_shape[-1],)
else:
if self.keepdims:
return (input_shape[0], input_shape[1]) + (
1,
) * num_spatial_dims
else:
return (input_shape[0], input_shape[1])
def get_config(self):
config = super().get_config()
config.update(
{
"data_format": self.data_format,
"keepdims": self.keepdims,
}
)
return config
| keras-core/keras_core/layers/pooling/base_global_pooling.py/0 | {
"file_path": "keras-core/keras_core/layers/pooling/base_global_pooling.py",
"repo_id": "keras-core",
"token_count": 794
} | 29 |
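The `compute_output_shape` logic above can be read off directly from concrete calls; a quick sketch with `GlobalAveragePooling2D`, which subclasses this base layer:

import numpy as np

from keras_core import layers

x = np.random.random((2, 5, 5, 3)).astype("float32")
print(layers.GlobalAveragePooling2D()(x).shape)               # (2, 3)
print(layers.GlobalAveragePooling2D(keepdims=True)(x).shape)  # (2, 1, 1, 3)

x_cf = np.random.random((2, 3, 5, 5)).astype("float32")
print(
    layers.GlobalAveragePooling2D(
        data_format="channels_first", keepdims=True
    )(x_cf).shape
)  # (2, 3, 1, 1)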
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras_core import backend
from keras_core import layers
from keras_core import testing
class NormalizationTest(testing.TestCase, parameterized.TestCase):
@pytest.mark.requires_trainable_backend
def test_normalization_basics(self):
self.run_layer_test(
layers.Normalization,
init_kwargs={
"axis": -1,
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=3,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.Normalization,
init_kwargs={
"axis": -1,
"mean": np.array([0.5, 0.2, -0.1]),
"variance": np.array([0.1, 0.2, 0.3]),
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
self.run_layer_test(
layers.Normalization,
init_kwargs={
"axis": -1,
"mean": np.array([0.5, 0.2, -0.1]),
"variance": np.array([0.1, 0.2, 0.3]),
"invert": True,
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@parameterized.parameters([("np",), ("tensor",), ("tf.data")])
def test_normalization_adapt(self, input_type):
x = np.random.random((32, 4))
if input_type == "np":
data = x
elif input_type == "tensor":
data = backend.convert_to_tensor(x)
elif input_type == "tf.data":
data = tf_data.Dataset.from_tensor_slices(x).batch(8)
layer = layers.Normalization()
layer.adapt(data)
self.assertTrue(layer.built)
output = layer(x)
output = backend.convert_to_numpy(output)
self.assertAllClose(np.var(output, axis=0), 1.0, atol=1e-5)
self.assertAllClose(np.mean(output, axis=0), 0.0, atol=1e-5)
# Test in high-dim and with tuple axis.
x = np.random.random((32, 4, 3, 5))
if input_type == "np":
data = x
elif input_type == "tensor":
data = backend.convert_to_tensor(x)
elif input_type == "tf.data":
data = tf_data.Dataset.from_tensor_slices(x).batch(8)
layer = layers.Normalization(axis=(1, 2))
layer.adapt(data)
self.assertTrue(layer.built)
output = layer(x)
output = backend.convert_to_numpy(output)
self.assertAllClose(np.var(output, axis=(0, 3)), 1.0, atol=1e-5)
self.assertAllClose(np.mean(output, axis=(0, 3)), 0.0, atol=1e-5)
def test_normalization_errors(self):
# TODO
pass
@pytest.mark.skipif(
backend.backend() != "torch",
reason="Test symbolic call for torch meta device.",
)
def test_call_on_meta_device_after_built(self):
from keras_core.backend.torch import core
layer = layers.Normalization()
data = np.random.random((32, 4))
layer.adapt(data)
with core.device_scope("meta"):
layer(data)
| keras-core/keras_core/layers/preprocessing/normalization_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/normalization_test.py",
"repo_id": "keras-core",
"token_count": 1913
} | 30 |
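Outside the test harness, the adapt-then-call pattern exercised above looks like this (a minimal sketch):

import numpy as np

from keras_core import backend, layers

data = np.random.random((64, 4)).astype("float32")
norm = layers.Normalization()
norm.adapt(data)

out = backend.convert_to_numpy(norm(data))
print(out.mean(axis=0).round(4))  # ~0 per feature
print(out.var(axis=0).round(4))   # ~1 per feature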
import numpy as np
import pytest
from keras_core import backend
from keras_core import layers
from keras_core import testing
class GaussianNoiseTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_noise_basics(self):
self.run_layer_test(
layers.GaussianNoise,
init_kwargs={
"stddev": 0.2,
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
)
def test_gaussian_noise_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianNoise(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)), 0.3, atol=0.02
)
| keras-core/keras_core/layers/regularization/gaussian_noise_test.py/0 | {
"file_path": "keras-core/keras_core/layers/regularization/gaussian_noise_test.py",
"repo_id": "keras-core",
"token_count": 480
} | 31 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.layers.layer import Layer
from keras_core.ops import operation_utils
@keras_core_export("keras_core.layers.Reshape")
class Reshape(Layer):
"""Layer that reshapes inputs into the given shape.
Args:
target_shape: Target shape. Tuple of integers, does not include the
samples dimension (batch size).
Input shape:
Arbitrary, although all dimensions in the input shape must be
known/fixed. Use the keyword argument `input_shape` (tuple of integers,
does not include the samples/batch size axis) when using this layer as
the first layer in a model.
Output shape:
`(batch_size, *target_shape)`
Example:
>>> x = keras_core.Input(shape=(12,))
>>> y = keras_core.layers.Reshape((3, 4))(x)
>>> y.shape
(None, 3, 4)
>>> # also supports shape inference using `-1` as dimension
>>> y = keras_core.layers.Reshape((-1, 2, 2))(x)
>>> y.shape
(None, 3, 2, 2)
"""
def __init__(self, target_shape, **kwargs):
super().__init__(**kwargs)
self.target_shape = tuple(target_shape)
def compute_output_shape(self, input_shape):
return (
input_shape[0],
*operation_utils.compute_reshape_output_shape(
input_shape[1:], self.target_shape, "target_shape"
),
)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def call(self, inputs):
return ops.reshape(inputs, (ops.shape(inputs)[0],) + self.target_shape)
def get_config(self):
config = {"target_shape": self.target_shape}
base_config = super().get_config()
return {**base_config, **config}
| keras-core/keras_core/layers/reshaping/reshape.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/reshape.py",
"repo_id": "keras-core",
"token_count": 824
} | 32 |
import numpy as np
import pytest
from keras_core import initializers
from keras_core import layers
from keras_core import testing
class SimpleRNNTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.Bidirectional,
init_kwargs={"layer": layers.SimpleRNN(4)},
input_shape=(3, 2, 4),
expected_output_shape=(3, 8),
expected_num_trainable_weights=6,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.Bidirectional,
init_kwargs={
"layer": layers.SimpleRNN(4),
"backward_layer": layers.SimpleRNN(4, go_backwards=True),
"merge_mode": "sum",
},
input_shape=(3, 2, 4),
expected_output_shape=(3, 4),
expected_num_trainable_weights=6,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
forward_layer = layers.SimpleRNN(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer = layers.Bidirectional(
layer=forward_layer,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.39687276, 0.39687276, 0.10004295, 0.10004295],
[0.7237238, 0.7237238, 0.53391594, 0.53391594],
]
),
output,
)
layer = layers.Bidirectional(layer=forward_layer, merge_mode="ave")
output = layer(sequence)
self.assertAllClose(
np.array([[0.24845785, 0.24845785], [0.6288199, 0.6288199]]),
output,
)
layer = layers.Bidirectional(layer=forward_layer, merge_mode=None)
output1, output2 = layer(sequence)
self.assertAllClose(
np.array([[0.39687276, 0.39687276], [0.7237238, 0.7237238]]),
output1,
)
self.assertAllClose(
np.array([[0.10004295, 0.10004295], [0.53391594, 0.53391594]]),
output2,
)
backward_layer = layers.SimpleRNN(
2,
kernel_initializer=initializers.Constant(0.03),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.01),
go_backwards=True,
)
layer = layers.Bidirectional(
layer=forward_layer, backward_layer=backward_layer, merge_mode="mul"
)
output = layer(sequence)
self.assertAllClose(
np.array([[0.08374989, 0.08374989], [0.6740834, 0.6740834]]),
output,
)
forward_layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
)
layer = layers.Bidirectional(layer=forward_layer, merge_mode="sum")
output = layer(sequence)
self.assertAllClose(
np.array(
[
[
[0.20937867, 0.20937867],
[0.34462988, 0.34462988],
[0.40290534, 0.40290534],
],
[
[0.59829646, 0.59829646],
[0.6734641, 0.6734641],
[0.6479671, 0.6479671],
],
]
),
output,
)
def test_statefulness(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
forward_layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
stateful=True,
)
layer = layers.Bidirectional(layer=forward_layer)
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.26234663, 0.26234663, 0.16959146, 0.16959146],
[0.6137073, 0.6137073, 0.5381646, 0.5381646],
]
),
output,
)
layer.reset_state()
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.26234663, 0.26234663, 0.16959146, 0.16959146],
[0.6137073, 0.6137073, 0.5381646, 0.5381646],
]
),
output,
)
def test_pass_initial_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
initial_state = [
np.arange(4).reshape((2, 2)).astype("float32") * 1,
np.arange(4).reshape((2, 2)).astype("float32") * 2,
np.arange(4).reshape((2, 2)).astype("float32") * 3,
np.arange(4).reshape((2, 2)).astype("float32") * 4,
]
forward_layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer = layers.Bidirectional(
layer=forward_layer,
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array(
[
[0.20794602, 0.4577124, 0.14046375, 0.48191673],
[0.6682636, 0.6711909, 0.60943645, 0.60950446],
]
),
output,
)
def test_masking(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
forward_layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer = layers.Bidirectional(layer=forward_layer)
mask = np.array([[True, True, False, True], [True, False, False, True]])
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.19393763, 0.19393763, 0.11669192, 0.11669192],
[0.30818558, 0.30818558, 0.28380975, 0.28380975],
]
),
output,
)
| keras-core/keras_core/layers/rnn/bidirectional_test.py/0 | {
"file_path": "keras-core/keras_core/layers/rnn/bidirectional_test.py",
"repo_id": "keras-core",
"token_count": 3849
} | 33 |
from keras_core import backend
from keras_core.mixed_precision.dtype_policy import DTypePolicy
from keras_core.mixed_precision.dtype_policy import dtype_policy
from keras_core.mixed_precision.dtype_policy import set_dtype_policy
from keras_core.saving import serialization_lib
def resolve_policy(identifier):
if identifier is None:
return dtype_policy()
if isinstance(identifier, DTypePolicy):
return identifier
if isinstance(identifier, dict):
return serialization_lib.deserialize_keras_object(identifier)
if isinstance(identifier, str):
return DTypePolicy(identifier)
try:
return DTypePolicy(backend.standardize_dtype(identifier))
except:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
| keras-core/keras_core/mixed_precision/__init__.py/0 | {
"file_path": "keras-core/keras_core/mixed_precision/__init__.py",
"repo_id": "keras-core",
"token_count": 329
} | 34 |
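A short sketch of the identifier forms `resolve_policy` accepts (assuming the standard policy names):

from keras_core.mixed_precision import resolve_policy

print(resolve_policy("float32").name)        # float32
print(resolve_policy("mixed_float16").name)  # mixed_float16
print(resolve_policy(None).name)             # the currently active global policy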
import tree
from keras_core.trainers.data_adapters import data_adapter_utils
from keras_core.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None):
from keras_core.utils.module_utils import tensorflow as tf
if not isinstance(dataset, tf.data.Dataset):
raise ValueError(
"Expected argument `dataset` to be a tf.data.Dataset. "
f"Received: {dataset}"
)
if class_weight is not None:
dataset = dataset.map(
make_class_weight_map_fn(class_weight)
).prefetch(tf.data.AUTOTUNE)
self._dataset = dataset
def get_numpy_iterator(self):
for batch in self._dataset:
yield tree.map_structure(lambda x: x.numpy(), batch)
def get_tf_dataset(self):
return self._dataset
@property
def num_batches(self):
cardinality = int(self._dataset.cardinality())
# Return None for Unknown and Infinite cardinality datasets
if cardinality < 0:
return None
return cardinality
@property
def batch_size(self):
first_element_spec = tree.flatten(self._dataset.element_spec)[0]
return first_element_spec.shape[0]
@property
def has_partial_batch(self):
return None
@property
def partial_batch_size(self):
return None
def make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
from keras_core.utils.module_utils import tensorflow as tf
class_weight_tensor = tf.convert_to_tensor(
[
class_weight.get(int(c), 1.0)
for c in range(max(class_weight.keys()) + 1)
]
)
def class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = data_adapter_utils.unpack_x_y_sample_weight(data)
if sw is not None:
raise ValueError(
"You cannot `class_weight` and `sample_weight` "
"at the same time."
)
if tree.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
tf.shape(y)[-1] > 1,
lambda: tf.argmax(y, axis=-1),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int32)
cw = tf.gather(class_weight_tensor, y_classes)
return x, y, cw
return class_weights_map_fn
| keras-core/keras_core/trainers/data_adapters/tf_dataset_adapter.py/0 | {
"file_path": "keras-core/keras_core/trainers/data_adapters/tf_dataset_adapter.py",
"repo_id": "keras-core",
"token_count": 1531
} | 35 |
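A simplified standalone sketch of what `make_class_weight_map_fn` produces, skipping the rank-2/one-hot branch and the sample-weight checks:

import numpy as np
import tensorflow as tf

class_weight = {0: 0.2, 1: 0.6, 2: 0.3}
class_weight_tensor = tf.convert_to_tensor(
    [class_weight.get(c, 1.0) for c in range(max(class_weight.keys()) + 1)]
)

x = np.zeros((4, 1), dtype="float32")
y = np.array([0, 2, 1, 1], dtype="int32")
ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
ds = ds.map(lambda x, y: (x, y, tf.gather(class_weight_tensor, y)))

for _, _, sw in ds:
    print(sw.numpy())  # [0.2 0.3] then [0.6 0.6]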
5.4.0 | keras-cv/.bazelversion/0 | {
"file_path": "keras-cv/.bazelversion",
"repo_id": "keras-cv",
"token_count": 5
} | 36 |
load("//build_deps/tf_dependency:tf_configure.bzl", "tf_configure")
tf_configure(name = "local_config_tf")
| keras-cv/WORKSPACE/0 | {
"file_path": "keras-cv/WORKSPACE",
"repo_id": "keras-cv",
"token_count": 44
} | 37 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import RandomHue
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomHue(BaseImageAugmentationLayer):
"""Randomly adjusts the hue on given images.
This layer will randomly increase/reduce the hue for the input RGB
images. At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the hue of the input.
The image hue is adjusted by converting the image(s) to HSV and rotating the
hue channel (H) by delta. The image is then converted back to RGB.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image hue is impacted. `factor=0.0` makes this layer perform a no-op
operation, while a value of 1.0 performs the most aggressive
hue adjustment available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, value_range, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
)
self.value_range = value_range
self.seed = seed
def get_random_transformation(self, **kwargs):
invert = preprocessing_utils.random_inversion(self._random_generator)
# We must scale self.factor() to the range [-0.5, 0.5]. This is because
# tf.image.adjust_hue rotates the hue channel in HSV space, where a delta
# of +/-0.5 corresponds to a full +/-180 degree rotation.
return invert * self.factor() * 0.5
def augment_image(self, image, transformation=None, **kwargs):
image = preprocessing_utils.transform_value_range(
image, self.value_range, (0, 1), dtype=self.compute_dtype
)
# tf.image.adjust_hue expects floats to be in range [0, 1]
image = tf.image.adjust_hue(image, delta=transformation)
# RandomHue is one of the rare KPLs that needs to clip
image = tf.clip_by_value(image, 0, 1)
image = preprocessing_utils.transform_value_range(
image, (0, 1), self.value_range, dtype=self.compute_dtype
)
return image
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RandomHueTest(tf.test.TestCase):
def test_consistency_with_old_impl_rescaled_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
fixed_seed = 2023
image = tf.random.uniform(shape=image_shape)
layer = RandomHue(fixed_factor, (0, 1), fixed_seed)
old_layer = OldRandomHue(fixed_factor, (0, 1), fixed_seed)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
def test_consistency_with_old_impl_rgb_range(self):
image_shape = (16, 32, 32, 3)
fixed_factor = (0.8, 0.8)
fixed_seed = 2023
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomHue(fixed_factor, (0, 255), fixed_seed)
old_layer = OldRandomHue(fixed_factor, (0, 255), fixed_seed)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output, atol=1e-3, rtol=1e-5)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomHue, OldRandomHue]
aug_args = {"factor": (0.5), "value_range": (0, 255)}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# OldRandomHue fails to run with jit_compile=True
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_hue.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_hue.py",
"repo_id": "keras-cv",
"token_count": 3049
} | 38 |
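For reference, the hue rotation both layers in this benchmark rely on is `tf.image.adjust_hue`; a delta of +/-0.5 already spans the whole hue wheel, which is why the sampled factor is rescaled to [-0.5, 0.5]. A tiny sketch:

import tensorflow as tf

red = tf.constant([[[1.0, 0.0, 0.0]]])         # one pixel, values in [0, 1]
print(tf.image.adjust_hue(red, delta=1 / 3))   # rotated to green
print(tf.image.adjust_hue(red, delta=-1 / 3))  # rotated to blue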
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from packaging import version
from keras_cv.backend import config as backend_config
from keras_cv.backend.config import keras_3
def pytest_addoption(parser):
parser.addoption(
"--run_large",
action="store_true",
default=False,
help="run large tests",
)
parser.addoption(
"--run_extra_large",
action="store_true",
default=False,
help="run extra_large tests",
)
parser.addoption(
"--check_gpu",
action="store_true",
default=False,
help="fail if a gpu is not present",
)
def pytest_configure(config):
# Verify that device has GPU and detected by backend
if config.getoption("--check_gpu"):
found_gpu = False
backend = backend_config.backend()
if backend == "jax":
import jax
try:
found_gpu = bool(jax.devices("gpu"))
except RuntimeError:
found_gpu = False
elif backend == "tensorflow":
found_gpu = bool(tf.config.list_logical_devices("GPU"))
elif backend == "torch":
import torch
found_gpu = bool(torch.cuda.device_count())
if not found_gpu:
pytest.fail(f"No GPUs discovered on the {backend} backend.")
config.addinivalue_line(
"markers", "large: mark test as being slow or requiring a network"
)
config.addinivalue_line(
"markers",
"extra_large: mark test as being too large to run continuously",
)
config.addinivalue_line(
"markers",
"tf_keras_only: mark test as a Keras 2-only test",
)
config.addinivalue_line(
"markers",
"tf_only: mark test as a Tensorflow-only test",
)
def pytest_collection_modifyitems(config, items):
run_extra_large_tests = config.getoption("--run_extra_large")
# Run large tests for --run_extra_large or --run_large.
run_large_tests = config.getoption("--run_large") or run_extra_large_tests
# Run Keras saving tests on 2.12 stable, nightlies and later releases.
skip_keras_saving_test = pytest.mark.skipif(
version.parse(tf.__version__) < version.parse("2.12.0-dev0"),
reason="keras_v3 format requires tf > 2.12.",
)
skip_large = pytest.mark.skipif(
not run_large_tests, reason="need --run_large option to run"
)
skip_extra_large = pytest.mark.skipif(
not run_extra_large_tests, reason="need --run_extra_large option to run"
)
skip_keras_2_only = pytest.mark.skipif(
keras_3(),
reason="This test is only supported on Keras 2",
)
skip_tf_only = pytest.mark.skipif(
keras_3() and backend_config.backend() != "tensorflow",
reason="This test is only supported on TensorFlow",
)
for item in items:
if "keras_format" in item.name:
item.add_marker(skip_keras_saving_test)
if "tf_format" in item.name:
item.add_marker(skip_extra_large)
if "large" in item.keywords:
item.add_marker(skip_large)
if "extra_large" in item.keywords:
item.add_marker(skip_extra_large)
if "tf_keras_only" in item.keywords:
item.add_marker(skip_keras_2_only)
if "tf_only" in item.keywords:
item.add_marker(skip_tf_only)
| keras-cv/conftest.py/0 | {
"file_path": "keras-cv/conftest.py",
"repo_id": "keras-cv",
"token_count": 1663
} | 39 |
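For reference, tests opt into the size-based skipping configured above with the standard pytest marker syntax; a hypothetical example (the test names are illustrative only):

import pytest


@pytest.mark.large  # runs only with --run_large (or --run_extra_large)
def test_backbone_with_pretrained_weights():
    ...


@pytest.mark.extra_large  # runs only with --run_extra_large
def test_full_coco_training_loop():
    ...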
licenses(["notice"]) # Apache 2.0
package(default_visibility = ["//visibility:public"])
config_setting(
name = "windows",
constraint_values = ["@bazel_tools//platforms:windows"],
)
py_library(
name = "keras_cv",
srcs = glob(["**/*.py"]),
data = [
"//keras_cv/custom_ops:_keras_cv_custom_ops.so",
]
)
| keras-cv/keras_cv/BUILD/0 | {
"file_path": "keras-cv/keras_cv/BUILD",
"repo_id": "keras-cv",
"token_count": 145
} | 40 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
formats.py contains axis information for each supported format.
"""
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.bounding_box.XYXY")
class XYXY:
"""XYXY contains axis indices for the XYXY format.
All values in the XYXY format should be absolute pixel values.
The XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
@keras_cv_export("keras_cv.bounding_box.REL_XYXY")
class REL_XYXY:
"""REL_XYXY contains axis indices for the REL_XYXY format.
REL_XYXY is like XYXY, but each value is relative to the width and height of
the original image. Values are percentages of the original image's width and
height respectively.
The REL_XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
@keras_cv_export("keras_cv.bounding_box.CENTER_XYWH")
class CENTER_XYWH:
"""CENTER_XYWH contains axis indices for the CENTER_XYWH format.
All values in the CENTER_XYWH format should be absolute pixel values.
The CENTER_XYWH format consists of the following required indices:
- X: X coordinate of the center of the bounding box
- Y: Y coordinate of the center of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
@keras_cv_export("keras_cv.bounding_box.XYWH")
class XYWH:
"""XYWH contains axis indices for the XYWH format.
All values in the XYWH format should be absolute pixel values.
The XYWH format consists of the following required indices:
- X: X coordinate of the left of the bounding box
- Y: Y coordinate of the top of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
@keras_cv_export("keras_cv.bounding_box.REL_XYWH")
class REL_XYWH:
"""REL_XYWH contains axis indices for the XYWH format.
REL_XYXY is like XYWH, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
- X: X coordinate of the left of the bounding box
- Y: Y coordinate of the top of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
@keras_cv_export("keras_cv.bounding_box.YXYX")
class YXYX:
"""YXYX contains axis indices for the YXYX format.
All values in the YXYX format should be absolute pixel values.
The YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
@keras_cv_export("keras_cv.bounding_box.REL_YXYX")
class REL_YXYX:
"""REL_YXYX contains axis indices for the REL_YXYX format.
REL_YXYX is like YXYX, but each value is relative to the width and height of
the original image. Values are percentages of the original image's width and
height respectively.
The REL_YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
| keras-cv/keras_cv/bounding_box/formats.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/formats.py",
"repo_id": "keras-cv",
"token_count": 1517
} | 41 |
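The index classes above are meant to be used as named column accessors into a boxes tensor; a small sketch with the XYXY format:

import numpy as np

from keras_cv import bounding_box

boxes = np.array([[10.0, 20.0, 50.0, 80.0]])  # one box in absolute "xyxy"
fmt = bounding_box.XYXY

widths = boxes[:, fmt.RIGHT] - boxes[:, fmt.LEFT]
heights = boxes[:, fmt.BOTTOM] - boxes[:, fmt.TOP]
print(widths, heights)  # [40.] [60.]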