Dataset schema:
  text                string (lengths 5 to 261k)
  id                  string (lengths 16 to 106)
  metadata            dict
  __index_level_0__   int64 (0 to 266)
import torch

from keras import layers
from keras import testing
from keras.backend.common import KerasVariable


class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = layers.Dense(1)

    def forward(self, x):
        x = self.fc1(x)
        return x


class TorchWorkflowTest(testing.TestCase):
    def test_keras_layer_in_nn_module(self):
        net = Net()

        # Test using Keras layer in a nn.Module.
        # Test forward pass
        self.assertAllEqual(list(net(torch.empty(100, 10)).shape), [100, 1])
        # Test KerasVariables are added as nn.Parameter.
        self.assertLen(list(net.parameters()), 2)

        # Test using KerasVariable as a torch tensor for torch ops.
        kernel = net.fc1.kernel
        transposed_kernel = torch.transpose(kernel, 0, 1)
        self.assertIsInstance(kernel, KerasVariable)
        self.assertIsInstance(
            torch.mul(kernel, transposed_kernel), torch.Tensor
        )
keras/integration_tests/torch_workflow_test.py/0
{ "file_path": "keras/integration_tests/torch_workflow_test.py", "repo_id": "keras", "token_count": 416 }
137
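A minimal sketch of the workflow this test exercises: embedding a Keras layer in a `torch.nn.Module` and training it with a plain torch optimizer. It assumes the torch backend is selected (e.g. `KERAS_BACKEND=torch`); `TinyNet` and the training step below are illustrative, not part of the file above.

import torch

from keras import layers


class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # A Keras layer used as a torch submodule; its KerasVariables
        # surface through net.parameters() once the layer is built.
        self.fc1 = layers.Dense(1)

    def forward(self, x):
        return self.fc1(x)


net = TinyNet()
x, y = torch.randn(8, 10), torch.randn(8, 1)
net(x)  # first call builds the Dense layer, registering its parameters
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
loss = torch.nn.functional.mse_loss(net(x), y)
loss.backward()
optimizer.step()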
import threading

from keras import backend
from keras.api_export import keras_export

GLOBAL_STATE_TRACKER = threading.local()
GLOBAL_SETTINGS_TRACKER = threading.local()


def set_global_attribute(name, value):
    setattr(GLOBAL_STATE_TRACKER, name, value)


def get_global_attribute(name, default=None, set_to_default=False):
    attr = getattr(GLOBAL_STATE_TRACKER, name, None)
    if attr is None and default is not None:
        attr = default
        if set_to_default:
            set_global_attribute(name, attr)
    return attr


@keras_export(["keras.utils.clear_session", "keras.backend.clear_session"])
def clear_session():
    """Resets all state generated by Keras.

    Keras manages a global state, which it uses to implement the Functional
    model-building API and to uniquify autogenerated layer names.

    If you are creating many models in a loop, this global state will consume
    an increasing amount of memory over time, and you may want to clear it.
    Calling `clear_session()` releases the global state: this helps avoid
    clutter from old models and layers, especially when memory is limited.

    Example 1: calling `clear_session()` when creating models in a loop

    ```python
    for _ in range(100):
        # Without `clear_session()`, each iteration of this loop will
        # slightly increase the size of the global state managed by Keras
        model = keras.Sequential([
            keras.layers.Dense(10) for _ in range(10)])

    for _ in range(100):
        # With `clear_session()` called at the beginning,
        # Keras starts with a blank state at each iteration
        # and memory consumption is constant over time.
        keras.backend.clear_session()
        model = keras.Sequential([
            keras.layers.Dense(10) for _ in range(10)])
    ```

    Example 2: resetting the layer name generation counter

    >>> layers = [keras.layers.Dense(10) for _ in range(10)]
    >>> new_layer = keras.layers.Dense(10)
    >>> print(new_layer.name)
    dense_10
    >>> keras.backend.clear_session()
    >>> new_layer = keras.layers.Dense(10)
    >>> print(new_layer.name)
    dense
    """
    global GLOBAL_STATE_TRACKER
    global GLOBAL_SETTINGS_TRACKER

    GLOBAL_STATE_TRACKER = threading.local()
    GLOBAL_SETTINGS_TRACKER = threading.local()

    if backend.backend() == "tensorflow":
        from keras.utils.module_utils import tensorflow as tf

        tf.compat.v1.reset_default_graph()
        if tf.executing_eagerly():
            # Clear pending nodes in eager executors, kernel caches and
            # step_containers.
            from tensorflow.python.eager import context

            context.context().clear_kernel_cache()
    elif backend.backend() == "torch":
        import torch._dynamo as dynamo

        # Resets torchdynamo's cache so that cached guards, compiled fns,
        # etc. do not persist between clear_session() calls.
        dynamo.reset()
keras/keras/backend/common/global_state.py/0
{ "file_path": "keras/keras/backend/common/global_state.py", "repo_id": "keras", "token_count": 1087 }
138
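A short usage sketch for the thread-local attribute helpers above, assuming the module is importable at its in-repo path `keras.backend.common.global_state`; the attribute names are made up for illustration.

from keras.backend.common import global_state

# Attributes live in thread-local storage, so values set in one thread
# are not visible from another.
global_state.set_global_attribute("my_flag", True)
assert global_state.get_global_attribute("my_flag") is True

# With `set_to_default=True`, the default is written back on first
# access, so subsequent reads return the stored value.
value = global_state.get_global_attribute(
    "my_counter", default=0, set_to_default=True
)
assert value == 0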
import builtins
import collections
import math
import warnings

import numpy as np
import tensorflow as tf
from tensorflow.experimental import numpy as tfnp
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops

from keras.backend import config
from keras.backend import standardize_dtype
from keras.backend.common import dtypes
from keras.backend.tensorflow import sparse
from keras.backend.tensorflow.core import convert_to_tensor


@sparse.elementwise_binary_union(tf.sparse.add)
def add(x1, x2):
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return tf.add(x1, x2)


def bincount(x, weights=None, minlength=0):
    x = convert_to_tensor(x)
    dtypes_to_resolve = [x.dtype]
    if standardize_dtype(x.dtype) not in ["int32", "int64"]:
        x = tf.cast(x, tf.int32)
    if weights is not None:
        weights = convert_to_tensor(weights)
        dtypes_to_resolve.append(weights.dtype)
        dtype = dtypes.result_type(*dtypes_to_resolve)
        if standardize_dtype(weights.dtype) not in [
            "int32",
            "int64",
            "float32",
            "float64",
        ]:
            if "int" in standardize_dtype(weights.dtype):
                weights = tf.cast(weights, tf.int32)
            else:
                weights = tf.cast(weights, tf.float32)
    else:
        dtype = "int32"
    if isinstance(x, tf.SparseTensor):
        output = tf.sparse.bincount(
            x,
            weights=weights,
            minlength=minlength,
            axis=-1,
        )
        output = tf.cast(output, dtype)
        if x.shape.rank == 1:
            output_shape = (minlength,)
        else:
            batch_size = tf.shape(output)[0]
            output_shape = (batch_size, minlength)
        return tf.SparseTensor(
            indices=output.indices,
            values=output.values,
            dense_shape=output_shape,
        )
    return tf.cast(
        tf.math.bincount(x, weights=weights, minlength=minlength, axis=-1),
        dtype,
    )


def einsum(subscripts, *operands, **kwargs):
    operands = tf.nest.map_structure(convert_to_tensor, operands)
    dtypes_to_resolve = []
    for x in operands:
        dtypes_to_resolve.append(x.dtype)
    result_dtype = dtypes.result_type(*dtypes_to_resolve)
    compute_dtype = result_dtype
    # TODO: tf.einsum doesn't support integer dtype with gpu
    if "int" in compute_dtype:
        compute_dtype = config.floatx()
    operands = tf.nest.map_structure(
        lambda x: tf.cast(x, compute_dtype), operands
    )
    return tf.cast(tf.einsum(subscripts, *operands, **kwargs), result_dtype)


@sparse.elementwise_binary_union(sparse.sparse_subtract)
def subtract(x1, x2):
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return tf.subtract(x1, x2)


def matmul(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    x1_shape = x1.shape
    x2_shape = x2.shape
    x1_sparse = isinstance(x1, tf.SparseTensor)
    x2_sparse = isinstance(x2, tf.SparseTensor)
    # When both x1 and x2 are dense int8 tensors, specify `output_type`
    # as int32 to enable hardware-accelerated matmul.
    x1_dtype = standardize_dtype(x1.dtype)
    x2_dtype = standardize_dtype(x2.dtype)
    if (
        x1_dtype == "int8"
        and x2_dtype == "int8"
        and not x1_sparse
        and not x2_sparse
        and x1_shape.rank != 1  # TODO: support tf.tensordot
        and x2_shape.rank != 1  # TODO: support tf.tensordot
    ):
        compute_dtype = "int8"
        result_dtype = "int32"
        output_type = result_dtype
    else:
        # TODO: Typically, GPU and XLA only support float types
        compute_dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
        result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
        output_type = None
    x1 = tf.cast(x1, compute_dtype)
    x2 = tf.cast(x2, compute_dtype)

    def with_combined_batch_dimensions(a, b, output_shape, fn_3d):
        a_sparse = isinstance(a, tf.SparseTensor)
        b_sparse = isinstance(b, tf.SparseTensor)
        batch_shape = b.shape[:-2] if b_sparse else a.shape[:-2]
        batch_size = math.prod(batch_shape)
        a3d_shape = [batch_size] + a.shape[-2:]
        a_3d = (
            tf.sparse.reshape(a, a3d_shape)
            if a_sparse
            else tf.reshape(a, a3d_shape)
        )
        b3d_shape = [batch_size] + b.shape[-2:]
        b_3d = (
            tf.sparse.reshape(b, b3d_shape)
            if b_sparse
            else tf.reshape(b, b3d_shape)
        )
        result_3d = fn_3d(a_3d, b_3d)
        return (
            tf.sparse.reshape(result_3d, output_shape)
            if isinstance(result_3d, tf.SparseTensor)
            else tf.reshape(result_3d, output_shape)
        )

    def sparse_sparse_matmul(a, b):
        dtype = a.values.dtype
        # Convert SparseTensors to CSR SparseMatrix.
        a_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
            a.indices, a.values, a.dense_shape
        )
        b_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
            b.indices, b.values, b.dense_shape
        )
        # Compute the CSR SparseMatrix matrix multiplication.
        result_csr = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
            a_csr, b_csr, dtype
        )
        # Convert the CSR SparseMatrix to a SparseTensor.
        res = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
            result_csr, dtype
        )
        return tf.SparseTensor(res.indices, res.values, res.dense_shape)

    def embedding_lookup_sparse_dense_matmul(a, b):
        # We need at least one id per row for embedding_lookup_sparse,
        # otherwise there will be missing rows in the output.
        a, _ = tf.sparse.fill_empty_rows(a, 0)
        # We need to split x1 into separate ids and weights tensors. The ids
        # should be the column indices of x1 and the values of the weights
        # can continue to be the actual x1. The column arrangement of ids
        # and weights does not matter as we sum over columns. See details in
        # the documentation for sparse_ops.sparse_tensor_dense_matmul.
        ids = tf.SparseTensor(
            indices=a.indices,
            values=a.indices[:, 1],
            dense_shape=a.dense_shape,
        )
        return tf.nn.embedding_lookup_sparse(b, ids, a, combiner="sum")

    # Either a or b is sparse
    def sparse_dense_matmul_3d(a, b):
        return tf.map_fn(
            lambda x: tf.sparse.sparse_dense_matmul(x[0], x[1]),
            elems=(a, b),
            fn_output_signature=a.dtype,
        )

    if x1_sparse or x2_sparse:
        from keras.ops.operation_utils import compute_matmul_output_shape

        output_shape = compute_matmul_output_shape(x1_shape, x2_shape)
        if x1_sparse and x2_sparse:
            if x1_shape.rank <= 3:
                output = sparse_sparse_matmul(x1, x2)
            else:
                output = with_combined_batch_dimensions(
                    x1, x2, output_shape, sparse_sparse_matmul
                )
        else:
            # Sparse * dense or dense * sparse
            sparse_rank = x1_shape.rank if x1_sparse else x2_shape.rank
            # Special case: embedding_lookup_sparse for sparse * dense, rank 2
            if x1_sparse and sparse_rank == 2:
                output = embedding_lookup_sparse_dense_matmul(x1, x2)
            elif sparse_rank == 2:
                output = tf.sparse.sparse_dense_matmul(x1, x2)
            elif sparse_rank == 3:
                output = sparse_dense_matmul_3d(x1, x2)
            else:
                output = with_combined_batch_dimensions(
                    x1, x2, output_shape, sparse_dense_matmul_3d
                )
        output = tf.cast(output, result_dtype)
        output.set_shape(output_shape)
        return output
    else:
        if x1_shape.rank == 2 and x2_shape.rank == 2:
            output = tf.matmul(x1, x2, output_type=output_type)
        elif x2_shape.rank == 1:
            output = tf.tensordot(x1, x2, axes=1)
        elif x1_shape.rank == 1:
            output = tf.tensordot(x1, x2, axes=[[0], [-2]])
        else:
            output = tf.matmul(x1, x2, output_type=output_type)
        return tf.cast(output, result_dtype)


@sparse.elementwise_binary_intersection
def multiply(x1, x2):
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return tf.multiply(x1, x2)


def mean(x, axis=None, keepdims=False):
    if isinstance(x, tf.IndexedSlices):
        if axis is None:
            # Reduce against all axes, result is a single value and dense.
            # The denominator has to account for `dense_shape`.
            sum = tf.reduce_sum(x.values, keepdims=keepdims)
            return sum / tf.cast(
                tf.reduce_prod(x.dense_shape), dtype=sum.dtype
            )
        if isinstance(axis, int):
            axis = [axis]
        elif not axis:
            # Empty axis tuple, this is a no-op
            return x
        dense_shape = tf.convert_to_tensor(x.dense_shape)
        rank = tf.shape(dense_shape)[0]
        # Normalize axis: convert negative values and sort
        axis = [rank + a if a < 0 else a for a in axis]
        axis.sort()
        if axis == [0]:
            # Reduce against `axis=0` only, result is dense.
            # The denominator has to account for `dense_shape[0]`.
            sum = tf.reduce_sum(x.values, axis=0, keepdims=keepdims)
            return sum / tf.cast(dense_shape[0], dtype=sum.dtype)
        elif axis[0] == 0:
            # Reduce against axis 0 and other axes, result is dense.
            # We do `axis=0` separately first. The denominator has to account
            # for `dense_shape[0]`.
            # We use `keepdims=True` in `reduce_sum` so that we can leave the
            # 0 in axis and do `reduce_mean` with `keepdims` to apply it for
            # all axes.
            sum = tf.reduce_sum(x.values, axis=0, keepdims=True)
            axis_0_mean = sum / tf.cast(dense_shape[0], dtype=sum.dtype)
            return tf.reduce_mean(axis_0_mean, axis=axis, keepdims=keepdims)
        elif keepdims:
            # With `keepdims=True`, result is an `IndexedSlices` with the same
            # indices since axis 0 is not touched. The only thing to do is to
            # correct `dense_shape` to account for dimensions that became 1.
            new_values = tf.reduce_mean(x.values, axis=axis, keepdims=True)
            new_dense_shape = tf.concat(
                [dense_shape[0:1], new_values.shape[1:]], axis=0
            )
            return tf.IndexedSlices(new_values, x.indices, new_dense_shape)
        elif rank == len(axis) + 1:
            # `keepdims=False` and reducing against all axes except 0, result
            # is a 1D tensor, which cannot be `IndexedSlices`. We have to
            # scatter the computed means to construct the correct dense
            # tensor.
            return tf.scatter_nd(
                tf.expand_dims(x.indices, axis=1),
                tf.reduce_mean(x.values, axis=axis),
                [dense_shape[0]],
            )
        else:
            # `keepdims=False`, not reducing against axis 0 and there is at
            # least one other axis we are not reducing against. We simply need
            # to fix `dense_shape` to remove dimensions that were reduced.
            gather_indices = [i for i in range(rank) if i not in axis]
            return tf.IndexedSlices(
                tf.reduce_mean(x.values, axis=axis),
                x.indices,
                tf.gather(x.dense_shape, gather_indices, axis=0),
            )
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    compute_dtype = dtypes.result_type(x.dtype, "float32")
    # `tf.reduce_mean` does not handle low precision (e.g., float16) overflow
    # correctly, so we compute with float32 and cast back to the original
    # type.
    if "int" in ori_dtype or ori_dtype == "bool":
        result_dtype = compute_dtype
    else:
        result_dtype = ori_dtype
    output = tf.reduce_mean(
        tf.cast(x, compute_dtype), axis=axis, keepdims=keepdims
    )
    return tf.cast(output, result_dtype)


def max(x, axis=None, keepdims=False, initial=None):
    # The TensorFlow numpy API implementation doesn't support `initial` so we
    # handle it manually here.
    if initial is not None:
        return tf.math.maximum(
            tfnp.max(x, axis=axis, keepdims=keepdims), initial
        )

    # TensorFlow returns -inf by default for an empty list, but for
    # consistency with other backends and the numpy API we want to throw in
    # this case.
    if tf.executing_eagerly():
        size_x = size(x)
        tf.assert_greater(
            size_x,
            tf.constant(0, dtype=size_x.dtype),
            message="Cannot compute the max of an empty tensor.",
        )

    return tfnp.max(x, axis=axis, keepdims=keepdims)


def ones(shape, dtype=None):
    dtype = dtype or config.floatx()
    return tf.ones(shape, dtype=dtype)


def zeros(shape, dtype=None):
    dtype = dtype or config.floatx()
    return tf.zeros(shape, dtype=dtype)


@sparse.elementwise_unary
def absolute(x):
    # uintx and bool are always non-negative
    dtype = standardize_dtype(x.dtype)
    if "uint" in dtype or dtype == "bool":
        return x
    return tf.abs(x)


@sparse.elementwise_unary
def abs(x):
    return absolute(x)


def all(x, axis=None, keepdims=False):
    x = tf.cast(x, "bool")
    return tf.reduce_all(x, axis=axis, keepdims=keepdims)


def any(x, axis=None, keepdims=False):
    x = tf.cast(x, "bool")
    return tf.reduce_any(x, axis=axis, keepdims=keepdims)


def amax(x, axis=None, keepdims=False):
    return max(x, axis=axis, keepdims=keepdims)


def amin(x, axis=None, keepdims=False):
    return min(x, axis=axis, keepdims=keepdims)


def append(x1, x2, axis=None):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    if axis is None:
        return tf.concat([tf.reshape(x1, [-1]), tf.reshape(x2, [-1])], axis=0)
    else:
        return tf.concat([x1, x2], axis=axis)


def arange(start, stop=None, step=1, dtype=None):
    # tfnp.arange has trouble with dynamic Tensors in compiled function.
    # tf.range does not.
    if dtype is None:
        dtypes_to_resolve = [
            getattr(start, "dtype", type(start)),
            getattr(step, "dtype", type(step)),
        ]
        if stop is not None:
            dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
        dtype = dtypes.result_type(*dtypes_to_resolve)
    dtype = standardize_dtype(dtype)
    try:
        out = tf.range(start, stop, delta=step, dtype=dtype)
    except tf.errors.NotFoundError:
        # Some dtypes may not work in eager mode on CPU or GPU.
        out = tf.range(start, stop, delta=step, dtype="float32")
        out = tf.cast(out, dtype)
    return out


@sparse.densifying_unary(0.5 * np.pi)
def arccos(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.acos(x)


@sparse.densifying_unary(np.nan)
def arccosh(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.acosh(x)


@sparse.elementwise_unary
def arcsin(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.asin(x)


@sparse.elementwise_unary
def arcsinh(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.asinh(x)


@sparse.elementwise_unary
def arctan(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.atan(x)


def arctan2(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tf.math.atan2(x1, x2)


@sparse.elementwise_unary
def arctanh(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.atanh(x)


def argmax(x, axis=None):
    if axis is None:
        x = tf.reshape(x, [-1])
    return tf.cast(tf.argmax(x, axis=axis), dtype="int32")


def argmin(x, axis=None):
    if axis is None:
        x = tf.reshape(x, [-1])
    return tf.cast(tf.argmin(x, axis=axis), dtype="int32")


def argsort(x, axis=-1):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "bool":
        x = tf.cast(x, "uint8")

    x_shape = x.shape
    if x_shape.rank == 0:
        return tf.cast([0], "int32")

    if axis is None:
        x = tf.reshape(x, [-1])
        axis = 0
    return tf.argsort(x, axis=axis)


def array(x, dtype=None):
    return convert_to_tensor(x, dtype=dtype)


def average(x, axis=None, weights=None):
    x = convert_to_tensor(x)
    if not isinstance(axis, (list, tuple)):
        axis = (axis,)
    dtypes_to_resolve = [x.dtype, float]
    if weights is not None:
        weights = convert_to_tensor(weights)
        dtypes_to_resolve.append(weights.dtype)
    result_dtype = dtypes.result_type(*dtypes_to_resolve)
    compute_dtype = result_dtype
    # TODO: since tfnp.average incorrectly promotes bfloat16 to float64, we
    # need to cast to float32 first and then cast back to bfloat16
    if compute_dtype == "bfloat16":
        compute_dtype = "float32"
    x = tf.cast(x, compute_dtype)
    if weights is not None:
        weights = tf.cast(weights, compute_dtype)
    for a in axis:
        # `tfnp.average` does not handle multiple axes.
        x = tfnp.average(x, weights=weights, axis=a)
    return tf.cast(x, result_dtype)


def broadcast_to(x, shape):
    return tf.broadcast_to(x, shape)


@sparse.elementwise_unary
def ceil(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.ceil(x)


def clip(x, x_min, x_max):
    dtype = standardize_dtype(x.dtype)
    if dtype == "bool":
        x = tf.cast(x, "int32")
    return tf.clip_by_value(x, x_min, x_max)


def concatenate(xs, axis=0):
    sparse_count = builtins.sum(isinstance(x, tf.SparseTensor) for x in xs)
    if sparse_count:
        if sparse_count == len(xs):
            return tf.sparse.concat(axis=axis, sp_inputs=xs)
        else:
            xs = [
                (
                    convert_to_tensor(x, sparse=False)
                    if isinstance(x, tf.SparseTensor)
                    else x
                )
                for x in xs
            ]
    xs = tf.nest.map_structure(convert_to_tensor, xs)
    dtype_set = set([x.dtype for x in xs])
    if len(dtype_set) > 1:
        dtype = dtypes.result_type(*dtype_set)
        xs = tf.nest.map_structure(lambda x: tf.cast(x, dtype), xs)
    return tf.concat(xs, axis=axis)


@sparse.elementwise_unary
def conjugate(x):
    return tf.math.conj(x)


@sparse.elementwise_unary
def conj(x):
    return tf.math.conj(x)


@sparse.elementwise_unary
def copy(x):
    return tfnp.copy(x)


@sparse.densifying_unary(1)
def cos(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.cos(x)


@sparse.densifying_unary(1)
def cosh(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.cosh(x)


def count_nonzero(x, axis=None):
    return tf.math.count_nonzero(x, axis=axis, dtype="int32")


def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tfnp.cross(
        x1,
        x2,
        axisa=axisa,
        axisb=axisb,
        axisc=axisc,
        axis=axis,
    )


def cumprod(x, axis=None, dtype=None):
    x = convert_to_tensor(x, dtype=dtype)
    # tf.math.cumprod doesn't support bool
    if standardize_dtype(x.dtype) == "bool":
        x = tf.cast(x, "int32")
    if axis is None:
        x = tf.reshape(x, [-1])
        axis = 0
    return tf.math.cumprod(x, axis=axis)


def cumsum(x, axis=None, dtype=None):
    x = convert_to_tensor(x, dtype=dtype)
    # tf.math.cumsum doesn't support bool
    if standardize_dtype(x.dtype) == "bool":
        x = tf.cast(x, "int32")
    if axis is None:
        x = tf.reshape(x, [-1])
        axis = 0
    return tf.math.cumsum(x, axis=axis)


def diag(x, k=0):
    return tfnp.diag(x, k=k)


def diagonal(x, offset=0, axis1=0, axis2=1):
    return tfnp.diagonal(
        x,
        offset=offset,
        axis1=axis1,
        axis2=axis2,
    )


def diff(a, n=1, axis=-1):
    return tfnp.diff(a, n=n, axis=axis)


def digitize(x, bins):
    x = convert_to_tensor(x)
    bins = list(bins)

    # bins must be float type
    bins = tf.nest.map_structure(lambda x: float(x), bins)

    # TODO: tf.raw_ops.Bucketize doesn't support bool, bfloat16, float16, int8
    # int16, uint8, uint16, uint32
    ori_dtype = standardize_dtype(x.dtype)
    if ori_dtype in ("bool", "int8", "int16", "uint8", "uint16"):
        x = tf.cast(x, "int32")
    elif ori_dtype == "uint32":
        x = tf.cast(x, "int64")
    elif ori_dtype in ("bfloat16", "float16"):
        x = tf.cast(x, "float32")

    if isinstance(x, tf.RaggedTensor):
        return tf.ragged.map_flat_values(
            lambda y: tf.raw_ops.Bucketize(input=y, boundaries=bins), x
        )
    elif isinstance(x, tf.SparseTensor):
        output = tf.SparseTensor(
            indices=tf.identity(x.indices),
            values=tf.raw_ops.Bucketize(input=x.values, boundaries=bins),
            dense_shape=tf.identity(x.dense_shape),
        )
        output.set_shape(x.shape)
        return output
    return tf.raw_ops.Bucketize(input=x, boundaries=bins)


def dot(x, y):
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    result_dtype = dtypes.result_type(x.dtype, y.dtype)
    # GPU only supports float types
    compute_dtype = dtypes.result_type(result_dtype, float)
    x = tf.cast(x, compute_dtype)
    y = tf.cast(y, compute_dtype)

    x_shape = x.shape
    y_shape = y.shape
    if x_shape.rank == 0 or y_shape.rank == 0:
        output = x * y
    elif y_shape.rank == 1:
        output = tf.tensordot(x, y, axes=[[-1], [-1]])
    else:
        output = tf.tensordot(x, y, axes=[[-1], [-2]])
    return tf.cast(output, result_dtype)


def empty(shape, dtype=None):
    dtype = dtype or config.floatx()
    return tf.zeros(shape, dtype=dtype)


def equal(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tf.equal(x1, x2)


@sparse.densifying_unary(1)
def exp(x):
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = tf.cast(x, config.floatx())
    return tf.exp(x)


def expand_dims(x, axis):
    if isinstance(x, tf.SparseTensor):
        from keras.ops.operation_utils import compute_expand_dims_output_shape

        output = tf.sparse.expand_dims(x, axis)
        output.set_shape(compute_expand_dims_output_shape(x.shape, axis))
        return output
    return tf.expand_dims(x, axis)


@sparse.elementwise_unary
def expm1(x):
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = tf.cast(x, config.floatx())
    return tf.math.expm1(x)


def flip(x, axis=None):
    x = convert_to_tensor(x)
    if axis is None:
        return tf.reverse(x, tf.range(tf.rank(x)))
    return tf.reverse(x, [axis])


@sparse.elementwise_unary
def floor(x):
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    x = tf.cast(x, dtype)
    return tf.floor(x)


def full(shape, fill_value, dtype=None):
    dtype = dtype or config.floatx()
    fill_value = convert_to_tensor(fill_value, dtype)
    return tf.broadcast_to(fill_value, shape)


def full_like(x, fill_value, dtype=None):
    x = convert_to_tensor(x)
    dtype = dtypes.result_type(dtype or x.dtype)
    fill_value = convert_to_tensor(fill_value, dtype)
    return tf.broadcast_to(fill_value, tf.shape(x))


def greater(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tf.greater(x1, x2)


def greater_equal(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tf.greater_equal(x1, x2)


def hstack(xs):
    dtype_set = set([getattr(x, "dtype", type(x)) for x in xs])
    if len(dtype_set) > 1:
        dtype = dtypes.result_type(*dtype_set)
        xs = tf.nest.map_structure(lambda x: convert_to_tensor(x, dtype), xs)
    rank = tf.rank(xs[0])
    return tf.cond(
        tf.equal(rank, 1),
        lambda: tf.concat(xs, axis=0),
        lambda: tf.concat(xs, axis=1),
    )


def identity(n, dtype=None):
    return eye(N=n, M=n, dtype=dtype)


@sparse.elementwise_unary
def imag(x):
    return tf.math.imag(x)


def isclose(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    if "float" in dtype:
        # atol defaults to 1e-08
        # rtol defaults to 1e-05
        return tf.abs(x1 - x2) <= (1e-08 + 1e-05 * tf.abs(x2))
    else:
        return tf.equal(x1, x2)


@sparse.densifying_unary(True)
def isfinite(x):
    # `tfnp.isfinite` requires `enable_numpy_behavior`, so we reimplement it.
    x = convert_to_tensor(x)
    dtype_as_dtype = tf.as_dtype(x.dtype)
    if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric:
        return tf.ones(x.shape, tf.bool)
    return tf.math.is_finite(x)


def isinf(x):
    # `tfnp.isinf` requires `enable_numpy_behavior`, so we reimplement it.
    x = convert_to_tensor(x)
    dtype_as_dtype = tf.as_dtype(x.dtype)
    if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric:
        return tf.zeros(x.shape, tf.bool)
    return tf.math.is_inf(x)


def isnan(x):
    # `tfnp.isnan` requires `enable_numpy_behavior`, so we reimplement it.
    x = convert_to_tensor(x)
    dtype_as_dtype = tf.as_dtype(x.dtype)
    if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric:
        return tf.zeros(x.shape, tf.bool)
    return tf.math.is_nan(x)


def less(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tf.less(x1, x2)


def less_equal(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tf.less_equal(x1, x2)


def linspace(
    start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
    if dtype is None:
        dtypes_to_resolve = [
            getattr(start, "dtype", type(start)),
            getattr(stop, "dtype", type(stop)),
            float,
        ]
        dtype = dtypes.result_type(*dtypes_to_resolve)
    return tfnp.linspace(
        start,
        stop,
        num=num,
        endpoint=endpoint,
        retstep=retstep,
        dtype=dtype,
        axis=axis,
    )


@sparse.densifying_unary(-np.inf)
def log(x):
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    x = tf.cast(x, dtype)
    return tf.math.log(x)


@sparse.densifying_unary(-np.inf)
def log10(x):
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    x = tf.cast(x, dtype)
    return tf.math.log(x) / tf.math.log(tf.constant(10, x.dtype))


@sparse.elementwise_unary
def log1p(x):
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    x = tf.cast(x, dtype)
    return tf.math.log1p(x)


@sparse.densifying_unary(-np.inf)
def log2(x):
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    x = tf.cast(x, dtype)
    return tf.math.log(x) / tf.math.log(tf.constant(2, x.dtype))


def logaddexp(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)

    # Below is the same implementation as tfnp.logaddexp using all native
    # ops to prevent incorrect promotion of bfloat16.
    delta = x1 - x2
    return tf.where(
        tf.math.is_nan(delta),
        x1 + x2,
        tf.maximum(x1, x2) + tf.math.log1p(tf.math.exp(-tf.abs(delta))),
    )


def logical_and(x1, x2):
    x1 = tf.cast(x1, "bool")
    x2 = tf.cast(x2, "bool")
    return tf.logical_and(x1, x2)


def logical_not(x):
    x = tf.cast(x, "bool")
    return tf.logical_not(x)


def logical_or(x1, x2):
    x1 = tf.cast(x1, "bool")
    x2 = tf.cast(x2, "bool")
    return tf.logical_or(x1, x2)


def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
    if dtype is None:
        dtypes_to_resolve = [
            getattr(start, "dtype", type(start)),
            getattr(stop, "dtype", type(stop)),
            float,
        ]
        dtype = dtypes.result_type(*dtypes_to_resolve)
    start = tf.cast(start, dtype)
    stop = tf.cast(stop, dtype)
    return tfnp.logspace(
        start,
        stop,
        num=num,
        endpoint=endpoint,
        base=base,
        dtype=dtype,
        axis=axis,
    )


@sparse.elementwise_binary_union(tf.sparse.maximum, densify_mixed=True)
def maximum(x1, x2):
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return tf.maximum(x1, x2)


def median(x, axis=None, keepdims=False):
    return quantile(x, 0.5, axis=axis, keepdims=keepdims)


def meshgrid(*x, indexing="xy"):
    return tf.meshgrid(*x, indexing=indexing)


def min(x, axis=None, keepdims=False, initial=None):
    x = convert_to_tensor(x)

    # The TensorFlow numpy API implementation doesn't support `initial` so we
    # handle it manually here.
    if initial is not None:
        return tf.math.minimum(
            tfnp.min(x, axis=axis, keepdims=keepdims), initial
        )

    # TensorFlow returns inf by default for an empty list, but for
    # consistency with other backends and the numpy API we want to throw in
    # this case.
    if tf.executing_eagerly():
        size_x = size(x)
        tf.assert_greater(
            size_x,
            tf.constant(0, dtype=size_x.dtype),
            message="Cannot compute the min of an empty tensor.",
        )

    return tfnp.min(x, axis=axis, keepdims=keepdims)


@sparse.elementwise_binary_union(tf.sparse.minimum, densify_mixed=True)
def minimum(x1, x2):
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return tf.minimum(x1, x2)


def mod(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    if dtype == "bool":
        dtype = "int32"
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tf.math.mod(x1, x2)


def moveaxis(x, source, destination):
    return tfnp.moveaxis(x, source=source, destination=destination)


def nan_to_num(x):
    x = convert_to_tensor(x)
    dtype = x.dtype
    dtype_as_dtype = tf.as_dtype(dtype)
    if dtype_as_dtype.is_integer or not dtype_as_dtype.is_numeric:
        return x

    # Replace NaN with 0
    x = tf.where(tf.math.is_nan(x), tf.constant(0, dtype), x)

    # Replace positive infinity with dtype.max
    x = tf.where(tf.math.is_inf(x) & (x > 0), tf.constant(dtype.max, dtype), x)

    # Replace negative infinity with dtype.min
    x = tf.where(tf.math.is_inf(x) & (x < 0), tf.constant(dtype.min, dtype), x)

    return x


def ndim(x):
    x = convert_to_tensor(x)
    return x.ndim


def nonzero(x):
    x = convert_to_tensor(x)
    result = tf.unstack(tf.where(tf.cast(x, "bool")), x.shape.rank, axis=1)
    return tf.nest.map_structure(
        lambda indices: tf.cast(indices, "int32"), result
    )


def not_equal(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tf.not_equal(x1, x2)


def ones_like(x, dtype=None):
    return tf.ones_like(x, dtype=dtype)


def zeros_like(x, dtype=None):
    return tf.zeros_like(x, dtype=dtype)


def outer(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    return tf.reshape(x1, [-1, 1]) * tf.reshape(x2, [-1])


def pad(x, pad_width, mode="constant", constant_values=None):
    x = convert_to_tensor(x)
    kwargs = {}
    if constant_values is not None:
        if mode != "constant":
            raise ValueError(
                "Argument `constant_values` can only be "
                "provided when `mode == 'constant'`. "
                f"Received: mode={mode}"
            )
        kwargs["constant_values"] = constant_values
    pad_width = convert_to_tensor(pad_width, "int32")
    return tf.pad(x, pad_width, mode.upper(), **kwargs)


def prod(x, axis=None, keepdims=False, dtype=None):
    x = convert_to_tensor(x)
    if dtype is None:
        dtype = dtypes.result_type(x.dtype)
        if dtype == "bool":
            dtype = "int32"
        elif dtype in ("int8", "int16"):
            dtype = "int32"
        elif dtype in ("uint8", "uint16"):
            dtype = "uint32"
        x = tf.cast(x, dtype)
    return tf.reduce_prod(x, axis=axis, keepdims=keepdims)


def _quantile(x, q, axis=None, method="linear", keepdims=False):
    # ref: tfp.stats.percentile
    # float64 is needed here and below, else we get the wrong index if the
    # array is huge along axis.
    q = tf.cast(q, "float64")

    # Move `axis` dims of `x` to the rightmost, call it `y`.
    if axis is None:
        y = tf.reshape(x, [-1])
    else:
        x_ndims = len(x.shape)

        # _make_static_axis_non_negative_list
        axis = list(map(lambda x: x if x >= 0 else x + x_ndims, axis))

        # _move_dims_to_flat_end
        other_dims = sorted(set(range(x_ndims)).difference(axis))
        perm = other_dims + list(axis)
        x_permed = tf.transpose(a=x, perm=perm)
        if None not in x.shape:
            x_shape = list(x.shape)
            other_shape = [x_shape[i] for i in other_dims]
            end_shape = [math.prod([x_shape[i] for i in axis])]
            full_shape = other_shape + end_shape
        else:
            other_shape = tf.gather(
                tf.shape(x), tf.cast(other_dims, tf.int64)
            )
            full_shape = tf.concat([other_shape, [-1]], axis=0)
        y = tf.reshape(x_permed, shape=full_shape)

    # Sort (in ascending order) everything which allows multiple calls to
    # sort only once (under the hood) and use CSE.
    sorted_y = tf.sort(y, axis=-1, direction="ASCENDING")

    d = tf.cast(tf.shape(y)[-1], "float64")

    def _get_indices(method):
        """Get values of y at the indices implied by method."""
        if method == "lower":
            indices = tf.math.floor((d - 1) * q)
        elif method == "higher":
            indices = tf.math.ceil((d - 1) * q)
        elif method == "nearest":
            indices = tf.round((d - 1) * q)
        # d - 1 will be distinct from d in int32, but not necessarily double.
        # So clip to avoid out of bounds errors.
        return tf.clip_by_value(
            tf.cast(indices, "int32"), 0, tf.shape(y)[-1] - 1
        )

    if method in ["nearest", "lower", "higher"]:
        gathered_y = tf.gather(sorted_y, _get_indices(method), axis=-1)
    elif method == "midpoint":
        gathered_y = 0.5 * (
            tf.gather(sorted_y, _get_indices("lower"), axis=-1)
            + tf.gather(sorted_y, _get_indices("higher"), axis=-1)
        )
    elif method == "linear":
        larger_y_idx = _get_indices("higher")
        exact_idx = (d - 1) * q
        # preserve_gradients
        smaller_y_idx = tf.maximum(larger_y_idx - 1, 0)
        larger_y_idx = tf.minimum(smaller_y_idx + 1, tf.shape(y)[-1] - 1)
        fraction = tf.cast(larger_y_idx, tf.float64) - exact_idx
        fraction = tf.cast(fraction, y.dtype)
        gathered_y = (
            tf.gather(sorted_y, larger_y_idx, axis=-1) * (1 - fraction)
            + tf.gather(sorted_y, smaller_y_idx, axis=-1) * fraction
        )

    # Propagate NaNs
    if x.dtype in (tf.bfloat16, tf.float16, tf.float32, tf.float64):
        # Apparently tf.is_nan doesn't like other dtypes
        nan_batch_members = tf.reduce_any(tf.math.is_nan(x), axis=axis)
        right_rank_matched_shape = tf.pad(
            tf.shape(nan_batch_members),
            paddings=[[0, tf.rank(q)]],
            constant_values=1,
        )
        nan_batch_members = tf.reshape(
            nan_batch_members, shape=right_rank_matched_shape
        )
        gathered_y = tf.where(nan_batch_members, float("NaN"), gathered_y)

    # Expand dimensions if requested
    if keepdims:
        if axis is None:
            ones_vec = tf.ones(
                shape=[tf.rank(x) + tf.rank(q)], dtype="int32"
            )
            gathered_y *= tf.ones(ones_vec, dtype=gathered_y.dtype)
        else:
            for i in sorted(axis):
                gathered_y = tf.expand_dims(gathered_y, axis=i)

    # rotate_transpose
    shift_value_static = tf.get_static_value(tf.rank(q))
    ndims = tf.TensorShape(gathered_y.shape).rank
    if ndims < 2:
        return gathered_y
    shift_value_static = int(
        math.copysign(1, shift_value_static)
        * (builtins.abs(shift_value_static) % ndims)
    )
    if shift_value_static == 0:
        return gathered_y
    perm = collections.deque(range(ndims))
    perm.rotate(shift_value_static)
    return tf.transpose(a=gathered_y, perm=perm)


def quantile(x, q, axis=None, method="linear", keepdims=False):
    if isinstance(axis, int):
        axis = [axis]

    x = convert_to_tensor(x)
    q = convert_to_tensor(q)
    compute_dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, compute_dtype)
    return _quantile(x, q, axis=axis, method=method, keepdims=keepdims)


def ravel(x):
    x = convert_to_tensor(x)
    return tf.reshape(x, [-1])


@sparse.elementwise_unary
def real(x):
    x = convert_to_tensor(x)
    return tf.math.real(x)


@sparse.densifying_unary(np.inf)
def reciprocal(x):
    x = convert_to_tensor(x)
    return tf.math.reciprocal(x)


def repeat(x, repeats, axis=None):
    # tfnp.repeat has trouble with dynamic Tensors in compiled function.
    # tf.repeat does not.
    x = convert_to_tensor(x)
    # TODO: tf.repeat doesn't support uint16
    if standardize_dtype(x.dtype) == "uint16":
        x = tf.cast(x, "uint32")
        return tf.cast(tf.repeat(x, repeats, axis=axis), "uint16")
    return tf.repeat(x, repeats, axis=axis)


def reshape(x, newshape):
    x = convert_to_tensor(x)
    if isinstance(x, tf.SparseTensor):
        from keras.ops.operation_utils import compute_reshape_output_shape

        output_shape = compute_reshape_output_shape(
            x.shape, newshape, "newshape"
        )
        output = tf.sparse.reshape(x, newshape)
        output.set_shape(output_shape)
        return output
    return tf.reshape(x, newshape)


def roll(x, shift, axis=None):
    return tfnp.roll(x, shift, axis=axis)


@sparse.elementwise_unary
def sign(x):
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    # TODO: tf.sign doesn't support uint8, uint16, uint32
    if ori_dtype in ("uint8", "uint16", "uint32"):
        x = tf.cast(x, "int32")
        return tf.cast(tf.sign(x), ori_dtype)
    return tf.sign(x)


@sparse.elementwise_unary
def sin(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.sin(x)


@sparse.elementwise_unary
def sinh(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.sinh(x)


def size(x):
    x = convert_to_tensor(x)
    return tf.size(x)


def sort(x, axis=-1):
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    # TODO: tf.sort doesn't support bool
    if ori_dtype == "bool":
        x = tf.cast(x, "int8")
        return tf.cast(tf.sort(x, axis=axis), ori_dtype)
    return tf.sort(x, axis=axis)


def split(x, indices_or_sections, axis=0):
    if not isinstance(indices_or_sections, int):
        # `tf.split` requires `num_or_size_splits`, so we need to convert
        # `indices_or_sections` to the appropriate format.
        # The following implementation offers better compatibility for the
        # tensor argument `indices_or_sections` than original `tfnp.split`.
        total_size = x.shape[axis]
        indices_or_sections = convert_to_tensor(indices_or_sections)
        start_size = indices_or_sections[0:1]
        end_size = total_size - indices_or_sections[-1:]
        num_or_size_splits = tf.concat(
            [start_size, tfnp.diff(indices_or_sections), end_size], axis=0
        )
    else:
        num_or_size_splits = indices_or_sections
    return tf.split(x, num_or_size_splits, axis=axis)


def stack(x, axis=0):
    dtype_set = set([getattr(a, "dtype", type(a)) for a in x])
    if len(dtype_set) > 1:
        dtype = dtypes.result_type(*dtype_set)
        x = tf.nest.map_structure(lambda a: convert_to_tensor(a, dtype), x)
    return tf.stack(x, axis=axis)


def std(x, axis=None, keepdims=False):
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = tf.cast(x, config.floatx())
    return tf.math.reduce_std(x, axis=axis, keepdims=keepdims)


def swapaxes(x, axis1, axis2):
    return tfnp.swapaxes(x, axis1=axis1, axis2=axis2)


def take(x, indices, axis=None):
    if isinstance(indices, tf.SparseTensor):
        if x.dtype not in (tf.float16, tf.float32, tf.float64, tf.bfloat16):
            warnings.warn(
                "`take` with the TensorFlow backend does not support "
                f"`x.dtype={x.dtype}` when `indices` is a sparse tensor; "
                "densifying `indices`."
            )
            return tfnp.take(
                x, convert_to_tensor(indices, sparse=False), axis=axis
            )
        if axis is None:
            x = tf.reshape(x, (-1,))
        elif axis != 0:
            warnings.warn(
                "`take` with the TensorFlow backend does not support "
                f"`axis={axis}` when `indices` is a sparse tensor; "
                "densifying `indices`."
            )
            return tfnp.take(
                x, convert_to_tensor(indices, sparse=False), axis=axis
            )
        output = tf.nn.safe_embedding_lookup_sparse(
            embedding_weights=tf.convert_to_tensor(x),
            sparse_ids=tf.sparse.expand_dims(indices, axis=-1),
            default_id=0,
        )
        output.set_shape(indices.shape + output.shape[len(indices.shape) :])
        return output
    return tfnp.take(x, indices, axis=axis)


def take_along_axis(x, indices, axis=None):
    return tfnp.take_along_axis(x, indices, axis=axis)


@sparse.elementwise_unary
def tan(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.tan(x)


@sparse.elementwise_unary
def tanh(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, dtype)
    return tf.math.tanh(x)


def tensordot(x1, x2, axes=2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
    # TODO: tf.tensordot only supports float types
    compute_dtype = dtypes.result_type(result_dtype, float)
    x1 = tf.cast(x1, compute_dtype)
    x2 = tf.cast(x2, compute_dtype)
    return tf.cast(tf.tensordot(x1, x2, axes=axes), dtype=result_dtype)


@sparse.elementwise_unary
def round(x, decimals=0):
    # `tfnp.round` requires `enable_numpy_behavior`, so we reimplement it.
    if decimals == 0:
        return tf.round(x)
    x_dtype = x.dtype
    if tf.as_dtype(x_dtype).is_integer:
        # int
        if decimals > 0:
            return x
        # temporarily convert to floats
        factor = tf.cast(math.pow(10, decimals), config.floatx())
        x = tf.cast(x, config.floatx())
    else:
        # float
        factor = tf.cast(math.pow(10, decimals), x.dtype)
    x = tf.multiply(x, factor)
    x = tf.round(x)
    x = tf.divide(x, factor)
    return tf.cast(x, x_dtype)


def tile(x, repeats):
    # The TFNP implementation is buggy, we roll our own.
    x = convert_to_tensor(x)
    repeats = tf.reshape(convert_to_tensor(repeats, dtype="int32"), [-1])
    repeats_size = tf.size(repeats)
    repeats = tf.pad(
        repeats,
        [[tf.maximum(x.shape.rank - repeats_size, 0), 0]],
        constant_values=1,
    )
    x_shape = tf.pad(
        tf.shape(x),
        [[tf.maximum(repeats_size - x.shape.rank, 0), 0]],
        constant_values=1,
    )
    x = tf.reshape(x, x_shape)
    return tf.tile(x, repeats)


def trace(x, offset=0, axis1=0, axis2=1):
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    if dtype not in ("int64", "uint32", "uint64"):
        dtype = dtypes.result_type(dtype, "int32")
    return tfnp.trace(x, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)


def tri(N, M=None, k=0, dtype=None):
    dtype = dtype or config.floatx()
    return tfnp.tri(N, M=M, k=k, dtype=dtype)


def tril(x, k=0):
    x = convert_to_tensor(x)
    if k >= 0:
        return tf.linalg.band_part(x, -1, k)

    shape = tf.shape(x)
    rows, cols = shape[-2], shape[-1]

    i, j = tf.meshgrid(tf.range(rows), tf.range(cols), indexing="ij")
    mask = i >= j - k

    return tf.where(tf.broadcast_to(mask, shape), x, tf.zeros_like(x))


def triu(x, k=0):
    x = convert_to_tensor(x)
    if k <= 0:
        return tf.linalg.band_part(x, -k, -1)

    shape = tf.shape(x)
    rows, cols = shape[-2], shape[-1]

    i, j = tf.meshgrid(tf.range(rows), tf.range(cols), indexing="ij")
    mask = i <= j - k

    return tf.where(tf.broadcast_to(mask, shape), x, tf.zeros_like(x))


def vdot(x1, x2):
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
    compute_dtype = dtypes.result_type(result_dtype, float)
    x1 = tf.cast(x1, compute_dtype)
    x2 = tf.cast(x2, compute_dtype)
    x1 = tf.reshape(x1, [-1])
    x2 = tf.reshape(x2, [-1])
    return tf.cast(dot(x1, x2), result_dtype)


def vstack(xs):
    dtype_set = set([getattr(x, "dtype", type(x)) for x in xs])
    if len(dtype_set) > 1:
        dtype = dtypes.result_type(*dtype_set)
        xs = tf.nest.map_structure(lambda x: convert_to_tensor(x, dtype), xs)
    return tf.concat(xs, axis=0)


def where(condition, x1, x2):
    condition = tf.cast(condition, "bool")
    if x1 is not None and x2 is not None:
        if not isinstance(x1, (int, float)):
            x1 = convert_to_tensor(x1)
        if not isinstance(x2, (int, float)):
            x2 = convert_to_tensor(x2)
        dtype = dtypes.result_type(
            getattr(x1, "dtype", type(x1)),
            getattr(x2, "dtype", type(x2)),
        )
        x1 = convert_to_tensor(x1, dtype)
        x2 = convert_to_tensor(x2, dtype)
        return tf.where(condition, x1, x2)
    if x1 is None and x2 is None:
        return nonzero(condition)
    raise ValueError(
        "`x1` and `x2` either both should be `None`"
        " or both should have non-None value."
    )


@sparse.elementwise_division
def divide(x1, x2):
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
        float,
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return tf.divide(x1, x2)


def divide_no_nan(x1, x2):
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
        float,
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return tf.math.divide_no_nan(x1, x2)


def true_divide(x1, x2):
    return divide(x1, x2)


def power(x1, x2):
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    # TODO: tf.pow doesn't support uint* types
    if "uint" in dtype:
        x1 = convert_to_tensor(x1, "int32")
        x2 = convert_to_tensor(x2, "int32")
        return tf.cast(tf.pow(x1, x2), dtype)
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return tf.pow(x1, x2)


@sparse.elementwise_unary
def negative(x):
    return tf.negative(x)


@sparse.elementwise_unary
def square(x):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "bool":
        x = tf.cast(x, "int32")
    return tf.square(x)


@sparse.elementwise_unary
def sqrt(x):
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    x = tf.cast(x, dtype)
    return tf.math.sqrt(x)


def squeeze(x, axis=None):
    if isinstance(x, tf.SparseTensor):
        static_shape = x.shape.as_list()
        if axis is not None:
            if static_shape[axis] != 1:
                raise ValueError(
                    f"Cannot squeeze axis {axis}, because the "
                    "dimension is not 1."
                )
            if axis < 0:
                axis += len(static_shape)
        dynamic_shape = tf.shape(x)
        new_shape = []
        gather_indices = []
        for i, dim in enumerate(static_shape):
            if not (dim == 1 if axis is None else i == axis):
                new_shape.append(dim if dim is not None else dynamic_shape[i])
                gather_indices.append(i)
        new_indices = tf.gather(x.indices, gather_indices, axis=1)
        return tf.SparseTensor(new_indices, x.values, tuple(new_shape))
    return tf.squeeze(x, axis=axis)


def transpose(x, axes=None):
    if isinstance(x, tf.SparseTensor):
        from keras.ops.operation_utils import compute_transpose_output_shape

        output = tf.sparse.transpose(x, perm=axes)
        output.set_shape(compute_transpose_output_shape(x.shape, axes))
        return output
    return tf.transpose(x, perm=axes)


def var(x, axis=None, keepdims=False):
    x = convert_to_tensor(x)
    compute_dtype = dtypes.result_type(x.dtype, "float32")
    result_dtype = dtypes.result_type(x.dtype, float)
    x = tf.cast(x, compute_dtype)
    return tf.cast(
        tf.math.reduce_variance(x, axis=axis, keepdims=keepdims),
        result_dtype,
    )


def sum(x, axis=None, keepdims=False):
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    # follow jax's rule
    if dtype in ("bool", "int8", "int16"):
        dtype = "int32"
    elif dtype in ("uint8", "uint16"):
        dtype = "uint32"
    x = tf.cast(x, dtype)
    return tf.reduce_sum(x, axis=axis, keepdims=keepdims)


def eye(N, M=None, k=0, dtype=None):
    dtype = dtype or config.floatx()
    return tfnp.eye(N, M=M, k=k, dtype=dtype)


def floor_divide(x1, x2):
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return tf.math.floordiv(x1, x2)


def logical_xor(x1, x2):
    x1 = tf.cast(x1, "bool")
    x2 = tf.cast(x2, "bool")
    return tf.math.logical_xor(x1, x2)
keras/keras/backend/tensorflow/numpy.py/0
{ "file_path": "keras/keras/backend/tensorflow/numpy.py", "repo_id": "keras", "token_count": 27224 }
139
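A quick sanity sketch for the `quantile` wrapper above, assuming TensorFlow is installed and the repo is importable at its in-repo path; the alias `knp_tf` is mine. It checks that the default "linear" method matches numpy's default interpolation.

import numpy as np

from keras.backend.tensorflow import numpy as knp_tf

x = np.random.rand(3, 4).astype("float32")
# Median along the last axis; "linear" matches numpy's default.
q = knp_tf.quantile(x, 0.5, axis=1)
np.testing.assert_allclose(q, np.quantile(x, 0.5, axis=1), atol=1e-6)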
import torch

from keras.backend import config
from keras.backend import standardize_dtype
from keras.backend.common import dtypes
from keras.backend.torch.core import cast
from keras.backend.torch.core import convert_to_tensor


def cholesky(x):
    return torch.cholesky(x)


def det(x):
    return torch.det(x)


def eig(x):
    return torch.linalg.eig(x)


def inv(x):
    return torch.linalg.inv(x)


def lu_factor(x):
    LU, pivots = torch.linalg.lu_factor(x)
    # torch returns pivots with 1-based indexing
    return LU, pivots - 1


def norm(x, ord=None, axis=None, keepdims=False):
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = cast(x, dtype)
    return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)


def qr(x, mode="reduced"):
    if mode not in {"reduced", "complete"}:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    return torch.linalg.qr(x, mode=mode)


def solve(a, b):
    return torch.linalg.solve(a, b)


def solve_triangular(a, b, lower=False):
    if b.ndim == a.ndim - 1:
        b = torch.unsqueeze(b, axis=-1)
        return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(
            axis=-1
        )
    return torch.linalg.solve_triangular(a, b, upper=not lower)


def svd(x, full_matrices=True, compute_uv=True):
    if not compute_uv:
        raise NotImplementedError(
            "`compute_uv=False` is not supported for torch backend."
        )
    return torch.linalg.svd(x, full_matrices=full_matrices)
keras/keras/backend/torch/linalg.py/0
{ "file_path": "keras/keras/backend/torch/linalg.py", "repo_id": "keras", "token_count": 779 }
140
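A pure-torch sketch of the pivot convention that `lu_factor` above corrects for: torch follows the LAPACK 1-based convention, so subtracting 1 yields the 0-based indices used elsewhere in Keras. The matrix here is arbitrary test data.

import torch

a = torch.randn(4, 4)
lu, pivots = torch.linalg.lu_factor(a)
# LAPACK-style pivots from torch are 1-based...
assert int(pivots.min()) >= 1 and int(pivots.max()) <= 4
# ...so subtracting 1, as the wrapper does, gives 0-based indices.
assert int((pivots - 1).min()) >= 0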
import torch
import torch._dynamo as dynamo
import torch.nn.functional as tnn

from keras.backend.config import floatx
from keras.backend.torch.core import convert_to_tensor
from keras.backend.torch.core import get_device
from keras.backend.torch.core import to_torch_dtype
from keras.random.seed_generator import SeedGenerator
from keras.random.seed_generator import draw_seed
from keras.random.seed_generator import make_default_seed


# torch.Generator not supported with dynamo
# see: https://github.com/pytorch/pytorch/issues/88576
@dynamo.disable()
def torch_seed_generator(seed):
    first_seed, second_seed = draw_seed(seed)
    device = get_device()
    if device == "meta":
        # Generator is not supported by the meta device.
        return None
    generator = torch.Generator(device=get_device())
    generator.manual_seed(int(first_seed + second_seed))
    return generator


def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    dtype = dtype or floatx()
    dtype = to_torch_dtype(dtype)
    # Do not use generator during symbolic execution.
    if get_device() == "meta":
        return torch.normal(
            mean, stddev, size=shape, dtype=dtype, device=get_device()
        )
    generator = torch_seed_generator(seed)
    return torch.normal(
        mean,
        stddev,
        size=shape,
        generator=generator,
        dtype=dtype,
        device=get_device(),
    )


def categorical(logits, num_samples, dtype="int32", seed=None):
    logits = convert_to_tensor(logits)
    dtype = to_torch_dtype(dtype)
    probs = torch.softmax(logits, dim=-1)
    # Do not use generator during symbolic execution.
    if get_device() == "meta":
        return torch.multinomial(
            probs,
            num_samples,
            replacement=True,
        ).type(dtype)
    generator = torch_seed_generator(seed)
    return torch.multinomial(
        probs,
        num_samples,
        replacement=True,
        generator=generator,
    ).type(dtype)


def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    dtype = dtype or floatx()
    dtype = to_torch_dtype(dtype)
    requested_shape = shape
    if len(requested_shape) == 0:
        shape = (1,)
    # Do not use generator during symbolic execution.
    if get_device() == "meta":
        rand_tensor = torch.rand(size=shape, dtype=dtype, device=get_device())
    else:
        generator = torch_seed_generator(seed)
        rand_tensor = torch.rand(
            size=shape, generator=generator, dtype=dtype, device=get_device()
        )
    output = (maxval - minval) * rand_tensor + minval
    if len(requested_shape) == 0:
        return output[0]
    return output


def randint(shape, minval, maxval, dtype="int32", seed=None):
    dtype = to_torch_dtype(dtype)
    # Do not use generator during symbolic execution.
    if get_device() == "meta":
        return torch.randint(
            low=minval,
            high=maxval,
            size=shape,
            dtype=dtype,
            device=get_device(),
        )
    generator = torch_seed_generator(seed)
    return torch.randint(
        low=minval,
        high=maxval,
        size=shape,
        generator=generator,
        dtype=dtype,
        device=get_device(),
    )


def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    # Take a larger standard normal dist, discard values outside 2 * stddev
    # Offset by mean and stddev
    x = normal(tuple(shape) + (4,), mean=0, stddev=1, dtype=dtype, seed=seed)
    valid = (x > -2) & (x < 2)
    indexes = valid.max(-1, keepdim=True)[1]
    trunc_x = torch.empty(shape, device=get_device())
    trunc_x.data.copy_(x.gather(-1, indexes).squeeze(-1))
    trunc_x.data.mul_(stddev).add_(mean)
    return trunc_x


def _get_concrete_noise_shape(inputs, noise_shape):
    if noise_shape is None:
        return inputs.shape

    concrete_inputs_shape = inputs.shape
    concrete_noise_shape = []
    for i, value in enumerate(noise_shape):
        concrete_noise_shape.append(
            concrete_inputs_shape[i] if value is None else value
        )
    return concrete_noise_shape


def dropout(inputs, rate, noise_shape=None, seed=None):
    if (
        seed is not None
        and not (
            isinstance(seed, SeedGenerator) and seed._initial_seed is None
        )
        or noise_shape is not None
    ):
        keep_prob = 1.0 - rate
        noise_shape = _get_concrete_noise_shape(inputs, noise_shape)
        keep_prob_matrix = torch.full(
            noise_shape, keep_prob, device=get_device()
        )
        generator = torch_seed_generator(seed)

        # Do not use generator during symbolic execution.
        if get_device() == "meta":
            mask = torch.bernoulli(keep_prob_matrix)
        else:
            mask = torch.bernoulli(keep_prob_matrix, generator=generator)

        mask = mask.bool()
        mask = torch.broadcast_to(mask, inputs.shape)
        return torch.where(
            mask,
            inputs / keep_prob,
            torch.zeros_like(inputs, dtype=inputs.dtype),
        )
    # Fast path, unseeded (since torch doesn't support seeding dropout!!!!)
    # Using the above implementation is possible, but much slower.
    return torch.nn.functional.dropout(
        inputs, p=rate, training=True, inplace=False
    )


def shuffle(x, axis=0, seed=None):
    # Ref: https://github.com/pytorch/pytorch/issues/71409
    x = convert_to_tensor(x)

    # Get permutation indices
    # Do not use generator during symbolic execution.
    if get_device() == "meta":
        row_perm = torch.rand(
            x.shape[: axis + 1], device=get_device()
        ).argsort(axis)
    else:
        generator = torch_seed_generator(seed)
        row_perm = torch.rand(
            x.shape[: axis + 1], generator=generator, device=get_device()
        ).argsort(axis)
    for _ in range(x.ndim - axis - 1):
        row_perm.unsqueeze_(-1)

    # Reformat this for the gather operation
    row_perm = row_perm.repeat(
        *[1 for _ in range(axis + 1)], *(x.shape[axis + 1 :])
    )

    return x.gather(axis, row_perm)


def gamma(shape, alpha, dtype=None, seed=None):
    dtype = dtype or floatx()
    dtype = to_torch_dtype(dtype)
    alpha = torch.broadcast_to(convert_to_tensor(alpha), shape)
    beta = torch.ones(shape, device=get_device())
    prev_rng_state = torch.random.get_rng_state()
    first_seed, second_seed = draw_seed(seed)
    torch.manual_seed(first_seed + second_seed)
    gamma_distribution = torch.distributions.gamma.Gamma(alpha, beta)
    sample = gamma_distribution.sample().type(dtype)
    torch.random.set_rng_state(prev_rng_state)
    return sample


def binomial(shape, counts, probabilities, dtype=None, seed=None):
    dtype = dtype or floatx()
    dtype = to_torch_dtype(dtype)
    counts = torch.broadcast_to(convert_to_tensor(counts), shape)
    probabilities = torch.broadcast_to(
        convert_to_tensor(probabilities), shape
    )
    prev_rng_state = torch.random.get_rng_state()
    first_seed, second_seed = draw_seed(seed)
    torch.manual_seed(first_seed + second_seed)
    binomial_distribution = torch.distributions.binomial.Binomial(
        total_count=counts, probs=probabilities
    )
    sample = binomial_distribution.sample().type(dtype)
    torch.random.set_rng_state(prev_rng_state)
    return sample


def beta(shape, alpha, beta, dtype=None, seed=None):
    dtype = dtype or floatx()
    dtype = to_torch_dtype(dtype)
    alpha = torch.broadcast_to(convert_to_tensor(alpha), shape)
    beta = torch.broadcast_to(convert_to_tensor(beta), shape)
    prev_rng_state = torch.random.get_rng_state()
    first_seed, second_seed = draw_seed(seed)
    torch.manual_seed(first_seed + second_seed)
    beta_distribution = torch.distributions.beta.Beta(
        concentration1=alpha, concentration0=beta
    )
    sample = beta_distribution.sample().type(dtype)
    torch.random.set_rng_state(prev_rng_state)
    return sample
keras/keras/backend/torch/random.py/0
{ "file_path": "keras/keras/backend/torch/random.py", "repo_id": "keras", "token_count": 3362 }
141
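The `gamma`, `binomial`, and `beta` samplers above all use the same pattern: snapshot torch's global RNG state, seed it, draw from a `torch.distributions` object, then restore the state so the global stream is unaffected. A minimal standalone sketch of that pattern, simplified to a single integer seed (the backend actually combines two seeds from `draw_seed`); the helper name `seeded_gamma_sample` is ours, not part of the backend:

import torch


def seeded_gamma_sample(shape, alpha, seed):
    # Snapshot the global RNG state so seeding here stays side-effect free.
    prev_state = torch.random.get_rng_state()
    torch.manual_seed(seed)
    dist = torch.distributions.gamma.Gamma(
        torch.full(shape, float(alpha)), torch.ones(shape)
    )
    sample = dist.sample()
    torch.random.set_rng_state(prev_state)
    return sample


a = seeded_gamma_sample((2, 3), alpha=2.0, seed=42)
b = seeded_gamma_sample((2, 3), alpha=2.0, seed=42)
assert torch.equal(a, b)  # same seed, same draw; global RNG stream untouched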
import numpy as np import pytest from keras import testing from keras.layers.activations import elu class ELUTest(testing.TestCase): def test_config(self): elu_layer = elu.ELU() self.run_class_serialization_test(elu_layer) @pytest.mark.requires_trainable_backend def test_elu(self): self.run_layer_test( elu.ELU, init_kwargs={}, input_shape=(2, 3, 4), supports_masking=True, ) def test_correctness(self): def np_elu(x, alpha=1.0): return (x > 0) * x + (x <= 0) * alpha * (np.exp(x) - 1) x = np.random.random((2, 2, 5)) elu_layer = elu.ELU() self.assertAllClose(elu_layer(x), np_elu(x)) elu_layer = elu.ELU(alpha=0.7) self.assertAllClose(elu_layer(x), np_elu(x, alpha=0.7))
keras/keras/layers/activations/elu_test.py/0
{ "file_path": "keras/keras/layers/activations/elu_test.py", "repo_id": "keras", "token_count": 428 }
142
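The correctness check above can be reproduced outside the test harness. A minimal sketch comparing `keras.layers.ELU` against the same NumPy reference formula (the input values here are arbitrary):

import numpy as np

from keras import layers
from keras import ops

x = np.array([[-1.0, 0.0, 2.0]], dtype="float32")
out = layers.ELU(alpha=0.7)(x)
# ELU: identity for x > 0, alpha * (exp(x) - 1) for x <= 0.
ref = np.where(x > 0, x, 0.7 * (np.exp(x) - 1.0))
np.testing.assert_allclose(ops.convert_to_numpy(out), ref, rtol=1e-5)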
import numpy as np
import pytest
from absl.testing import parameterized

from keras import backend
from keras import layers
from keras import models
from keras import testing


def np_dot(a, b, axes):
    if isinstance(axes, int):
        axes = (axes, axes)
    axes = [axis if axis < 0 else axis - 1 for axis in axes]
    res = np.stack([np.tensordot(a[i], b[i], axes) for i in range(a.shape[0])])
    if len(res.shape) == 1:
        res = np.expand_dims(res, axis=1)
    return res


TEST_PARAMETERS = [
    {
        "testcase_name": "add",
        "layer_class": layers.Add,
        "np_op": np.add,
    },
    {
        "testcase_name": "subtract",
        "layer_class": layers.Subtract,
        "np_op": np.subtract,
    },
    {
        "testcase_name": "minimum",
        "layer_class": layers.Minimum,
        "np_op": np.minimum,
    },
    {
        "testcase_name": "maximum",
        "layer_class": layers.Maximum,
        "np_op": np.maximum,
    },
    {
        "testcase_name": "multiply",
        "layer_class": layers.Multiply,
        "np_op": np.multiply,
    },
    {
        "testcase_name": "average",
        "layer_class": layers.Average,
        "np_op": lambda a, b: np.multiply(np.add(a, b), 0.5),
    },
    {
        "testcase_name": "concat",
        "layer_class": layers.Concatenate,
        "np_op": lambda a, b, **kwargs: np.concatenate((a, b), **kwargs),
        "init_kwargs": {"axis": -1},
        "expected_output_shape": (2, 4, 10),
    },
    {
        "testcase_name": "dot_2d",
        "layer_class": layers.Dot,
        "np_op": np_dot,
        "init_kwargs": {"axes": -1},
        "input_shape": (2, 4),
        "expected_output_shape": (2, 1),
        "skip_mask_test": True,
    },
    {
        "testcase_name": "dot_3d",
        "layer_class": layers.Dot,
        "np_op": np_dot,
        "init_kwargs": {"axes": -1},
        "expected_output_shape": (2, 4, 4),
        "skip_mask_test": True,
    },
]


@pytest.mark.requires_trainable_backend
class MergingLayersTest(testing.TestCase, parameterized.TestCase):
    @parameterized.named_parameters(TEST_PARAMETERS)
    def test_basic(
        self,
        layer_class,
        init_kwargs={},
        input_shape=(2, 4, 5),
        expected_output_shape=(2, 4, 5),
        **kwargs,
    ):
        self.run_layer_test(
            layer_class,
            init_kwargs=init_kwargs,
            input_shape=(input_shape, input_shape),
            expected_output_shape=expected_output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )

    @parameterized.named_parameters(TEST_PARAMETERS)
    def test_correctness_static(
        self,
        layer_class,
        np_op,
        init_kwargs={},
        input_shape=(2, 4, 5),
        expected_output_shape=(2, 4, 5),
        skip_mask_test=False,
    ):
        batch_size = input_shape[0]
        shape = input_shape[1:]
        x1 = np.random.rand(*input_shape)
        x2 = np.random.rand(*input_shape)
        x3 = np_op(x1, x2, **init_kwargs)

        input_1 = layers.Input(shape=shape, batch_size=batch_size)
        input_2 = layers.Input(shape=shape, batch_size=batch_size)
        layer = layer_class(**init_kwargs)

        out = layer([input_1, input_2])
        model = models.Model([input_1, input_2], out)
        res = model([x1, x2])

        self.assertEqual(res.shape, expected_output_shape)
        self.assertAllClose(res, x3, atol=1e-4)
        self.assertIsNone(layer.compute_mask([input_1, input_2], [None, None]))
        if not skip_mask_test:
            self.assertTrue(
                np.all(
                    backend.convert_to_numpy(
                        layer.compute_mask(
                            [input_1, input_2],
                            [backend.Variable(x1), backend.Variable(x2)],
                        )
                    )
                )
            )

    @parameterized.named_parameters(TEST_PARAMETERS)
    def test_correctness_dynamic(
        self,
        layer_class,
        np_op,
        init_kwargs={},
        input_shape=(2, 4, 5),
        expected_output_shape=(2, 4, 5),
        skip_mask_test=False,
    ):
        shape = input_shape[1:]
        x1 = np.random.rand(*input_shape)
        x2 = np.random.rand(*input_shape)
        x3 = np_op(x1, x2, **init_kwargs)

        input_1 = layers.Input(shape=shape)
        input_2 = layers.Input(shape=shape)
        layer = layer_class(**init_kwargs)

        out = layer([input_1, input_2])
        model = models.Model([input_1, input_2], out)
        res = model([x1, x2])

        self.assertEqual(res.shape, expected_output_shape)
        self.assertAllClose(res, x3, atol=1e-4)
        self.assertIsNone(layer.compute_mask([input_1, input_2], [None, None]))
        if not skip_mask_test:
            self.assertTrue(
                np.all(
                    backend.convert_to_numpy(
                        layer.compute_mask(
                            [input_1, input_2],
                            [backend.Variable(x1), backend.Variable(x2)],
                        )
                    )
                )
            )

    @parameterized.named_parameters(TEST_PARAMETERS)
    def test_errors(
        self,
        layer_class,
        init_kwargs={},
        input_shape=(2, 4, 5),
        skip_mask_test=False,
        **kwargs,
    ):
        if skip_mask_test:
            pytest.skip("Masking not supported")

        batch_size = input_shape[0]
        shape = input_shape[1:]
        x1 = np.random.rand(*input_shape)

        input_1 = layers.Input(shape=shape, batch_size=batch_size)
        input_2 = layers.Input(shape=shape, batch_size=batch_size)
        layer = layer_class(**init_kwargs)

        with self.assertRaisesRegex(ValueError, "`mask` should be a list."):
            layer.compute_mask([input_1, input_2], x1)

        with self.assertRaisesRegex(ValueError, "`inputs` should be a list."):
            layer.compute_mask(input_1, [None, None])

        with self.assertRaisesRegex(
            ValueError, " should have the same length."
        ):
            layer.compute_mask([input_1, input_2], [None])

    def test_subtract_layer_inputs_length_errors(self):
        shape = (4, 5)
        input_1 = layers.Input(shape=shape)
        input_2 = layers.Input(shape=shape)
        input_3 = layers.Input(shape=shape)

        with self.assertRaisesRegex(
            ValueError, "layer should be called on exactly 2 inputs"
        ):
            layers.Subtract()([input_1, input_2, input_3])
        with self.assertRaisesRegex(
            ValueError, "layer should be called on exactly 2 inputs"
        ):
            layers.Subtract()([input_1])

    def test_dot_higher_dim(self):
        a_shape = (1, 3, 2)
        b_shape = (1, 1, 2, 3)
        # Test symbolic call
        a = layers.Input(batch_shape=a_shape)
        b = layers.Input(batch_shape=b_shape)
        c = layers.Dot(axes=(-2, -1))([a, b])
        self.assertEqual(c.shape, (1, 2, 1, 2))
        a = np.random.random(a_shape)
        b = np.random.random(b_shape)
        c = layers.Dot(axes=(-2, -1))([a, b])
        self.assertEqual(backend.standardize_shape(c.shape), (1, 2, 1, 2))

    @parameterized.named_parameters(TEST_PARAMETERS)
    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors.",
    )
    def test_sparse(
        self,
        layer_class,
        np_op,
        init_kwargs={},
        input_shape=(2, 4, 5),
        expected_output_shape=(2, 4, 5),
        **kwargs,
    ):
        self.run_layer_test(
            layer_class,
            init_kwargs=init_kwargs,
            input_shape=[input_shape, input_shape],
            input_sparse=True,
            expected_output_shape=expected_output_shape,
            expected_output_sparse=True,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
            run_training_check=False,
            run_mixed_precision_check=False,
        )

        layer = layer_class(**init_kwargs)

        # Merging a sparse tensor with a dense tensor, or a dense tensor
        # with a sparse tensor produces a dense tensor
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            x1 = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], (2, 3))
            x3 = tf.SparseTensor([[0, 0], [1, 1]], [4.0, 5.0], (2, 3))
            sparse_class = tf.SparseTensor
        elif backend.backend() == "jax":
            import jax.experimental.sparse as jax_sparse

            # Use n_batch of 1 to be compatible with all ops.
x1 = jax_sparse.BCOO(([[1.0, 2.0]], [[[0], [2]]]), shape=(2, 3)) x3 = jax_sparse.BCOO(([[4.0, 5.0]], [[[0], [1]]]), shape=(2, 3)) sparse_class = jax_sparse.JAXSparse else: self.fail(f"Sparse is unsupported with backend {backend.backend()}") x1_np = backend.convert_to_numpy(x1) x2 = np.random.rand(2, 3) self.assertAllClose(layer([x1, x2]), np_op(x1_np, x2, **init_kwargs)) self.assertAllClose(layer([x2, x1]), np_op(x2, x1_np, **init_kwargs)) # Merging a sparse tensor with a sparse tensor produces a sparse tensor x3_np = backend.convert_to_numpy(x3) self.assertIsInstance(layer([x1, x3]), sparse_class) self.assertAllClose(layer([x1, x3]), np_op(x1_np, x3_np, **init_kwargs))
keras/keras/layers/merging/merging_test.py/0
{ "file_path": "keras/keras/layers/merging/merging_test.py", "repo_id": "keras", "token_count": 5052 }
143
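The `np_dot` helper above mirrors what `layers.Dot` computes: one `tensordot` per batch element. A minimal sketch of the 2D case, where `Dot(axes=-1)` reduces to a per-row inner product (shapes chosen arbitrarily):

import numpy as np

from keras import layers
from keras import ops

x1 = np.random.rand(2, 4).astype("float32")
x2 = np.random.rand(2, 4).astype("float32")
out = layers.Dot(axes=-1)([x1, x2])  # shape (2, 1): one dot product per row
# Reference: batched inner product along the last axis.
ref = np.sum(x1 * x2, axis=-1, keepdims=True)
np.testing.assert_allclose(ops.convert_to_numpy(out), ref, rtol=1e-5)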
import numpy as np import pytest from keras import backend from keras import layers from keras import testing class AlphaDropoutTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_alpha_dropout_basics(self): self.run_layer_test( layers.AlphaDropout, init_kwargs={ "rate": 0.2, }, input_shape=(2, 3), expected_output_shape=(2, 3), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=1, expected_num_losses=0, supports_masking=True, ) def test_alpha_dropout_correctness(self): inputs = np.ones((20, 500)).astype("float32") layer = layers.AlphaDropout(0.3, seed=1337) outputs = layer(inputs, training=True) self.assertAllClose( np.std(backend.convert_to_numpy(outputs)), 1.0, atol=1e-1 ) def test_alpha_dropout_partial_noise_shape_dynamic(self): inputs = np.ones((20, 5, 10)) layer = layers.AlphaDropout(0.5, noise_shape=(None, 1, None)) outputs = layer(inputs, training=True) self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :]) def test_alpha_dropout_partial_noise_shape_static(self): inputs = np.ones((20, 5, 10)) layer = layers.AlphaDropout(0.5, noise_shape=(20, 1, 10)) outputs = layer(inputs, training=True) self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :]) def test_alpha_dropout_negative_rate(self): with self.assertRaisesRegex( ValueError, "Invalid value received for argument `rate`. " "Expected a float value between 0 and 1.", ): _ = layers.AlphaDropout(rate=-0.5) def test_alpha_dropout_rate_greater_than_one(self): with self.assertRaisesRegex( ValueError, "Invalid value received for argument `rate`. " "Expected a float value between 0 and 1.", ): _ = layers.AlphaDropout(rate=1.5)
keras/keras/layers/regularization/alpha_dropout_test.py/0
{ "file_path": "keras/keras/layers/regularization/alpha_dropout_test.py", "repo_id": "keras", "token_count": 982 }
144
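The `noise_shape` tests above rely on mask broadcasting: a `1` (or `None`) entry in `noise_shape` means the dropout mask is shared along that axis. A minimal sketch of that behavior (shapes arbitrary):

import numpy as np

from keras import layers
from keras import ops

x = np.ones((4, 5, 10), dtype="float32")
layer = layers.AlphaDropout(rate=0.5, noise_shape=(None, 1, None))
y = ops.convert_to_numpy(layer(x, training=True))
# Axis 1 has noise shape 1, so every step along it receives the same mask.
assert np.allclose(y[:, 0, :], y[:, 4, :])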
import math from keras import backend from keras import ops from keras.api_export import keras_export from keras.backend.common.keras_tensor import KerasTensor from keras.layers.input_spec import InputSpec from keras.layers.layer import Layer @keras_export("keras.layers.Flatten") class Flatten(Layer): """Flattens the input. Does not affect the batch size. Note: If inputs are shaped `(batch,)` without a feature axis, then flattening adds an extra channel dimension and output shape is `(batch, 1)`. Args: data_format: A string, one of `"channels_last"` (default) or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, ..., channels)` while `"channels_first"` corresponds to inputs with shape `(batch, channels, ...)`. When unspecified, uses `image_data_format` value found in your Keras config file at `~/.keras/keras.json` (if exists). Defaults to `"channels_last"`. Example: >>> x = keras.Input(shape=(10, 64)) >>> y = keras.layers.Flatten()(x) >>> y.shape (None, 640) """ def __init__(self, data_format=None, **kwargs): super().__init__(**kwargs) self.data_format = backend.standardize_data_format(data_format) self.input_spec = InputSpec(min_ndim=1) self._channels_first = self.data_format == "channels_first" def call(self, inputs): input_shape = inputs.shape rank = len(input_shape) if self._channels_first and rank > 1: # Switch to channels-last format. inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1)) output_shape = tuple( dim if dim is not None else -1 for dim in self.compute_output_shape(input_shape) ) return ops.reshape(inputs, output_shape) def compute_output_shape(self, input_shape): non_batch_dims = input_shape[1:] if len(non_batch_dims) == 0: flattened_dim = 1 elif any(d is None for d in non_batch_dims): # NB: we cannot use the shorter `None in non_batch_dims` here b/c # torchdynamo errors when calling `__contains__` op with # a constant (in this case `None`) operand since it assumes # that the elements in the collection are also `ConstantVariable`s # but tensor shapes can be `SymNodeVariable`s (e.g. `SymInt`) flattened_dim = None else: flattened_dim = math.prod(non_batch_dims) return (input_shape[0], flattened_dim) def compute_output_spec(self, inputs): output_shape = self.compute_output_shape(inputs.shape) return KerasTensor( shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse ) def get_config(self): config = {"data_format": self.data_format} base_config = super().get_config() return {**base_config, **config}
keras/keras/layers/reshaping/flatten.py/0
{ "file_path": "keras/keras/layers/reshaping/flatten.py", "repo_id": "keras", "token_count": 1273 }
145
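Because `Flatten` transposes `channels_first` inputs to channels-last before reshaping, the two data formats flatten the same tensor into different element orders. A minimal sketch (the inline comments show the expected values for this particular input):

import numpy as np

from keras import layers
from keras import ops

# Interpret the same (2, 3, 4) tensor as (batch, channels, width).
x = np.arange(2 * 3 * 4, dtype="float32").reshape(2, 3, 4)
y_last = layers.Flatten(data_format="channels_last")(x)
y_first = layers.Flatten(data_format="channels_first")(x)
# channels_first first transposes to (batch, width, channels),
# so the flattened element order differs:
print(ops.convert_to_numpy(y_last)[0][:4])   # [0. 1. 2. 3.]
print(ops.convert_to_numpy(y_first)[0][:4])  # [0. 4. 8. 1.]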
from keras.api_export import keras_export from keras.losses.losses import binary_crossentropy from keras.losses.losses import categorical_crossentropy from keras.losses.losses import kl_divergence from keras.losses.losses import poisson from keras.losses.losses import sparse_categorical_crossentropy from keras.metrics import reduction_metrics @keras_export("keras.metrics.KLDivergence") class KLDivergence(reduction_metrics.MeanMetricWrapper): """Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`. Formula: ```python metric = y_true * log(y_true / y_pred) ``` Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = keras.metrics.KLDivergence() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result() 0.45814306 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result() 0.9162892 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[keras.metrics.KLDivergence()]) ``` """ def __init__(self, name="kl_divergence", dtype=None): super().__init__(fn=kl_divergence, name=name, dtype=dtype) def get_config(self): return {"name": self.name, "dtype": self.dtype} @keras_export("keras.metrics.Poisson") class Poisson(reduction_metrics.MeanMetricWrapper): """Computes the Poisson metric between `y_true` and `y_pred`. Formula: ```python metric = y_pred - y_true * log(y_pred) ``` Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Examples: Standalone usage: >>> m = keras.metrics.Poisson() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result() 0.49999997 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result() 0.99999994 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[keras.metrics.Poisson()]) ``` """ def __init__(self, name="poisson", dtype=None): super().__init__(fn=poisson, name=name, dtype=dtype) def get_config(self): return {"name": self.name, "dtype": self.dtype} @keras_export("keras.metrics.BinaryCrossentropy") class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper): """Computes the crossentropy metric between the labels and predictions. This is the crossentropy metric class to be used when there are only two label classes (0 and 1). Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. from_logits: (Optional) Whether output is expected to be a logits tensor. By default, we consider that output encodes a probability distribution. label_smoothing: (Optional) Float in `[0, 1]`. When > 0, label values are smoothed, meaning the confidence on label values are relaxed. e.g. `label_smoothing=0.2` means that we will use a value of 0.1 for label "0" and 0.9 for label "1". Examples: Standalone usage: >>> m = keras.metrics.BinaryCrossentropy() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result() 0.81492424 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... 
sample_weight=[1, 0]) >>> m.result() 0.9162905 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[keras.metrics.BinaryCrossentropy()]) ``` """ def __init__( self, name="binary_crossentropy", dtype=None, from_logits=False, label_smoothing=0, ): super().__init__( binary_crossentropy, name, dtype=dtype, from_logits=from_logits, label_smoothing=label_smoothing, ) self.from_logits = from_logits self.label_smoothing = label_smoothing # Metric should be minimized during optimization. self._direction = "down" def get_config(self): return { "name": self.name, "dtype": self.dtype, "from_logits": self.from_logits, "label_smoothing": self.label_smoothing, } @keras_export("keras.metrics.CategoricalCrossentropy") class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): """Computes the crossentropy metric between the labels and predictions. This is the crossentropy metric class to be used when there are multiple label classes (2 or more). It assumes that labels are one-hot encoded, e.g., when labels values are `[2, 0, 1]`, then `y_true` is `[[0, 0, 1], [1, 0, 0], [0, 1, 0]]`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. from_logits: (Optional) Whether output is expected to be a logits tensor. By default, we consider that output encodes a probability distribution. label_smoothing: (Optional) Float in `[0, 1]`. When > 0, label values are smoothed, meaning the confidence on label values are relaxed. e.g. `label_smoothing=0.2` means that we will use a value of 0.1 for label "0" and 0.9 for label "1". axis: (Optional) Defaults to `-1`. The dimension along which entropy is computed. Examples: Standalone usage: >>> # EPSILON = 1e-7, y = y_true, y` = y_pred >>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) >>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] >>> # xent = -sum(y * log(y'), axis = -1) >>> # = -((log 0.95), (log 0.1)) >>> # = [0.051, 2.302] >>> # Reduced xent = (0.051 + 2.302) / 2 >>> m = keras.metrics.CategoricalCrossentropy() >>> m.update_state([[0, 1, 0], [0, 0, 1]], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> m.result() 1.1769392 >>> m.reset_state() >>> m.update_state([[0, 1, 0], [0, 0, 1]], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]], ... sample_weight=np.array([0.3, 0.7])) >>> m.result() 1.6271976 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[keras.metrics.CategoricalCrossentropy()]) ``` """ def __init__( self, name="categorical_crossentropy", dtype=None, from_logits=False, label_smoothing=0, axis=-1, ): super().__init__( categorical_crossentropy, name, dtype=dtype, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis, ) self.from_logits = from_logits self.label_smoothing = label_smoothing self.axis = axis # Metric should be minimized during optimization. self._direction = "down" def get_config(self): return { "name": self.name, "dtype": self.dtype, "from_logits": self.from_logits, "label_smoothing": self.label_smoothing, "axis": self.axis, } @keras_export("keras.metrics.SparseCategoricalCrossentropy") class SparseCategoricalCrossentropy(reduction_metrics.MeanMetricWrapper): """Computes the crossentropy metric between the labels and predictions. Use this crossentropy metric when there are two or more label classes. It expects labels to be provided as integers. If you want to provide labels that are one-hot encoded, please use the `CategoricalCrossentropy` metric instead. 
There should be `num_classes` floating point values per feature for `y_pred` and a single floating point value per feature for `y_true`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. from_logits: (Optional) Whether output is expected to be a logits tensor. By default, we consider that output encodes a probability distribution. axis: (Optional) Defaults to `-1`. The dimension along which entropy is computed. Examples: Standalone usage: >>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]] >>> # logits = log(y_pred) >>> # softmax = exp(logits) / sum(exp(logits), axis=-1) >>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] >>> # xent = -sum(y * log(softmax), 1) >>> # log(softmax) = [[-2.9957, -0.0513, -16.1181], >>> # [-2.3026, -0.2231, -2.3026]] >>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]] >>> # xent = [0.0513, 2.3026] >>> # Reduced xent = (0.0513 + 2.3026) / 2 >>> m = keras.metrics.SparseCategoricalCrossentropy() >>> m.update_state([1, 2], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> m.result() 1.1769392 >>> m.reset_state() >>> m.update_state([1, 2], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]], ... sample_weight=np.array([0.3, 0.7])) >>> m.result() 1.6271976 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[keras.metrics.SparseCategoricalCrossentropy()]) ``` """ def __init__( self, name="sparse_categorical_crossentropy", dtype=None, from_logits=False, axis=-1, ): super().__init__( sparse_categorical_crossentropy, name=name, dtype=dtype, from_logits=from_logits, axis=axis, ) self.from_logits = from_logits self.axis = axis # Metric should be minimized during optimization. self._direction = "down" def get_config(self): return { "name": self.name, "dtype": self.dtype, "from_logits": self.from_logits, "axis": self.axis, }
keras/keras/metrics/probabilistic_metrics.py/0
{ "file_path": "keras/keras/metrics/probabilistic_metrics.py", "repo_id": "keras", "token_count": 4913 }
146
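The `from_logits` flag in these metrics controls whether `y_pred` is treated as raw scores or probabilities; feeding logits with `from_logits=True` should agree with feeding their sigmoid with `from_logits=False`. A minimal sketch (values arbitrary):

import numpy as np

from keras import metrics
from keras import ops

y_true = np.array([[0.0, 1.0], [1.0, 0.0]])
logits = np.array([[-1.0, 2.0], [0.5, -0.5]])
probs = ops.convert_to_numpy(ops.sigmoid(logits))

m_logits = metrics.BinaryCrossentropy(from_logits=True)
m_logits.update_state(y_true, logits)

m_probs = metrics.BinaryCrossentropy(from_logits=False)
m_probs.update_state(y_true, probs)

# Both paths should agree up to numerical precision.
print(float(m_logits.result()), float(m_probs.result()))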
import numpy as np from keras import testing from keras.saving import saving_lib_test class VariableMappingTest(testing.TestCase): def test_basics(self): model = saving_lib_test._get_basic_functional_model() model.optimizer.build(model.trainable_variables) variable_map = model._get_variable_map() self.assertIn("first_dense/kernel", variable_map) self.assertIn("second_dense/bias", variable_map) self.assertIn("adam/learning_rate", variable_map) model = saving_lib_test._get_basic_sequential_model() model.build((None, 1)) model.optimizer.build(model.trainable_variables) variable_map = model._get_variable_map() self.assertIn("sequential/dense_1/bias", variable_map) self.assertIn("adam/learning_rate", variable_map) model = saving_lib_test._get_subclassed_model() model(np.ones((1, 1))) model.optimizer.build(model.trainable_variables) variable_map = model._get_variable_map() self.assertIn("custom_model_x/my_dense_1/dense/kernel", variable_map) self.assertIn("custom_model_x/my_dense_1/my_dict_weight", variable_map) self.assertIn( "custom_model_x/my_dense_1/my_additional_weight", variable_map ) self.assertIn("adam/learning_rate", variable_map)
keras/keras/models/variable_mapping_test.py/0
{ "file_path": "keras/keras/models/variable_mapping_test.py", "repo_id": "keras", "token_count": 564 }
147
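`_get_variable_map` is a private helper used by the saving machinery to key every model and optimizer variable by its path. A minimal sketch of inspecting it (the layer names are ours; the exact key strings depend on model and layer naming):

from keras import layers
from keras import models

model = models.Sequential(
    [layers.Dense(4, name="dense_a"), layers.Dense(1, name="dense_b")]
)
model.build((None, 8))
model.compile(optimizer="adam", loss="mse")
model.optimizer.build(model.trainable_variables)

variable_map = model._get_variable_map()  # private API
print(sorted(variable_map)[:4])  # paths like "sequential/dense_a/kernel"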
""" MANIFEST: abs absolute add all amax amin append arange arccos arccosh arcsin arcsinh arctan arctan2 arctanh argmax argmin argsort array average bincount broadcast_to ceil clip concatenate conj conjugate copy cos cosh count_nonzero cross cumprod cumsum diag diagonal diff digitize divide dot dtype einsum empty equal exp expand_dims expm1 eye flip floor full full_like greater greater_equal hstack identity imag interp isclose isfinite isinf isnan less less_equal linspace log log10 log1p log2 logaddexp logical_and logical_not logical_or logspace matmul max maximum mean median meshgrid mgrid min minimum mod moveaxis multiply nan_to_num ndim nonzero not_equal ones ones_like outer pad percentile power prod quantile ravel real reciprocal repeat reshape roll round sign sin sinh size sort split sqrt square squeeze stack std subtract sum swapaxes take take_along_axis tan tanh tensordot tile trace transpose tri tril triu true_divide vdot vstack where zeros zeros_like """ import builtins import re import numpy as np from keras import backend from keras.api_export import keras_export from keras.backend import KerasTensor from keras.backend import any_symbolic_tensors from keras.backend.common import dtypes from keras.ops import operation_utils from keras.ops.operation import Operation from keras.ops.operation_utils import broadcast_shapes from keras.ops.operation_utils import reduce_shape def shape_equal(shape1, shape2, axis=None, allow_none=True): """Check if two shapes are equal. Args: shape1: A list or tuple of integers for first shape to be compared. shape2: A list or tuple of integers for second shape to be compared. axis: An integer, list, or tuple of integers (optional): Axes to ignore during comparison. Defaults to `None`. allow_none (bool, optional): If `True`, allows `None` in a shape to match any value in the corresponding position of the other shape. Defaults to `True`. Returns: bool: `True` if shapes are considered equal based on the criteria, `False` otherwise. Examples: >>> shape_equal((32, 64, 128), (32, 64, 128)) True >>> shape_equal((32, 64, 128), (32, 64, 127)) False >>> shape_equal((32, 64, None), (32, 64, 128), allow_none=True) True >>> shape_equal((32, 64, None), (32, 64, 128), allow_none=False) False >>> shape_equal((32, 64, 128), (32, 63, 128), axis=1) True >>> shape_equal((32, 64, 128), (32, 63, 127), axis=(1, 2)) True >>> shape_equal((32, 64, 128), (32, 63, 127), axis=[1,2]) True >>> shape_equal((32, 64), (32, 64, 128)) False """ if len(shape1) != len(shape2): return False shape1 = list(shape1) shape2 = list(shape2) if axis is not None: if isinstance(axis, int): axis = [axis] for ax in axis: shape1[ax] = -1 shape2[ax] = -1 if allow_none: for i in range(len(shape1)): if shape1[i] is None: shape1[i] = shape2[i] if shape2[i] is None: shape2[i] = shape1[i] return shape1 == shape2 class Absolute(Operation): def call(self, x): return backend.numpy.absolute(x) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.absolute", "keras.ops.numpy.absolute"]) def absolute(x): """Compute the absolute value element-wise. `keras.ops.abs` is a shorthand for this function. Args: x: Input tensor. Returns: An array containing the absolute value of each element in `x`. 
    Example:

    >>> x = keras.ops.convert_to_tensor([-1.2, 1.2])
    >>> keras.ops.absolute(x)
    array([1.2, 1.2], dtype=float32)
    """
    if any_symbolic_tensors((x,)):
        return Absolute().symbolic_call(x)
    return backend.numpy.absolute(x)


class Abs(Absolute):
    pass


@keras_export(["keras.ops.abs", "keras.ops.numpy.abs"])
def abs(x):
    """Shorthand for `keras.ops.absolute`."""
    return absolute(x)


class Add(Operation):
    def call(self, x1, x2):
        return backend.numpy.add(x1, x2)

    def compute_output_spec(self, x1, x2):
        x1_shape = getattr(x1, "shape", [])
        x2_shape = getattr(x2, "shape", [])
        output_shape = broadcast_shapes(x1_shape, x2_shape)
        output_dtype = dtypes.result_type(
            getattr(x1, "dtype", type(x1)),
            getattr(x2, "dtype", type(x2)),
        )
        x1_sparse = getattr(x1, "sparse", False)
        x2_sparse = getattr(x2, "sparse", False)
        output_sparse = x1_sparse and x2_sparse
        return KerasTensor(
            output_shape, dtype=output_dtype, sparse=output_sparse
        )


@keras_export(["keras.ops.add", "keras.ops.numpy.add"])
def add(x1, x2):
    """Add arguments element-wise.

    Args:
        x1: First input tensor.
        x2: Second input tensor.

    Returns:
        The tensor containing the element-wise sum of `x1` and `x2`.

    Examples:
    >>> x1 = keras.ops.convert_to_tensor([1, 4])
    >>> x2 = keras.ops.convert_to_tensor([5, 6])
    >>> keras.ops.add(x1, x2)
    array([6, 10], dtype=int32)

    `keras.ops.add` also broadcasts shapes:
    >>> x1 = keras.ops.convert_to_tensor(
    ...     [[5, 4],
    ...      [5, 6]]
    ... )
    >>> x2 = keras.ops.convert_to_tensor([5, 6])
    >>> keras.ops.add(x1, x2)
    array([[10 10]
           [10 12]], shape=(2, 2), dtype=int32)
    """
    if any_symbolic_tensors((x1, x2)):
        return Add().symbolic_call(x1, x2)
    return backend.numpy.add(x1, x2)


class All(Operation):
    def __init__(self, axis=None, keepdims=False):
        super().__init__()
        if isinstance(axis, int):
            self.axis = [axis]
        else:
            self.axis = axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.all(
            x,
            axis=self.axis,
            keepdims=self.keepdims,
        )

    def compute_output_spec(self, x):
        return KerasTensor(
            reduce_shape(
                x.shape,
                axis=self.axis,
                keepdims=self.keepdims,
            ),
            dtype="bool",
        )


@keras_export(["keras.ops.all", "keras.ops.numpy.all"])
def all(x, axis=None, keepdims=False):
    """Test whether all array elements along a given axis evaluate to `True`.

    Args:
        x: Input tensor.
        axis: An integer or tuple of integers that represent the axis along
            which a logical AND reduction is performed. The default
            (`axis=None`) is to perform a logical AND over all the dimensions
            of the input array. `axis` may be negative, in which case it
            counts from the last to the first axis.
        keepdims: If `True`, axes which are reduced are left in the result as
            dimensions with size one. With this option, the result will
            broadcast correctly against the input array. Defaults to `False`.

    Returns:
        The tensor containing the logical AND reduction over the `axis`.

    Examples:
    >>> x = keras.ops.convert_to_tensor([True, False])
    >>> keras.ops.all(x)
    array(False, shape=(), dtype=bool)

    >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]])
    >>> keras.ops.all(x, axis=0)
    array([ True False], shape=(2,), dtype=bool)

    `keepdims=True` outputs a tensor with dimensions reduced to one.
    >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]])
    >>> keras.ops.all(x, keepdims=True)
    array([[False]], shape=(1, 1), dtype=bool)
    """
    if any_symbolic_tensors((x,)):
        return All(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.all(x, axis=axis, keepdims=keepdims)


class Any(Operation):
    def __init__(self, axis=None, keepdims=False):
        super().__init__()
        if isinstance(axis, int):
            self.axis = [axis]
        else:
            self.axis = axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.any(
            x,
            axis=self.axis,
            keepdims=self.keepdims,
        )

    def compute_output_spec(self, x):
        return KerasTensor(
            reduce_shape(
                x.shape,
                axis=self.axis,
                keepdims=self.keepdims,
            ),
            dtype="bool",
        )


@keras_export(["keras.ops.any", "keras.ops.numpy.any"])
def any(x, axis=None, keepdims=False):
    """Test whether any array element along a given axis evaluates to `True`.

    Args:
        x: Input tensor.
        axis: An integer or tuple of integers that represent the axis along
            which a logical OR reduction is performed. The default
            (`axis=None`) is to perform a logical OR over all the dimensions
            of the input array. `axis` may be negative, in which case it
            counts from the last to the first axis.
        keepdims: If `True`, axes which are reduced are left in the result as
            dimensions with size one. With this option, the result will
            broadcast correctly against the input array. Defaults to `False`.

    Returns:
        The tensor containing the logical OR reduction over the `axis`.

    Examples:
    >>> x = keras.ops.convert_to_tensor([True, False])
    >>> keras.ops.any(x)
    array(True, shape=(), dtype=bool)

    >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]])
    >>> keras.ops.any(x, axis=0)
    array([ True True], shape=(2,), dtype=bool)

    `keepdims=True` outputs a tensor with dimensions reduced to one.
    >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]])
    >>> keras.ops.any(x, keepdims=True)
    array([[ True]], shape=(1, 1), dtype=bool)
    """
    if any_symbolic_tensors((x,)):
        return Any(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.any(x, axis=axis, keepdims=keepdims)


class Amax(Operation):
    def __init__(self, axis=None, keepdims=False):
        super().__init__()
        if isinstance(axis, int):
            axis = [axis]
        self.axis = axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.amax(
            x,
            axis=self.axis,
            keepdims=self.keepdims,
        )

    def compute_output_spec(self, x):
        return KerasTensor(
            reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
            dtype=x.dtype,
        )


@keras_export(["keras.ops.amax", "keras.ops.numpy.amax"])
def amax(x, axis=None, keepdims=False):
    """Returns the maximum of an array or maximum value along an axis.

    Args:
        x: Input tensor.
        axis: Axis along which to compute the maximum.
            By default (`axis=None`), find the maximum value in all the
            dimensions of the input array.
        keepdims: If `True`, axes which are reduced are left in the result as
            dimensions that are broadcast to the size of the original
            input tensor. Defaults to `False`.

    Returns:
        An array with the maximum value. If `axis=None`, the result is a
        scalar value representing the maximum element in the entire array. If
        `axis` is given, the result is an array with the maximum values along
        the specified axis.

    Examples:
    >>> x = keras.ops.convert_to_tensor([[1, 3, 5], [2, 3, 6]])
    >>> keras.ops.amax(x)
    array(6, dtype=int32)

    >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]])
    >>> keras.ops.amax(x, axis=0)
    array([1, 6, 8], dtype=int32)

    >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]])
    >>> keras.ops.amax(x, axis=1, keepdims=True)
    array([[8], [5]], dtype=int32)
    """
    if any_symbolic_tensors((x,)):
        return Amax(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.amax(x, axis=axis, keepdims=keepdims)


class Amin(Operation):
    def __init__(self, axis=None, keepdims=False):
        super().__init__()
        if isinstance(axis, int):
            axis = [axis]
        self.axis = axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.amin(x, axis=self.axis, keepdims=self.keepdims)

    def compute_output_spec(self, x):
        return KerasTensor(
            reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
            dtype=x.dtype,
        )


@keras_export(["keras.ops.amin", "keras.ops.numpy.amin"])
def amin(x, axis=None, keepdims=False):
    """Returns the minimum of an array or minimum value along an axis.

    Args:
        x: Input tensor.
        axis: Axis along which to compute the minimum.
            By default (`axis=None`), find the minimum value in all the
            dimensions of the input array.
        keepdims: If `True`, axes which are reduced are left in the result as
            dimensions that are broadcast to the size of the original
            input tensor. Defaults to `False`.

    Returns:
        An array with the minimum value. If `axis=None`, the result is a
        scalar value representing the minimum element in the entire array. If
        `axis` is given, the result is an array with the minimum values along
        the specified axis.

    Examples:
    >>> x = keras.ops.convert_to_tensor([1, 3, 5, 2, 3, 6])
    >>> keras.ops.amin(x)
    array(1, dtype=int32)

    >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]])
    >>> keras.ops.amin(x, axis=0)
    array([1, 5, 3], dtype=int32)

    >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]])
    >>> keras.ops.amin(x, axis=1, keepdims=True)
    array([[1], [3]], dtype=int32)
    """
    if any_symbolic_tensors((x,)):
        return Amin(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.amin(x, axis=axis, keepdims=keepdims)


class Append(Operation):
    def __init__(self, axis=None):
        super().__init__()
        self.axis = axis

    def call(self, x1, x2):
        return backend.numpy.append(x1, x2, axis=self.axis)

    def compute_output_spec(self, x1, x2):
        x1_shape = x1.shape
        x2_shape = x2.shape
        dtype = dtypes.result_type(
            getattr(x1, "dtype", type(x1)),
            getattr(x2, "dtype", type(x2)),
        )
        if self.axis is None:
            if None in x1_shape or None in x2_shape:
                output_shape = [None]
            else:
                output_shape = [int(np.prod(x1_shape) + np.prod(x2_shape))]
            return KerasTensor(output_shape, dtype=dtype)

        if not shape_equal(x1_shape, x2_shape, [self.axis]):
            raise ValueError(
                "`append` requires inputs to have the same shape except the "
                f"`axis={self.axis}`, but received shape {x1_shape} and "
                f"{x2_shape}."
            )

        output_shape = list(x1_shape)
        output_shape[self.axis] = x1_shape[self.axis] + x2_shape[self.axis]
        return KerasTensor(output_shape, dtype=dtype)


@keras_export(["keras.ops.append", "keras.ops.numpy.append"])
def append(
    x1,
    x2,
    axis=None,
):
    """Append tensor `x2` to the end of tensor `x1`.

    Args:
        x1: First input tensor.
        x2: Second input tensor.
        axis: Axis along which tensor `x2` is appended to tensor `x1`.
            If `None`, both tensors are flattened before use.

    Returns:
        A tensor with the values of `x2` appended to `x1`.

    Examples:
    >>> x1 = keras.ops.convert_to_tensor([1, 2, 3])
    >>> x2 = keras.ops.convert_to_tensor([[4, 5, 6], [7, 8, 9]])
    >>> keras.ops.append(x1, x2)
    array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)

    When `axis` is specified, `x1` and `x2` must have compatible shapes.
    >>> x1 = keras.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]])
    >>> x2 = keras.ops.convert_to_tensor([[7, 8, 9]])
    >>> keras.ops.append(x1, x2, axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]], dtype=int32)

    >>> x3 = keras.ops.convert_to_tensor([7, 8, 9])
    >>> keras.ops.append(x1, x3, axis=0)
    Traceback (most recent call last):
        ...
    TypeError: Cannot concatenate arrays with different numbers of
    dimensions: got (2, 3), (3,).
    """
    if any_symbolic_tensors((x1, x2)):
        return Append(axis=axis).symbolic_call(x1, x2)
    return backend.numpy.append(x1, x2, axis=axis)


class Arange(Operation):
    def call(self, start, stop=None, step=1, dtype=None):
        return backend.numpy.arange(start, stop, step=step, dtype=dtype)

    def compute_output_spec(self, start, stop=None, step=1, dtype=None):
        if stop is None:
            start, stop = 0, start
        output_shape = [int(np.ceil((stop - start) / step))]
        if dtype is None:
            dtypes_to_resolve = [
                getattr(start, "dtype", type(start)),
                getattr(step, "dtype", type(step)),
            ]
            if stop is not None:
                dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
            dtype = dtypes.result_type(*dtypes_to_resolve)
        return KerasTensor(output_shape, dtype=dtype)


@keras_export(["keras.ops.arange", "keras.ops.numpy.arange"])
def arange(start, stop=None, step=1, dtype=None):
    """Return evenly spaced values within a given interval.

    `arange` can be called with a varying number of positional arguments:
    * `arange(stop)`: Values are generated within the half-open interval
        `[0, stop)` (in other words, the interval including start but
        excluding stop).
    * `arange(start, stop)`: Values are generated within the half-open
        interval `[start, stop)`.
    * `arange(start, stop, step)`: Values are generated within the half-open
        interval `[start, stop)`, with spacing between values given by step.

    Args:
        start: Integer or real, representing the start of the interval. The
            interval includes this value.
        stop: Integer or real, representing the end of the interval. The
            interval does not include this value, except in some cases where
            `step` is not an integer and floating point round-off affects the
            length of `out`. Defaults to `None`.
        step: Integer or real, representing the spacing between values. For
            any output `out`, this is the distance between two adjacent
            values, `out[i+1] - out[i]`. The default step size is 1. If
            `step` is specified as a positional argument, `start` must also
            be given.
        dtype: The type of the output array. If `dtype` is not given, infer
            the data type from the other input arguments.

    Returns:
        Tensor of evenly spaced values. For floating point arguments, the
        length of the result is `ceil((stop - start)/step)`. Because of
        floating point overflow, this rule may result in the last element of
        out being greater than stop.
Examples: >>> keras.ops.arange(3) array([0, 1, 2], dtype=int32) >>> keras.ops.arange(3.0) array([0., 1., 2.], dtype=float32) >>> keras.ops.arange(3, 7) array([3, 4, 5, 6], dtype=int32) >>> keras.ops.arange(3, 7, 2) array([3, 5], dtype=int32) """ return backend.numpy.arange(start, stop, step=step, dtype=dtype) class Arccos(Operation): def call(self, x): return backend.numpy.arccos(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.arccos", "keras.ops.numpy.arccos"]) def arccos(x): """Trigonometric inverse cosine, element-wise. The inverse of `cos` so that, if `y = cos(x)`, then `x = arccos(y)`. Args: x: Input tensor. Returns: Tensor of the angle of the ray intersecting the unit circle at the given x-coordinate in radians `[0, pi]`. Example: >>> x = keras.ops.convert_to_tensor([1, -1]) >>> keras.ops.arccos(x) array([0.0, 3.1415927], dtype=float32) """ if any_symbolic_tensors((x,)): return Arccos().symbolic_call(x) return backend.numpy.arccos(x) class Arccosh(Operation): def call(self, x): return backend.numpy.arccosh(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.arccosh", "keras.ops.numpy.arccosh"]) def arccosh(x): """Inverse hyperbolic cosine, element-wise. Arguments: x: Input tensor. Returns: Output tensor of same shape as x. Example: >>> x = keras.ops.convert_to_tensor([10, 100]) >>> keras.ops.arccosh(x) array([2.993223, 5.298292], dtype=float32) """ if any_symbolic_tensors((x,)): return Arccosh().symbolic_call(x) return backend.numpy.arccosh(x) class Arcsin(Operation): def call(self, x): return backend.numpy.arcsin(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.arcsin", "keras.ops.numpy.arcsin"]) def arcsin(x): """Inverse sine, element-wise. Args: x: Input tensor. Returns: Tensor of the inverse sine of each element in `x`, in radians and in the closed interval `[-pi/2, pi/2]`. Example: >>> x = keras.ops.convert_to_tensor([1, -1, 0]) >>> keras.ops.arcsin(x) array([ 1.5707964, -1.5707964, 0.], dtype=float32) """ if any_symbolic_tensors((x,)): return Arcsin().symbolic_call(x) return backend.numpy.arcsin(x) class Arcsinh(Operation): def call(self, x): return backend.numpy.arcsinh(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.arcsinh", "keras.ops.numpy.arcsinh"]) def arcsinh(x): """Inverse hyperbolic sine, element-wise. Arguments: x: Input tensor. Returns: Output tensor of same shape as `x`. 
Example: >>> x = keras.ops.convert_to_tensor([1, -1, 0]) >>> keras.ops.arcsinh(x) array([0.88137364, -0.88137364, 0.0], dtype=float32) """ if any_symbolic_tensors((x,)): return Arcsinh().symbolic_call(x) return backend.numpy.arcsinh(x) class Arctan(Operation): def call(self, x): return backend.numpy.arctan(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.arctan", "keras.ops.numpy.arctan"]) def arctan(x): """Trigonometric inverse tangent, element-wise. Args: x: Input tensor. Returns: Tensor of the inverse tangent of each element in `x`, in the interval `[-pi/2, pi/2]`. Example: >>> x = keras.ops.convert_to_tensor([0, 1]) >>> keras.ops.arctan(x) array([0., 0.7853982], dtype=float32) """ if any_symbolic_tensors((x,)): return Arctan().symbolic_call(x) return backend.numpy.arctan(x) class Arctan2(Operation): def call(self, x1, x2): return backend.numpy.arctan2(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) outputs_shape = broadcast_shapes(x1_shape, x2_shape) x1_dtype = backend.standardize_dtype( getattr(x1, "dtype", backend.floatx()) ) x2_dtype = backend.standardize_dtype( getattr(x2, "dtype", backend.floatx()) ) dtype = dtypes.result_type(x1_dtype, x2_dtype, float) return KerasTensor(outputs_shape, dtype=dtype) @keras_export(["keras.ops.arctan2", "keras.ops.numpy.arctan2"]) def arctan2(x1, x2): """Element-wise arc tangent of `x1/x2` choosing the quadrant correctly. The quadrant (i.e., branch) is chosen so that `arctan2(x1, x2)` is the signed angle in radians between the ray ending at the origin and passing through the point `(1, 0)`, and the ray ending at the origin and passing through the point `(x2, x1)`. (Note the role reversal: the "y-coordinate" is the first function parameter, the "x-coordinate" is the second.) By IEEE convention, this function is defined for `x2 = +/-0` and for either or both of `x1` and `x2` `= +/-inf`. Args: x1: First input tensor. x2: Second input tensor. Returns: Tensor of angles in radians, in the range `[-pi, pi]`. Examples: Consider four points in different quadrants: >>> x = keras.ops.convert_to_tensor([-1, +1, +1, -1]) >>> y = keras.ops.convert_to_tensor([-1, -1, +1, +1]) >>> keras.ops.arctan2(y, x) * 180 / numpy.pi array([-135., -45., 45., 135.], dtype=float32) Note the order of the parameters. `arctan2` is defined also when x2=0 and at several other points, obtaining values in the range `[-pi, pi]`: >>> keras.ops.arctan2( ... keras.ops.array([1., -1.]), ... keras.ops.array([0., 0.]), ... ) array([ 1.5707964, -1.5707964], dtype=float32) >>> keras.ops.arctan2( ... keras.ops.array([0., 0., numpy.inf]), ... keras.ops.array([+0., -0., numpy.inf]), ... 
)
    array([0., 3.1415925, 0.7853982], dtype=float32)
    """
    if any_symbolic_tensors((x1, x2)):
        return Arctan2().symbolic_call(x1, x2)
    return backend.numpy.arctan2(x1, x2)


class Arctanh(Operation):
    def call(self, x):
        return backend.numpy.arctanh(x)

    def compute_output_spec(self, x):
        dtype = backend.standardize_dtype(
            getattr(x, "dtype", backend.floatx())
        )
        if dtype == "int64":
            dtype = backend.floatx()
        else:
            dtype = dtypes.result_type(dtype, float)
        sparse = getattr(x, "sparse", False)
        return KerasTensor(x.shape, dtype=dtype, sparse=sparse)


@keras_export(["keras.ops.arctanh", "keras.ops.numpy.arctanh"])
def arctanh(x):
    """Inverse hyperbolic tangent, element-wise.

    Arguments:
        x: Input tensor.

    Returns:
        Output tensor of same shape as `x`.
    """
    if any_symbolic_tensors((x,)):
        return Arctanh().symbolic_call(x)
    return backend.numpy.arctanh(x)


class Argmax(Operation):
    def __init__(self, axis=None):
        super().__init__()
        self.axis = axis

    def call(self, x):
        return backend.numpy.argmax(x, axis=self.axis)

    def compute_output_spec(self, x):
        if self.axis is None:
            return KerasTensor([], dtype="int32")
        return KerasTensor(
            reduce_shape(x.shape, axis=[self.axis]), dtype="int32"
        )


@keras_export(["keras.ops.argmax", "keras.ops.numpy.argmax"])
def argmax(x, axis=None):
    """Returns the indices of the maximum values along an axis.

    Args:
        x: Input tensor.
        axis: By default, the index is into the flattened tensor, otherwise
            along the specified axis.

    Returns:
        Tensor of indices. It has the same shape as `x`, with the dimension
        along `axis` removed.

    Example:
    >>> x = keras.ops.arange(6).reshape(2, 3) + 10
    >>> x
    array([[10, 11, 12],
           [13, 14, 15]], dtype=int32)
    >>> keras.ops.argmax(x)
    array(5, dtype=int32)
    >>> keras.ops.argmax(x, axis=0)
    array([1, 1, 1], dtype=int32)
    >>> keras.ops.argmax(x, axis=1)
    array([2, 2], dtype=int32)
    """
    if any_symbolic_tensors((x,)):
        return Argmax(axis=axis).symbolic_call(x)
    return backend.numpy.argmax(x, axis=axis)


class Argmin(Operation):
    def __init__(self, axis=None):
        super().__init__()
        self.axis = axis

    def call(self, x):
        return backend.numpy.argmin(x, axis=self.axis)

    def compute_output_spec(self, x):
        if self.axis is None:
            return KerasTensor([], dtype="int32")
        return KerasTensor(
            reduce_shape(x.shape, axis=[self.axis]), dtype="int32"
        )


@keras_export(["keras.ops.argmin", "keras.ops.numpy.argmin"])
def argmin(x, axis=None):
    """Returns the indices of the minimum values along an axis.

    Args:
        x: Input tensor.
        axis: By default, the index is into the flattened tensor, otherwise
            along the specified axis.

    Returns:
        Tensor of indices. It has the same shape as `x`, with the dimension
        along `axis` removed.

    Example:
    >>> x = keras.ops.arange(6).reshape(2, 3) + 10
    >>> x
    array([[10, 11, 12],
           [13, 14, 15]], dtype=int32)
    >>> keras.ops.argmin(x)
    array(0, dtype=int32)
    >>> keras.ops.argmin(x, axis=0)
    array([0, 0, 0], dtype=int32)
    >>> keras.ops.argmin(x, axis=1)
    array([0, 0], dtype=int32)
    """
    if any_symbolic_tensors((x,)):
        return Argmin(axis=axis).symbolic_call(x)
    return backend.numpy.argmin(x, axis=axis)


class Argsort(Operation):
    def __init__(self, axis=-1):
        super().__init__()
        self.axis = axis

    def call(self, x):
        return backend.numpy.argsort(x, axis=self.axis)

    def compute_output_spec(self, x):
        if self.axis is None:
            return KerasTensor([int(np.prod(x.shape))], dtype="int32")
        return KerasTensor(x.shape, dtype="int32")


@keras_export(["keras.ops.argsort", "keras.ops.numpy.argsort"])
def argsort(x, axis=-1):
    """Returns the indices that would sort a tensor.

    Args:
        x: Input tensor.
        axis: Axis along which to sort. Defaults to `-1` (the last axis). If
            `None`, the flattened tensor is used.

    Returns:
        Tensor of indices that sort `x` along the specified `axis`.

    Examples:
    One dimensional array:
    >>> x = keras.ops.array([3, 1, 2])
    >>> keras.ops.argsort(x)
    array([1, 2, 0], dtype=int32)

    Two-dimensional array:
    >>> x = keras.ops.array([[0, 3], [3, 2], [4, 5]])
    >>> x
    array([[0, 3],
           [3, 2],
           [4, 5]], dtype=int32)
    >>> keras.ops.argsort(x, axis=0)
    array([[0, 1],
           [1, 0],
           [2, 2]], dtype=int32)
    >>> keras.ops.argsort(x, axis=1)
    array([[0, 1],
           [1, 0],
           [0, 1]], dtype=int32)
    """
    if any_symbolic_tensors((x,)):
        return Argsort(axis=axis).symbolic_call(x)
    return backend.numpy.argsort(x, axis=axis)


class Array(Operation):
    def call(self, x, dtype=None):
        return backend.numpy.array(x, dtype=dtype)

    def compute_output_spec(self, x, dtype=None):
        return KerasTensor(x.shape, dtype=dtype)


@keras_export(["keras.ops.array", "keras.ops.numpy.array"])
def array(x, dtype=None):
    """Create a tensor.

    Args:
        x: Input tensor.
        dtype: The desired data-type for the tensor.

    Returns:
        A tensor.

    Examples:
    >>> keras.ops.array([1, 2, 3])
    array([1, 2, 3], dtype=int32)

    >>> keras.ops.array([1, 2, 3], dtype="float32")
    array([1., 2., 3.], dtype=float32)
    """
    if any_symbolic_tensors((x,)):
        return Array().symbolic_call(x, dtype=dtype)
    return backend.numpy.array(x, dtype=dtype)


class Average(Operation):
    def __init__(self, axis=None):
        super().__init__()
        # np.average() does not support axis as tuple as declared by the
        # docstring, it only supports int or None.
        self.axis = axis

    def call(self, x, weights=None):
        return backend.numpy.average(x, weights=weights, axis=self.axis)

    def compute_output_spec(self, x, weights=None):
        dtypes_to_resolve = [getattr(x, "dtype", type(x)), float]
        if weights is not None:
            shape_match = shape_equal(x.shape, weights.shape, allow_none=True)
            if self.axis is not None:
                shape_match_on_axis = shape_equal(
                    [x.shape[self.axis]], weights.shape, allow_none=True
                )
            dtypes_to_resolve.append(getattr(weights, "dtype", type(weights)))
        dtype = dtypes.result_type(*dtypes_to_resolve)
        if self.axis is None:
            if weights is None or shape_match:
                return KerasTensor([], dtype=dtype)
            else:
                raise ValueError(
                    "`weights` must have the same shape as `x` when "
                    f"`axis=None`, but received `weights.shape={weights.shape}`"
                    f" and `x.shape={x.shape}`."
                )

        if weights is None or shape_match_on_axis or shape_match:
            return KerasTensor(
                reduce_shape(x.shape, axis=[self.axis]), dtype=dtype
            )
        else:
            # `weights` can either be a 1D array of length `x.shape[axis]` or
            # of the same shape as `x`.
            raise ValueError(
                "`weights` must have the same size as `x` at "
                f"`axis={self.axis}` but received "
                f"`weights.shape={weights.shape}` while x.shape at "
                f"`{self.axis}` is `{x.shape[self.axis]}`."
            )


@keras_export(["keras.ops.average", "keras.ops.numpy.average"])
def average(x, axis=None, weights=None):
    """Compute the weighted average along the specified axis.

    Args:
        x: Input tensor.
        axis: Integer along which to average `x`. The default, `axis=None`,
            will average over all of the elements of the input tensor. If
            axis is negative it counts from the last to the first axis.
        weights: Tensor of weights associated with the values in `x`. Each
            value in `x` contributes to the average according to its
            associated weight. The weights array can either be 1-D (in which
            case its length must be the size of `x` along the given axis) or
            of the same shape as `x`. If `weights=None` (default), then all
            data in `x` are assumed to have a weight equal to one.

            The 1-D calculation is: `avg = sum(x * weights) / sum(weights)`.
The only constraint on weights is that `sum(weights)` must not be 0. Returns: Return the average along the specified axis. Examples: >>> data = keras.ops.arange(1, 5) >>> data array([1, 2, 3, 4], dtype=int32) >>> keras.ops.average(data) array(2.5, dtype=float32) >>> keras.ops.average( ... keras.ops.arange(1, 11), ... weights=keras.ops.arange(10, 0, -1) ... ) array(4., dtype=float32) >>> data = keras.ops.arange(6).reshape((3, 2)) >>> data array([[0, 1], [2, 3], [4, 5]], dtype=int32) >>> keras.ops.average( ... data, ... axis=1, ... weights=keras.ops.array([1./4, 3./4]) ... ) array([0.75, 2.75, 4.75], dtype=float32) >>> keras.ops.average( ... data, ... weights=keras.ops.array([1./4, 3./4]) ... ) Traceback (most recent call last): ... ValueError: Axis must be specified when shapes of a and weights differ. """ if any_symbolic_tensors((x,)): return Average(axis=axis).symbolic_call(x, weights=weights) return backend.numpy.average(x, weights=weights, axis=axis) class Bincount(Operation): def __init__(self, weights=None, minlength=0): super().__init__() self.weights = weights self.minlength = minlength def call(self, x): return backend.numpy.bincount( x, weights=self.weights, minlength=self.minlength ) def compute_output_spec(self, x): dtypes_to_resolve = [x.dtype] if self.weights is not None: weights = backend.convert_to_tensor(self.weights) dtypes_to_resolve.append(weights.dtype) dtype = dtypes.result_type(*dtypes_to_resolve) else: dtype = "int32" return KerasTensor(list(x.shape[:-1]) + [None], dtype=dtype) @keras_export(["keras.ops.bincount", "keras.ops.numpy.bincount"]) def bincount(x, weights=None, minlength=0): """Count the number of occurrences of each value in a tensor of integers. Args: x: Input tensor. It must be of dimension 1, and it must only contain non-negative integer(s). weights: Weight tensor. It must have the same length as `x`. The default value is `None`. If specified, `x` is weighted by it, i.e. if `n = x[i]`, `out[n] += weight[i]` instead of the default behavior `out[n] += 1`. minlength: An integer. The default value is 0. If specified, there will be at least this number of bins in the output tensor. If greater than `max(x) + 1`, each value of the output at an index higher than `max(x)` is set to 0. Returns: 1D tensor where each element gives the number of occurrence(s) of its index value in x. Its length is the maximum between `max(x) + 1` and minlength. Examples: >>> x = keras.ops.array([1, 2, 2, 3], dtype="uint8") >>> keras.ops.bincount(x) array([0, 1, 2, 1], dtype=int32) >>> weights = x / 2 >>> weights array([0.5, 1., 1., 1.5], dtype=float64) >>> keras.ops.bincount(x, weights=weights) array([0., 0.5, 2., 1.5], dtype=float64) >>> minlength = (keras.ops.max(x).numpy() + 1) + 2 # 6 >>> keras.ops.bincount(x, minlength=minlength) array([0, 1, 2, 1, 0, 0], dtype=int32) """ if any_symbolic_tensors((x,)): return Bincount(weights=weights, minlength=minlength).symbolic_call(x) return backend.numpy.bincount(x, weights=weights, minlength=minlength) class BroadcastTo(Operation): def __init__(self, shape): super().__init__() self.shape = shape def call(self, x): return backend.numpy.broadcast_to(x, self.shape) def compute_output_spec(self, x): # Catch broadcasting errors for clear error messages. broadcast_shapes(x.shape, self.shape) return KerasTensor(self.shape, dtype=x.dtype) @keras_export( [ "keras.ops.broadcast_to", "keras.ops.numpy.broadcast_to", ] ) def broadcast_to(x, shape): """Broadcast a tensor to a new shape. Args: x: The tensor to broadcast. 
shape: The shape of the desired tensor. A single integer `i` is interpreted as `(i,)`. Returns: A tensor with the desired shape. Examples: >>> x = keras.ops.array([1, 2, 3]) >>> keras.ops.broadcast_to(x, (3, 3)) array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) """ if any_symbolic_tensors((x,)): return BroadcastTo(shape=shape).symbolic_call(x) return backend.numpy.broadcast_to(x, shape) class Ceil(Operation): def call(self, x): return backend.numpy.ceil(x) def compute_output_spec(self, x): if backend.standardize_dtype(x.dtype) == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(x.dtype, float) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.ceil", "keras.ops.numpy.ceil"]) def ceil(x): """Return the ceiling of the input, element-wise. The ceil of the scalar `x` is the smallest integer `i`, such that `i >= x`. Args: x: Input tensor. Returns: The ceiling of each element in `x`, with float dtype. """ if any_symbolic_tensors((x,)): return Ceil().symbolic_call(x) return backend.numpy.ceil(x) class Clip(Operation): def __init__(self, x_min, x_max): super().__init__() self.x_min = x_min self.x_max = x_max def call(self, x): return backend.numpy.clip(x, self.x_min, self.x_max) def compute_output_spec(self, x): dtype = backend.standardize_dtype(x.dtype) if dtype == "bool": dtype = "int32" return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.clip", "keras.ops.numpy.clip"]) def clip(x, x_min, x_max): """Clip (limit) the values in a tensor. Given an interval, values outside the interval are clipped to the interval edges. For example, if an interval of `[0, 1]` is specified, values smaller than 0 become 0, and values larger than 1 become 1. Args: x: Input tensor. x_min: Minimum value. x_max: Maximum value. Returns: The clipped tensor. """ if any_symbolic_tensors((x,)): return Clip(x_min, x_max).symbolic_call(x) return backend.numpy.clip(x, x_min, x_max) class Concatenate(Operation): def __init__(self, axis=0): super().__init__() if axis is None: raise ValueError("`axis` cannot be None for `concatenate`.") self.axis = axis def call(self, xs): return backend.numpy.concatenate(xs, axis=self.axis) def compute_output_spec(self, xs): first_shape = xs[0].shape total_size_on_axis = 0 all_sparse = True dtypes_to_resolve = [] for x in xs: if not shape_equal( x.shape, first_shape, axis=[self.axis], allow_none=True ): raise ValueError( "Every value in `xs` must have the same shape except on " f"the `axis` dim. But found element of shape {x.shape}, " f"which is different from the first element's " f"shape {first_shape}." ) if total_size_on_axis is None or x.shape[self.axis] is None: total_size_on_axis = None else: total_size_on_axis += x.shape[self.axis] all_sparse = all_sparse and getattr(x, "sparse", False) dtypes_to_resolve.append(getattr(x, "dtype", type(x))) output_shape = list(first_shape) output_shape[self.axis] = total_size_on_axis dtype = dtypes.result_type(*dtypes_to_resolve) return KerasTensor(output_shape, dtype=dtype, sparse=all_sparse) @keras_export( [ "keras.ops.concatenate", "keras.ops.numpy.concatenate", ] ) def concatenate(xs, axis=0): """Join a sequence of tensors along an existing axis. Args: xs: The sequence of tensors to concatenate. axis: The axis along which the tensors will be joined. Defaults to `0`. Returns: The concatenated tensor. 
""" if any_symbolic_tensors(xs): return Concatenate(axis=axis).symbolic_call(xs) return backend.numpy.concatenate(xs, axis=axis) class Conjugate(Operation): def call(self, x): return backend.numpy.conjugate(x) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.conjugate", "keras.ops.numpy.conjugate"]) def conjugate(x): """Returns the complex conjugate, element-wise. The complex conjugate of a complex number is obtained by changing the sign of its imaginary part. `keras.ops.conj` is a shorthand for this function. Args: x: Input tensor. Returns: The complex conjugate of each element in `x`. """ if any_symbolic_tensors((x,)): return Conjugate().symbolic_call(x) return backend.numpy.conjugate(x) class Conj(Conjugate): pass @keras_export(["keras.ops.conj", "keras.ops.numpy.conj"]) def conj(x): """Shorthand for `keras.ops.conjugate`.""" return conjugate(x) class Copy(Operation): def call(self, x): return backend.numpy.copy(x) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.copy", "keras.ops.numpy.copy"]) def copy(x): """Returns a copy of `x`. Args: x: Input tensor. Returns: A copy of `x`. """ if any_symbolic_tensors((x,)): return Copy().symbolic_call(x) return backend.numpy.copy(x) class Cos(Operation): def call(self, x): return backend.numpy.cos(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.cos", "keras.ops.numpy.cos"]) def cos(x): """Cosine, element-wise. Args: x: Input tensor. Returns: The corresponding cosine values. """ if any_symbolic_tensors((x,)): return Cos().symbolic_call(x) return backend.numpy.cos(x) class Cosh(Operation): def call(self, x): return backend.numpy.cosh(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.cosh", "keras.ops.numpy.cosh"]) def cosh(x): """Hyperbolic cosine, element-wise. Arguments: x: Input tensor. Returns: Output tensor of same shape as `x`. """ if any_symbolic_tensors((x,)): return Cosh().symbolic_call(x) return backend.numpy.cosh(x) class CountNonzero(Operation): def __init__(self, axis=None): super().__init__() if isinstance(axis, int): self.axis = (axis,) else: self.axis = axis def call(self, x): return backend.numpy.count_nonzero(x, axis=self.axis) def compute_output_spec(self, x): return KerasTensor( reduce_shape(x.shape, axis=self.axis), dtype="int32", ) @keras_export( [ "keras.ops.count_nonzero", "keras.ops.numpy.count_nonzero", ] ) def count_nonzero(x, axis=None): """Counts the number of non-zero values in `x` along the given `axis`. If no axis is specified then all non-zeros in the tensor are counted. Args: x: Input tensor. axis: Axis or tuple of axes along which to count the number of non-zeros. Defaults to `None`. Returns: int or tensor of ints. 
    Examples:
    >>> x = keras.ops.array([[0, 1, 7, 0], [3, 0, 2, 19]])
    >>> keras.ops.count_nonzero(x)
    5
    >>> keras.ops.count_nonzero(x, axis=0)
    array([1, 1, 2, 1], dtype=int64)
    >>> keras.ops.count_nonzero(x, axis=1)
    array([2, 3], dtype=int64)
    """
    if any_symbolic_tensors((x,)):
        return CountNonzero(axis=axis).symbolic_call(x)
    return backend.numpy.count_nonzero(x, axis=axis)


class Cross(Operation):
    def __init__(self, axisa=-1, axisb=-1, axisc=-1, axis=None):
        super().__init__()
        if axis is not None:
            self.axisa = axis
            self.axisb = axis
            self.axisc = axis
        else:
            self.axisa = axisa
            self.axisb = axisb
            self.axisc = axisc

    def call(self, x1, x2):
        return backend.numpy.cross(x1, x2, self.axisa, self.axisb, self.axisc)

    def compute_output_spec(self, x1, x2):
        x1_shape = list(x1.shape)
        x2_shape = list(x2.shape)

        x1_value_size = x1_shape[self.axisa]
        x2_value_size = x2_shape[self.axisb]
        del x1_shape[self.axisa]
        del x2_shape[self.axisb]
        output_shape = broadcast_shapes(x1_shape, x2_shape)

        if x1_value_size is not None and x1_value_size not in (2, 3):
            raise ValueError(
                f"`x1`'s dim on `axis={self.axisa}` must be either 2 or 3, "
                f"but received: {x1_value_size}"
            )
        if x2_value_size is not None and x2_value_size not in (2, 3):
            raise ValueError(
                f"`x2`'s dim on `axis={self.axisb}` must be either 2 or 3, "
                f"but received: {x2_value_size}"
            )

        if x1_value_size == 3 or x2_value_size == 3:
            value_size = [3]
        else:
            value_size = []

        output_shape = (
            output_shape[: self.axisc] + value_size + output_shape[self.axisc :]
        )

        dtype = dtypes.result_type(x1.dtype, x2.dtype)
        return KerasTensor(output_shape, dtype=dtype)


@keras_export(["keras.ops.cross", "keras.ops.numpy.cross"])
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """Returns the cross product of two (arrays of) vectors.

    The cross product of `x1` and `x2` in R^3 is a vector perpendicular
    to both `x1` and `x2`. If `x1` and `x2` are arrays of vectors, the
    vectors are defined by the last axis of `x1` and `x2` by default, and
    these axes can have dimensions 2 or 3.

    Where the dimension of either `x1` or `x2` is 2, the third component of
    the input vector is assumed to be zero and the cross product calculated
    accordingly.

    In cases where both input vectors have dimension 2, the z-component of
    the cross product is returned.

    Args:
        x1: Components of the first vector(s).
        x2: Components of the second vector(s).
        axisa: Axis of `x1` that defines the vector(s). Defaults to `-1`.
        axisb: Axis of `x2` that defines the vector(s). Defaults to `-1`.
        axisc: Axis of the result containing the cross product vector(s).
            Ignored if both input vectors have dimension 2, as the return
            is scalar. By default, the last axis.
        axis: If defined, the axis of `x1`, `x2` and the result that
            defines the vector(s) and cross product(s). Overrides `axisa`,
            `axisb` and `axisc`.

    Note:
        Torch backend does not support two dimensional vectors, or the
        arguments `axisa`, `axisb` and `axisc`. Use `axis` instead.

    Returns:
        Vector cross product(s).
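
    Example:

    An illustrative case with two 3-D basis vectors (the printed dtype
    may vary by backend):

    >>> x1 = keras.ops.array([1, 0, 0])
    >>> x2 = keras.ops.array([0, 1, 0])
    >>> keras.ops.cross(x1, x2)
    array([0, 0, 1], dtype=int32)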
""" if any_symbolic_tensors((x1, x2)): return Cross( axisa=axisa, axisb=axisb, axisc=axisc, axis=axis ).symbolic_call(x1, x2) return backend.numpy.cross( x1, x2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis, ) class Cumprod(Operation): def __init__(self, axis=None, dtype=None): super().__init__() self.axis = axis self.dtype = dtype def call(self, x): return backend.numpy.cumprod(x, axis=self.axis, dtype=self.dtype) def compute_output_spec(self, x): if self.axis is None: if None in x.shape: output_shape = (None,) else: output_shape = (int(np.prod(x.shape)),) else: output_shape = x.shape output_dtype = backend.standardize_dtype(self.dtype or x.dtype) if output_dtype == "bool": output_dtype = "int32" return KerasTensor(output_shape, output_dtype) @keras_export(["keras.ops.cumprod", "keras.ops.numpy.cumprod"]) def cumprod(x, axis=None, dtype=None): """Return the cumulative product of elements along a given axis. Args: x: Input tensor. axis: Axis along which the cumulative product is computed. By default the input is flattened. dtype: dtype of returned tensor. Defaults to x.dtype. Returns: Output tensor. """ return Cumprod(axis=axis, dtype=dtype)(x) class Cumsum(Operation): def __init__(self, axis=None, dtype=None): super().__init__() self.axis = axis self.dtype = dtype def call(self, x): return backend.numpy.cumsum(x, axis=self.axis, dtype=self.dtype) def compute_output_spec(self, x): if self.axis is None: if None in x.shape: output_shape = (None,) else: output_shape = (int(np.prod(x.shape)),) else: output_shape = x.shape output_dtype = backend.standardize_dtype(self.dtype or x.dtype) if output_dtype == "bool": output_dtype = "int32" return KerasTensor(output_shape, output_dtype) @keras_export(["keras.ops.cumsum", "keras.ops.numpy.cumsum"]) def cumsum(x, axis=None, dtype=None): """Returns the cumulative sum of elements along a given axis. Args: x: Input tensor. axis: Axis along which the cumulative sum is computed. By default the input is flattened. dtype: dtype of returned tensor. Defaults to x.dtype. Returns: Output tensor. """ return Cumsum(axis=axis, dtype=dtype)(x) class Diag(Operation): def __init__(self, k=0): super().__init__() self.k = k def call(self, x): return backend.numpy.diag(x, k=self.k) def compute_output_spec(self, x): x_shape = x.shape if len(x_shape) == 1: if x_shape[0] is None: output_shape = [None, None] else: output_shape = [ x_shape[0] + int(np.abs(self.k)), x_shape[0] + int(np.abs(self.k)), ] elif len(x_shape) == 2: if None in x_shape: output_shape = [None] else: shorter_side = np.minimum(x_shape[0], x_shape[1]) if self.k > 0: remaining = x_shape[1] - self.k else: remaining = x_shape[0] + self.k output_shape = [ int(np.maximum(0, np.minimum(remaining, shorter_side))) ] else: raise ValueError( f"`x` must be 1-D or 2-D, but received shape {x.shape}." ) return KerasTensor(output_shape, dtype=x.dtype) @keras_export(["keras.ops.diag", "keras.ops.numpy.diag"]) def diag(x, k=0): """Extract a diagonal or construct a diagonal array. Args: x: Input tensor. If `x` is 2-D, returns the k-th diagonal of `x`. If `x` is 1-D, return a 2-D tensor with `x` on the k-th diagonal. k: The diagonal to consider. Defaults to `0`. Use `k > 0` for diagonals above the main diagonal, and `k < 0` for diagonals below the main diagonal. Returns: The extracted diagonal or constructed diagonal tensor. 
    Examples:
    >>> from keras import ops
    >>> x = ops.arange(9).reshape((3, 3))
    >>> x
    array([[0, 1, 2],
           [3, 4, 5],
           [6, 7, 8]])
    >>> ops.diag(x)
    array([0, 4, 8])
    >>> ops.diag(x, k=1)
    array([1, 5])
    >>> ops.diag(x, k=-1)
    array([3, 7])

    >>> ops.diag(ops.diag(x))
    array([[0, 0, 0],
           [0, 4, 0],
           [0, 0, 8]])
    """
    if any_symbolic_tensors((x,)):
        return Diag(k=k).symbolic_call(x)
    return backend.numpy.diag(x, k=k)


class Diagonal(Operation):
    def __init__(self, offset=0, axis1=0, axis2=1):
        super().__init__()
        self.offset = offset
        self.axis1 = axis1
        self.axis2 = axis2

    def call(self, x):
        return backend.numpy.diagonal(
            x,
            offset=self.offset,
            axis1=self.axis1,
            axis2=self.axis2,
        )

    def compute_output_spec(self, x):
        x_shape = list(x.shape)
        if len(x_shape) < 2:
            raise ValueError(
                "`diagonal` requires an array of at least two dimensions, "
                f"but `x` is of shape {x.shape}."
            )

        shape_2d = [x_shape[self.axis1], x_shape[self.axis2]]
        x_shape[self.axis1] = -1
        x_shape[self.axis2] = -1
        output_shape = list(filter((-1).__ne__, x_shape))
        if None in shape_2d:
            diag_shape = [None]
        else:
            shorter_side = np.minimum(shape_2d[0], shape_2d[1])
            if self.offset > 0:
                remaining = shape_2d[1] - self.offset
            else:
                remaining = shape_2d[0] + self.offset
            diag_shape = [
                int(np.maximum(0, np.minimum(remaining, shorter_side)))
            ]
        output_shape = output_shape + diag_shape
        return KerasTensor(output_shape, dtype=x.dtype)


@keras_export(["keras.ops.diagonal", "keras.ops.numpy.diagonal"])
def diagonal(x, offset=0, axis1=0, axis2=1):
    """Return specified diagonals.

    If `x` is 2-D, returns the diagonal of `x` with the given offset, i.e.,
    the collection of elements of the form `x[i, i+offset]`.

    If `x` has more than two dimensions, the axes specified by `axis1`
    and `axis2` are used to determine the 2-D sub-array whose diagonal
    is returned.

    The shape of the resulting array can be determined by removing `axis1`
    and `axis2` and appending an index to the right equal to the size of
    the resulting diagonals.

    Args:
        x: Input tensor.
        offset: Offset of the diagonal from the main diagonal. Can be
            positive or negative. Defaults to `0` (main diagonal).
        axis1: Axis to be used as the first axis of the 2-D sub-arrays.
            Defaults to `0` (first axis).
        axis2: Axis to be used as the second axis of the 2-D sub-arrays.
            Defaults to `1` (second axis).

    Returns:
        Tensor of diagonals.

    Examples:
    >>> from keras import ops
    >>> x = ops.arange(4).reshape((2, 2))
    >>> x
    array([[0, 1],
           [2, 3]])
    >>> x.diagonal()
    array([0, 3])
    >>> x.diagonal(1)
    array([1])

    >>> x = ops.arange(8).reshape((2, 2, 2))
    >>> x
    array([[[0, 1],
            [2, 3]],
           [[4, 5],
            [6, 7]]])
    >>> x.diagonal(0, 0, 1)
    array([[0, 6],
           [1, 7]])
    """
    if any_symbolic_tensors((x,)):
        return Diagonal(
            offset=offset,
            axis1=axis1,
            axis2=axis2,
        ).symbolic_call(x)
    return backend.numpy.diagonal(
        x,
        offset=offset,
        axis1=axis1,
        axis2=axis2,
    )


class Diff(Operation):
    def __init__(self, n=1, axis=-1):
        super().__init__()
        self.n = n
        self.axis = axis

    def call(self, a):
        return backend.numpy.diff(a, n=self.n, axis=self.axis)

    def compute_output_spec(self, a):
        shape = list(a.shape)
        size = shape[self.axis]
        if size is not None:
            shape[self.axis] = builtins.max(size - self.n, 0)
        return KerasTensor(shape, dtype=a.dtype)


@keras_export(["keras.ops.diff", "keras.ops.numpy.diff"])
def diff(a, n=1, axis=-1):
    """Calculate the n-th discrete difference along the given axis.

    The first difference is given by `out[i] = a[i+1] - a[i]` along the
    given axis, higher differences are calculated by using `diff`
    recursively.

    Args:
        a: Input tensor.
        n: The number of times values are differenced. Defaults to `1`.
        axis: Axis to compute discrete difference(s) along.
            Defaults to `-1` (last axis).

    Returns:
        Tensor of n-th differences.

    Examples:
    >>> from keras import ops
    >>> x = ops.convert_to_tensor([1, 2, 4, 7, 0])
    >>> ops.diff(x)
    array([ 1,  2,  3, -7])
    >>> ops.diff(x, n=2)
    array([  1,   1, -10])

    >>> x = ops.convert_to_tensor([[1, 3, 6, 10], [0, 5, 6, 8]])
    >>> ops.diff(x)
    array([[2, 3, 4],
           [5, 1, 2]])
    >>> ops.diff(x, axis=0)
    array([[-1, 2, 0, -2]])
    """
    return Diff(n=n, axis=axis)(a)


class Digitize(Operation):
    def call(self, x, bins):
        return backend.numpy.digitize(x, bins)

    def compute_output_spec(self, x, bins):
        bins_shape = bins.shape
        if len(bins_shape) > 1:
            raise ValueError(
                f"`bins` must be a 1D array. Received: bins={bins} "
                f"with shape bins.shape={bins_shape}"
            )
        return KerasTensor(x.shape, dtype="int32")


@keras_export(["keras.ops.digitize", "keras.ops.numpy.digitize"])
def digitize(x, bins):
    """Returns the indices of the bins to which each value in `x` belongs.

    Args:
        x: Input array to be binned.
        bins: Array of bins. It has to be one-dimensional and monotonically
            increasing.

    Returns:
        Output array of indices, of same shape as `x`.

    Example:
    >>> x = keras.ops.array([0.0, 1.0, 3.0, 1.6])
    >>> bins = keras.ops.array([0.0, 3.0, 4.5, 7.0])
    >>> keras.ops.digitize(x, bins)
    array([1, 1, 2, 1])
    """
    if any_symbolic_tensors((x, bins)):
        return Digitize().symbolic_call(x, bins)
    return backend.numpy.digitize(x, bins)


class Dot(Operation):
    def call(self, x1, x2):
        return backend.numpy.dot(x1, x2)

    def compute_output_spec(self, x1, x2):
        x1_shape = list(getattr(x1, "shape", []))
        x2_shape = list(getattr(x2, "shape", []))
        dtype = dtypes.result_type(
            getattr(x1, "dtype", type(x1)),
            getattr(x2, "dtype", type(x2)),
        )
        if x1_shape == [] or x2_shape == []:
            return multiply(x1, x2)
        if len(x1_shape) == 1 and len(x2_shape) == 1:
            return KerasTensor([], dtype=dtype)
        if len(x2_shape) == 1:
            if x1_shape[-1] != x2_shape[0]:
                raise ValueError(
                    "Shape must match on the last axis of `x1` and `x2` "
                    "when `x1` is N-d array while `x2` is 1-D, but received "
                    f"shape `x1.shape={x1.shape}` and `x2.shape={x2.shape}`."
                )
            return KerasTensor(x1_shape[:-1], dtype=dtype)

        if (
            x1_shape[-1] is None
            or x2_shape[-2] is None
            or x1_shape[-1] == x2_shape[-2]
        ):
            del x1_shape[-1]
            del x2_shape[-2]
            return KerasTensor(x1_shape + x2_shape, dtype=dtype)

        raise ValueError(
            "Shape must match on the last axis of `x1` and second last "
            "axis of `x2` when `x1` is N-d array while `x2` is M-D, but "
            f"received `x1.shape={x1.shape}` and `x2.shape={x2.shape}`."
        )


@keras_export(["keras.ops.dot", "keras.ops.numpy.dot"])
def dot(x1, x2):
    """Dot product of two tensors.

    - If both `x1` and `x2` are 1-D tensors, it is inner product of vectors
      (without complex conjugation).
    - If both `x1` and `x2` are 2-D tensors, it is matrix multiplication.
    - If either `x1` or `x2` is 0-D (scalar), it is equivalent to `x1 * x2`.
    - If `x1` is an N-D tensor and `x2` is a 1-D tensor, it is a sum product
      over the last axis of `x1` and `x2`.
    - If `x1` is an N-D tensor and `x2` is an M-D tensor (where `M>=2`),
      it is a sum product over the last axis of `x1` and the second-to-last
      axis of `x2`: `dot(x1, x2)[i,j,k,m] = sum(x1[i,j,:] * x2[k,:,m])`.

    Args:
        x1: First argument.
        x2: Second argument.

    Note:
        Torch backend does not accept 0-D tensors as arguments.

    Returns:
        Dot product of `x1` and `x2`.
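
    Example:

    A sketch of the 1-D and 2-D cases (illustrative values; the printed
    form of the outputs may vary by backend):

    >>> x1 = keras.ops.array([1, 2, 3])
    >>> x2 = keras.ops.array([4, 5, 6])
    >>> keras.ops.dot(x1, x2)  # 1*4 + 2*5 + 3*6
    32
    >>> a = keras.ops.array([[1, 0], [0, 1]])
    >>> keras.ops.dot(a, keras.ops.array([1, 2]))
    array([1, 2], dtype=int32)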
""" if any_symbolic_tensors((x1, x2)): return Dot().symbolic_call(x1, x2) return backend.numpy.dot(x1, x2) class Einsum(Operation): def __init__(self, subscripts): super().__init__() self.subscripts = subscripts def call(self, *operands): return backend.numpy.einsum(self.subscripts, *operands) def compute_output_spec(self, *operands): """Compute the output shape of `einsum`. The shape computation follows the steps below: 1. Find all letters in the input specs (left part of "->"), and break them into two categories: letters appearing more than once go to `reduced_dims`, otherwise go to `kept_dims`. 2. Adjust `reduced_dims` and `kept_dims` based on the output spec (right part of "->"). The rule is if the letter appears in the output spec, then move it to `kept_dims`, otherwise move it to `reduced_dims`. 3. Compute the target output shape. If no output spec is set, then the target output shape will be "...{kept_dims}", e.g., "...ijk", else it will be the same as output spec. "..." is a wildcard that could map shape of arbitrary length. 4. For each operand in `operands`, map the shape specified in the input spec to the output target, e.g, if operand is of shape [2,3,4], input spec is "i..." and output target is "i...jk", then 2 will go the index 0. For dims not represented by any letter, insert to the wildcard part. For each letter in output target not appearing in input spec, the dim will be 1 for broadcasting. After 4, each operand should have a target shape containing only number and `None`. 5. Broadcast all shapes computed from 4, and the result is the output shape. Let's take an example to illustrate the steps above. Let's define: ```python x = KerasTensor([None, 3, 4]) y = KerasTensor(2, 4, 3) z = knp.einsum("...ij, kji->...k", x, y) ``` 1. `reduced_dims` is {"i", "j"}, `kept_dims` is {"k"}. 2. `reduced_dims` is still {"i", "j"}, and `kept_dims` is {"k"}. 3. Output target is "...k". 4. For `x`, the input spec is "...ij", and the output target is "...k". "i" and "j" do not appear in the output target, so no replacement happens, and [None] goes to wildcard. Afterwards, "k" is replaced by 1, so we get shape [None, 1]. Applying the same logic to `y`, we get shape [2]. 5. Broadcast [None, 1] and [2], and we get [None, 2], which is the output shape. """ split_subscripts = self.subscripts.split("->") if len(split_subscripts) > 2: raise ValueError( "At most one '->' is supported in `einsum` subscripts, but " f"received {self.subscripts}." ) if len(split_subscripts) == 2: subscripts = split_subscripts[0] output_spec = split_subscripts[1] else: subscripts = self.subscripts output_spec = None input_specs = subscripts.split(",") if len(input_specs) != len(operands): raise ValueError( f"Number of operands ({len(operands)}) does not match the " f"number of input specs ({len(input_specs)}) in `einsum`, " f"received subscripts={self.subscripts}." ) reduced_dims = set() kept_dims = set() for s in subscripts: if not s.isalpha(): continue if s not in reduced_dims and s not in kept_dims: kept_dims.add(s) elif s in kept_dims: kept_dims.remove(s) reduced_dims.add(s) if output_spec is not None: # The output spec changes the rule of kept_dims and reduced_dims. # In short, dims appearing in the output spec will be kept, and # dims not appearing in the output spec will be reduced. 
kept_dims_copy = kept_dims.copy() reduced_dims_copy = reduced_dims.copy() for dim in kept_dims: if dim not in output_spec: kept_dims_copy.remove(dim) reduced_dims_copy.add(dim) for dim in reduced_dims: if dim in output_spec: reduced_dims_copy.remove(dim) kept_dims_copy.add(dim) kept_dims = kept_dims_copy reduced_dims = reduced_dims_copy reduced_dims = sorted(reduced_dims) kept_dims = sorted(kept_dims) if output_spec is None: target_broadcast_spec = "..." + "".join(kept_dims) else: target_broadcast_spec = output_spec expanded_operands_shapes = [] for x, spec in zip(operands, input_specs): x_shape = getattr(x, "shape", []) x_shape = [-1 if size is None else size for size in x_shape] split_spec = spec.split("...") expanded_shape = target_broadcast_spec if len(split_spec) == 1: # In this case, the input spec is just a string of letters, # e.g., "ijk". if len(x_shape) != len(split_spec[0]): raise ValueError( "Number of dimensions in the subscript does not " "match the number of dimensions in the operand, " f"received subscript `{spec}` and operand of shape " f"{x_shape}." ) for size, s in zip(x_shape, split_spec[0]): # Replace the letter with the right shape. expanded_shape = expanded_shape.replace(s, str(size) + " ") expanded_shape = expanded_shape.replace("...", "") else: # In this case, the input spec has "...", e.g., "i...j", "i...", # or "...j". for i in range(len(split_spec[0])): expanded_shape = expanded_shape.replace( split_spec[0][i], str(x_shape[i]) + " " ) for i in range(len(split_spec[1])): expanded_shape = expanded_shape.replace( split_spec[1][-i - 1], str(x_shape[-i - 1]) + " " ) # Shape matched by "..." will be inserted to the position of # "...". wildcard_shape_start_index = len(split_spec[0]) wildcard_shape_end_index = ( len(x_shape) if len(split_spec[1]) == 0 else -len(split_spec[1]) ) wildcard_shape = x_shape[ wildcard_shape_start_index:wildcard_shape_end_index ] wildcard_shape_str = ( " ".join([str(size) for size in wildcard_shape]) + " " ) expanded_shape = expanded_shape.replace( "...", wildcard_shape_str ) # Replace all letters not yet handled with "1" for broadcasting. expanded_shape = re.sub("[a-z]", "1 ", expanded_shape) expanded_shape = expanded_shape.split() expanded_shape = [ None if size == "-1" else int(size) for size in expanded_shape ] expanded_operands_shapes.append(expanded_shape) output_shape = expanded_operands_shapes[0] for shape in expanded_operands_shapes[1:]: output_shape = broadcast_shapes(output_shape, shape) dtypes_to_resolve = [] for x in operands: dtypes_to_resolve.append(getattr(x, "dtype", type(x))) dtype = dtypes.result_type(*dtypes_to_resolve) return KerasTensor(output_shape, dtype=dtype) @keras_export(["keras.ops.einsum", "keras.ops.numpy.einsum"]) def einsum(subscripts, *operands): """Evaluates the Einstein summation convention on the operands. Args: subscripts: Specifies the subscripts for summation as comma separated list of subscript labels. An implicit (classical Einstein summation) calculation is performed unless the explicit indicator `->` is included as well as subscript labels of the precise output form. operands: The operands to compute the Einstein sum of. Returns: The calculation based on the Einstein summation convention. 
    Example:
    >>> from keras import ops
    >>> a = ops.arange(25).reshape(5, 5)
    >>> b = ops.arange(5)
    >>> c = ops.arange(6).reshape(2, 3)

    Trace of a matrix:

    >>> ops.einsum("ii", a)
    60
    >>> ops.einsum(a, [0, 0])
    60
    >>> ops.trace(a)
    60

    Extract the diagonal:

    >>> ops.einsum("ii -> i", a)
    array([ 0,  6, 12, 18, 24])
    >>> ops.einsum(a, [0, 0], [0])
    array([ 0,  6, 12, 18, 24])
    >>> ops.diag(a)
    array([ 0,  6, 12, 18, 24])

    Sum over an axis:

    >>> ops.einsum("ij -> i", a)
    array([ 10,  35,  60,  85, 110])
    >>> ops.einsum(a, [0, 1], [0])
    array([ 10,  35,  60,  85, 110])
    >>> ops.sum(a, axis=1)
    array([ 10,  35,  60,  85, 110])

    For higher dimensional tensors, summing a single axis can be done
    with ellipsis:

    >>> ops.einsum("...j -> ...", a)
    array([ 10,  35,  60,  85, 110])
    >>> ops.einsum(a, [..., 1], [...])
    array([ 10,  35,  60,  85, 110])

    Compute a matrix transpose or reorder any number of axes:

    >>> ops.einsum("ji", c)
    array([[0, 3],
           [1, 4],
           [2, 5]])
    >>> ops.einsum("ij -> ji", c)
    array([[0, 3],
           [1, 4],
           [2, 5]])
    >>> ops.einsum(c, [1, 0])
    array([[0, 3],
           [1, 4],
           [2, 5]])
    >>> ops.transpose(c)
    array([[0, 3],
           [1, 4],
           [2, 5]])

    Matrix vector multiplication:

    >>> ops.einsum("ij, j", a, b)
    array([ 30,  80, 130, 180, 230])
    >>> ops.einsum(a, [0, 1], b, [1])
    array([ 30,  80, 130, 180, 230])
    >>> ops.einsum("...j, j", a, b)
    array([ 30,  80, 130, 180, 230])
    """
    if any_symbolic_tensors(operands):
        return Einsum(subscripts).symbolic_call(*operands)
    return backend.numpy.einsum(subscripts, *operands)


class Empty(Operation):
    def call(self, shape, dtype=None):
        return backend.numpy.empty(shape, dtype=dtype)

    def compute_output_spec(self, shape, dtype=None):
        dtype = dtype or backend.floatx()
        return KerasTensor(shape, dtype=dtype)


@keras_export(["keras.ops.empty", "keras.ops.numpy.empty"])
def empty(shape, dtype=None):
    """Return a tensor of given shape and type filled with uninitialized
    data.

    Args:
        shape: Shape of the empty tensor.
        dtype: Desired data type of the empty tensor.

    Returns:
        The empty tensor.
    """
    return backend.numpy.empty(shape, dtype=dtype)


class Equal(Operation):
    def call(self, x1, x2):
        return backend.numpy.equal(x1, x2)

    def compute_output_spec(self, x1, x2):
        x1_shape = getattr(x1, "shape", [])
        x2_shape = getattr(x2, "shape", [])
        output_shape = broadcast_shapes(x1_shape, x2_shape)
        return KerasTensor(output_shape, dtype="bool")


@keras_export(["keras.ops.equal", "keras.ops.numpy.equal"])
def equal(x1, x2):
    """Returns `(x1 == x2)` element-wise.

    Args:
        x1: Tensor to compare.
        x2: Tensor to compare.

    Returns:
        Output tensor, element-wise comparison of `x1` and `x2`.
    """
    if any_symbolic_tensors((x1, x2)):
        return Equal().symbolic_call(x1, x2)
    return backend.numpy.equal(x1, x2)


class Exp(Operation):
    def call(self, x):
        return backend.numpy.exp(x)

    def compute_output_spec(self, x):
        dtype = backend.standardize_dtype(x.dtype)
        if "int" in dtype or dtype == "bool":
            dtype = backend.floatx()
        return KerasTensor(x.shape, dtype=dtype)


@keras_export(["keras.ops.exp", "keras.ops.numpy.exp"])
def exp(x):
    """Calculate the exponential of all elements in the input tensor.

    Args:
        x: Input tensor.

    Returns:
        Output tensor, element-wise exponential of `x`.
    """
    if any_symbolic_tensors((x,)):
        return Exp().symbolic_call(x)
    return backend.numpy.exp(x)


class ExpandDims(Operation):
    def __init__(self, axis):
        super().__init__()
        if isinstance(axis, list):
            raise ValueError(
                "The `axis` argument to `expand_dims` should be an integer, "
                f"but received a list: {axis}."
) self.axis = axis def call(self, x): return backend.numpy.expand_dims(x, self.axis) def compute_output_spec(self, x): output_shape = operation_utils.compute_expand_dims_output_shape( x.shape, self.axis ) sparse = getattr(x, "sparse", False) return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse) @keras_export( [ "keras.ops.expand_dims", "keras.ops.numpy.expand_dims", ] ) def expand_dims(x, axis): """Expand the shape of a tensor. Insert a new axis at the `axis` position in the expanded tensor shape. Args: x: Input tensor. axis: Position in the expanded axes where the new axis (or axes) is placed. Returns: Output tensor with the number of dimensions increased. """ if any_symbolic_tensors((x,)): return ExpandDims(axis=axis).symbolic_call(x) return backend.numpy.expand_dims(x, axis) class Expm1(Operation): def call(self, x): return backend.numpy.expm1(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(x.dtype) if "int" in dtype or dtype == "bool": dtype = backend.floatx() sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.expm1", "keras.ops.numpy.expm1"]) def expm1(x): """Calculate `exp(x) - 1` for all elements in the tensor. Args: x: Input values. Returns: Output tensor, element-wise exponential minus one. """ if any_symbolic_tensors((x,)): return Expm1().symbolic_call(x) return backend.numpy.expm1(x) class Flip(Operation): def __init__(self, axis=None): super().__init__() self.axis = axis def call(self, x): return backend.numpy.flip(x, axis=self.axis) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype=x.dtype) @keras_export(["keras.ops.flip", "keras.ops.numpy.flip"]) def flip(x, axis=None): """Reverse the order of elements in the tensor along the given axis. The shape of the tensor is preserved, but the elements are reordered. Args: x: Input tensor. axis: Axis or axes along which to flip the tensor. The default, `axis=None`, will flip over all of the axes of the input tensor. Returns: Output tensor with entries of `axis` reversed. """ if any_symbolic_tensors((x,)): return Flip(axis=axis).symbolic_call(x) return backend.numpy.flip(x, axis=axis) class Floor(Operation): def call(self, x): return backend.numpy.floor(x) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) dtype = ( backend.floatx() if backend.standardize_dtype(x.dtype) == "int64" else dtypes.result_type(x.dtype, float) ) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.floor", "keras.ops.numpy.floor"]) def floor(x): """Return the floor of the input, element-wise. The floor of the scalar `x` is the largest integer `i`, such that `i <= x`. Args: x: Input tensor. Returns: Output tensor, element-wise floor of `x`. """ if any_symbolic_tensors((x,)): return Floor().symbolic_call(x) return backend.numpy.floor(x) class Full(Operation): def call(self, shape, fill_value, dtype=None): return backend.numpy.full(shape, fill_value, dtype=dtype) def compute_output_spec(self, shape, fill_value, dtype=None): dtype = dtype or backend.floatx() return KerasTensor(shape, dtype=dtype) @keras_export(["keras.ops.full", "keras.ops.numpy.full"]) def full(shape, fill_value, dtype=None): """Return a new tensor of given shape and type, filled with `fill_value`. Args: shape: Shape of the new tensor. fill_value: Fill value. dtype: Desired data type of the tensor. Returns: Output tensor. 
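
    Example:

    A small sketch (the `dtype` here is chosen for illustration):

    >>> keras.ops.full((2, 3), 4, dtype="int32")
    array([[4, 4, 4],
           [4, 4, 4]], dtype=int32)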
""" return backend.numpy.full(shape, fill_value, dtype=dtype) class FullLike(Operation): def call(self, x, fill_value, dtype=None): return backend.numpy.full_like(x, fill_value, dtype=dtype) def compute_output_spec(self, x, fill_value, dtype=None): dtype = dtype or x.dtype return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.full_like", "keras.ops.numpy.full_like"]) def full_like(x, fill_value, dtype=None): """Return a full tensor with the same shape and type as the given tensor. Args: x: Input tensor. fill_value: Fill value. dtype: Overrides data type of the result. Returns: Tensor of `fill_value` with the same shape and type as `x`. """ if any_symbolic_tensors((x,)): return FullLike().symbolic_call(x, fill_value, dtype=dtype) return backend.numpy.full_like(x, fill_value, dtype=dtype) class GetItem(Operation): def call(self, x, key): if isinstance(key, list): key = tuple(key) return x[key] def compute_output_spec(self, x, key): remaining_shape = list(x.shape) new_shape = [] if isinstance(key, int): remaining_key = [key] elif isinstance(key, tuple): remaining_key = list(key) else: raise ValueError( f"Unsupported key type for array slice. Recieved: `{key}`" ) num_ellipses = remaining_key.count(Ellipsis) if num_ellipses > 1: raise ValueError( f"Slice should only have one ellipsis. Recieved: `{key}`" ) elif num_ellipses == 0: # Add an implicit final ellipsis. remaining_key.append(Ellipsis) # Consume slice key element by element. while True: if not remaining_key: break subkey = remaining_key.pop(0) # Check for `newaxis` and `Ellipsis`. if subkey == Ellipsis: # Keep as many slices remain in our key, omitting `newaxis`. needed = len(remaining_key) - remaining_key.count(np.newaxis) consumed = len(remaining_shape) - needed new_shape += remaining_shape[:consumed] remaining_shape = remaining_shape[consumed:] continue # All frameworks follow numpy for newaxis. `np.newaxis == None`. if subkey == np.newaxis: new_shape.append(1) continue # At this point, we need to consume a new axis from the shape. if not remaining_shape: raise ValueError( f"Array has shape {x.shape} but slice " f"has to many indices. Recieved: `{key}`" ) length = remaining_shape.pop(0) if isinstance(subkey, int): if length is not None: index = subkey if subkey >= 0 else subkey + length if index < 0 or index >= length: raise ValueError( f"Array has shape {x.shape} but out-of-bounds " f"index {key} was requested." ) elif isinstance(subkey, slice): if length is not None: # python3 friendly way to compute a slice length. new_length = len(range(*subkey.indices(length))) new_shape.append(new_length) else: new_shape.append(length) else: raise ValueError( f"Unsupported key type for array slice. Recieved: `{key}`" ) return KerasTensor(tuple(new_shape), dtype=x.dtype) @keras_export(["keras.ops.get_item", "keras.ops.numpy.get_item"]) def get_item(x, key): """Return `x[key]`.""" if any_symbolic_tensors((x,)): return GetItem().symbolic_call(x, key) return x[key] class Greater(Operation): def call(self, x1, x2): return backend.numpy.greater(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) return KerasTensor(output_shape, dtype="bool") @keras_export(["keras.ops.greater", "keras.ops.numpy.greater"]) def greater(x1, x2): """Return the truth value of `x1 > x2` element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise comparison of `x1` and `x2`. 
""" if any_symbolic_tensors((x1, x2)): return Greater().symbolic_call(x1, x2) return backend.numpy.greater(x1, x2) class GreaterEqual(Operation): def call(self, x1, x2): return backend.numpy.greater_equal(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) return KerasTensor(output_shape, dtype="bool") @keras_export( [ "keras.ops.greater_equal", "keras.ops.numpy.greater_equal", ] ) def greater_equal(x1, x2): """Return the truth value of `x1 >= x2` element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise comparison of `x1` and `x2`. """ if any_symbolic_tensors((x1, x2)): return GreaterEqual().symbolic_call(x1, x2) return backend.numpy.greater_equal(x1, x2) class Hstack(Operation): def call(self, xs): return backend.numpy.hstack(xs) def compute_output_spec(self, xs): first_shape = xs[0].shape total_size_on_axis = 0 dtypes_to_resolve = [] for x in xs: if not shape_equal(x.shape, first_shape, axis=[1], allow_none=True): raise ValueError( "Every value in `xs` must have the same shape except on " f"the `axis` dim. But found element of shape {x.shape}, " f"which is different from the first element's " f"shape {first_shape}." ) if total_size_on_axis is None or x.shape[1] is None: total_size_on_axis = None else: total_size_on_axis += x.shape[1] dtypes_to_resolve.append(getattr(x, "dtype", type(x))) output_shape = list(first_shape) output_shape[1] = total_size_on_axis dtype = dtypes.result_type(*dtypes_to_resolve) return KerasTensor(output_shape, dtype=dtype) @keras_export(["keras.ops.hstack", "keras.ops.numpy.hstack"]) def hstack(xs): """Stack tensors in sequence horizontally (column wise). This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors. Args: xs: Sequence of tensors. Returns: The tensor formed by stacking the given tensors. """ if any_symbolic_tensors((xs,)): return Hstack().symbolic_call(xs) return backend.numpy.hstack(xs) class Identity(Operation): def call(self, n, dtype=None): return backend.numpy.identity(n, dtype=dtype) def compute_output_spec(self, n, dtype=None): dtype = dtype or backend.floatx() return KerasTensor([n, n], dtype=dtype) @keras_export(["keras.ops.identity", "keras.ops.numpy.identity"]) def identity(n, dtype=None): """Return the identity tensor. The identity tensor is a square tensor with ones on the main diagonal and zeros elsewhere. Args: n: Number of rows (and columns) in the `n x n` output tensor. dtype: Data type of the output tensor. Returns: The identity tensor. """ return backend.numpy.identity(n, dtype=dtype) class Imag(Operation): def call(self, x): return backend.numpy.imag(x) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.imag", "keras.ops.numpy.imag"]) def imag(x): """Return the imaginary part of the complex argument. Args: x: Input tensor. Returns: The imaginary component of the complex argument. 
""" if any_symbolic_tensors((x,)): return Imag().symbolic_call(x) return backend.numpy.imag(x) class Isclose(Operation): def call(self, x1, x2): return backend.numpy.isclose(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) return KerasTensor(output_shape, dtype="bool") @keras_export(["keras.ops.isclose", "keras.ops.numpy.isclose"]) def isclose(x1, x2): """Return whether two tensors are element-wise almost equal. Args: x1: First input tensor. x2: Second input tensor. Returns: Output boolean tensor. """ if any_symbolic_tensors((x1, x2)): return Isclose().symbolic_call(x1, x2) return backend.numpy.isclose(x1, x2) class Isfinite(Operation): def call(self, x): return backend.numpy.isfinite(x) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype="bool") @keras_export(["keras.ops.isfinite", "keras.ops.numpy.isfinite"]) def isfinite(x): """Return whether a tensor is finite, element-wise. Real values are finite when they are not NaN, not positive infinity, and not negative infinity. Complex values are finite when both their real and imaginary parts are finite. Args: x: Input tensor. Returns: Output boolean tensor. """ if any_symbolic_tensors((x,)): return Isfinite().symbolic_call(x) return backend.numpy.isfinite(x) class Isinf(Operation): def call(self, x): return backend.numpy.isinf(x) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype="bool") @keras_export(["keras.ops.isinf", "keras.ops.numpy.isinf"]) def isinf(x): """Test element-wise for positive or negative infinity. Args: x: Input tensor. Returns: Output boolean tensor. """ if any_symbolic_tensors((x,)): return Isinf().symbolic_call(x) return backend.numpy.isinf(x) class Isnan(Operation): def call(self, x): return backend.numpy.isnan(x) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype="bool") @keras_export(["keras.ops.isnan", "keras.ops.numpy.isnan"]) def isnan(x): """Test element-wise for NaN and return result as a boolean tensor. Args: x: Input tensor. Returns: Output boolean tensor. """ if any_symbolic_tensors((x,)): return Isnan().symbolic_call(x) return backend.numpy.isnan(x) class Less(Operation): def call(self, x1, x2): return backend.numpy.less(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) return KerasTensor(output_shape, dtype="bool") @keras_export(["keras.ops.less", "keras.ops.numpy.less"]) def less(x1, x2): """Return the truth value of `x1 < x2` element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise comparison of `x1` and `x2`. """ if any_symbolic_tensors((x1, x2)): return Less().symbolic_call(x1, x2) return backend.numpy.less(x1, x2) class LessEqual(Operation): def call(self, x1, x2): return backend.numpy.less_equal(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) return KerasTensor(output_shape, dtype="bool") @keras_export( [ "keras.ops.less_equal", "keras.ops.numpy.less_equal", ] ) def less_equal(x1, x2): """Return the truth value of `x1 <= x2` element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise comparison of `x1` and `x2`. 
""" if any_symbolic_tensors((x1, x2)): return LessEqual().symbolic_call(x1, x2) return backend.numpy.less_equal(x1, x2) class Linspace(Operation): def __init__( self, num=50, endpoint=True, retstep=False, dtype=float, axis=0 ): super().__init__() self.num = num self.endpoint = endpoint self.retstep = retstep self.dtype = dtype self.axis = axis def call(self, start, stop): return backend.numpy.linspace( start, stop, num=self.num, endpoint=self.endpoint, retstep=self.retstep, dtype=self.dtype, axis=self.axis, ) def compute_output_spec(self, start, stop): start_shape = getattr(start, "shape", []) stop_shape = getattr(stop, "shape", []) output_shape = broadcast_shapes(start_shape, stop_shape) if self.axis == -1: output_shape = output_shape + [self.num] elif self.axis >= 0: output_shape = ( output_shape[: self.axis] + [self.num] + output_shape[self.axis :] ) else: output_shape = ( output_shape[: self.axis + 1] + [self.num] + output_shape[self.axis + 1 :] ) dtype = ( self.dtype if self.dtype is not None else getattr(start, "dtype", type(start)) ) dtype = backend.result_type(dtype, float) if self.retstep: return (KerasTensor(output_shape, dtype=dtype), None) return KerasTensor(output_shape, dtype=dtype) @keras_export(["keras.ops.linspace", "keras.ops.numpy.linspace"]) def linspace( start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0 ): """Return evenly spaced numbers over a specified interval. Returns `num` evenly spaced samples, calculated over the interval `[start, stop]`. The endpoint of the interval can optionally be excluded. Args: start: The starting value of the sequence. stop: The end value of the sequence, unless `endpoint` is set to `False`. In that case, the sequence consists of all but the last of `num + 1` evenly spaced samples, so that `stop` is excluded. Note that the step size changes when `endpoint` is `False`. num: Number of samples to generate. Defaults to `50`. Must be non-negative. endpoint: If `True`, `stop` is the last sample. Otherwise, it is not included. Defaults to`True`. retstep: If `True`, return `(samples, step)`, where `step` is the spacing between samples. dtype: The type of the output tensor. axis: The axis in the result to store the samples. Relevant only if start or stop are array-like. Defaults to `0`. Note: Torch backend does not support `axis` argument. Returns: A tensor of evenly spaced numbers. If `retstep` is `True`, returns `(samples, step)` """ if any_symbolic_tensors((start, stop)): return Linspace(num, endpoint, retstep, dtype, axis)(start, stop) return backend.numpy.linspace( start, stop, num=num, endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis, ) class Log(Operation): def call(self, x): return backend.numpy.log(x) def compute_output_spec(self, x): dtype = ( backend.floatx() if backend.standardize_dtype(x.dtype) == "int64" else dtypes.result_type(x.dtype, float) ) return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.log", "keras.ops.numpy.log"]) def log(x): """Natural logarithm, element-wise. Args: x: Input tensor. Returns: Output tensor, element-wise natural logarithm of `x`. 
""" if any_symbolic_tensors((x,)): return Log().symbolic_call(x) return backend.numpy.log(x) class Log10(Operation): def call(self, x): return backend.numpy.log10(x) def compute_output_spec(self, x): dtype = ( backend.floatx() if backend.standardize_dtype(x.dtype) == "int64" else dtypes.result_type(x.dtype, float) ) return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.log10", "keras.ops.numpy.log10"]) def log10(x): """Return the base 10 logarithm of the input tensor, element-wise. Args: x: Input tensor. Returns: Output tensor, element-wise base 10 logarithm of `x`. """ if any_symbolic_tensors((x,)): return Log10().symbolic_call(x) return backend.numpy.log10(x) class Log1p(Operation): def call(self, x): return backend.numpy.log1p(x) def compute_output_spec(self, x): dtype = ( backend.floatx() if backend.standardize_dtype(x.dtype) == "int64" else dtypes.result_type(x.dtype, float) ) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.log1p", "keras.ops.numpy.log1p"]) def log1p(x): """Returns the natural logarithm of one plus the `x`, element-wise. Calculates `log(1 + x)`. Args: x: Input tensor. Returns: Output tensor, element-wise natural logarithm of `1 + x`. """ if any_symbolic_tensors((x,)): return Log1p().symbolic_call(x) return backend.numpy.log1p(x) class Log2(Operation): def call(self, x): return backend.numpy.log2(x) def compute_output_spec(self, x): dtype = ( backend.floatx() if backend.standardize_dtype(x.dtype) == "int64" else dtypes.result_type(x.dtype, float) ) return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.log2", "keras.ops.numpy.log2"]) def log2(x): """Base-2 logarithm of `x`, element-wise. Args: x: Input tensor. Returns: Output tensor, element-wise base-2 logarithm of `x`. """ if any_symbolic_tensors((x,)): return Log2().symbolic_call(x) return backend.numpy.log2(x) class Logaddexp(Operation): def call(self, x1, x2): return backend.numpy.logaddexp(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), float, ) return KerasTensor(output_shape, dtype=dtype) @keras_export(["keras.ops.logaddexp", "keras.ops.numpy.logaddexp"]) def logaddexp(x1, x2): """Logarithm of the sum of exponentiations of the inputs. Calculates `log(exp(x1) + exp(x2))`. Args: x1: Input tensor. x2: Input tensor. Returns: Output tensor, element-wise logarithm of the sum of exponentiations of the inputs. """ if any_symbolic_tensors((x1, x2)): return Logaddexp().symbolic_call(x1, x2) return backend.numpy.logaddexp(x1, x2) class LogicalAnd(Operation): def call(self, x1, x2): return backend.numpy.logical_and(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) return KerasTensor(output_shape, dtype="bool") @keras_export( [ "keras.ops.logical_and", "keras.ops.numpy.logical_and", ] ) def logical_and(x1, x2): """Computes the element-wise logical AND of the given input tensors. Zeros are treated as `False` and non-zeros are treated as `True`. Args: x1: Input tensor. x2: Input tensor. Returns: Output tensor, element-wise logical AND of the inputs. 
""" if any_symbolic_tensors((x1, x2)): return LogicalAnd().symbolic_call(x1, x2) return backend.numpy.logical_and(x1, x2) class LogicalNot(Operation): def call(self, x): return backend.numpy.logical_not(x) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype="bool") @keras_export( [ "keras.ops.logical_not", "keras.ops.numpy.logical_not", ] ) def logical_not(x): """Computes the element-wise NOT of the given input tensor. Zeros are treated as `False` and non-zeros are treated as `True`. Args: x: Input tensor. Returns: Output tensor, element-wise logical NOT of the input. """ if any_symbolic_tensors((x,)): return LogicalNot().symbolic_call(x) return backend.numpy.logical_not(x) class LogicalOr(Operation): def call(self, x1, x2): return backend.numpy.logical_or(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) return KerasTensor(output_shape, dtype="bool") @keras_export( [ "keras.ops.logical_or", "keras.ops.numpy.logical_or", ] ) def logical_or(x1, x2): """Computes the element-wise logical OR of the given input tensors. Zeros are treated as `False` and non-zeros are treated as `True`. Args: x1: Input tensor. x2: Input tensor. Returns: Output tensor, element-wise logical OR of the inputs. """ if any_symbolic_tensors((x1, x2)): return LogicalOr().symbolic_call(x1, x2) return backend.numpy.logical_or(x1, x2) class Logspace(Operation): def __init__(self, num=50, endpoint=True, base=10, dtype=float, axis=0): super().__init__() self.num = num self.endpoint = endpoint self.base = base self.dtype = dtype self.axis = axis def call(self, start, stop): return backend.numpy.logspace( start, stop, num=self.num, endpoint=self.endpoint, base=self.base, dtype=self.dtype, axis=self.axis, ) def compute_output_spec(self, start, stop): start_shape = getattr(start, "shape", []) stop_shape = getattr(stop, "shape", []) output_shape = broadcast_shapes(start_shape, stop_shape) if self.axis == -1: output_shape = output_shape + [self.num] elif self.axis >= 0: output_shape = ( output_shape[: self.axis] + [self.num] + output_shape[self.axis :] ) else: output_shape = ( output_shape[: self.axis + 1] + [self.num] + output_shape[self.axis + 1 :] ) dtype = ( self.dtype if self.dtype is not None else getattr(start, "dtype", type(start)) ) dtype = backend.result_type(dtype, float) return KerasTensor(output_shape, dtype=dtype) @keras_export(["keras.ops.logspace", "keras.ops.numpy.logspace"]) def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0): """Returns numbers spaced evenly on a log scale. In linear space, the sequence starts at `base ** start` and ends with `base ** stop` (see `endpoint` below). Args: start: The starting value of the sequence. stop: The final value of the sequence, unless `endpoint` is `False`. In that case, `num + 1` values are spaced over the interval in log-space, of which all but the last (a sequence of length `num`) are returned. num: Number of samples to generate. Defaults to `50`. endpoint: If `True`, `stop` is the last sample. Otherwise, it is not included. Defaults to`True`. base: The base of the log space. Defaults to `10`. dtype: The type of the output tensor. axis: The axis in the result to store the samples. Relevant only if start or stop are array-like. Note: Torch backend does not support `axis` argument. Returns: A tensor of evenly spaced samples on a log scale. 
""" if any_symbolic_tensors((start, stop)): return Logspace(num, endpoint, base, dtype, axis)(start, stop) return backend.numpy.logspace( start, stop, num=num, endpoint=endpoint, base=base, dtype=dtype, axis=axis, ) class Matmul(Operation): def call(self, x1, x2): return backend.numpy.matmul(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = operation_utils.compute_matmul_output_shape( x1_shape, x2_shape ) x1_sparse = getattr(x1, "sparse", True) x2_sparse = getattr(x2, "sparse", True) output_sparse = x1_sparse and x2_sparse x1_dtype = backend.standardize_dtype(getattr(x1, "dtype", type(x1))) x2_dtype = backend.standardize_dtype(getattr(x2, "dtype", type(x2))) if x1_dtype == "int8" and x2_dtype == "int8": dtype = "int32" else: dtype = dtypes.result_type(x1_dtype, x2_dtype) return KerasTensor(output_shape, dtype=dtype, sparse=output_sparse) @keras_export(["keras.ops.matmul", "keras.ops.numpy.matmul"]) def matmul(x1, x2): """Matrix product of two tensors. - If both tensors are 1-dimensional, the dot product (scalar) is returned. - If either tensor is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly. - If the first tensor is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed. - If the second tensor is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed. Args: x1: First tensor. x2: Second tensor. Returns: Output tensor, matrix product of the inputs. """ if any_symbolic_tensors((x1, x2)): return Matmul().symbolic_call(x1, x2) return backend.numpy.matmul(x1, x2) class Max(Operation): def __init__(self, axis=None, keepdims=False, initial=None): super().__init__() if isinstance(axis, int): self.axis = [axis] else: self.axis = axis self.keepdims = keepdims self.initial = initial def call(self, x): return backend.numpy.max( x, axis=self.axis, keepdims=self.keepdims, initial=self.initial ) def compute_output_spec(self, x): return KerasTensor( reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=x.dtype, ) @keras_export(["keras.ops.max", "keras.ops.numpy.max"]) def max(x, axis=None, keepdims=False, initial=None): """Return the maximum of a tensor or maximum along an axis. Args: x: Input tensor. axis: Axis or axes along which to operate. By default, flattened input is used. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Defaults to`False`. initial: The minimum value of an output element. Defaults to`None`. Returns: Maximum of `x`. 
""" if any_symbolic_tensors((x,)): return Max(axis=axis, keepdims=keepdims, initial=initial).symbolic_call( x ) return backend.numpy.max(x, axis=axis, keepdims=keepdims, initial=initial) class Maximum(Operation): def call(self, x1, x2): return backend.numpy.maximum(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) output_dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), ) x1_sparse = getattr(x1, "sparse", False) x2_sparse = getattr(x2, "sparse", False) output_sparse = x1_sparse and x2_sparse return KerasTensor( output_shape, dtype=output_dtype, sparse=output_sparse ) @keras_export(["keras.ops.maximum", "keras.ops.numpy.maximum"]) def maximum(x1, x2): """Element-wise maximum of `x1` and `x2`. Args: x1: First tensor. x2: Second tensor. Returns: Output tensor, element-wise maximum of `x1` and `x2`. """ if any_symbolic_tensors((x1, x2)): return Maximum().symbolic_call(x1, x2) return backend.numpy.maximum(x1, x2) class Median(Operation): def __init__(self, axis=None, keepdims=False): super().__init__() if isinstance(axis, int): axis = [axis] self.axis = axis self.keepdims = keepdims def call(self, x): return backend.numpy.median(x, axis=self.axis, keepdims=self.keepdims) def compute_output_spec(self, x): output_shape = reduce_shape( x.shape, axis=self.axis, keepdims=self.keepdims ) if backend.standardize_dtype(x.dtype) == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(x.dtype, float) return KerasTensor(output_shape, dtype=dtype) @keras_export(["keras.ops.median", "keras.ops.numpy.median"]) def median(x, axis=None, keepdims=False): """Compute the median along the specified axis. Args: x: Input tensor. axis: Axis or axes along which the medians are computed. Defaults to `axis=None` which is to compute the median(s) along a flattened version of the array. keepdims: If this is set to `True`, the axes which are reduce are left in the result as dimensions with size one. Returns: The output tensor. """ if any_symbolic_tensors((x,)): return Median(axis=axis, keepdims=keepdims).symbolic_call(x) return backend.numpy.median(x, axis=axis, keepdims=keepdims) class Meshgrid(Operation): def __init__(self, indexing="xy"): super().__init__() if indexing not in ("xy", "ij"): raise ValueError( "Valid values for `indexing` are 'xy' and 'ij', " "but received {index}." ) self.indexing = indexing def call(self, *x): return backend.numpy.meshgrid(*x, indexing=self.indexing) def compute_output_spec(self, *x): output_shape = [] for xi in x: if len(xi.shape) == 0: size = 1 else: if None in xi.shape: size = None else: size = int(np.prod(xi.shape)) output_shape.append(size) if self.indexing == "ij": return [KerasTensor(output_shape) for _ in range(len(x))] tmp = output_shape[0] output_shape[0] = output_shape[1] output_shape[1] = tmp return [ KerasTensor(output_shape, dtype=xi.dtype) for _ in range(len(x)) ] @keras_export(["keras.ops.meshgrid", "keras.ops.numpy.meshgrid"]) def meshgrid(*x, indexing="xy"): """Creates grids of coordinates from coordinate vectors. Given `N` 1-D tensors `T0, T1, ..., TN-1` as inputs with corresponding lengths `S0, S1, ..., SN-1`, this creates an `N` N-dimensional tensors `G0, G1, ..., GN-1` each with shape `(S0, ..., SN-1)` where the output `Gi` is constructed by expanding `Ti` to the result shape. Args: x: 1-D tensors representing the coordinates of a grid. indexing: `"xy"` or `"ij"`. 
"xy" is cartesian; `"ij"` is matrix indexing of output. Defaults to `"xy"`. Returns: Sequence of N tensors. Example: >>> from keras import ops >>> x = ops.array([1, 2, 3]) >>> y = ops.array([4, 5, 6]) >>> grid_x, grid_y = ops.meshgrid(x, y, indexing="ij") >>> grid_x array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) >>> grid_y array([[4, 5, 6], [4, 5, 6], [4, 5, 6]]) """ if any_symbolic_tensors(x): return Meshgrid(indexing=indexing).symbolic_call(*x) return backend.numpy.meshgrid(*x, indexing=indexing) class Min(Operation): def __init__(self, axis=None, keepdims=False, initial=None): super().__init__() if isinstance(axis, int): self.axis = [axis] else: self.axis = axis self.keepdims = keepdims self.initial = initial def call(self, x): return backend.numpy.min( x, axis=self.axis, keepdims=self.keepdims, initial=self.initial ) def compute_output_spec(self, x): return KerasTensor( reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=x.dtype, ) @keras_export(["keras.ops.min", "keras.ops.numpy.min"]) def min(x, axis=None, keepdims=False, initial=None): """Return the minimum of a tensor or minimum along an axis. Args: x: Input tensor. axis: Axis or axes along which to operate. By default, flattened input is used. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Defaults to`False`. initial: The maximum value of an output element. Defaults to`None`. Returns: Minimum of `x`. """ if any_symbolic_tensors((x,)): return Min(axis=axis, keepdims=keepdims, initial=initial).symbolic_call( x ) return backend.numpy.min(x, axis=axis, keepdims=keepdims, initial=initial) class Minimum(Operation): def call(self, x1, x2): return backend.numpy.minimum(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) output_dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), ) x1_sparse = getattr(x1, "sparse", False) x2_sparse = getattr(x2, "sparse", False) output_sparse = x1_sparse and x2_sparse return KerasTensor( output_shape, dtype=output_dtype, sparse=output_sparse ) @keras_export(["keras.ops.minimum", "keras.ops.numpy.minimum"]) def minimum(x1, x2): """Element-wise minimum of `x1` and `x2`. Args: x1: First tensor. x2: Second tensor. Returns: Output tensor, element-wise minimum of `x1` and `x2`. """ if any_symbolic_tensors((x1, x2)): return Minimum().symbolic_call(x1, x2) return backend.numpy.minimum(x1, x2) class Mod(Operation): def call(self, x1, x2): return backend.numpy.mod(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) output_dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), ) if output_dtype == "bool": output_dtype = "int32" return KerasTensor(output_shape, dtype=output_dtype) @keras_export(["keras.ops.mod", "keras.ops.numpy.mod"]) def mod(x1, x2): """Returns the element-wise remainder of division. Args: x1: First tensor. x2: Second tensor. Returns: Output tensor, element-wise remainder of division. 
""" if any_symbolic_tensors((x1, x2)): return Mod().symbolic_call(x1, x2) return backend.numpy.mod(x1, x2) class Moveaxis(Operation): def __init__(self, source, destination): super().__init__() if isinstance(source, int): self.source = [source] else: self.source = source if isinstance(destination, int): self.destination = [destination] else: self.destination = destination if len(self.source) != len(self.destination): raise ValueError( "`source` and `destination` arguments must have the same " f"number of elements, but received `source={source}` and " f"`destination={destination}`." ) def call(self, x): return backend.numpy.moveaxis(x, self.source, self.destination) def compute_output_spec(self, x): x_shape = list(x.shape) output_shape = [-1 for _ in range(len(x.shape))] for sc, dst in zip(self.source, self.destination): output_shape[dst] = x_shape[sc] x_shape[sc] = -1 i, j = 0, 0 while i < len(output_shape): while i < len(output_shape) and output_shape[i] != -1: # Find the first dim unset. i += 1 while j < len(output_shape) and x_shape[j] == -1: # Find the first dim not being passed. j += 1 if i == len(output_shape): break output_shape[i] = x_shape[j] i += 1 j += 1 return KerasTensor(output_shape, dtype=x.dtype) @keras_export(["keras.ops.moveaxis", "keras.ops.numpy.moveaxis"]) def moveaxis(x, source, destination): """Move axes of a tensor to new positions. Other axes remain in their original order. Args: x: Tensor whose axes should be reordered. source: Original positions of the axes to move. These must be unique. destination: Destinations positions for each of the original axes. These must also be unique. Returns: Tensor with moved axes. """ if any_symbolic_tensors((x,)): return Moveaxis(source, destination).symbolic_call(x) return backend.numpy.moveaxis(x, source=source, destination=destination) class NanToNum(Operation): def call(self, x): return backend.numpy.nan_to_num(x) @keras_export( [ "keras.ops.nan_to_num", "keras.ops.numpy.nan_to_num", ] ) def nan_to_num(x): """Replace NaN with zero and infinity with large finite numbers. Args: x: Input data. Returns: `x`, with non-finite values replaced. """ return backend.numpy.nan_to_num(x) class Ndim(Operation): def call(self, x): return backend.numpy.ndim( x, ) def compute_output_spec(self, x): return KerasTensor([len(x.shape)]) @keras_export(["keras.ops.ndim", "keras.ops.numpy.ndim"]) def ndim(x): """Return the number of dimensions of a tensor. Args: x: Input tensor. Returns: The number of dimensions in `x`. """ if any_symbolic_tensors((x,)): return Ndim().symbolic_call(x) return backend.numpy.ndim(x) class Nonzero(Operation): def call(self, x): return backend.numpy.nonzero(x) @keras_export(["keras.ops.nonzero", "keras.ops.numpy.nonzero"]) def nonzero(x): """Return the indices of the elements that are non-zero. Args: x: Input tensor. Returns: Indices of elements that are non-zero. """ return backend.numpy.nonzero(x) class NotEqual(Operation): def call(self, x1, x2): return backend.numpy.not_equal(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) return KerasTensor(output_shape, dtype="bool") @keras_export(["keras.ops.not_equal", "keras.ops.numpy.not_equal"]) def not_equal(x1, x2): """Return `(x1 != x2)` element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise comparsion of `x1` and `x2`. 
""" if any_symbolic_tensors((x1, x2)): return NotEqual().symbolic_call(x1, x2) return backend.numpy.not_equal(x1, x2) class OnesLike(Operation): def call(self, x, dtype=None): return backend.numpy.ones_like(x, dtype=dtype) def compute_output_spec(self, x, dtype=None): if dtype is None: dtype = x.dtype return KerasTensor(x.shape, dtype=dtype) @keras_export(["keras.ops.ones_like", "keras.ops.numpy.ones_like"]) def ones_like(x, dtype=None): """Return a tensor of ones with the same shape and type of `x`. Args: x: Input tensor. dtype: Overrides the data type of the result. Returns: A tensor of ones with the same shape and type as `x`. """ if any_symbolic_tensors((x,)): return OnesLike().symbolic_call(x, dtype=dtype) return backend.numpy.ones_like(x, dtype=dtype) class ZerosLike(Operation): def call(self, x, dtype=None): return backend.numpy.zeros_like(x, dtype=dtype) def compute_output_spec(self, x, dtype=None): if dtype is None: dtype = x.dtype return KerasTensor(x.shape, dtype=dtype) @keras_export( [ "keras.ops.zeros_like", "keras.ops.numpy.zeros_like", ] ) def zeros_like(x, dtype=None): """Return a tensor of zeros with the same shape and type as `x`. Args: x: Input tensor. dtype: Overrides the data type of the result. Returns: A tensor of zeros with the same shape and type as `x`. """ if any_symbolic_tensors((x,)): return ZerosLike().symbolic_call(x, dtype=dtype) return backend.numpy.zeros_like(x, dtype=dtype) class Outer(Operation): def call(self, x1, x2): return backend.numpy.outer(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", [1]) x2_shape = getattr(x2, "shape", [1]) if None in x1_shape: x1_flatten_shape = None else: x1_flatten_shape = int(np.prod(x1_shape)) if None in x2_shape: x2_flatten_shape = None else: x2_flatten_shape = int(np.prod(x2_shape)) output_shape = [x1_flatten_shape, x2_flatten_shape] output_dtype = backend.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), ) return KerasTensor(output_shape, dtype=output_dtype) @keras_export(["keras.ops.outer", "keras.ops.numpy.outer"]) def outer(x1, x2): """Compute the outer product of two vectors. Given two vectors `x1` and `x2`, the outer product is: ``` out[i, j] = x1[i] * x2[j] ``` Args: x1: First input tensor. x2: Second input tensor. Returns: Outer product of `x1` and `x2`. """ if any_symbolic_tensors((x1, x2)): return Outer().symbolic_call(x1, x2) return backend.numpy.outer(x1, x2) class Pad(Operation): def __init__(self, pad_width, mode="constant"): super().__init__() self.pad_width = self._process_pad_width(pad_width) self.mode = mode def _process_pad_width(self, pad_width): if isinstance(pad_width, int): return ((pad_width, pad_width),) if isinstance(pad_width, (tuple, list)) and isinstance( pad_width[0], int ): return (pad_width,) first_len = len(pad_width[0]) for i, pw in enumerate(pad_width): if len(pw) != first_len: raise ValueError( "`pad_width` should be a list of tuples of length " f"1 or 2. Received: pad_width={pad_width}" ) if len(pw) == 1: pad_width[i] = (pw[0], pw[0]) return pad_width def call(self, x, constant_values=None): return backend.numpy.pad( x, pad_width=self.pad_width, mode=self.mode, constant_values=constant_values, ) def compute_output_spec(self, x, constant_values=None): output_shape = list(x.shape) if len(self.pad_width) == 1: pad_width = [self.pad_width[0] for _ in range(len(output_shape))] elif len(self.pad_width) == len(output_shape): pad_width = self.pad_width else: raise ValueError( "`pad_width` must have the same length as `x.shape`. 
" f"Received: pad_width={self.pad_width} " f"(of length {len(self.pad_width)}) and x.shape={x.shape} " f"(of length {len(x.shape)})" ) for i in range(len(output_shape)): if output_shape[i] is None: output_shape[i] = None else: output_shape[i] += pad_width[i][0] + pad_width[i][1] return KerasTensor(output_shape, dtype=x.dtype) @keras_export(["keras.ops.pad", "keras.ops.numpy.pad"]) def pad(x, pad_width, mode="constant", constant_values=None): """Pad a tensor. Args: x: Tensor to pad. pad_width: Number of values padded to the edges of each axis. `((before_1, after_1), ...(before_N, after_N))` unique pad widths for each axis. `((before, after),)` yields same before and after pad for each axis. `(pad,)` or `int` is a shortcut for `before = after = pad` width for all axes. mode: One of `"constant"`, `"edge"`, `"linear_ramp"`, `"maximum"`, `"mean"`, `"median"`, `"minimum"`, `"reflect"`, `"symmetric"`, `"wrap"`, `"empty"`, `"circular"`. Defaults to`"constant"`. constant_values: value to pad with if `mode == "constant"`. Defaults to `0`. A `ValueError` is raised if not None and `mode != "constant"`. Note: Torch backend only supports modes `"constant"`, `"reflect"`, `"symmetric"` and `"circular"`. Only Torch backend supports `"circular"` mode. Note: Tensorflow backend only supports modes `"constant"`, `"reflect"` and `"symmetric"`. Returns: Padded tensor. """ return Pad(pad_width, mode=mode)(x, constant_values=constant_values) class Prod(Operation): def __init__(self, axis=None, keepdims=False, dtype=None): super().__init__() if isinstance(axis, int): self.axis = [axis] else: self.axis = axis self.keepdims = keepdims self.dtype = dtype def call(self, x): return backend.numpy.prod( x, axis=self.axis, keepdims=self.keepdims, dtype=self.dtype, ) def compute_output_spec(self, x): if self.dtype is not None: dtype = self.dtype else: dtype = backend.result_type(x.dtype) if dtype == "bool": dtype = "int32" elif dtype in ("int8", "int16"): dtype = "int32" elif dtype in ("uint8", "uint16"): dtype = "uint32" # TODO: torch doesn't support uint32 if backend.backend() == "torch" and dtype == "uint32": dtype = "int32" return KerasTensor( reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=dtype, ) @keras_export(["keras.ops.prod", "keras.ops.numpy.prod"]) def prod(x, axis=None, keepdims=False, dtype=None): """Return the product of tensor elements over a given axis. Args: x: Input tensor. axis: Axis or axes along which a product is performed. The default, `axis=None`, will compute the product of all elements in the input tensor. keepdims: If this is set to `True`, the axes which are reduce are left in the result as dimensions with size one. dtype: Data type of the returned tensor. Returns: Product of elements of `x` over the given axis or axes. 
""" if any_symbolic_tensors((x,)): return Prod(axis=axis, keepdims=keepdims, dtype=dtype).symbolic_call(x) return backend.numpy.prod(x, axis=axis, keepdims=keepdims, dtype=dtype) class Quantile(Operation): def __init__(self, axis=None, method="linear", keepdims=False): super().__init__() if isinstance(axis, int): axis = [axis] self.axis = axis self.method = method self.keepdims = keepdims def call(self, x, q): return backend.numpy.quantile( x, q, axis=self.axis, keepdims=self.keepdims ) def compute_output_spec(self, x, q): output_shape = reduce_shape( x.shape, axis=self.axis, keepdims=self.keepdims ) if hasattr(q, "shape"): if len(q.shape) > 0: output_shape = (q.shape[0],) + output_shape if backend.standardize_dtype(x.dtype) == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(x.dtype, float) return KerasTensor(output_shape, dtype=dtype) @keras_export(["keras.ops.quantile", "keras.ops.numpy.quantile"]) def quantile(x, q, axis=None, method="linear", keepdims=False): """Compute the q-th quantile(s) of the data along the specified axis. Args: x: Input tensor. q: Probability or sequence of probabilities for the quantiles to compute. Values must be between 0 and 1 inclusive. axis: Axis or axes along which the quantiles are computed. Defaults to `axis=None` which is to compute the quantile(s) along a flattened version of the array. method: A string specifies the method to use for estimating the quantile. Available methods are `"linear"`, `"lower"`, `"higher"`, `"midpoint"`, and `"nearest"`. Defaults to `"linear"`. If the desired quantile lies between two data points `i < j`: - `"linear"`: `i + (j - i) * fraction`, where fraction is the fractional part of the index surrounded by `i` and `j`. - `"lower"`: `i`. - `"higher"`: `j`. - `"midpoint"`: `(i + j) / 2` - `"nearest"`: `i` or `j`, whichever is nearest. keepdims: If this is set to `True`, the axes which are reduce are left in the result as dimensions with size one. Returns: The quantile(s). If `q` is a single probability and `axis=None`, then the result is a scalar. If multiple probabilies levels are given, first axis of the result corresponds to the quantiles. The other axes are the axes that remain after the reduction of `x`. """ if any_symbolic_tensors((x, q)): return Quantile( axis=axis, method=method, keepdims=keepdims ).symbolic_call(x, q) return backend.numpy.quantile( x, q, axis=axis, method=method, keepdims=keepdims ) class Ravel(Operation): def call(self, x): return backend.numpy.ravel(x) def compute_output_spec(self, x): if None in x.shape: output_shape = [ None, ] else: output_shape = [int(np.prod(x.shape))] return KerasTensor(output_shape, dtype=x.dtype) @keras_export(["keras.ops.ravel", "keras.ops.numpy.ravel"]) def ravel(x): """Return a contiguous flattened tensor. A 1-D tensor, containing the elements of the input, is returned. Args: x: Input tensor. Returns: Output tensor. """ if any_symbolic_tensors((x,)): return Ravel().symbolic_call(x) return backend.numpy.ravel(x) class Real(Operation): def call(self, x): return backend.numpy.real(x) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.real", "keras.ops.numpy.real"]) def real(x): """Return the real part of the complex argument. Args: x: Input tensor. Returns: The real component of the complex argument. 
""" if any_symbolic_tensors((x,)): return Real().symbolic_call(x) return backend.numpy.real(x) class Reciprocal(Operation): def call(self, x): return backend.numpy.reciprocal(x) def compute_output_spec(self, x): return KerasTensor(x.shape) @keras_export( [ "keras.ops.reciprocal", "keras.ops.numpy.reciprocal", ] ) def reciprocal(x): """Return the reciprocal of the argument, element-wise. Calculates `1/x`. Args: x: Input tensor. Returns: Output tensor, element-wise reciprocal of `x`. """ if any_symbolic_tensors((x,)): return Reciprocal().symbolic_call(x) return backend.numpy.reciprocal(x) class Repeat(Operation): def __init__(self, repeats, axis=None): super().__init__() self.axis = axis self.repeats = repeats def call(self, x): return backend.numpy.repeat(x, self.repeats, axis=self.axis) def compute_output_spec(self, x): x_shape = list(x.shape) if self.axis is None: if None in x_shape: return KerasTensor([None], dtype=x.dtype) x_flatten_size = int(np.prod(x_shape)) if isinstance(self.repeats, int): output_shape = [x_flatten_size * self.repeats] else: output_shape = [int(np.sum(self.repeats))] return KerasTensor(output_shape, dtype=x.dtype) size_on_ax = x_shape[self.axis] output_shape = x_shape if isinstance(self.repeats, int): if size_on_ax is None: output_shape[self.axis] = None else: output_shape[self.axis] = size_on_ax * self.repeats else: output_shape[self.axis] = int(np.sum(self.repeats)) return KerasTensor(output_shape, dtype=x.dtype) @keras_export(["keras.ops.repeat", "keras.ops.numpy.repeat"]) def repeat(x, repeats, axis=None): """Repeat each element of a tensor after themselves. Args: x: Input tensor. repeats: The number of repetitions for each element. axis: The axis along which to repeat values. By default, use the flattened input array, and return a flat output array. Returns: Output tensor. """ if any_symbolic_tensors((x,)): return Repeat(repeats, axis=axis).symbolic_call(x) return backend.numpy.repeat(x, repeats, axis=axis) class Reshape(Operation): def __init__(self, newshape): super().__init__() self.newshape = newshape def call(self, x): return backend.numpy.reshape(x, self.newshape) def compute_output_spec(self, x): output_shape = operation_utils.compute_reshape_output_shape( x.shape, self.newshape, "newshape" ) sparse = getattr(x, "sparse", False) return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.reshape", "keras.ops.numpy.reshape"]) def reshape(x, newshape): """Gives a new shape to a tensor without changing its data. Args: x: Input tensor. newshape: The new shape should be compatible with the original shape. One shape dimension can be -1 in which case the value is inferred from the length of the array and remaining dimensions. Returns: The reshaped tensor. """ if any_symbolic_tensors((x,)): return Reshape(newshape).symbolic_call(x) return backend.numpy.reshape(x, newshape) class Roll(Operation): def __init__(self, shift, axis=None): super().__init__() self.shift = shift self.axis = axis def call(self, x): return backend.numpy.roll(x, self.shift, self.axis) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype=x.dtype) @keras_export(["keras.ops.roll", "keras.ops.numpy.roll"]) def roll(x, shift, axis=None): """Roll tensor elements along a given axis. Elements that roll beyond the last position are re-introduced at the first. Args: x: Input tensor. shift: The number of places by which elements are shifted. axis: The axis along which elements are shifted. 
By default, the array is flattened before shifting, after which the original shape is restored. Returns: Output tensor. """ if any_symbolic_tensors((x,)): return Roll(shift, axis=axis).symbolic_call(x) return backend.numpy.roll(x, shift, axis=axis) class Round(Operation): def __init__(self, decimals=0): super().__init__() self.decimals = decimals def call(self, x): return backend.numpy.round(x, self.decimals) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.round", "keras.ops.numpy.round"]) def round(x, decimals=0): """Evenly round to the given number of decimals. Args: x: Input tensor. decimals: Number of decimal places to round to. Defaults to `0`. Returns: Output tensor. """ if any_symbolic_tensors((x,)): return Round(decimals).symbolic_call(x) return backend.numpy.round(x, decimals) class Sign(Operation): def call(self, x): return backend.numpy.sign(x) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.sign", "keras.ops.numpy.sign"]) def sign(x): """Returns a tensor with the signs of the elements of `x`. Args: x: Input tensor. Returns: Output tensor of same shape as `x`. """ if any_symbolic_tensors((x,)): return Sign().symbolic_call(x) return backend.numpy.sign(x) class Sin(Operation): def call(self, x): return backend.numpy.sin(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.sin", "keras.ops.numpy.sin"]) def sin(x): """Trigonomeric sine, element-wise. Arguments: x: Input tensor. Returns: Output tensor of same shape as `x`. """ if any_symbolic_tensors((x,)): return Sin().symbolic_call(x) return backend.numpy.sin(x) class Sinh(Operation): def call(self, x): return backend.numpy.sinh(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.sinh", "keras.ops.numpy.sinh"]) def sinh(x): """Hyperbolic sine, element-wise. Arguments: x: Input tensor. Returns: Output tensor of same shape as `x`. """ if any_symbolic_tensors((x,)): return Sinh().symbolic_call(x) return backend.numpy.sinh(x) class Size(Operation): def call(self, x): return backend.numpy.size(x) def compute_output_spec(self, x): return KerasTensor([], dtype="int32") @keras_export(["keras.ops.size", "keras.ops.numpy.size"]) def size(x): """Return the number of elements in a tensor. Args: x: Input tensor. Returns: Number of elements in `x`. """ if any_symbolic_tensors((x,)): return Size().symbolic_call(x) return backend.numpy.size(x) class Sort(Operation): def __init__(self, axis=-1): super().__init__() self.axis = axis def call(self, x): return backend.numpy.sort(x, axis=self.axis) def compute_output_spec(self, x): return KerasTensor(x.shape, x.dtype) @keras_export(["keras.ops.sort", "keras.ops.numpy.sort"]) def sort(x, axis=-1): """Sorts the elements of `x` along a given axis in ascending order. Args: x: Input tensor. axis: Axis along which to sort. 
If `None`, the tensor is flattened before sorting. Defaults to `-1`; the last axis. Returns: Sorted tensor. """ if any_symbolic_tensors((x,)): return Sort(axis=axis).symbolic_call(x) return backend.numpy.sort(x, axis=axis) class Split(Operation): def __init__(self, indices_or_sections, axis=0): super().__init__() if not isinstance(indices_or_sections, int): indices_or_sections = tuple(indices_or_sections) self.indices_or_sections = indices_or_sections self.axis = axis def call(self, x): return backend.numpy.split(x, self.indices_or_sections, axis=self.axis) def compute_output_spec(self, x): x_shape = list(x.shape) x_size_on_axis = x_shape[self.axis] if isinstance(self.indices_or_sections, int): if x_size_on_axis is None: x_shape[self.axis] = None return [ KerasTensor(x_shape, dtype=x.dtype) for _ in range(self.indices_or_sections) ] if np.mod(x_size_on_axis, self.indices_or_sections) != 0: raise ValueError( "`x` size on given `axis` must be dividible by " "`indices_or_sections` when `indices_or_sections` is an " f"int. But received {x_size_on_axis} and " f"{self.indices_or_sections}." ) size = x_size_on_axis // self.indices_or_sections x_shape[self.axis] = size return [ KerasTensor(x_shape, dtype=x.dtype) for _ in range(self.indices_or_sections) ] indices_or_sections = (0, *self.indices_or_sections, x_size_on_axis) output_size = np.diff(indices_or_sections) outputs = [] for i in range(len(output_size)): output_shape = list(x_shape) output_shape[self.axis] = int(output_size[i]) outputs.append(KerasTensor(output_shape, dtype=x.dtype)) return outputs @keras_export(["keras.ops.split", "keras.ops.numpy.split"]) def split(x, indices_or_sections, axis=0): """Split a tensor into chunks. Args: x: Input tensor. indices_or_sections: If an integer, N, the tensor will be split into N equal sections along `axis`. If a 1-D array of sorted integers, the entries indicate indices at which the tensor will be split along `axis`. axis: Axis along which to split. Defaults to `0`. Note: A split does not have to result in equal division when using Torch backend. Returns: A list of tensors. """ if any_symbolic_tensors((x,)): return Split(indices_or_sections, axis=axis).symbolic_call(x) return backend.numpy.split(x, indices_or_sections, axis=axis) class Stack(Operation): def __init__(self, axis=0): super().__init__() self.axis = axis def call(self, xs): return backend.numpy.stack(xs, axis=self.axis) def compute_output_spec(self, xs): first_shape = xs[0].shape dtypes_to_resolve = [] for x in xs: if not shape_equal(x.shape, first_shape, axis=[], allow_none=True): raise ValueError( "Every value in `xs` must have the same shape. But found " f"element of shape {x.shape}, which is different from the " f"first element's shape {first_shape}." ) dtypes_to_resolve.append(getattr(x, "dtype", type(x))) size_on_axis = len(xs) output_shape = list(first_shape) if self.axis == -1: output_shape = output_shape + [size_on_axis] elif self.axis >= 0: output_shape.insert(self.axis, size_on_axis) else: output_shape.insert(self.axis + 1, size_on_axis) output_dtype = dtypes.result_type(*dtypes_to_resolve) return KerasTensor(output_shape, dtype=output_dtype) @keras_export(["keras.ops.stack", "keras.ops.numpy.stack"]) def stack(x, axis=0): """Join a sequence of tensors along a new axis. The `axis` parameter specifies the index of the new axis in the dimensions of the result. Args: x: A sequence of tensors. axis: Axis along which to stack. Defaults to `0`. Returns: The stacked tensor. 
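
    Example (an illustrative sketch; the exact printed output depends on the
    active backend):

    >>> from keras import ops
    >>> ops.stack([ops.array([1, 2]), ops.array([3, 4])], axis=0)
    array([[1, 2],
           [3, 4]])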
""" if any_symbolic_tensors((x,)): return Stack(axis=axis).symbolic_call(x) return backend.numpy.stack(x, axis=axis) class Std(Operation): def __init__(self, axis=None, keepdims=False): super().__init__() if isinstance(axis, int): self.axis = [axis] else: self.axis = axis self.keepdims = keepdims def call(self, x): return backend.numpy.std(x, axis=self.axis, keepdims=self.keepdims) def compute_output_spec(self, x): output_dtype = backend.standardize_dtype(x.dtype) if "int" in output_dtype or output_dtype == "bool": output_dtype = backend.floatx() return KerasTensor( reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=output_dtype, ) @keras_export(["keras.ops.std", "keras.ops.numpy.std"]) def std(x, axis=None, keepdims=False): """Compute the standard deviation along the specified axis. Args: x: Input tensor. axis: Axis along which to compute standard deviation. Default is to compute the standard deviation of the flattened tensor. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Returns: Output tensor containing the standard deviation values. """ if any_symbolic_tensors((x,)): return Std(axis=axis, keepdims=keepdims).symbolic_call(x) return backend.numpy.std(x, axis=axis, keepdims=keepdims) class Swapaxes(Operation): def __init__(self, axis1, axis2): super().__init__() self.axis1 = axis1 self.axis2 = axis2 def call(self, x): return backend.numpy.swapaxes(x, self.axis1, self.axis2) def compute_output_spec(self, x): x_shape = list(x.shape) tmp = x_shape[self.axis1] x_shape[self.axis1] = x_shape[self.axis2] x_shape[self.axis2] = tmp return KerasTensor(x_shape, dtype=x.dtype) @keras_export(["keras.ops.swapaxes", "keras.ops.numpy.swapaxes"]) def swapaxes(x, axis1, axis2): """Interchange two axes of a tensor. Args: x: Input tensor. axis1: First axis. axis2: Second axis. Returns: A tensor with the axes swapped. """ if any_symbolic_tensors((x,)): return Swapaxes(axis1, axis2).symbolic_call(x) return backend.numpy.swapaxes(x, axis1=axis1, axis2=axis2) class Take(Operation): def __init__(self, axis=None): super().__init__() self.axis = axis def call(self, x, indices): return backend.numpy.take(x, indices, axis=self.axis) def compute_output_spec(self, x, indices): x_shape = list(x.shape) if isinstance(indices, KerasTensor): indices_shape = list(indices.shape) else: indices_shape = list(getattr(np.array(indices), "shape", [])) if self.axis is None: return KerasTensor(indices_shape, dtype=x.dtype) # make sure axis is non-negative axis = len(x_shape) + self.axis if self.axis < 0 else self.axis output_shape = x_shape[:axis] + indices_shape + x_shape[axis + 1 :] return KerasTensor(output_shape, dtype=x.dtype) @keras_export(["keras.ops.take", "keras.ops.numpy.take"]) def take(x, indices, axis=None): """Take elements from a tensor along an axis. Args: x: Source tensor. indices: The indices of the values to extract. axis: The axis over which to select values. By default, the flattened input tensor is used. Returns: The corresponding tensor of values. 
""" if any_symbolic_tensors((x, indices)): return Take(axis=axis).symbolic_call(x, indices) return backend.numpy.take(x, indices, axis=axis) class TakeAlongAxis(Operation): def __init__(self, axis=None): super().__init__() self.axis = axis def call(self, x, indices): return backend.numpy.take_along_axis(x, indices, axis=self.axis) def compute_output_spec(self, x, indices): x_shape = list(x.shape) indices_shape = list(indices.shape) if self.axis is None: x_shape = [None] if None in x_shape else [int(np.prod(x_shape))] if len(x_shape) != len(indices_shape): raise ValueError( "`x` and `indices` must have the same number of dimensions, " f"but receive shape {x_shape} and {indices_shape}." ) del x_shape[self.axis] del indices_shape[self.axis] output_shape = broadcast_shapes(x_shape, indices_shape) size_on_axis = indices.shape[self.axis] if self.axis == -1: output_shape = output_shape + [size_on_axis] elif self.axis >= 0: output_shape.insert(self.axis, size_on_axis) else: output_shape.insert(self.axis + 1, size_on_axis) return KerasTensor(output_shape, dtype=x.dtype) @keras_export( [ "keras.ops.take_along_axis", "keras.ops.numpy.take_along_axis", ] ) def take_along_axis(x, indices, axis=None): """Select values from `x` at the 1-D `indices` along the given axis. Args: x: Source tensor. indices: The indices of the values to extract. axis: The axis over which to select values. By default, the flattened input tensor is used. Returns: The corresponding tensor of values. """ if any_symbolic_tensors((x, indices)): return TakeAlongAxis(axis=axis).symbolic_call(x, indices) return backend.numpy.take_along_axis(x, indices, axis=axis) class Tan(Operation): def call(self, x): return backend.numpy.tan(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.tan", "keras.ops.numpy.tan"]) def tan(x): """Compute tangent, element-wise. Args: x: Input tensor. Returns: Output tensor of same shape as `x`. """ if any_symbolic_tensors((x,)): return Tan().symbolic_call(x) return backend.numpy.tan(x) class Tanh(Operation): def call(self, x): return backend.numpy.tanh(x) def compute_output_spec(self, x): dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx())) if dtype == "int64": dtype = backend.floatx() else: dtype = dtypes.result_type(dtype, float) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.tanh", "keras.ops.numpy.tanh"]) def tanh(x): """Hyperbolic tangent, element-wise. Arguments: x: Input tensor. Returns: Output tensor of same shape as `x`. 
""" if any_symbolic_tensors((x,)): return Tanh().symbolic_call(x) return backend.numpy.tanh(x) class Tensordot(Operation): def __init__(self, axes=2): super().__init__() self.axes = axes def call(self, x1, x2): return backend.numpy.tensordot(x1, x2, axes=self.axes) def compute_output_spec(self, x1, x2): x1_shape = list(getattr(x1, "shape", [])) x2_shape = list(getattr(x2, "shape", [])) dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), ) if not isinstance(self.axes, int): x1_select_shape = [x1_shape[ax] for ax in self.axes[0]] x2_select_shape = [x2_shape[ax] for ax in self.axes[1]] if not shape_equal( x1_select_shape, x2_select_shape, allow_none=True ): raise ValueError( "Shape mismatch on `x1[axes[0]]` and `x2[axes[1]]`, " f"received {x1_select_shape} and {x2_select_shape}." ) for ax in self.axes[0]: x1_shape[ax] = -1 for ax in self.axes[1]: x2_shape[ax] = -1 x1_shape = list(filter((-1).__ne__, x1_shape)) x2_shape = list(filter((-1).__ne__, x2_shape)) output_shape = x1_shape + x2_shape return KerasTensor(output_shape, dtype=dtype) if self.axes <= 0: output_shape = x1_shape + x2_shape else: output_shape = x1_shape[: -self.axes] + x2_shape[self.axes :] return KerasTensor(output_shape, dtype=dtype) @keras_export(["keras.ops.tensordot", "keras.ops.numpy.tensordot"]) def tensordot(x1, x2, axes=2): """Compute the tensor dot product along specified axes. Args: x1: First tensor. x2: Second tensor. axes: - If an integer, N, sum over the last N axes of `x1` and the first N axes of `x2` in order. The sizes of the corresponding axes must match. - Or, a list of axes to be summed over, first sequence applying to `x1`, second to `x2`. Both sequences must be of the same length. Returns: The tensor dot product of the inputs. """ if any_symbolic_tensors((x1, x2)): return Tensordot(axes=axes).symbolic_call(x1, x2) return backend.numpy.tensordot(x1, x2, axes=axes) class Tile(Operation): def __init__(self, repeats): super().__init__() self.repeats = repeats def call(self, x): return backend.numpy.tile(x, self.repeats) def compute_output_spec(self, x): x_shape = list(x.shape) repeats = self.repeats if len(x_shape) > len(repeats): repeats = [1] * (len(x_shape) - len(repeats)) + repeats else: x_shape = [1] * (len(repeats) - len(x_shape)) + x_shape output_shape = [] for x_size, repeat in zip(x_shape, repeats): if x_size is None: output_shape.append(None) else: output_shape.append(x_size * repeat) return KerasTensor(output_shape, dtype=x.dtype) @keras_export(["keras.ops.tile", "keras.ops.numpy.tile"]) def tile(x, repeats): """Repeat `x` the number of times given by `repeats`. If `repeats` has length `d`, the result will have dimension of `max(d, x.ndim)`. If `x.ndim < d`, `x` is promoted to be d-dimensional by prepending new axes. If `x.ndim > d`, `repeats` is promoted to `x.ndim` by prepending 1's to it. Args: x: Input tensor. repeats: The number of repetitions of `x` along each axis. Returns: The tiled output tensor. 
""" if any_symbolic_tensors((x,)): return Tile( repeats, ).symbolic_call(x) return backend.numpy.tile(x, repeats) class Trace(Operation): def __init__(self, offset=0, axis1=0, axis2=1): super().__init__() self.offset = offset self.axis1 = axis1 self.axis2 = axis2 def call(self, x): return backend.numpy.trace( x, offset=self.offset, axis1=self.axis1, axis2=self.axis2 ) def compute_output_spec(self, x): x_shape = list(x.shape) x_shape[self.axis1] = -1 x_shape[self.axis2] = -1 output_shape = list(filter((-1).__ne__, x_shape)) output_dtype = backend.standardize_dtype(x.dtype) if output_dtype not in ("int64", "uint32", "uint64"): output_dtype = dtypes.result_type(output_dtype, "int32") return KerasTensor(output_shape, dtype=output_dtype) @keras_export(["keras.ops.trace", "keras.ops.numpy.trace"]) def trace(x, offset=0, axis1=0, axis2=1): """Return the sum along diagonals of the tensor. If `x` is 2-D, the sum along its diagonal with the given offset is returned, i.e., the sum of elements `x[i, i+offset]` for all `i`. If a has more than two dimensions, then the axes specified by `axis1` and `axis2` are used to determine the 2-D sub-arrays whose traces are returned. The shape of the resulting tensor is the same as that of `x` with `axis1` and `axis2` removed. Args: x: Input tensor. offset: Offset of the diagonal from the main diagonal. Can be both positive and negative. Defaults to `0`. axis1: Axis to be used as the first axis of the 2-D sub-arrays. Defaults to `0`.(first axis). axis2: Axis to be used as the second axis of the 2-D sub-arrays. Defaults to `1` (second axis). Returns: If `x` is 2-D, the sum of the diagonal is returned. If `x` has larger dimensions, then a tensor of sums along diagonals is returned. """ if any_symbolic_tensors((x,)): return Trace(offset, axis1, axis2).symbolic_call(x) return backend.numpy.trace(x, offset=offset, axis1=axis1, axis2=axis2) class Tri(Operation): def call(self, N, M=None, k=0, dtype=None): return backend.numpy.tri(N, M=M, k=k, dtype=dtype) def compute_output_spec(self, N, M=None, k=0, dtype=None): if M is None: M = N dtype = dtype or backend.floatx() return KerasTensor((N, M), dtype=dtype) @keras_export(["keras.ops.tri", "keras.ops.numpy.tri"]) def tri(N, M=None, k=0, dtype=None): """Return a tensor with ones at and below a diagonal and zeros elsewhere. Args: N: Number of rows in the tensor. M: Number of columns in the tensor. k: The sub-diagonal at and below which the array is filled. `k = 0` is the main diagonal, while `k < 0` is below it, and `k > 0` is above. The default is 0. dtype: Data type of the returned tensor. The default is "float32". Returns: Tensor with its lower triangle filled with ones and zeros elsewhere. `T[i, j] == 1` for `j <= i + k`, 0 otherwise. """ return backend.numpy.tri(N, M=M, k=k, dtype=dtype) class Tril(Operation): def __init__(self, k=0): super().__init__() self.k = k def call(self, x): return backend.numpy.tril(x, k=self.k) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype=x.dtype) @keras_export(["keras.ops.tril", "keras.ops.numpy.tril"]) def tril(x, k=0): """Return lower triangle of a tensor. For tensors with `ndim` exceeding 2, `tril` will apply to the final two axes. Args: x: Input tensor. k: Diagonal above which to zero elements. Defaults to `0`. the main diagonal. `k < 0` is below it, and `k > 0` is above it. Returns: Lower triangle of `x`, of same shape and data type as `x`. 
""" if any_symbolic_tensors((x,)): return Tril(k=k).symbolic_call(x) return backend.numpy.tril(x, k=k) class Triu(Operation): def __init__(self, k=0): super().__init__() self.k = k def call(self, x): return backend.numpy.triu(x, k=self.k) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype=x.dtype) @keras_export(["keras.ops.triu", "keras.ops.numpy.triu"]) def triu(x, k=0): """Return upper triangle of a tensor. For tensors with `ndim` exceeding 2, `triu` will apply to the final two axes. Args: x: Input tensor. k: Diagonal below which to zero elements. Defaults to `0`. the main diagonal. `k < 0` is below it, and `k > 0` is above it. Returns: Upper triangle of `x`, of same shape and data type as `x`. """ if any_symbolic_tensors((x,)): return Triu(k=k).symbolic_call(x) return backend.numpy.triu(x, k=k) class Vdot(Operation): def call(self, x1, x2): return backend.numpy.vdot(x1, x2) def compute_output_spec(self, x1, x2): dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), ) return KerasTensor([], dtype=dtype) @keras_export(["keras.ops.vdot", "keras.ops.numpy.vdot"]) def vdot(x1, x2): """Return the dot product of two vectors. If the first argument is complex, the complex conjugate of the first argument is used for the calculation of the dot product. Multidimensional tensors are flattened before the dot product is taken. Args: x1: First input tensor. If complex, its complex conjugate is taken before calculation of the dot product. x2: Second input tensor. Returns: Output tensor. """ if any_symbolic_tensors((x1, x2)): return Vdot().symbolic_call(x1, x2) return backend.numpy.vdot(x1, x2) class Vstack(Operation): def call(self, xs): return backend.numpy.vstack(xs) def compute_output_spec(self, xs): first_shape = xs[0].shape total_size_on_axis = 0 dtypes_to_resolve = [] for x in xs: if not shape_equal(x.shape, first_shape, axis=[0], allow_none=True): raise ValueError( "Every value in `xs` must have the same shape except on " f"the `axis` dim. But found element of shape {x.shape}, " f"which is different from the first element's " f"shape {first_shape}." ) if total_size_on_axis is None or x.shape[0] is None: total_size_on_axis = None else: total_size_on_axis += x.shape[0] dtypes_to_resolve.append(getattr(x, "dtype", type(x))) output_shape = list(first_shape) output_shape[0] = total_size_on_axis output_dtype = dtypes.result_type(*dtypes_to_resolve) return KerasTensor(output_shape, output_dtype) @keras_export(["keras.ops.vstack", "keras.ops.numpy.vstack"]) def vstack(xs): """Stack tensors in sequence vertically (row wise). Args: xs: Sequence of tensors. Returns: Tensor formed by stacking the given tensors. 
""" if any_symbolic_tensors((xs,)): return Vstack().symbolic_call(xs) return backend.numpy.vstack(xs) class Where(Operation): def call(self, condition, x1=None, x2=None): return backend.numpy.where(condition, x1, x2) def compute_output_spec(self, condition, x1, x2): condition_shape = getattr(condition, "shape", []) x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(condition_shape, x1_shape) output_shape = broadcast_shapes(output_shape, x2_shape) output_dtype = dtypes.result_type( getattr(x1, "dtype", type(x1) if x1 is not None else "int"), getattr(x2, "dtype", type(x2) if x2 is not None else "int"), ) return KerasTensor(output_shape, dtype=output_dtype) @keras_export(["keras.ops.where", "keras.ops.numpy.where"]) def where(condition, x1=None, x2=None): """Return elements chosen from `x1` or `x2` depending on `condition`. Args: condition: Where `True`, yield `x1`, otherwise yield `x2`. x1: Values from which to choose when `condition` is `True`. x2: Values from which to choose when `condition` is `False`. Returns: A tensor with elements from `x1` where `condition` is `True`, and elements from `x2` where `condition` is `False`. """ if (x1 is None and x2 is not None) or (x1 is not None and x2 is None): raise ValueError( "`x1` and `x2` either both should be `None`" " or both should have non-None value." ) if any_symbolic_tensors((condition, x1, x2)): return Where().symbolic_call(condition, x1, x2) return backend.numpy.where(condition, x1, x2) class Subtract(Operation): def call(self, x1, x2): return backend.numpy.subtract(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) x1_sparse = getattr(x1, "sparse", False) x2_sparse = getattr(x2, "sparse", False) output_sparse = x1_sparse and x2_sparse dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), ) return KerasTensor(output_shape, dtype=dtype, sparse=output_sparse) @keras_export(["keras.ops.subtract", "keras.ops.numpy.subtract"]) def subtract(x1, x2): """Subtract arguments element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise difference of `x1` and `x2`. """ if any_symbolic_tensors((x1, x2)): return Subtract().symbolic_call(x1, x2) return backend.numpy.subtract(x1, x2) class Multiply(Operation): def call(self, x1, x2): return backend.numpy.multiply(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) x1_sparse = getattr(x1, "sparse", True) x2_sparse = getattr(x2, "sparse", True) output_sparse = x1_sparse or x2_sparse dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), ) return KerasTensor(output_shape, dtype=dtype, sparse=output_sparse) @keras_export(["keras.ops.multiply", "keras.ops.numpy.multiply"]) def multiply(x1, x2): """Multiply arguments element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, element-wise product of `x1` and `x2`. 
""" if any_symbolic_tensors((x1, x2)): return Multiply().symbolic_call(x1, x2) return backend.numpy.multiply(x1, x2) class Divide(Operation): def call(self, x1, x2): return backend.numpy.divide(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) output_dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), float, ) x1_sparse = getattr(x1, "sparse", False) x2_sparse = getattr(x2, "sparse", False) output_sparse = x1_sparse and not x2_sparse return KerasTensor( output_shape, dtype=output_dtype, sparse=output_sparse ) @keras_export(["keras.ops.divide", "keras.ops.numpy.divide"]) def divide(x1, x2): """Divide arguments element-wise. `keras.ops.true_divide` is an alias for this function. Args: x1: First input tensor. x2: Second input tensor. Returns: Output tensor, the quotient `x1/x2`, element-wise. """ if any_symbolic_tensors((x1, x2)): return Divide().symbolic_call(x1, x2) return backend.numpy.divide(x1, x2) class DivideNoNan(Operation): def call(self, x1, x2): return backend.numpy.divide_no_nan(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) output_dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), float, ) x1_sparse = getattr(x1, "sparse", False) x2_sparse = getattr(x2, "sparse", False) output_sparse = x1_sparse and not x2_sparse return KerasTensor( output_shape, dtype=output_dtype, sparse=output_sparse ) @keras_export(["keras.ops.divide_no_nan", "keras.ops.numpy.divide_no_nan"]) def divide_no_nan(x1, x2): """Safe element-wise division which returns 0 where the denominator is 0. Args: x1: First input tensor. x2: Second input tensor. Returns: The quotient `x1/x2`, element-wise, with zero where x2 is zero. """ if any_symbolic_tensors((x1, x2)): return DivideNoNan().symbolic_call(x1, x2) return backend.numpy.divide_no_nan(x1, x2) class TrueDivide(Operation): def call(self, x1, x2): return backend.numpy.true_divide(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) output_dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), float, ) x1_sparse = getattr(x1, "sparse", False) x2_sparse = getattr(x2, "sparse", False) output_sparse = x1_sparse and not x2_sparse return KerasTensor( output_shape, dtype=output_dtype, sparse=output_sparse ) @keras_export( [ "keras.ops.true_divide", "keras.ops.numpy.true_divide", ] ) def true_divide(x1, x2): """Alias for `keras.ops.divide`.""" if any_symbolic_tensors((x1, x2)): return TrueDivide().symbolic_call(x1, x2) return backend.numpy.true_divide(x1, x2) class Power(Operation): def call(self, x1, x2): return backend.numpy.power(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) output_dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)) ) return KerasTensor(output_shape, dtype=output_dtype) @keras_export(["keras.ops.power", "keras.ops.numpy.power"]) def power(x1, x2): """First tensor elements raised to powers from second tensor, element-wise. Args: x1: The bases. x2: The exponents. 
Returns: Output tensor, the bases in `x1` raised to the exponents in `x2`. """ if any_symbolic_tensors((x1, x2)): return Power().symbolic_call(x1, x2) return backend.numpy.power(x1, x2) class Negative(Operation): def call(self, x): return backend.numpy.negative(x) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.negative", "keras.ops.numpy.negative"]) def negative(x): """Numerical negative, element-wise. Args: x: Input tensor. Returns: Output tensor, `y = -x`. """ if any_symbolic_tensors((x,)): return Negative().symbolic_call(x) return backend.numpy.negative(x) class Square(Operation): def call(self, x): return backend.numpy.square(x) def compute_output_spec(self, x): sparse = getattr(x, "sparse", False) dtype = backend.standardize_dtype(x.dtype) if dtype == "bool": dtype = "int32" return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.square", "keras.ops.numpy.square"]) def square(x): """Return the element-wise square of the input. Args: x: Input tensor. Returns: Output tensor, the square of `x`. """ if any_symbolic_tensors((x,)): return Square().symbolic_call(x) return backend.numpy.square(x) class Sqrt(Operation): def call(self, x): x = backend.convert_to_tensor(x) return backend.numpy.sqrt(x) def compute_output_spec(self, x): dtype = ( backend.floatx() if backend.standardize_dtype(x.dtype) == "int64" else dtypes.result_type(x.dtype, float) ) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype=dtype, sparse=sparse) @keras_export(["keras.ops.sqrt", "keras.ops.numpy.sqrt"]) def sqrt(x): """Return the non-negative square root of a tensor, element-wise. Args: x: Input tensor. Returns: Output tensor, the non-negative square root of `x`. """ if any_symbolic_tensors((x,)): return Sqrt().symbolic_call(x) x = backend.convert_to_tensor(x) return backend.numpy.sqrt(x) class Squeeze(Operation): def __init__(self, axis=None): super().__init__() self.axis = axis def call(self, x): return backend.numpy.squeeze(x, axis=self.axis) def compute_output_spec(self, x): input_shape = list(x.shape) sparse = getattr(x, "sparse", False) if self.axis is None: output_shape = list(filter((1).__ne__, input_shape)) return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse) else: if input_shape[self.axis] != 1: raise ValueError( f"Cannot squeeze axis {self.axis}, because the dimension " "is not 1." ) del input_shape[self.axis] return KerasTensor(input_shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.squeeze", "keras.ops.numpy.squeeze"]) def squeeze(x, axis=None): """Remove axes of length one from `x`. Args: x: Input tensor. axis: Select a subset of the entries of length one in the shape. Returns: The input tensor with all or a subset of the dimensions of length 1 removed. """ if any_symbolic_tensors((x,)): return Squeeze(axis=axis).symbolic_call(x) return backend.numpy.squeeze(x, axis=axis) class Transpose(Operation): def __init__(self, axes=None): super().__init__() self.axes = axes def call(self, x): return backend.numpy.transpose(x, axes=self.axes) def compute_output_spec(self, x): output_shape = operation_utils.compute_transpose_output_shape( x.shape, self.axes ) sparse = getattr(x, "sparse", False) return KerasTensor(output_shape, dtype=x.dtype, sparse=sparse) @keras_export(["keras.ops.transpose", "keras.ops.numpy.transpose"]) def transpose(x, axes=None): """Returns a tensor with `axes` transposed. Args: x: Input tensor. 
axes: Sequence of integers. Permutation of the dimensions of `x`. By default, the order of the axes are reversed. Returns: `x` with its axes permuted. """ if any_symbolic_tensors((x,)): return Transpose(axes=axes).symbolic_call(x) return backend.numpy.transpose(x, axes=axes) class Mean(Operation): def __init__(self, axis=None, keepdims=False): super().__init__() if isinstance(axis, int): axis = [axis] self.axis = axis self.keepdims = keepdims def call(self, x): return backend.numpy.mean(x, axis=self.axis, keepdims=self.keepdims) def compute_output_spec(self, x): ori_dtype = backend.standardize_dtype(x.dtype) compute_dtype = dtypes.result_type(x.dtype, "float32") if "int" in ori_dtype or ori_dtype == "bool": result_dtype = compute_dtype else: result_dtype = ori_dtype sparse = getattr(x, "sparse", False) return KerasTensor( reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=result_dtype, sparse=sparse, ) @keras_export(["keras.ops.mean", "keras.ops.numpy.mean"]) def mean(x, axis=None, keepdims=False): """Compute the arithmetic mean along the specified axes. Args: x: Input tensor. axis: Axis or axes along which the means are computed. The default is to compute the mean of the flattened tensor. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Returns: Output tensor containing the mean values. """ if any_symbolic_tensors((x,)): return Mean(axis=axis, keepdims=keepdims).symbolic_call(x) return backend.numpy.mean(x, axis=axis, keepdims=keepdims) class Var(Operation): def __init__(self, axis=None, keepdims=False): super().__init__() if isinstance(axis, int): axis = [axis] self.axis = axis self.keepdims = keepdims def call(self, x): return backend.numpy.var(x, axis=self.axis, keepdims=self.keepdims) def compute_output_spec(self, x): output_dtype = backend.result_type(getattr(x, "dtype", type(x)), float) return KerasTensor( reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=output_dtype, ) @keras_export(["keras.ops.var", "keras.ops.numpy.var"]) def var(x, axis=None, keepdims=False): """Compute the variance along the specified axes. Args: x: Input tensor. axis: Axis or axes along which the variance is computed. The default is to compute the variance of the flattened tensor. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Returns: Output tensor containing the variance. """ if any_symbolic_tensors((x,)): return Var(axis=axis, keepdims=keepdims).symbolic_call(x) return backend.numpy.var(x, axis=axis, keepdims=keepdims) class Sum(Operation): def __init__(self, axis=None, keepdims=False): super().__init__() if isinstance(axis, int): axis = [axis] self.axis = axis self.keepdims = keepdims def call(self, x): return backend.numpy.sum(x, axis=self.axis, keepdims=self.keepdims) def compute_output_spec(self, x): dtype = dtypes.result_type(getattr(x, "dtype", backend.floatx())) # follow jax's rule if dtype in ("bool", "int8", "int16"): dtype = "int32" elif dtype in ("uint8", "uint16"): dtype = "uint32" # TODO: torch doesn't support uint32 if backend.backend() == "torch" and dtype == "uint32": dtype = "int32" return KerasTensor( reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims), dtype=dtype, ) @keras_export(["keras.ops.sum", "keras.ops.numpy.sum"]) def sum(x, axis=None, keepdims=False): """Sum of a tensor over the given axes. Args: x: Input tensor. axis: Axis or axes along which the sum is computed. 
The default is to compute the sum of the flattened tensor. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Returns: Output tensor containing the sum. """ if any_symbolic_tensors((x,)): return Sum(axis=axis, keepdims=keepdims).symbolic_call(x) return backend.numpy.sum(x, axis=axis, keepdims=keepdims) class Zeros(Operation): def call(self, shape, dtype=None): return backend.numpy.zeros(shape, dtype=dtype) def compute_output_spec(self, shape, dtype=None): dtype = dtype or backend.floatx() return KerasTensor(shape, dtype=dtype) @keras_export(["keras.ops.zeros", "keras.ops.numpy.zeros"]) def zeros(shape, dtype=None): """Return a new tensor of given shape and type, filled with zeros. Args: shape: Shape of the new tensor. dtype: Desired data type of the tensor. Returns: Tensor of zeros with the given shape and dtype. """ return backend.numpy.zeros(shape, dtype=dtype) class Ones(Operation): def call(self, shape, dtype=None): return backend.numpy.ones(shape, dtype=dtype) def compute_output_spec(self, shape, dtype=None): dtype = dtype or backend.floatx() return KerasTensor(shape, dtype=dtype) @keras_export(["keras.ops.ones", "keras.ops.numpy.ones"]) def ones(shape, dtype=None): """Return a new tensor of given shape and type, filled with ones. Args: shape: Shape of the new tensor. dtype: Desired data type of the tensor. Returns: Tensor of ones with the given shape and dtype. """ return backend.numpy.ones(shape, dtype=dtype) class Eye(Operation): def call(self, N, M=None, k=0, dtype=None): return backend.numpy.eye(N, M=M, k=k, dtype=dtype) def compute_output_spec(self, N, M=None, k=0, dtype=None): if M is None: M = N dtype = dtype or backend.floatx() return KerasTensor((N, M), dtype=dtype) @keras_export(["keras.ops.eye", "keras.ops.numpy.eye"]) def eye(N, M=None, k=0, dtype=None): """Return a 2-D tensor with ones on the diagonal and zeros elsewhere. Args: N: Number of rows in the output. M: Number of columns in the output. If `None`, defaults to `N`. k: Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype: Data type of the returned tensor. Returns: Tensor with ones on the k-th diagonal and zeros elsewhere. """ return backend.numpy.eye(N, M=M, k=k, dtype=dtype) class FloorDivide(Operation): def call(self, x1, x2): return backend.numpy.floor_divide(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) output_dtype = dtypes.result_type( getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2)), ) return KerasTensor(output_shape, dtype=output_dtype) @keras_export(["keras.ops.floor_divide", "keras.ops.numpy.floor_divide"]) def floor_divide(x1, x2): """Returns the largest integer smaller or equal to the division of inputs. Args: x1: Numerator. x2: Denominator. 
Returns: Output tensor, `y = floor(x1/x2)` """ if any_symbolic_tensors((x1, x2)): return FloorDivide().symbolic_call(x1, x2) return backend.numpy.floor_divide(x1, x2) class LogicalXor(Operation): def call(self, x1, x2): return backend.numpy.logical_xor(x1, x2) def compute_output_spec(self, x1, x2): x1_shape = getattr(x1, "shape", []) x2_shape = getattr(x2, "shape", []) output_shape = broadcast_shapes(x1_shape, x2_shape) return KerasTensor(output_shape, dtype="bool") @keras_export(["keras.ops.logical_xor", "keras.ops.numpy.logical_xor"]) def logical_xor(x1, x2): """Compute the truth value of `x1 XOR x2`, element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: Output boolean tensor. """ if any_symbolic_tensors((x1, x2)): return LogicalXor().symbolic_call(x1, x2) return backend.numpy.logical_xor(x1, x2)
keras/keras/ops/numpy.py/0
{ "file_path": "keras/keras/ops/numpy.py", "repo_id": "keras", "token_count": 86791 }
148
import numpy as np
import pytest

import keras
from keras import backend
from keras import ops
from keras import testing
from keras.optimizers.adam import Adam


class AdamTest(testing.TestCase):
    def test_config(self):
        optimizer = Adam(
            learning_rate=0.5,
            beta_1=0.5,
            beta_2=0.67,
            epsilon=1e-5,
            amsgrad=True,
        )
        self.run_class_serialization_test(optimizer)

    def test_single_step(self):
        optimizer = Adam(learning_rate=0.5)
        grads = ops.array([1.0, 6.0, 7.0, 2.0])
        vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
        optimizer.apply_gradients(zip([grads], [vars]))
        self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)

    def test_weight_decay(self):
        grads, var1, var2, var3 = (
            ops.zeros(()),
            backend.Variable(2.0),
            backend.Variable(2.0, name="exclude"),
            backend.Variable(2.0),
        )
        optimizer_1 = Adam(learning_rate=1.0, weight_decay=0.004)
        optimizer_1.apply_gradients(zip([grads], [var1]))

        optimizer_2 = Adam(learning_rate=1.0, weight_decay=0.004)
        optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
        optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))

        optimizer_3 = Adam(learning_rate=1.0, weight_decay=0.004)
        optimizer_3.exclude_from_weight_decay(var_list=[var3])
        optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))

        self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
        self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
        self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)

    def test_correctness_with_golden(self):
        optimizer = Adam(amsgrad=True)

        x = backend.Variable(np.ones([10]))
        grads = ops.arange(0.1, 1.1, 0.1)
        first_grads = ops.full((10,), 0.01)

        golden = np.tile(
            [[0.999], [0.9982], [0.9974], [0.9965], [0.9955]], (1, 10)
        )

        optimizer.apply_gradients(zip([first_grads], [x]))
        for i in range(5):
            self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
            optimizer.apply_gradients(zip([grads], [x]))

    def test_clip_norm(self):
        optimizer = Adam(clipnorm=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])

    def test_clip_value(self):
        optimizer = Adam(clipvalue=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [1.0, 1.0])

    @pytest.mark.requires_trainable_backend
    def test_ema(self):
        # TODO: test correctness
        model = keras.Sequential([keras.layers.Dense(10)])
        model.compile(optimizer=Adam(use_ema=True), loss="mse")
        x = keras.ops.zeros((1, 5))
        y = keras.ops.zeros((1, 10))
        model.fit(x, y)

    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="The IndexedSlices test can only run with TF backend.",
    )
    def test_clipnorm_indexed_slices(self):
        # https://github.com/keras-team/keras/issues/18985
        model = keras.Sequential(
            [
                keras.layers.Embedding(10, 4),
                keras.layers.Flatten(),
                keras.layers.Dense(2),
            ]
        )
        model.compile(optimizer=Adam(clipnorm=100), loss="mse")
        x = keras.ops.ones((8, 5))
        y = keras.ops.zeros((8, 2))
        model.fit(x, y, verbose=0)
keras/keras/optimizers/adam_test.py/0
{ "file_path": "keras/keras/optimizers/adam_test.py", "repo_id": "keras", "token_count": 1857 }
149
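For context on `test_single_step` in the row above: on the very first Adam step the bias-corrected moments reduce to `m_hat = g` and `v_hat = g**2`, so the update is approximately `lr * sign(g)`, which is why every variable drops by exactly the learning rate (0.5). A NumPy sketch of that first step, assuming Keras' default `beta_1=0.9`, `beta_2=0.999`, `epsilon=1e-7`:

```python
import numpy as np

lr, beta_1, beta_2, eps = 0.5, 0.9, 0.999, 1e-7
g = np.array([1.0, 6.0, 7.0, 2.0])

m = (1 - beta_1) * g        # first moment, starting from zero
v = (1 - beta_2) * g**2     # second moment, starting from zero
m_hat = m / (1 - beta_1)    # bias correction at t = 1
v_hat = v / (1 - beta_2)

update = lr * m_hat / (np.sqrt(v_hat) + eps)
print(update)               # ~[0.5 0.5 0.5 0.5]
```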
import numpy as np
import pytest

from keras import backend
from keras import constraints
from keras import layers
from keras import models
from keras import optimizers
from keras import testing


class OptimizerTest(testing.TestCase):
    def test_iterations_counter(self):
        v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
        grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
        optimizer = optimizers.Adam(learning_rate=1.0)
        self.assertAllClose(optimizer.iterations, 0)
        optimizer.apply_gradients([(grads, v)])
        self.assertAllClose(optimizer.iterations, 1)
        optimizer.apply_gradients([(grads, v)])
        self.assertAllClose(optimizer.iterations, 2)

    def test_ema(self):
        v = backend.Variable([[3.0, 4.0], [5.0, 6.0]])
        grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
        optimizer = optimizers.SGD(
            learning_rate=1.0,
            use_ema=True,
            ema_momentum=0.9,
            ema_overwrite_frequency=3,
        )
        optimizer.apply_gradients([(grads, v)])
        self.assertAllClose(v, [[2.0, 3.0], [4.0, 5.0]])
        self.assertAllClose(
            optimizer._model_variables_moving_average[0],
            [[2.0, 3.0], [4.0, 5.0]],  # initialized after first step
        )
        optimizer.apply_gradients([(grads, v)])
        self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
        self.assertAllClose(
            optimizer._model_variables_moving_average[0],
            [[1.9, 2.9], [3.9, 4.9]],
        )
        optimizer.apply_gradients([(grads, v)])
        # Variables were overwritten with EMA
        self.assertAllClose(v, [[1.71, 2.71], [3.71, 4.71]])
        self.assertAllClose(
            optimizer._model_variables_moving_average[0],
            [[1.71, 2.71], [3.71, 4.71]],
        )

    @pytest.mark.requires_trainable_backend
    def test_ema_with_model_fit(self):
        x_train = np.ones((1, 1)).astype("float32")
        y_train = np.zeros((1, 1)).astype("float32")
        optimizer = optimizers.SGD(
            learning_rate=0.1, use_ema=True, ema_momentum=0.9
        )
        model = models.Sequential(
            [layers.Dense(2, kernel_initializer="ones", use_bias=False)]
        )
        model.compile(loss="mse", optimizer=optimizer, run_eagerly=True)
        model.fit(x_train, y_train, batch_size=1, epochs=2)
        self.assertAllClose(
            optimizer._model_variables_moving_average[0].numpy(),
            [[0.891, 0.891]],
            atol=1e-5,
        )
        self.assertAllClose(
            model.trainable_variables[0].numpy(),
            [[0.891, 0.891]],
            atol=1e-5,
        )

    def test_constraints_are_applied(self):
        v = backend.Variable(np.random.random((2, 2)) - 1.0)
        v.constraint = constraints.NonNeg()
        optimizer = optimizers.SGD(learning_rate=0.0001)
        grad = backend.numpy.zeros((2, 2))
        optimizer.apply_gradients([(grad, v)])
        self.assertAlmostEqual(np.min(v), 0.0)

    def test_get_method(self):
        obj = optimizers.get("sgd")
        self.assertIsInstance(obj, optimizers.SGD)
        obj = optimizers.get("adamw")
        self.assertIsInstance(obj, optimizers.AdamW)

        obj = optimizers.get(None)
        self.assertEqual(obj, None)

        with self.assertRaises(ValueError):
            optimizers.get("typo")

    def test_static_loss_scaling(self):
        v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
        grads = backend.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]]) * 1024.0
        optimizer = optimizers.SGD(learning_rate=1.0, loss_scale_factor=1024.0)
        optimizer.apply_gradients([(grads, v)])
        self.assertEqual(optimizer.scale_loss(1.0), 1024.0)
        self.assertAllClose(v, [[0.0, 0.0], [0.0, 0.0]])

    def test_set_weights(self):
        x = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
        optimizer_1 = optimizers.Adam()
        grads = backend.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])
        optimizer_1.apply_gradients(zip([grads], [x]))
        optimizer_2 = optimizers.Adam()
        with self.assertRaisesRegex(ValueError, "You are calling*"):
            optimizer_2.set_weights(optimizer_1.variables)
        optimizer_2.build([x])
        optimizer_2.set_weights(optimizer_1.variables)
        for i in range(len(optimizer_1.variables)):
            self.assertAllClose(
                optimizer_1.variables[i],
                optimizer_2.variables[i],
            )

    def test_gradient_accumulation(self):
        v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
        grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
        optimizer = optimizers.SGD(
            learning_rate=1.0, gradient_accumulation_steps=3
        )
        self.assertEqual(optimizer.gradient_accumulation_steps, 3)
        optimizer.apply_gradients([(grads, v)])
        self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
        self.assertAllClose(
            optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
        )
        self.assertAllClose(optimizer.iterations, 1)
        optimizer.apply_gradients([(grads, v)])
        self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
        self.assertAllClose(
            optimizer._accumulated_gradients[0], [[2.0, 2.0], [2.0, 2.0]]
        )
        self.assertAllClose(optimizer.iterations, 2)
        optimizer.apply_gradients([(grads, v)])
        self.assertAllClose(v, [[0.0, 1.0], [2.0, 3.0]])
        self.assertAllClose(
            optimizer._accumulated_gradients[0], [[0.0, 0.0], [0.0, 0.0]]
        )
        self.assertAllClose(optimizer.iterations, 3)
        optimizer.apply_gradients([(grads, v)])
        self.assertAllClose(v, [[0.0, 1.0], [2.0, 3.0]])
        self.assertAllClose(
            optimizer._accumulated_gradients[0], [[1.0, 1.0], [1.0, 1.0]]
        )
        self.assertAllClose(optimizer.iterations, 4)
keras/keras/optimizers/optimizer_test.py/0
{ "file_path": "keras/keras/optimizers/optimizer_test.py", "repo_id": "keras", "token_count": 3018 }
150
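The `test_gradient_accumulation` case in the row above encodes the accumulation contract: gradients are summed across `gradient_accumulation_steps` calls, the variable is updated once with their average, and the accumulator is then reset. A plain-NumPy sketch of those semantics (not the Keras implementation itself):

```python
import numpy as np

steps, lr = 3, 1.0
v = np.array([[1.0, 2.0], [3.0, 4.0]])
acc = np.zeros_like(v)

for i in range(1, 7):
    g = np.ones_like(v)          # same unit gradient as in the test
    acc += g
    if i % steps == 0:
        v -= lr * (acc / steps)  # one SGD step with the averaged gradient
        acc[:] = 0.0             # reset, matching the asserted accumulator state

print(v)  # [[-1. 0.] [1. 2.]] after two full accumulation cycles
```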
import numpy as np

from keras import backend
from keras import regularizers
from keras import testing
from keras.regularizers.regularizers import validate_float_arg


class RegularizersTest(testing.TestCase):
    def test_config(self):
        reg = regularizers.L1(0.1)
        self.run_class_serialization_test(reg)
        reg = regularizers.L2(0.1)
        self.run_class_serialization_test(reg)
        reg = regularizers.L1L2(l1=0.1, l2=0.2)
        self.run_class_serialization_test(reg)
        reg = regularizers.OrthogonalRegularizer(factor=0.1, mode="rows")
        self.run_class_serialization_test(reg)

    def test_l1(self):
        value = np.random.random((4, 4))
        x = backend.Variable(value)
        y = regularizers.L1(0.1)(x)
        self.assertAllClose(y, 0.1 * np.sum(np.abs(value)))

    def test_l2(self):
        value = np.random.random((4, 4))
        x = backend.Variable(value)
        y = regularizers.L2(0.1)(x)
        self.assertAllClose(y, 0.1 * np.sum(np.square(value)))

    def test_l1_l2(self):
        value = np.random.random((4, 4))
        x = backend.Variable(value)
        y = regularizers.L1L2(l1=0.1, l2=0.2)(x)
        self.assertAllClose(
            y, 0.1 * np.sum(np.abs(value)) + 0.2 * np.sum(np.square(value))
        )

    def test_orthogonal_regularizer(self):
        value = np.random.random((4, 4))
        x = backend.Variable(value)
        y = regularizers.OrthogonalRegularizer(factor=0.1, mode="rows")(x)
        l2_norm = np.linalg.norm(value, axis=1, keepdims=True)
        inputs = value / l2_norm
        self.assertAllClose(
            y,
            0.1
            * 0.5
            * np.sum(
                np.abs(np.dot(inputs, np.transpose(inputs)) * (1.0 - np.eye(4)))
            )
            / (4.0 * (4.0 - 1.0) / 2.0),
        )

    def test_get_method(self):
        obj = regularizers.get("l1l2")
        self.assertIsInstance(obj, regularizers.L1L2)

        obj = regularizers.get("l1")
        self.assertIsInstance(obj, regularizers.L1)

        obj = regularizers.get("l2")
        self.assertIsInstance(obj, regularizers.L2)

        obj = regularizers.get("orthogonal_regularizer")
        self.assertIsInstance(obj, regularizers.OrthogonalRegularizer)

        obj = regularizers.get(None)
        self.assertEqual(obj, None)

        with self.assertRaises(ValueError):
            regularizers.get("typo")

    def test_l1l2_get_config(self):
        l1 = 0.01
        l2 = 0.02
        reg = regularizers.L1L2(l1=l1, l2=l2)
        config = reg.get_config()
        self.assertEqual(config, {"l1": l1, "l2": l2})

        reg_from_config = regularizers.L1L2.from_config(config)
        config_from_config = reg_from_config.get_config()
        self.assertDictEqual(config, config_from_config)
        self.assertEqual(reg_from_config.l1, l1)
        self.assertEqual(reg_from_config.l2, l2)

    def test_orthogonal_regularizer_mode_validation(self):
        with self.assertRaises(ValueError) as context:
            regularizers.OrthogonalRegularizer(factor=0.01, mode="invalid_mode")
        expected_message = (
            'Invalid value for argument `mode`. Expected one of {"rows", '
            '"columns"}. Received: mode=invalid_mode'
        )
        self.assertEqual(str(context.exception), expected_message)

    def test_orthogonal_regularizer_input_rank_validation(self):
        with self.assertRaises(ValueError) as context:
            value = np.random.random((4, 4, 4))
            x = backend.Variable(value)
            regularizers.OrthogonalRegularizer(factor=0.1)(x)
        expected_message = (
            "Inputs to OrthogonalRegularizer must have rank 2. "
            f"Received: inputs.shape={(4, 4, 4)}"
        )
        self.assertEqual(str(context.exception), expected_message)

    def test_orthogonal_regularizer_get_config(self):
        factor = 0.01
        mode = "columns"
        regularizer = regularizers.OrthogonalRegularizer(
            factor=factor, mode=mode
        )
        config = regularizer.get_config()
        self.assertAlmostEqual(config["factor"], factor, 7)
        self.assertEqual(config["mode"], mode)

        reg_from_config = regularizers.OrthogonalRegularizer.from_config(
            config
        )
        config_from_config = reg_from_config.get_config()
        self.assertAlmostEqual(config_from_config["factor"], factor, 7)
        self.assertEqual(config_from_config["mode"], mode)


class ValidateFloatArgTest(testing.TestCase):
    def test_validate_float_with_valid_args(self):
        self.assertEqual(validate_float_arg(1, "test"), 1.0)
        self.assertEqual(validate_float_arg(1.0, "test"), 1.0)

    def test_validate_float_with_invalid_types(self):
        with self.assertRaisesRegex(
            ValueError, "expected a non-negative float"
        ):
            validate_float_arg("not_a_number", "test")

    def test_validate_float_with_nan(self):
        with self.assertRaisesRegex(
            ValueError, "expected a non-negative float"
        ):
            validate_float_arg(float("nan"), "test")

    def test_validate_float_with_inf(self):
        with self.assertRaisesRegex(
            ValueError, "expected a non-negative float"
        ):
            validate_float_arg(float("inf"), "test")

        with self.assertRaisesRegex(
            ValueError, "expected a non-negative float"
        ):
            validate_float_arg(-float("inf"), "test")

    def test_validate_float_with_negative_number(self):
        with self.assertRaisesRegex(
            ValueError, "expected a non-negative float"
        ):
            validate_float_arg(-1, "test")
keras/keras/regularizers/regularizers_test.py/0
{ "file_path": "keras/keras/regularizers/regularizers_test.py", "repo_id": "keras", "token_count": 2627 }
151
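In practice the regularizers under test are rarely called directly; they are attached to layers, and the penalties surface through `layer.losses` / `model.losses` during training. A short sketch (shapes are illustrative):

```python
import numpy as np
import keras
from keras import regularizers

layer = keras.layers.Dense(
    4,
    kernel_regularizer=regularizers.L1L2(l1=0.01, l2=0.01),
    activity_regularizer=regularizers.L2(1e-5),
)
_ = layer(np.ones((2, 3), dtype="float32"))    # build and call the layer
print([float(loss) for loss in layer.losses])  # kernel + activity penalties
```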
import os

import numpy as np

from keras import backend
from keras import testing
from keras.utils import image_dataset_utils
from keras.utils import image_utils
from keras.utils.module_utils import tensorflow as tf


class ImageDatasetFromDirectoryTest(testing.TestCase):
    def _get_images(self, count=16, color_mode="rgb"):
        width = height = 24
        imgs = []
        for _ in range(count):
            if color_mode == "grayscale":
                img = np.random.randint(0, 256, size=(height, width, 1))
            elif color_mode == "rgba":
                img = np.random.randint(0, 256, size=(height, width, 4))
            else:
                img = np.random.randint(0, 256, size=(height, width, 3))
            if backend.config.image_data_format() == "channels_first":
                img = np.transpose(img, (2, 0, 1))
            img = image_utils.array_to_img(img)
            imgs.append(img)
        return imgs

    def _prepare_directory(
        self,
        num_classes=2,
        nested_dirs=False,
        color_mode="rgb",
        count=16,
    ):
        # Generate paths to class subdirectories
        temp_dir = self.get_temp_dir()
        paths = []
        for class_index in range(num_classes):
            class_directory = f"class_{class_index}"
            if nested_dirs:
                class_paths = [
                    class_directory,
                    os.path.join(class_directory, "subfolder_1"),
                    os.path.join(class_directory, "subfolder_2"),
                    os.path.join(
                        class_directory, "subfolder_1", "sub-subfolder"
                    ),
                ]
            else:
                class_paths = [class_directory]
            for path in class_paths:
                os.mkdir(os.path.join(temp_dir, path))
            paths += class_paths

        # Save images to the paths
        i = 0
        for img in self._get_images(color_mode=color_mode, count=count):
            path = paths[i % len(paths)]
            if color_mode == "rgb":
                ext = "jpg"
            else:
                ext = "png"
            filename = os.path.join(path, f"image_{i}.{ext}")
            img.save(os.path.join(temp_dir, filename))
            i += 1
        return temp_dir

    def test_image_dataset_from_directory_no_labels(self):
        # Test retrieving images without labels from a directory and its
        # subdirs.

        # Save a few extra images in the parent directory.
        directory = self._prepare_directory(count=7, num_classes=2)
        for i, img in enumerate(self._get_images(3)):
            filename = f"image_{i}.jpg"
            img.save(os.path.join(directory, filename))

        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=5, image_size=(18, 18), labels=None
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [5, 18, 18, 3]
        else:
            output_shape = [5, 3, 18, 18]
        self.assertEqual(dataset.class_names, None)
        batch = next(iter(dataset))
        # We return plain images
        self.assertEqual(batch.shape, output_shape)
        self.assertEqual(batch.dtype.name, "float32")
        # Count samples
        batch_count = 0
        sample_count = 0
        for batch in dataset:
            batch_count += 1
            sample_count += batch.shape[0]
        self.assertEqual(batch_count, 2)
        self.assertEqual(sample_count, 10)

    def test_image_dataset_from_directory_binary(self):
        directory = self._prepare_directory(num_classes=2)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode="int"
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 3]
        else:
            output_shape = [8, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")

        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode="binary"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8, 1))
        self.assertEqual(batch[1].dtype.name, "float32")

        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode="categorical",
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8, 2))
        self.assertEqual(batch[1].dtype.name, "float32")

    def test_static_shape_in_graph(self):
        directory = self._prepare_directory(num_classes=2)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode="int"
        )
        test_case = self
        if backend.config.image_data_format() == "channels_last":
            output_shape = [None, 18, 18, 3]
        else:
            output_shape = [None, 3, 18, 18]

        @tf.function
        def symbolic_fn(ds):
            for x, _ in ds.take(1):
                test_case.assertListEqual(x.shape.as_list(), output_shape)

        symbolic_fn(dataset)

    def test_sample_count(self):
        directory = self._prepare_directory(num_classes=4, count=15)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode=None
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 15)

    def test_image_dataset_from_directory_multiclass(self):
        directory = self._prepare_directory(num_classes=4, count=15)

        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode=None
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 3]
        else:
            output_shape = [8, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertEqual(batch.shape, output_shape)

        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode=None
        )
        sample_count = 0
        iterator = iter(dataset)
        for batch in dataset:
            sample_count += next(iterator).shape[0]
        self.assertEqual(sample_count, 15)

        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), label_mode="int"
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8,))
        self.assertEqual(batch[1].dtype.name, "int32")

        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode="categorical",
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, (output_shape))
        self.assertEqual(batch[0].dtype.name, "float32")
        self.assertEqual(batch[1].shape, (8, 4))
        self.assertEqual(batch[1].dtype.name, "float32")

    def test_image_dataset_from_directory_color_modes(self):
        directory = self._prepare_directory(num_classes=4, color_mode="rgba")
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory, batch_size=8, image_size=(18, 18), color_mode="rgba"
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 4]
        else:
            output_shape = [8, 4, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")

        directory = self._prepare_directory(
            num_classes=4, color_mode="grayscale"
        )
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            color_mode="grayscale",
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [8, 18, 18, 1]
        else:
            output_shape = [8, 1, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)
        self.assertEqual(batch[0].dtype.name, "float32")

    def test_image_dataset_from_directory_validation_split(self):
        directory = self._prepare_directory(num_classes=2, count=10)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=10,
            image_size=(18, 18),
            validation_split=0.2,
            subset="training",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        if backend.config.image_data_format() == "channels_last":
            train_output_shape = [8, 18, 18, 3]
            val_output_shape = [2, 18, 18, 3]
        else:
            train_output_shape = [8, 3, 18, 18]
            val_output_shape = [2, 3, 18, 18]
        self.assertEqual(batch[0].shape, train_output_shape)

        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=10,
            image_size=(18, 18),
            validation_split=0.2,
            subset="validation",
            seed=1337,
        )
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, val_output_shape)

        (
            train_dataset,
            val_dataset,
        ) = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=10,
            image_size=(18, 18),
            validation_split=0.2,
            subset="both",
            seed=1337,
        )
        batch = next(iter(train_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, train_output_shape)
        batch = next(iter(val_dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, val_output_shape)

    def test_image_dataset_from_directory_manual_labels(self):
        # Case: wrong number of labels
        directory = self._prepare_directory(num_classes=1, count=4)
        with self.assertRaisesRegex(ValueError, "match the number of files"):
            image_dataset_utils.image_dataset_from_directory(
                directory,
                batch_size=8,
                image_size=(18, 18),
                labels=[0, 1, 0],
                shuffle=False,
            )

        # Case: single directory
        directory = self._prepare_directory(num_classes=1, count=4)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            labels=[0, 1, 0, 1],
            shuffle=False,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [18, 18, 3]
        else:
            output_shape = [3, 18, 18]
        self.assertEqual(dataset.class_names, ["0", "1"])
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, [4] + output_shape)
        self.assertAllClose(batch[1], [0, 1, 0, 1])

        # Case: multiple directories
        directory = self._prepare_directory(num_classes=3, count=6)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            labels=[0, 1, 0, 1, 1, 1],
            shuffle=False,
        )
        self.assertEqual(dataset.class_names, ["0", "1"])
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, [6] + output_shape)
        self.assertAllClose(batch[1], [0, 1, 0, 1, 1, 1])

    def test_image_dataset_from_directory_follow_links(self):
        directory = self._prepare_directory(
            num_classes=2, count=25, nested_dirs=True
        )
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=8,
            image_size=(18, 18),
            label_mode=None,
            follow_links=True,
        )
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
        self.assertEqual(sample_count, 25)

    def test_image_dataset_from_directory_no_images(self):
        directory = self._prepare_directory(num_classes=2, count=0)
        with self.assertRaisesRegex(ValueError, "No images found."):
            _ = image_dataset_utils.image_dataset_from_directory(directory)

    def test_image_dataset_from_directory_crop_to_aspect_ratio(self):
        directory = self._prepare_directory(num_classes=2, count=5)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=5,
            image_size=(18, 18),
            crop_to_aspect_ratio=True,
        )
        if backend.config.image_data_format() == "channels_last":
            output_shape = [5, 18, 18, 3]
        else:
            output_shape = [5, 3, 18, 18]
        batch = next(iter(dataset))
        self.assertLen(batch, 2)
        self.assertEqual(batch[0].shape, output_shape)

    def test_image_dataset_from_directory_errors(self):
        directory = self._prepare_directory(num_classes=3, count=5)

        with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, labels="other"
            )

        with self.assertRaisesRegex(
            ValueError, "`label_mode` argument must be"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, label_mode="other"
            )

        with self.assertRaisesRegex(ValueError, "`color_mode` must be one of"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, color_mode="other"
            )

        with self.assertRaisesRegex(
            ValueError, 'only pass `class_names` if `labels="inferred"`'
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory,
                labels=[0, 0, 1, 1, 1],
                class_names=["class_0", "class_1", "class_2"],
            )

        with self.assertRaisesRegex(
            ValueError,
            "Expected the lengths of `labels` to match the number of files",
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, labels=[0, 0, 1, 1]
            )

        with self.assertRaisesRegex(
            ValueError, "`class_names` passed did not match"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, class_names=["class_0", "wrong_class"]
            )

        with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, label_mode="binary"
            )

        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be between 0 and 1"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=2
            )

        with self.assertRaisesRegex(
            ValueError,
            '`subset` must be either "training", "validation" or "both"',
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=0.2, subset="other"
            )

        with self.assertRaisesRegex(
            ValueError, "`validation_split` must be set"
        ):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=0.0, subset="training"
            )

        with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
            _ = image_dataset_utils.image_dataset_from_directory(
                directory, validation_split=0.2, subset="training"
            )

    def test_image_dataset_from_directory_not_batched(self):
        directory = self._prepare_directory(num_classes=2, count=2)
        dataset = image_dataset_utils.image_dataset_from_directory(
            directory,
            batch_size=None,
            image_size=(18, 18),
            label_mode=None,
            shuffle=False,
        )
        sample = next(iter(dataset))
        self.assertEqual(len(sample.shape), 3)
keras/keras/utils/image_dataset_utils_test.py/0
{ "file_path": "keras/keras/utils/image_dataset_utils_test.py", "repo_id": "keras", "token_count": 8647 }
152
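A typical call shape for the utility exercised above, outside of tests (the directory path is illustrative and must contain one subdirectory per class):

```python
import keras

train_ds, val_ds = keras.utils.image_dataset_from_directory(
    "path/to/images",
    validation_split=0.2,
    subset="both",           # returns (train, validation) as a tuple
    seed=1337,               # required so the two subsets don't overlap
    image_size=(180, 180),
    batch_size=32,
    label_mode="int",
)
print(train_ds.class_names)  # inferred from the subdirectory names
```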
import numpy as np
import pytest
import tensorflow as tf

import keras
from keras import backend
from keras.testing import test_case
from keras.utils import rng_utils


class TestRandomSeedSetting(test_case.TestCase):
    @pytest.mark.skipif(
        backend.backend() == "numpy",
        reason="Numpy backend does not support random seed setting.",
    )
    def test_set_random_seed(self):
        def get_model_output():
            model = keras.Sequential(
                [
                    keras.layers.Dense(10),
                    keras.layers.Dropout(0.5),
                    keras.layers.Dense(10),
                ]
            )
            x = np.random.random((32, 10)).astype("float32")
            ds = tf.data.Dataset.from_tensor_slices(x).shuffle(32).batch(16)
            return model.predict(ds)

        rng_utils.set_random_seed(42)
        y1 = get_model_output()
        rng_utils.set_random_seed(42)
        y2 = get_model_output()
        self.assertAllClose(y1, y2)
keras/keras/utils/rng_utils_test.py/0
{ "file_path": "keras/keras/utils/rng_utils_test.py", "repo_id": "keras", "token_count": 502 }
153
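The test above relies on the public `keras.utils.set_random_seed`, which seeds the Python, NumPy, and backend random generators in one call:

```python
import keras

keras.utils.set_random_seed(42)  # reproducible inits, dropout, and shuffling
```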
from keras.api_export import keras_export

# Unique source of truth for the version number.
__version__ = "3.1.0"


@keras_export("keras.version")
def version():
    return __version__
keras/keras/version.py/0
{ "file_path": "keras/keras/version.py", "repo_id": "keras", "token_count": 62 }
154
#!/bin/bash
sudo pip install -r requirements.txt
sudo pip uninstall keras-nightly -y
wget https://github.com/cli/cli/releases/download/v2.17.0/gh_2.17.0_linux_amd64.deb -P /tmp
sudo apt install /tmp/gh_2.17.0_linux_amd64.deb -y
tf-keras/.devcontainer/setup.sh/0
{ "file_path": "tf-keras/.devcontainer/setup.sh", "repo_id": "tf-keras", "token_count": 92 }
155
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup script for the TF-Keras pip package."""

import os

import setuptools

DESCRIPTION = """TF-Keras is a deep learning API written in Python,
running on top of the machine learning platform TensorFlow.

It was developed with a focus on enabling fast experimentation and
providing a delightful developer experience.

The purpose of TF-Keras is to give an *unfair advantage* to any developer
looking to ship ML-powered apps.
"""

with open(os.path.abspath(__file__)) as f:
    contents = f.read()
    if contents.count("{PACKAGE}") > 1 or contents.count("{VERSION}") > 1:
        raise ValueError(
            "You must fill the 'PACKAGE' and 'VERSION' "
            "tags before running setup.py. If you are trying to "
            "build a fresh package, you should be using "
            "`pip_build.py` instead of `setup.py`."
        )

# pin version to that of tensorflow or tf_nightly.
if "nightly" in "{{PACKAGE}}":
    version = "{{VERSION}}"  # 2.17.0.dev2024021419
    base_version = version.split(".dev")[0]
    install_requires = [f"tf-nightly~={base_version}.dev"]
else:
    install_requires = ["tensorflow~={{VERSION}}"]

setuptools.setup(
    name="{{PACKAGE}}",
    # Version strings with `-` characters are semver compatible,
    # but incompatible with pip. For pip, we will remove all `-` characters.
    version="{{VERSION}}",
    description="Deep learning for humans.",
    long_description=DESCRIPTION,
    url="https://keras.io/",
    download_url="https://github.com/keras-team/tf-keras/tags",
    author="Keras team",
    author_email="[email protected]",
    packages=setuptools.find_packages(),
    install_requires=install_requires,
    # Supported Python versions
    python_requires=">=3.9",
    # PyPI package information.
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Programming Language :: Python :: 3 :: Only",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Mathematics",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    license="Apache 2.0",
    keywords=["keras", "tensorflow", "machine learning", "deep learning"],
)
tf-keras/oss_setup.py/0
{ "file_path": "tf-keras/oss_setup.py", "repo_id": "tf-keras", "token_count": 1149 }
156
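The `{{PACKAGE}}` / `{{VERSION}}` tags above are meant to be substituted by the packaging tooling (`pip_build.py`, per the error message) before `setup.py` runs. A hypothetical sketch of that substitution step; the filenames and values here are assumptions, not the actual build script:

```python
content = open("oss_setup.py").read()
content = content.replace("{{PACKAGE}}", "tf_keras")  # or "tf-keras-nightly"
content = content.replace("{{VERSION}}", "2.16.0")
open("setup.py", "w").write(content)
```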
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet v3 models for TF-Keras."""

import tensorflow.compat.v2 as tf

from tf_keras import backend
from tf_keras import models
from tf_keras.applications import imagenet_utils
from tf_keras.layers import VersionAwareLayers
from tf_keras.utils import data_utils
from tf_keras.utils import layer_utils

# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export

# TODO(scottzhu): Change this to the GCS path.
BASE_WEIGHT_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v3/"
)
WEIGHTS_HASHES = {
    "large_224_0.75_float": (
        "765b44a33ad4005b3ac83185abf1d0eb",
        "40af19a13ebea4e2ee0c676887f69a2e",
    ),
    "large_224_1.0_float": (
        "59e551e166be033d707958cf9e29a6a7",
        "07fb09a5933dd0c8eaafa16978110389",
    ),
    "large_minimalistic_224_1.0_float": (
        "675e7b876c45c57e9e63e6d90a36599c",
        "ec5221f64a2f6d1ef965a614bdae7973",
    ),
    "small_224_0.75_float": (
        "cb65d4e5be93758266aa0a7f2c6708b7",
        "ebdb5cc8e0b497cd13a7c275d475c819",
    ),
    "small_224_1.0_float": (
        "8768d4c2e7dee89b9d02b2d03d65d862",
        "d3e8ec802a04aa4fc771ee12a9a9b836",
    ),
    "small_minimalistic_224_1.0_float": (
        "99cd97fb2fcdad2bf028eb838de69e37",
        "cde8136e733e811080d9fcd8a252f7e4",
    ),
}

layers = VersionAwareLayers()

BASE_DOCSTRING = """Instantiates the {name} architecture.

    Reference:
    - [Searching for MobileNetV3](
        https://arxiv.org/pdf/1905.02244.pdf) (ICCV 2019)

    The following table describes the performance of MobileNets v3:
    ------------------------------------------------------------------------
    MACs stands for Multiply Adds

    |Classification Checkpoint|MACs(M)|Parameters(M)|Top1 Accuracy|Pixel1 CPU(ms)|
    |---|---|---|---|---|
    | mobilenet_v3_large_1.0_224 | 217 | 5.4 | 75.6 | 51.2 |
    | mobilenet_v3_large_0.75_224 | 155 | 4.0 | 73.3 | 39.8 |
    | mobilenet_v3_large_minimalistic_1.0_224 | 209 | 3.9 | 72.3 | 44.1 |
    | mobilenet_v3_small_1.0_224 | 66 | 2.9 | 68.1 | 15.8 |
    | mobilenet_v3_small_0.75_224 | 44 | 2.4 | 65.4 | 12.8 |
    | mobilenet_v3_small_minimalistic_1.0_224 | 65 | 2.0 | 61.9 | 12.2 |

    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).

    Note: each TF-Keras Application expects a specific kind of input
    preprocessing. For MobileNetV3, by default input preprocessing is included
    as a part of the model (as a `Rescaling` layer), and thus
    `tf.keras.applications.mobilenet_v3.preprocess_input` is actually a
    pass-through function. In this use case, MobileNetV3 models expect their
    inputs to be float tensors of pixels with values in the [0-255] range.
    At the same time, preprocessing as a part of the model (i.e. `Rescaling`
    layer) can be disabled by setting `include_preprocessing` argument to
    False. With preprocessing disabled MobileNetV3 models expect their inputs
    to be float tensors of pixels with values in the [-1, 1] range.

    Args:
        input_shape: Optional shape tuple, to be specified if you would
            like to use a model with an input image resolution that is not
            (224, 224, 3). It should have exactly 3 input channels
            (224, 224, 3). You can also omit this option if you would like
            to infer input_shape from an input_tensor. If you choose to
            include both input_tensor and input_shape then input_shape will
            be used if they match, if the shapes do not match then we will
            throw an error. E.g. `(160, 160, 3)` would be one valid value.
        alpha: controls the width of the network. This is known as the
            depth multiplier in the MobileNetV3 paper, but the name is kept
            for consistency with MobileNetV1 in TF-Keras.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                are used at each layer.
        minimalistic: In addition to large and small models this module also
            contains so-called minimalistic models, these models have the
            same per-layer dimensions characteristic as MobilenetV3 however,
            they don't utilize any of the advanced blocks
            (squeeze-and-excite units, hard-swish, and 5x5 convolutions).
            While these models are less efficient on CPU, they are much more
            performant on GPU/DSP.
        include_top: Boolean, whether to include the fully-connected layer
            at the top of the network. Defaults to `True`.
        weights: String, one of `None` (random initialization), 'imagenet'
            (pre-training on ImageNet), or the path to the weights file to
            be loaded.
        input_tensor: Optional TF-Keras tensor (i.e. output of
            `layers.Input()`) to use as image input for the model.
        pooling: String, optional pooling mode for feature extraction when
            `include_top` is `False`.
            - `None` means that the output of the model will be the 4D
                tensor output of the last convolutional block.
            - `avg` means that global average pooling will be applied to
                the output of the last convolutional block, and thus the
                output of the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: Integer, optional number of classes to classify images
            into, only to be specified if `include_top` is True, and if no
            `weights` argument is specified.
        dropout_rate: fraction of the input units to drop on the last layer.
        classifier_activation: A `str` or callable. The activation function
            to use on the "top" layer. Ignored unless `include_top=True`.
            Set `classifier_activation=None` to return the logits of the
            "top" layer. When loading pretrained weights,
            `classifier_activation` can only be `None` or `"softmax"`.
        include_preprocessing: Boolean, whether to include the preprocessing
            layer (`Rescaling`) at the bottom of the network. Defaults to
            `True`.

    Call arguments:
        inputs: A floating point `numpy.array` or a `tf.Tensor`, 4D with 3
            color channels, with values in the range [0, 255] if
            `include_preprocessing` is True and in the range [-1, 1]
            otherwise.

    Returns:
        A `keras.Model` instance.
"""


def MobileNetV3(
    stack_fn,
    last_point_ch,
    input_shape=None,
    alpha=1.0,
    model_type="large",
    minimalistic=False,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
    include_preprocessing=True,
):
    if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded. "
            f"Received weights={weights}"
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top` '
            "as true, `classes` should be 1000. "
            f"Received classes={classes}"
        )

    # Determine proper input shape and default size.
    # If both input_shape and input_tensor are used, they should match
    if input_shape is not None and input_tensor is not None:
        try:
            is_input_t_tensor = backend.is_keras_tensor(input_tensor)
        except ValueError:
            try:
                is_input_t_tensor = backend.is_keras_tensor(
                    layer_utils.get_source_inputs(input_tensor)
                )
            except ValueError:
                raise ValueError(
                    "input_tensor: ",
                    input_tensor,
                    "is not type input_tensor. "
                    f"Received type(input_tensor)={type(input_tensor)}",
                )
        if is_input_t_tensor:
            if backend.image_data_format() == "channels_first":
                if backend.int_shape(input_tensor)[1] != input_shape[1]:
                    raise ValueError(
                        "When backend.image_data_format()=channels_first, "
                        "input_shape[1] must equal "
                        "backend.int_shape(input_tensor)[1]. Received "
                        f"input_shape={input_shape}, "
                        "backend.int_shape(input_tensor)="
                        f"{backend.int_shape(input_tensor)}"
                    )
            else:
                if backend.int_shape(input_tensor)[2] != input_shape[1]:
                    raise ValueError(
                        "input_shape[1] must equal "
                        "backend.int_shape(input_tensor)[2]. Received "
                        f"input_shape={input_shape}, "
                        "backend.int_shape(input_tensor)="
                        f"{backend.int_shape(input_tensor)}"
                    )
        else:
            raise ValueError(
                "input_tensor specified: ",
                input_tensor,
                "is not a keras tensor",
            )

    # If input_shape is None, infer shape from input_tensor
    if input_shape is None and input_tensor is not None:
        try:
            backend.is_keras_tensor(input_tensor)
        except ValueError:
            raise ValueError(
                "input_tensor: ",
                input_tensor,
                "is type: ",
                type(input_tensor),
                "which is not a valid type",
            )

        if backend.is_keras_tensor(input_tensor):
            if backend.image_data_format() == "channels_first":
                rows = backend.int_shape(input_tensor)[2]
                cols = backend.int_shape(input_tensor)[3]
                input_shape = (3, cols, rows)
            else:
                rows = backend.int_shape(input_tensor)[1]
                cols = backend.int_shape(input_tensor)[2]
                input_shape = (cols, rows, 3)

    # If input_shape is None and input_tensor is None using standard shape
    if input_shape is None and input_tensor is None:
        if backend.image_data_format() == "channels_last":
            input_shape = (None, None, 3)
        else:
            input_shape = (3, None, None)

    if backend.image_data_format() == "channels_last":
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]
    if rows and cols and (rows < 32 or cols < 32):
        raise ValueError(
            "Input size must be at least 32x32; Received `input_shape="
            f"{input_shape}`"
        )
    if weights == "imagenet":
        if (
            not minimalistic
            and alpha not in [0.75, 1.0]
            or minimalistic
            and alpha != 1.0
        ):
            raise ValueError(
                "If imagenet weights are being loaded, "
                "alpha can be one of `0.75`, `1.0` for non minimalistic "
                "or `1.0` for minimalistic only."
            )

        if rows != cols or rows != 224:
            logging.warning(
                "`input_shape` is undefined or non-square, "
                "or `rows` is not 224. "
                "Weights for input shape (224, 224) will be "
                "loaded as the default."
            )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1

    if minimalistic:
        kernel = 3
        activation = relu
        se_ratio = None
    else:
        kernel = 5
        activation = hard_swish
        se_ratio = 0.25

    x = img_input
    if include_preprocessing:
        x = layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)(x)
    x = layers.Conv2D(
        16,
        kernel_size=3,
        strides=(2, 2),
        padding="same",
        use_bias=False,
        name="Conv",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv/BatchNorm"
    )(x)
    x = activation(x)

    x = stack_fn(x, kernel, activation, se_ratio)

    last_conv_ch = _depth(backend.int_shape(x)[channel_axis] * 6)

    # if the width multiplier is greater than 1 we
    # increase the number of output channels
    if alpha > 1.0:
        last_point_ch = _depth(last_point_ch * alpha)
    x = layers.Conv2D(
        last_conv_ch,
        kernel_size=1,
        padding="same",
        use_bias=False,
        name="Conv_1",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv_1/BatchNorm"
    )(x)
    x = activation(x)
    if include_top:
        x = layers.GlobalAveragePooling2D(keepdims=True)(x)
        x = layers.Conv2D(
            last_point_ch,
            kernel_size=1,
            padding="same",
            use_bias=True,
            name="Conv_2",
        )(x)
        x = activation(x)

        if dropout_rate > 0:
            x = layers.Dropout(dropout_rate)(x)
        x = layers.Conv2D(
            classes, kernel_size=1, padding="same", name="Logits"
        )(x)
        x = layers.Flatten()(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Activation(
            activation=classifier_activation, name="Predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D(name="max_pool")(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name="MobilenetV3" + model_type)

    # Load weights.
    if weights == "imagenet":
        model_name = "{}{}_224_{}_float".format(
            model_type, "_minimalistic" if minimalistic else "", str(alpha)
        )
        if include_top:
            file_name = "weights_mobilenet_v3_" + model_name + ".h5"
            file_hash = WEIGHTS_HASHES[model_name][0]
        else:
            file_name = "weights_mobilenet_v3_" + model_name + "_no_top_v2.h5"
            file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = data_utils.get_file(
            file_name,
            BASE_WEIGHT_PATH + file_name,
            cache_subdir="models",
            file_hash=file_hash,
        )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model


@keras_export("keras.applications.MobileNetV3Small")
def MobileNetV3Small(
    input_shape=None,
    alpha=1.0,
    minimalistic=False,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
    include_preprocessing=True,
):
    def stack_fn(x, kernel, activation, se_ratio):
        def depth(d):
            return _depth(d * alpha)

        x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0)
        x = _inverted_res_block(x, 72.0 / 16, depth(24), 3, 2, None, relu, 1)
        x = _inverted_res_block(x, 88.0 / 24, depth(24), 3, 1, None, relu, 2)
        x = _inverted_res_block(
            x, 4, depth(40), kernel, 2, se_ratio, activation, 3
        )
        x = _inverted_res_block(
            x, 6, depth(40), kernel, 1, se_ratio, activation, 4
        )
        x = _inverted_res_block(
            x, 6, depth(40), kernel, 1, se_ratio, activation, 5
        )
        x = _inverted_res_block(
            x, 3, depth(48), kernel, 1, se_ratio, activation, 6
        )
        x = _inverted_res_block(
            x, 3, depth(48), kernel, 1, se_ratio, activation, 7
        )
        x = _inverted_res_block(
            x, 6, depth(96), kernel, 2, se_ratio, activation, 8
        )
        x = _inverted_res_block(
            x, 6, depth(96), kernel, 1, se_ratio, activation, 9
        )
        x = _inverted_res_block(
            x, 6, depth(96), kernel, 1, se_ratio, activation, 10
        )
        return x

    return MobileNetV3(
        stack_fn,
        1024,
        input_shape,
        alpha,
        "small",
        minimalistic,
        include_top,
        weights,
        input_tensor,
        classes,
        pooling,
        dropout_rate,
        classifier_activation,
        include_preprocessing,
    )


@keras_export("keras.applications.MobileNetV3Large")
def MobileNetV3Large(
    input_shape=None,
    alpha=1.0,
    minimalistic=False,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    classes=1000,
    pooling=None,
    dropout_rate=0.2,
    classifier_activation="softmax",
    include_preprocessing=True,
):
    def stack_fn(x, kernel, activation, se_ratio):
        def depth(d):
            return _depth(d * alpha)

        x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0)
        x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1)
        x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2)
        x = _inverted_res_block(x, 3, depth(40), kernel, 2, se_ratio, relu, 3)
        x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 4)
        x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 5)
        x = _inverted_res_block(x, 6, depth(80), 3, 2, None, activation, 6)
        x = _inverted_res_block(x, 2.5, depth(80), 3, 1, None, activation, 7)
        x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 8)
        x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 9)
        x = _inverted_res_block(
            x, 6, depth(112), 3, 1, se_ratio, activation, 10
        )
        x = _inverted_res_block(
            x, 6, depth(112), 3, 1, se_ratio, activation, 11
        )
        x = _inverted_res_block(
            x, 6, depth(160), kernel, 2, se_ratio, activation, 12
        )
        x = _inverted_res_block(
            x, 6, depth(160), kernel, 1, se_ratio, activation, 13
        )
        x = _inverted_res_block(
            x, 6, depth(160), kernel, 1, se_ratio, activation, 14
        )
        return x

    return MobileNetV3(
        stack_fn,
        1280,
        input_shape,
        alpha,
        "large",
        minimalistic,
        include_top,
        weights,
        input_tensor,
        classes,
        pooling,
        dropout_rate,
        classifier_activation,
        include_preprocessing,
    )


MobileNetV3Small.__doc__ = BASE_DOCSTRING.format(name="MobileNetV3Small")
MobileNetV3Large.__doc__ = BASE_DOCSTRING.format(name="MobileNetV3Large")


def relu(x):
    return layers.ReLU()(x)


def hard_sigmoid(x):
    return layers.ReLU(6.0)(x + 3.0) * (1.0 / 6.0)


def hard_swish(x):
    return layers.Multiply()([x, hard_sigmoid(x)])


# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/
# slim/nets/mobilenet/mobilenet.py
def _depth(v, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


def _se_block(inputs, filters, se_ratio, prefix):
    x = layers.GlobalAveragePooling2D(
        keepdims=True, name=prefix + "squeeze_excite/AvgPool"
    )(inputs)
    x = layers.Conv2D(
        _depth(filters * se_ratio),
        kernel_size=1,
        padding="same",
        name=prefix + "squeeze_excite/Conv",
    )(x)
    x = layers.ReLU(name=prefix + "squeeze_excite/Relu")(x)
    x = layers.Conv2D(
        filters,
        kernel_size=1,
        padding="same",
        name=prefix + "squeeze_excite/Conv_1",
    )(x)
    x = hard_sigmoid(x)
    x = layers.Multiply(name=prefix + "squeeze_excite/Mul")([inputs, x])
    return x


def _inverted_res_block(
    x, expansion, filters, kernel_size, stride, se_ratio, activation, block_id
):
    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
    shortcut = x
    prefix = "expanded_conv/"
    infilters = backend.int_shape(x)[channel_axis]
    if block_id:
        # Expand
        prefix = f"expanded_conv_{block_id}/"
        x = layers.Conv2D(
            _depth(infilters * expansion),
            kernel_size=1,
            padding="same",
            use_bias=False,
            name=prefix + "expand",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis,
            epsilon=1e-3,
            momentum=0.999,
            name=prefix + "expand/BatchNorm",
        )(x)
        x = activation(x)

    if stride == 2:
        x = layers.ZeroPadding2D(
            padding=imagenet_utils.correct_pad(x, kernel_size),
            name=prefix + "depthwise/pad",
        )(x)
    x = layers.DepthwiseConv2D(
        kernel_size,
        strides=stride,
        padding="same" if stride == 1 else "valid",
        use_bias=False,
        name=prefix + "depthwise",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=prefix + "depthwise/BatchNorm",
    )(x)
    x = activation(x)

    if se_ratio:
        x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)

    x = layers.Conv2D(
        filters,
        kernel_size=1,
        padding="same",
        use_bias=False,
        name=prefix + "project",
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=prefix + "project/BatchNorm",
    )(x)

    if stride == 1 and infilters == filters:
        x = layers.Add(name=prefix + "Add")([shortcut, x])
    return x


@keras_export("keras.applications.mobilenet_v3.preprocess_input")
def preprocess_input(x, data_format=None):
    """A placeholder method for backward compatibility.

    The preprocessing logic has been included in the mobilenet_v3 model
    implementation. Users are no longer required to call this method to
    normalize the input data. This method does nothing and only kept as a
    placeholder to align the API surface between old and new version of model.

    Args:
      x: A floating point `numpy.array` or a `tf.Tensor`.
      data_format: Optional data format of the image tensor/array. `None`
        means the global setting `tf.keras.backend.image_data_format()` is
        used (unless you changed it, it uses "channels_last").
        Defaults to `None`.

    Returns:
      Unchanged `numpy.array` or `tf.Tensor`.
    """
    return x


@keras_export("keras.applications.mobilenet_v3.decode_predictions")
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)


decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
tf-keras/tf_keras/applications/mobilenet_v3.py/0
{ "file_path": "tf-keras/tf_keras/applications/mobilenet_v3.py", "repo_id": "tf-keras", "token_count": 11289 }
157
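A common transfer-learning pattern with the models defined above, assuming the `tf_keras` package is installed and the ImageNet weights can be downloaded (class count and shapes are illustrative):

```python
import tf_keras as keras

base = keras.applications.MobileNetV3Small(
    input_shape=(224, 224, 3),
    include_top=False,
    weights="imagenet",
    pooling="avg",
)
base.trainable = False  # freeze the backbone

inputs = keras.Input(shape=(224, 224, 3))
x = base(inputs, training=False)  # Rescaling is built into the model
outputs = keras.layers.Dense(5, activation="softmax")(x)
model = keras.Model(inputs, outputs)
```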
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utils for benchmarks."""

import timeit

import numpy as np

from tf_keras import callbacks
from tf_keras.benchmarks import distribution_util


def get_benchmark_name(name):
    """Split the suffix of the benchmark name.

    For example, for the name = 'benchmark_layer_call__Conv2D_small_shape',
    the return value is ['Conv2D', 'small', 'shape'].

    This is to generate the metadata of the benchmark test.

    Args:
      name: A string, the benchmark name.

    Returns:
      A list of strings of the suffix in the benchmark name.
    """
    if "__" not in name or "_" not in name:
        raise ValueError("The format of the benchmark name is wrong.")
    return name.split("__")[-1].split("_")


def generate_benchmark_params_cpu_gpu(*params_list):
    """Extend the benchmark names with CPU and GPU suffix.

    Args:
      *params_list: A list of tuples represents the benchmark parameters.

    Returns:
      A list of strings with the benchmark name extended with CPU and GPU
        suffix.
    """
    benchmark_params = []
    for params in params_list:
        benchmark_params.extend(
            [((param[0] + "_CPU",) + param[1:]) for param in params]
        )
        benchmark_params.extend(
            [((param[0] + "_GPU",) + param[1:]) for param in params]
        )
    return benchmark_params


def get_keras_examples_metadata(
    keras_model, batch_size, impl=".keras.cfit_graph"
):
    return {
        "model_name": "keras_examples",
        "implementation": keras_model + impl,
        "parameters": "bs_" + str(batch_size),
    }


class TimerCallBack(callbacks.Callback):
    """Callback for logging time in each epoch or batch."""

    def __init__(self):
        self.times = []
        self.timer = timeit.default_timer
        self.startup_time = timeit.default_timer()
        self.recorded_startup = False

    def on_epoch_begin(self, e, logs):
        self.epoch_start_time = self.timer()

    def on_epoch_end(self, e, logs):
        self.times.append(self.timer() - self.epoch_start_time)

    def on_batch_end(self, e, logs):
        if not self.recorded_startup:
            self.startup_time = self.timer() - self.startup_time
            self.recorded_startup = True


def measure_performance(
    model_fn,
    x=None,
    y=None,
    epochs=2,
    batch_size=32,
    run_iters=4,
    optimizer=None,
    loss=None,
    metrics=None,
    verbose=0,
    num_gpus=0,
    distribution_strategy="off",
):
    """Run models and measure the performance.

    Args:
      model_fn: Model function to be benchmarked.
      x: Input data. See `x` in the `fit()` method of `keras.Model`.
      y: Target data. See `y` in the `fit()` method of `keras.Model`.
      epochs: Integer. Number of epochs to train the model.
        If unspecified, `epochs` will default to 2.
      batch_size: Integer. Number of samples per gradient update.
        If unspecified, `batch_size` will default to 32.
      run_iters: Integer. Number of iterations to run the performance
        measurement. If unspecified, `run_iters` will default to 4.
      optimizer: String (name of optimizer) or optimizer instance. See
        `keras.optimizers`.
      loss: String (name of objective function), objective function or
        `keras.losses.Loss` instance. See `keras.losses`.
      metrics: Lists of metrics to be evaluated by the model during training.
        See `metrics` in the `compile()` method of `keras.Model`.
      verbose: 0, 1, 2. Verbosity mode. See `verbose` in the `fit()` method
        of `keras.Model`. If unspecified, `verbose` will default to 0.
      num_gpus: Number of GPUs to run the model.
      distribution_strategy: Distribution strategies. It could be
        `multi_worker_mirrored`, `one_device`, `mirrored`. If unspecified,
        `distribution_strategy` will default to 'off'. Note that, `TPU`
        and `parameter_server` are not supported yet.

    Returns:
      Performance summary, which contains build_time, compile_time,
      startup_time, avg_epoch_time, wall_time, exp_per_sec, epochs,
      distribution_strategy.

    Raises:
      ValueError: If `x` is None or if `optimizer` is not provided or if
        `loss` is not provided or if `num_gpus` is negative.
    """
    if x is None:
        raise ValueError("Input data is required.")
    elif optimizer is None:
        raise ValueError("Optimizer is required.")
    elif loss is None:
        raise ValueError("Loss function is required.")
    elif num_gpus < 0:
        raise ValueError("`num_gpus` cannot be negative")

    # TODO(xingyulong): we will add tfds support later and
    # get the `num_examples` from info.
    num_examples = x.shape[0]

    build_time_list, compile_time_list, startup_time_list = [], [], []
    avg_epoch_time_list, wall_time_list, exp_per_sec_list = [], [], []
    total_num_examples = epochs * num_examples

    strategy = distribution_util.get_distribution_strategy(
        distribution_strategy=distribution_strategy, num_gpus=num_gpus
    )

    for _ in range(run_iters):
        timer = timeit.default_timer
        start_time = timer()
        # Init the distribution strategy scope for each iteration.
        strategy_scope = distribution_util.get_strategy_scope(strategy)
        with strategy_scope:
            t0 = timer()
            model = model_fn()
            build_time = timer() - t0

            t1 = timer()
            model.compile(
                optimizer=optimizer,
                loss=loss,
                metrics=metrics,
            )
            compile_time = timer() - t1
        # Run one warm up epoch.
        model.fit(x=x, y=y, batch_size=batch_size, epochs=1)
        cbk = TimerCallBack()
        t2 = timer()
        model.fit(
            x=x,
            y=y,
            batch_size=batch_size,
            epochs=epochs,
            callbacks=[cbk],
            verbose=verbose,
        )
        end_time = timer()

        build_time_list.append(build_time)
        compile_time_list.append(compile_time)
        startup_time_list.append(cbk.startup_time)
        avg_epoch_time_list.append(np.mean(cbk.times))
        wall_time_list.append(end_time - start_time)
        exp_per_sec_list.append(total_num_examples / (end_time - t2))

    metrics = []
    metrics.append({"name": "build_time", "value": np.mean(build_time_list)})
    metrics.append(
        {"name": "compile_time", "value": np.mean(compile_time_list)}
    )
    metrics.append(
        {"name": "startup_time", "value": np.mean(startup_time_list)}
    )
    metrics.append(
        {"name": "avg_epoch_time", "value": np.mean(avg_epoch_time_list)}
    )
    metrics.append({"name": "exp_per_sec", "value": np.mean(exp_per_sec_list)})
    metrics.append({"name": "epochs", "value": epochs})

    wall_time = np.mean(wall_time_list)
    extras = {
        "distribution_strategy": distribution_strategy,
        "num_gpus": num_gpus,
    }

    return metrics, wall_time, extras
tf-keras/tf_keras/benchmarks/benchmark_util.py/0
{ "file_path": "tf-keras/tf_keras/benchmarks/benchmark_util.py", "repo_id": "tf-keras", "token_count": 3074 }
158
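A sketch of how `measure_performance` is meant to be driven (model and data here are illustrative; `optimizer` and `loss` are mandatory, as the validation code above enforces):

```python
import numpy as np

from tf_keras import layers
from tf_keras import models
from tf_keras.benchmarks import benchmark_util


def model_fn():
    return models.Sequential([layers.Dense(10, activation="softmax")])


x = np.random.random((256, 20)).astype("float32")
y = np.random.randint(0, 10, (256,))

metrics, wall_time, extras = benchmark_util.measure_performance(
    model_fn,
    x=x,
    y=y,
    epochs=2,
    batch_size=32,
    optimizer="adam",
    loss="sparse_categorical_crossentropy",
)
```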
# Description:
#   Implementation of benchmarks on TF-Keras layers.

# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")

package(
    # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],
)

BENCHMARK_TAGS = [
    "no_oss_py38",  # TODO(b/162044699)
    "no_pip",  # TODO(b/161253163)
    "no_windows",  # TODO(b/160628318)
]

# To run CPU benchmarks:
#   bazel run -c opt benchmarks_test -- --benchmarks=.

# To run GPU benchmarks:
#   bazel run -c opt --config=cuda benchmarks_test -- \
#     --benchmarks=.

# To run benchmarks with TFRT:
#   bazel run -c opt --config=cuda --test_env=EXPERIMENTAL_ENABLE_TFRT=1 benchmarks_test -- \
#     --benchmarks=.

# To run a subset of benchmarks, use the --benchmarks flag.
# --benchmarks: the list of benchmarks to run. The specified value is
# interpreted as a regular expression and any benchmark whose name contains
# a partial match to the regular expression is executed.
# e.g. --benchmarks=".*lstm*." will run all lstm layer related benchmarks.

py_library(
    name = "run_xprof",
    srcs = ["run_xprof.py"],
    srcs_version = "PY3",
    visibility = ["//visibility:private"],
)

py_library(
    name = "layer_benchmarks_test_base",
    srcs = ["layer_benchmarks_test_base.py"],
    srcs_version = "PY3",
    visibility = ["//visibility:private"],
    deps = [
        ":run_xprof",
        "//:expect_tensorflow_installed",
        "//tf_keras/benchmarks:profiler_lib",
    ],
)

tf_py_test(
    name = "layer_benchmarks_test",
    srcs = ["layer_benchmarks_test.py"],
    python_version = "PY3",
    tags = BENCHMARK_TAGS,
    deps = [
        ":layer_benchmarks_test_base",
        "//:expect_tensorflow_installed",
        "//tf_keras/api:tf_keras_api",
        "//tf_keras/benchmarks:benchmark_util",
    ],
)
tf-keras/tf_keras/benchmarks/layer_benchmarks/BUILD/0
{ "file_path": "tf-keras/tf_keras/benchmarks/layer_benchmarks/BUILD", "repo_id": "tf-keras", "token_count": 780 }
159
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """MNIST handwritten digits dataset.""" import numpy as np from tf_keras.utils.data_utils import get_file # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.datasets.mnist.load_data") def load_data(path="mnist.npz"): """Loads the MNIST dataset. This is a dataset of 60,000 28x28 grayscale images of the 10 digits, along with a test set of 10,000 images. More info can be found at the [MNIST homepage](http://yann.lecun.com/exdb/mnist/). Args: path: path where to cache the dataset locally (relative to `~/.keras/datasets`). Returns: Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train**: uint8 NumPy array of grayscale image data with shapes `(60000, 28, 28)`, containing the training data. Pixel values range from 0 to 255. **y_train**: uint8 NumPy array of digit labels (integers in range 0-9) with shape `(60000,)` for the training data. **x_test**: uint8 NumPy array of grayscale image data with shapes (10000, 28, 28), containing the test data. Pixel values range from 0 to 255. **y_test**: uint8 NumPy array of digit labels (integers in range 0-9) with shape `(10000,)` for the test data. Example: ```python (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() assert x_train.shape == (60000, 28, 28) assert x_test.shape == (10000, 28, 28) assert y_train.shape == (60000,) assert y_test.shape == (10000,) ``` License: Yann LeCun and Corinna Cortes hold the copyright of MNIST dataset, which is a derivative work from original NIST datasets. MNIST dataset is made available under the terms of the [Creative Commons Attribution-Share Alike 3.0 license.]( https://creativecommons.org/licenses/by-sa/3.0/) """ origin_folder = ( "https://storage.googleapis.com/tensorflow/tf-keras-datasets/" ) path = get_file( path, origin=origin_folder + "mnist.npz", file_hash=( # noqa: E501 "731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1" ), ) with np.load(path, allow_pickle=True) as f: x_train, y_train = f["x_train"], f["y_train"] x_test, y_test = f["x_test"], f["y_test"] return (x_train, y_train), (x_test, y_test)
tf-keras/tf_keras/datasets/mnist.py/0
{ "file_path": "tf-keras/tf_keras/datasets/mnist.py", "repo_id": "tf-keras", "token_count": 1148 }
160
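The `load_data` docstring above fully specifies the returned shapes and dtypes. A short preprocessing sketch grounded only in those documented values:

```python
import numpy as np

from tf_keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Scale the documented uint8 pixel range [0, 255] to [0, 1] floats and
# flatten each 28x28 image into a 784-dim vector.
x_train = x_train.reshape(-1, 28 * 28).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28 * 28).astype("float32") / 255.0

assert x_train.shape == (60000, 784)
assert x_test.shape == (10000, 784)
assert y_train.dtype == np.uint8
```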
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities that help manage directory paths in distributed settings.

In multi-worker training, writing a file to a distributed file location
often requires only one copy, written by one worker, despite the many workers
that are involved in training. The option to only perform saving by chief is
not feasible for a couple of reasons: 1) Chief and workers may each contain a
client that runs the same piece of code and it's preferred not to make any
distinction between the code run by chief and other workers, and 2) saving of
model or model's related information may require SyncOnRead variables to be
read, which needs the cooperation of all workers to perform all-reduce.

This set of utilities is used so that only one copy is written to the needed
directory, by supplying a temporary write directory path for workers that
don't need to save, and removing the temporary directory once file writing is
done.

Example usage:
```
# Before using a directory to write file to.
self.log_write_dir = write_dirpath(self.log_dir, get_distribution_strategy())
# Now `self.log_write_dir` can be safely used to write file to.

...

# After the file is written to the directory.
remove_temp_dirpath(self.log_dir, get_distribution_strategy())

```

Experimental. API is subject to change.
"""

import os

import requests
import tensorflow.compat.v2 as tf

GCP_METADATA_HEADER = {"Metadata-Flavor": "Google"}
_GCE_METADATA_URL_ENV_VARIABLE = "GCE_METADATA_IP"


def _get_base_dirpath(strategy):
    task_id = strategy.extended._task_id
    return "workertemp_" + str(task_id)


def _is_temp_dir(dirpath, strategy):
    return dirpath.endswith(_get_base_dirpath(strategy))


def _get_temp_dir(dirpath, strategy):
    if _is_temp_dir(dirpath, strategy):
        temp_dir = dirpath
    else:
        temp_dir = os.path.join(dirpath, _get_base_dirpath(strategy))
    tf.io.gfile.makedirs(temp_dir)
    return temp_dir


def write_dirpath(dirpath, strategy):
    """Returns the writing dir that should be used to save file distributedly.

    `dirpath` would be created if it doesn't exist.

    Args:
      dirpath: Original dirpath that would be used without distribution.
      strategy: The tf.distribute strategy object currently used.

    Returns:
      The writing dir path that should be used to save with distribution.
    """
    if strategy is None:
        # Infer strategy from `tf.distribute` if not given.
        strategy = tf.distribute.get_strategy()
    if strategy is None:
        # If strategy is still not available, this is not in distributed
        # training. Fallback to original dirpath.
        return dirpath
    if not strategy.extended._in_multi_worker_mode():
        return dirpath
    if strategy.extended.should_checkpoint:
        return dirpath
    # If this worker is not chief and hence should not save file, save it to a
    # temporary directory to be removed later.
    return _get_temp_dir(dirpath, strategy)


def remove_temp_dirpath(dirpath, strategy):
    """Removes the temp path after writing is finished.
    Args:
      dirpath: Original dirpath that would be used without distribution.
      strategy: The tf.distribute strategy object currently used.
    """
    if strategy is None:
        # Infer strategy from `tf.distribute` if not given.
        strategy = tf.distribute.get_strategy()
    if strategy is None:
        # If strategy is still not available, this is not in distributed
        # training. Fallback to no-op.
        return
    # TODO(anjalisridhar): Consider removing the check for multi worker mode
    # since it is redundant when used with the should_checkpoint property.
    if (
        strategy.extended._in_multi_worker_mode()
        and not strategy.extended.should_checkpoint
    ):
        # If this worker is not chief and hence should not save file, remove
        # the temporary directory.
        tf.compat.v1.gfile.DeleteRecursively(_get_temp_dir(dirpath, strategy))


def write_filepath(filepath, strategy):
    """Returns the writing file path to be used to save file distributedly.

    Directory to contain `filepath` would be created if it doesn't exist.

    Args:
      filepath: Original filepath that would be used without distribution.
      strategy: The tf.distribute strategy object currently used.

    Returns:
      The writing filepath that should be used to save file with distribution.
    """
    dirpath = os.path.dirname(filepath)
    base = os.path.basename(filepath)
    return os.path.join(write_dirpath(dirpath, strategy), base)


def remove_temp_dir_with_filepath(filepath, strategy):
    """Removes the temp path for file after writing is finished.

    Args:
      filepath: Original filepath that would be used without distribution.
      strategy: The tf.distribute strategy object currently used.
    """
    remove_temp_dirpath(os.path.dirname(filepath), strategy)


def _on_gcp():
    """Detect whether the current running environment is on GCP."""
    gce_metadata_endpoint = "http://" + os.environ.get(
        _GCE_METADATA_URL_ENV_VARIABLE, "metadata.google.internal"
    )

    try:
        # Timeout in 5 seconds, in case the test environment has connectivity
        # issues. There is no default timeout, which means a request might
        # block forever.
        response = requests.get(
            f"{gce_metadata_endpoint}/computeMetadata/v1/instance/hostname",
            headers=GCP_METADATA_HEADER,
            timeout=5,
        )
        # Only a successful response from the metadata server indicates that
        # we are running on GCP.
        return response.status_code == 200
    except requests.exceptions.RequestException:
        return False


def support_on_demand_checkpoint_callback(strategy):
    if _on_gcp() and isinstance(
        strategy, tf.distribute.MultiWorkerMirroredStrategy
    ):
        return True

    return False
tf-keras/tf_keras/distribute/distributed_file_utils.py/0
{ "file_path": "tf-keras/tf_keras/distribute/distributed_file_utils.py", "repo_id": "tf-keras", "token_count": 2096 }
161
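The module docstring above prescribes a write-then-cleanup pattern. The sketch below exercises it through `write_filepath` and `remove_temp_dir_with_filepath` from this file; under the default single-worker strategy both helpers fall through to the original path, so the sketch runs standalone.

```python
import tensorflow as tf

from tf_keras.distribute import distributed_file_utils

strategy = tf.distribute.get_strategy()  # Default, non-multi-worker strategy.

# Resolve a per-worker-safe path. Outside multi-worker mode this simply
# returns the original path; non-chief workers would get a temp subdirectory.
ckpt_path = distributed_file_utils.write_filepath(
    "/tmp/ckpt/model.h5", strategy
)

# ... write the checkpoint file at `ckpt_path` here ...

# Clean up the temporary directory written by non-chief workers (a no-op
# for the chief and for single-worker runs).
distributed_file_utils.remove_temp_dir_with_filepath(
    "/tmp/ckpt/model.h5", strategy
)
```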
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models with callbacks, checkpointing with dist strategy."""

import collections
import tempfile

import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized

import tf_keras as keras
from tf_keras import losses
from tf_keras.distribute import distribute_strategy_test as keras_test_lib
from tf_keras.distribute import distributed_training_utils_v1
from tf_keras.distribute import optimizer_combinations


class Counter(keras.callbacks.Callback):
    """Counts the number of times each callback method was run.

    Attributes:
      method_counts: dict. Contains the count of times each callback method
        was run.
    """

    def __init__(self):
        self.method_counts = collections.defaultdict(int)
        methods_to_count = [
            "on_batch_begin",
            "on_batch_end",
            "on_epoch_begin",
            "on_epoch_end",
            "on_predict_batch_begin",
            "on_predict_batch_end",
            "on_predict_begin",
            "on_predict_end",
            "on_test_batch_begin",
            "on_test_batch_end",
            "on_test_begin",
            "on_test_end",
            "on_train_batch_begin",
            "on_train_batch_end",
            "on_train_begin",
            "on_train_end",
        ]
        for method_name in methods_to_count:
            setattr(
                self,
                method_name,
                self.wrap_with_counts(method_name, getattr(self, method_name)),
            )

    def wrap_with_counts(self, method_name, method):
        def _call_and_count(*args, **kwargs):
            self.method_counts[method_name] += 1
            return method(*args, **kwargs)

        return _call_and_count


class TestDistributionStrategyWithCallbacks(
    tf.test.TestCase, parameterized.TestCase
):
    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.times(
            keras_test_lib.all_strategy_combinations()
        )
    )
    def test_callbacks_in_fit(self, distribution):
        with distribution.scope():
            model = keras_test_lib.get_model()
            model.compile(optimizer="sgd", loss="mse", metrics=["mae"])

        dataset = keras_test_lib.get_dataset(distribution)
        counter = Counter()

        epochs = 2
        steps_per_epoch = 5
        validation_steps = 3

        model.fit(
            dataset,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            verbose=0,
            validation_data=dataset,
            validation_steps=validation_steps,
            callbacks=[counter],
        )

        if (
            isinstance(
                distribution, tf.compat.v1.distribute.experimental.TPUStrategy
            )
            and not tf.executing_eagerly()
        ):
            # TPU Strategy can run multiple training steps per host call, as
            # configured by `extended.steps_per_run`. If `steps_per_run == 1`,
            # then `num_batch_call_per_epoch == steps_per_epoch`.
            steps_per_run = distribution.extended.steps_per_run
            num_batch_call_per_epoch = steps_per_epoch // steps_per_run
            if steps_per_epoch % steps_per_run:
                num_batch_call_per_epoch += 1
        else:
            num_batch_call_per_epoch = steps_per_epoch

        self.assertDictEqual(
            counter.method_counts,
            {
                "on_batch_begin": epochs * num_batch_call_per_epoch,
                "on_batch_end": epochs * num_batch_call_per_epoch,
                "on_epoch_begin": epochs,
                "on_epoch_end": epochs,
                "on_test_batch_begin": epochs * validation_steps,
                "on_test_batch_end": epochs * validation_steps,
                "on_test_begin": epochs,
"on_test_end": epochs, "on_train_batch_begin": epochs * num_batch_call_per_epoch, "on_train_batch_end": epochs * num_batch_call_per_epoch, "on_train_begin": 1, "on_train_end": 1, }, ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( keras_test_lib.all_strategy_combinations() ) ) def test_callbacks_in_eval(self, distribution): with distribution.scope(): model = keras_test_lib.get_model() model.compile(optimizer="sgd", loss="mse", metrics=["mae"]) dataset = keras_test_lib.get_dataset(distribution) counter = Counter() model.evaluate(dataset, steps=5, callbacks=[counter]) self.assertDictEqual( counter.method_counts, { "on_test_batch_begin": 5, "on_test_batch_end": 5, "on_test_begin": 1, "on_test_end": 1, }, ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( keras_test_lib.all_strategy_combinations() ) ) def test_callbacks_in_predict(self, distribution): with distribution.scope(): model = keras_test_lib.get_model() model.compile(optimizer="sgd", loss="mse", metrics=["mae"]) dataset = keras_test_lib.get_dataset(distribution) counter = Counter() model.predict( keras_test_lib.get_predict_dataset(dataset), steps=5, callbacks=[counter], ) self.assertDictEqual( counter.method_counts, { "on_predict_batch_begin": 5, "on_predict_batch_end": 5, "on_predict_begin": 1, "on_predict_end": 1, }, ) class TestDistributionStrategyErrorCases( tf.test.TestCase, parameterized.TestCase ): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501 ], mode=["graph"], ) ) def test_validating_dataset_input_tensors_with_shape_mismatch( self, distribution ): with self.cached_session(): @tf.function def run(): ctx = tf.distribute.get_replica_context() if ctx.replica_id_in_sync_group.device.endswith("GPU:0"): return tf.constant([[1, 2]]) else: return tf.constant([[1, 2], [1, 2]]) x = distribution.run(run) # Removed device and input tensor shape details from the error # message since the order of the device and the corresponding input # tensor shape is not deterministic over different runs. with self.assertRaisesRegex( ValueError, "Input tensor shapes do not match for " "distributed tensor inputs " "PerReplica:.+", ): with distribution.scope(): distributed_training_utils_v1.validate_distributed_dataset_inputs( # noqa: E501 distribution, x, None ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501 ], mode=["graph", "eager"], ) ) def test_validating_dataset_input_tensors_with_dtype_mismatch( self, distribution ): with self.cached_session(): @tf.function def run(): ctx = tf.distribute.get_replica_context() if ctx.replica_id_in_sync_group.device.endswith("GPU:0"): return tf.constant([[1, 2]], dtype=tf.int32) else: return tf.constant([[1, 2]], dtype=tf.float64) x = distribution.run(run) # Removed device and input tensor dtype details from the error # message since the order of the device and the corresponding input # tensor dtype is not deterministic over different runs. 
with self.assertRaisesRegex( ValueError, "Input tensor dtypes do not match for " "distributed tensor inputs " "PerReplica:.+", ): with distribution.scope(): distributed_training_utils_v1.validate_distributed_dataset_inputs( # noqa: E501 distribution, x, None ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501 ], mode=["graph", "eager"], ) ) def test_unsupported_features(self, distribution, mode): with self.cached_session(): with distribution.scope(): model = keras_test_lib.get_model() optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.001) loss = "mse" metrics = ["mae"] model.compile(optimizer, loss, metrics=metrics) dataset = keras_test_lib.get_dataset(distribution) # Test with validation split with self.assertRaises(ValueError): model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, validation_split=0.5, validation_steps=2, ) # Test with sample weight. sample_weight = np.random.random((10,)) with self.assertRaises(ValueError): model.fit( dataset, epochs=1, steps_per_epoch=2, verbose=0, sample_weight=sample_weight, ) # Test with not specifying the `steps` argument for dataset with # infinite cardinality. dataset = dataset.repeat() with self.assertRaises(ValueError): model.fit(dataset, epochs=1, verbose=0) with self.assertRaises(ValueError): model.evaluate(dataset, verbose=0) with self.assertRaises(ValueError): model.predict(dataset, verbose=0) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501 tf.__internal__.distribute.combinations.one_device_strategy, ], mode=["graph", "eager"], ) ) def test_distribution_strategy_on_subclassed_model(self, distribution): with distribution.scope(): class _SimpleMLP(keras.Model): def __init__(self, num_labels): super().__init__() self.dense = keras.layers.Dense(num_labels) def call(self, inputs): return self.dense(inputs) model = _SimpleMLP(3) if not tf.executing_eagerly(): with self.assertRaisesRegex( ValueError, "We currently do not support distribution strategy with a " "`Sequential` model that is created without `input_shape`/" "`input_dim` set in its first layer or a subclassed model.", ): model.compile("sgd") else: model.compile("sgd") @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501 tf.__internal__.distribute.combinations.one_device_strategy, ], mode=["graph", "eager"], ) ) def test_distribution_strategy_on_deferred_sequential_model( self, distribution ): with distribution.scope(): model = keras.models.Sequential() model.add(keras.layers.Dense(16, activation="relu")) model.add(keras.layers.Dense(3, activation="softmax")) if tf.executing_eagerly(): model.compile("sgd") else: with self.assertRaisesRegex( ValueError, "We currently do not support distribution strategy with a " "`Sequential` model that is created without " "`input_shape`/`input_dim` set in its first layer or " "a subclassed model.", ): model.compile("sgd") @tf.__internal__.distribute.combinations.generate( keras_test_lib.all_strategy_combinations_minus_default() ) def test_standalone_loss_without_loss_reduction(self, distribution): with distribution.scope(): loss_object = losses.MeanSquaredError() with self.assertRaisesRegex( ValueError, 
"Please use `tf.keras.losses.Reduction.SUM` or " "`tf.keras.losses.Reduction.NONE`", ): y = np.asarray([1, 0]) loss_object(y, y) class TestDistributionStrategyWithLossMasking( tf.test.TestCase, parameterized.TestCase ): # TODO(priyag): Enable all strategies for this test. Currently it does not # work for TPU due to some invalid datatype. @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501 ], mode=["graph", "eager"], optimizer=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn, # noqa: E501 ) ) def test_masking(self, distribution, optimizer): with self.cached_session(): np.random.seed(1337) x = np.array([[[1], [1]], [[0], [0]]]) with distribution.scope(): model = keras.models.Sequential() model.add( keras.layers.Masking(mask_value=0, input_shape=(2, 1)) ) model.add( keras.layers.TimeDistributed( keras.layers.Dense(1, kernel_initializer="one") ) ) model.compile(loss="mse", optimizer=optimizer()) y = np.array([[[1], [1]], [[1], [1]]]) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.repeat(100) dataset = dataset.batch(10) hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2) self.assertEqual(hist.history["loss"][0], 0) class TestDistributionStrategyWithNormalizationLayer( tf.test.TestCase, parameterized.TestCase ): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( keras_test_lib.all_strategy_combinations(), tf.__internal__.test.combinations.combine( fused=[True, False], optimizer=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn, # noqa: E501 ), ) ) def test_batchnorm_correctness(self, distribution, fused, optimizer): with self.cached_session(): with distribution.scope(): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( input_shape=( 10, 20, 30, ), momentum=0.8, fused=fused, ) model.add(norm) model.compile(loss="mse", optimizer=optimizer()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 20, 30)) x = x.astype("float32") dataset = tf.data.Dataset.from_tensor_slices((x, x)) dataset = dataset.repeat(100) dataset = keras_test_lib.batch_wrapper(dataset, 32, distribution) predict_dataset = tf.data.Dataset.from_tensor_slices(x) predict_dataset = predict_dataset.repeat(100) predict_dataset = keras_test_lib.batch_wrapper( predict_dataset, 32, distribution ) model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10) out = model.predict(predict_dataset, steps=2) out -= keras.backend.eval(norm.beta) out /= keras.backend.eval(norm.gamma) np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1) np.testing.assert_allclose(out.std(), 1.0, atol=1e-1) # TODO(b/146181571): Enable this for all distribution strategies once # DistributedVariable.assign() returns a variable for MirroredStrategy. 
@tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( keras_test_lib.tpu_strategy_combinations(), tf.__internal__.test.combinations.combine( optimizer=optimizer_combinations.gradient_descent_optimizer_keras_v2_fn # noqa: E501 ), ) ) def test_batchnorm_correctness_with_renorm(self, distribution, optimizer): with self.cached_session(): with distribution.scope(): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( input_shape=( 10, 20, 30, ), momentum=0.8, fused=False, renorm=True, ) model.add(norm) model.compile(loss="mse", optimizer=optimizer()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 20, 30)) x = x.astype("float32") dataset = tf.data.Dataset.from_tensor_slices((x, x)) dataset = dataset.repeat(100) dataset = keras_test_lib.batch_wrapper(dataset, 32, distribution) predict_dataset = tf.data.Dataset.from_tensor_slices(x) predict_dataset = predict_dataset.repeat(100) predict_dataset = keras_test_lib.batch_wrapper( predict_dataset, 32, distribution ) model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10) out = model.predict(predict_dataset, steps=2) out -= keras.backend.eval(norm.beta) out /= keras.backend.eval(norm.gamma) np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1) np.testing.assert_allclose(out.std(), 1.0, atol=1e-1) class TestDistributionStrategySaveLoadWeights( tf.test.TestCase, parameterized.TestCase ): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( keras_test_lib.all_strategy_combinations_minus_default(), tf.__internal__.test.combinations.combine( optimizer=optimizer_combinations.rmsprop_optimizer_keras_v2_fn ), ) ) def test_save_load_h5(self, distribution, optimizer): with self.cached_session(): dataset = keras_test_lib.get_dataset(distribution) with distribution.scope(): model = keras_test_lib.get_model() model.compile(optimizer(), "mse") model.fit(dataset, epochs=1, steps_per_epoch=1) weights_file = tempfile.mktemp(".h5") model.save_weights(weights_file) model_2 = keras_test_lib.get_model() model_2.compile(optimizer(), "mse") model_2.load_weights(weights_file) model_2.predict( keras_test_lib.get_predict_dataset(distribution), steps=2 ) model_2.fit(dataset, epochs=1, steps_per_epoch=1) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( keras_test_lib.all_strategy_combinations_minus_default(), tf.__internal__.test.combinations.combine( optimizer=optimizer_combinations.rmsprop_optimizer_keras_v2_fn ), ) ) def test_save_load_trackable(self, distribution, optimizer): # TODO(b/123533246): Enable the test for TPU once bug is fixed if ( isinstance( distribution, ( tf.distribute.experimental.TPUStrategy, tf.compat.v1.distribute.experimental.TPUStrategy, ), ) and distribution.extended.steps_per_run > 1 ): self.skipTest( "MultiStep TPU Strategy deadlocks with optimizer restore." 
) with self.cached_session(): dataset = keras_test_lib.get_dataset(distribution) with distribution.scope(): model = keras_test_lib.get_model() model.compile(optimizer(), "mse") model.fit(dataset, epochs=1, steps_per_epoch=1) weights_file = tempfile.mktemp() model.save_weights(weights_file) model_2 = keras_test_lib.get_model() model_2.compile(optimizer(), "mse") model_2.load_weights(weights_file) model_2.predict( keras_test_lib.get_predict_dataset(distribution), steps=2 ) model_2.fit(dataset, epochs=1, steps_per_epoch=1) class TestDistributionStrategyValidation( tf.test.TestCase, parameterized.TestCase ): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( keras_test_lib.all_strategy_combinations_minus_default() ) ) def test_layer_outside_scope(self, distribution): with self.cached_session(): with self.assertRaisesRegex( ValueError, "was not created in the distribution strategy" ): x = keras.layers.Input(shape=(3,), name="input") y = keras.layers.Dense(4, name="dense")(x) with distribution.scope(): model = keras.Model(x, y) optimizer = tf.compat.v1.train.GradientDescentOptimizer( 0.001 ) loss = "mse" metrics = ["mae", keras.metrics.CategoricalAccuracy()] model.compile(optimizer, loss, metrics=metrics) @tf.__internal__.distribute.combinations.generate( keras_test_lib.all_strategy_combinations_minus_default() ) def test_model_outside_scope(self, distribution): with self.cached_session(): with self.assertRaisesRegex( ValueError, "was not created in the distribution strategy" ): x = keras.layers.Input(shape=(3,), name="input") y = keras.layers.Dense(4, name="dense")(x) model = keras.Model(x, y) with distribution.scope(): optimizer = tf.compat.v1.train.GradientDescentOptimizer( 0.001 ) loss = "mse" metrics = ["mae", keras.metrics.CategoricalAccuracy()] model.compile(optimizer, loss, metrics=metrics) class TestDistributionStrategyWithStaticShapes( tf.test.TestCase, parameterized.TestCase ): @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501 ], mode=["graph", "eager"], ) ) def test_input_batch_size_not_divisible_by_num_replicas(self, distribution): with distribution.scope(): with self.assertRaisesRegex( ValueError, r"The `batch_size` argument \(5\) must be divisible by " r"the number of replicas \(2\)", ): keras.layers.Input(shape=(3,), batch_size=5, name="input") @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501 ], mode=["graph", "eager"], ) ) def test_static_input_batch_size(self, distribution): inputs = np.zeros((10, 3), dtype=np.float32) targets = np.zeros((10, 4), dtype=np.float32) dataset = tf.data.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10, drop_remainder=True) with distribution.scope(): x = keras.layers.Input(shape=(3,), batch_size=10, name="input") y = keras.layers.Dense(4, name="dense")(x) model = keras.Model(x, y) model.compile(optimizer="sgd", loss="mse", metrics=["mae"]) model.fit(dataset, epochs=1, steps_per_epoch=5) model.evaluate(dataset, steps=5) model.predict(dataset) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
tf-keras/tf_keras/distribute/keras_utils_test.py/0
{ "file_path": "tf-keras/tf_keras/distribute/keras_utils_test.py", "repo_id": "tf-keras", "token_count": 13657 }
162
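The `Counter` callback in the test file above works by wrapping every hook method in a counting closure. A stripped-down sketch of the same bookkeeping idea, run against plain `tf.keras` outside any distribution strategy or test harness:

```python
import collections

import numpy as np
import tensorflow as tf


class CountingCallback(tf.keras.callbacks.Callback):
    """Tallies a couple of hooks, like the `Counter` callback above."""

    def __init__(self):
        super().__init__()
        self.method_counts = collections.defaultdict(int)

    def on_epoch_begin(self, epoch, logs=None):
        self.method_counts["on_epoch_begin"] += 1

    def on_train_batch_end(self, batch, logs=None):
        self.method_counts["on_train_batch_end"] += 1


model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="sgd", loss="mse")

cb = CountingCallback()
model.fit(
    np.zeros((20, 4)),
    np.zeros((20, 1)),
    batch_size=5,
    epochs=2,
    verbose=0,
    callbacks=[cb],
)

# 2 epochs; 20 samples / batch size 5 = 4 train batches per epoch.
assert cb.method_counts["on_epoch_begin"] == 2
assert cb.method_counts["on_train_batch_end"] == 2 * (20 // 5)
```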
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ClusterCoordinator and TF-Keras models.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.distribute import multi_worker_testing_utils from tf_keras.distribute import strategy_combinations from tf_keras.engine import base_layer class ShardedVariableTest(tf.test.TestCase, parameterized.TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.strategy = tf.distribute.experimental.ParameterServerStrategy( multi_worker_testing_utils.make_parameter_server_cluster(3, 2), variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner( # noqa: E501 2 ), ) def assert_list_all_equal(self, list1, list2): """Used in lieu of `assertAllEqual`. This is used to replace standard `assertAllEqual` for the cases where `list1` and `list2` contain `AggregatingVariable`. Lists with `AggregatingVariable` are not convertible to numpy array via `np.array` calls as numpy would raise `ValueError: setting an array element with a sequence.` Args: list1: The first list to compare equality. list2: The second list to compare equality. 
""" for lhs, rhs in zip(list1, list2): self.assertEqual(lhs, rhs) def test_keras_layer_setattr(self): class Layer(base_layer.Layer): def __init__(self): super().__init__() self.w = tf.Variable([0, 1]) self.b = tf.Variable([2, 3], trainable=False) with self.strategy.scope(): layer = Layer() self.assertLen(layer.trainable_weights, 2) self.assertEqual(layer.trainable_weights[0], [0]) self.assertEqual(layer.trainable_weights[1], [1]) self.assertLen(layer.non_trainable_weights, 2) self.assertEqual(layer.non_trainable_weights[0], [2]) self.assertEqual(layer.non_trainable_weights[1], [3]) self.assert_list_all_equal( layer.weights, layer.trainable_weights + layer.non_trainable_weights ) self.assert_list_all_equal( layer.trainable_weights, layer.trainable_variables ) self.assert_list_all_equal(layer.weights, layer.variables) checkpoint_deps = set(layer._trackable_children().values()) self.assertEqual(checkpoint_deps, set([layer.w, layer.b])) def test_keras_layer_add_weight(self): class Layer(base_layer.Layer): def __init__(self): super().__init__() self.w = self.add_weight( shape=(2,), initializer=lambda shape, dtype: tf.constant( [0.0, 1.0], ), trainable=True, ) self.b = self.add_weight( shape=(2,), initializer=lambda shape, dtype: tf.constant([2.0, 3.0]), trainable=False, ) with self.strategy.scope(): layer = Layer() self.assertLen(layer.trainable_weights, 2) self.assertEqual(layer.trainable_weights[0], [0.0]) self.assertEqual(layer.trainable_weights[1], [1.0]) self.assertLen(layer.non_trainable_weights, 2) self.assertEqual(layer.non_trainable_weights[0], [2.0]) self.assertEqual(layer.non_trainable_weights[1], [3.0]) self.assert_list_all_equal( layer.weights, layer.trainable_weights + layer.non_trainable_weights ) self.assert_list_all_equal( layer.trainable_weights, layer.trainable_variables ) self.assert_list_all_equal(layer.weights, layer.variables) checkpoint_deps = set(layer._trackable_children().values()) self.assertEqual(checkpoint_deps, set([layer.w, layer.b])) def test_keras_metrics(self): with self.strategy.scope(): fp = keras.metrics.FalsePositives(thresholds=[0.2, 0.5, 0.7, 0.8]) auc = keras.metrics.AUC(num_thresholds=10) @tf.function def update(): fp.update_state([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.3, 0.9]) auc.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9]) @tf.function def reset(): fp.reset_state() auc.reset_state() update() self.assertEqual(auc.result(), 0.75) self.assertAllEqual(fp.result(), [2.0, 1.0, 1.0, 1.0]) reset() self.assertEqual(auc.result(), 0.0) self.assertAllEqual(fp.result(), [0.0, 0.0, 0.0, 0.0]) self.assertTrue(hasattr(auc.true_positives, "variables")) self.assertTrue(hasattr(fp.accumulator, "variables")) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( shard_config=[ [2, 2], [2, 3], [3, 2], [2, 1], [1, 1], [1, 2], [1, 3], ], model_type=["dense", "embedding"], ) ) def test_saved_model_combined(self, shard_config, model_type): """Test saving and loading models with various fixed numbers of shards. Args: shard_config: The number of shards to use per variable before and after loading. For example, [1, 3] means to create and save the model with 1 shard (i.e., no variable partitioning), and load it into 3 shards per variable. model_type: Either 'dense' or 'embedding', which simple model to test. 
""" def create_embedding_model(): inputs = keras.layers.Input(shape=(6,)) embedding = keras.layers.Embedding(output_dim=2, input_dim=6) outputs = embedding(inputs) model = keras.Model(inputs, outputs) model.compile(optimizer="adam", loss="mean_squared_error") return model def create_dense_model(): inputs = keras.layers.Input(shape=(6,)) outputs = keras.layers.Dense(6)(inputs) model = keras.Model(inputs, outputs) model.compile(optimizer="adam", loss="mean_squared_error") return model # Maybe create new strategy with different number of shards if shard_config[0] > 2: strategy = tf.distribute.experimental.ParameterServerStrategy( multi_worker_testing_utils.make_parameter_server_cluster(3, 3), variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner( # noqa: E501 shard_config[0] ), ) elif shard_config[0] == 2: strategy = self.strategy else: # Just one shard, so use default strategy strategy = tf.distribute.get_strategy() x = tf.cast(tf.expand_dims(tf.range(6), 0), tf.float32) with strategy.scope(): model = ( create_dense_model() if model_type == "dense" else create_embedding_model() ) expect = model(x) # Dense layers have two variables (kernel and bias), embedding layers # have 1 n_expected_variables = shard_config[0] * ( 2 if model_type == "dense" else 1 ) self.assertLen(model.variables, n_expected_variables) model_weights = [v.numpy() for v in model.variables] saved_dir = self.get_temp_dir() model.save(saved_dir) if shard_config[1] > 2: strategy2 = tf.distribute.experimental.ParameterServerStrategy( multi_worker_testing_utils.make_parameter_server_cluster(3, 3), variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner( # noqa: E501 shard_config[1] ), ) elif shard_config[1] == 2: strategy2 = self.strategy else: # Just one shard, so use default strategy strategy2 = tf.distribute.get_strategy() with strategy2.scope(): loaded_model = keras.models.load_model(saved_dir) got = loaded_model(x) self.assertAllClose(got, expect) n_expected_variables = shard_config[1] * ( 2 if model_type == "dense" else 1 ) self.assertLen(loaded_model.variables, n_expected_variables) loaded_model_weights = [v.numpy() for v in loaded_model.variables] self.assertAllClose( np.concatenate([w.flatten() for w in model_weights]), np.concatenate([w.flatten() for w in loaded_model_weights]), ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( distribution=strategy_combinations.strategies_minus_tpu, model_type=["dense", "embedding"], ) ) def test_saved_model_load_non_pss(self, model_type, distribution): def create_embedding_model(): inputs = keras.layers.Input(shape=(6,)) embedding = keras.layers.Embedding(output_dim=2, input_dim=6) outputs = embedding(inputs) model = keras.Model(inputs, outputs) model.compile(optimizer="adam", loss="mean_squared_error") return model def create_dense_model(): inputs = keras.layers.Input(shape=(6,)) outputs = keras.layers.Dense(6)(inputs) model = keras.Model(inputs, outputs) model.compile(optimizer="adam", loss="mean_squared_error") return model x = tf.cast(tf.expand_dims(tf.range(6), 0), tf.float32) with self.strategy.scope(): model = ( create_dense_model() if model_type == "dense" else create_embedding_model() ) expect = model(x) model_weights = [v.numpy() for v in model.variables] saved_dir = self.get_temp_dir() model.save(saved_dir) with distribution.scope(): loaded_model = keras.models.load_model(saved_dir) got = loaded_model(x) self.assertAllClose(got, expect) n_expected_variables = 2 if 
model_type == "dense" else 1 self.assertLen(loaded_model.variables, n_expected_variables) loaded_model_weights = [v.numpy() for v in loaded_model.variables] self.assertAllClose( np.concatenate([w.flatten() for w in model_weights]), np.concatenate([w.flatten() for w in loaded_model_weights]), ) def test_slot_variable_checkpointing(self): with self.strategy.scope(): # Set a name so the ShardedVariable is well-named for slot var # keying var = tf.Variable([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="test") opt = keras.optimizers.legacy.adam.Adam() # Run once to trigger apply_gradients to populate optimizer slot # variables. def train_step(): with tf.GradientTape() as tape: loss = sum(var) opt.minimize(loss, var.variables, tape=tape) self.strategy.run(train_step) # Check that we can call get_slot using each slot, before and after # Checkpointing, and get the same results pre_ckpt_slots = [] for slot in opt.get_slot_names(): pre_ckpt_slots.extend([v.numpy() for v in opt.get_slot(var, slot)]) ckpt = tf.train.Checkpoint(var=var, opt=opt) # Assert that checkpoint has slots for each shard and the # ShardedVariable self.assertLen(ckpt.opt._slots, 3) for var_name in ckpt.opt._slots.keys(): self.assertLen(ckpt.opt._slots[var_name], 2) self.assertEqual(ckpt.opt._slots[var_name].keys(), {"m", "v"}) if hasattr(ckpt.opt._slots[var_name]["m"], "variables"): self.assertLen(ckpt.opt._slots[var_name]["m"].variables, 2) self.assertLen(ckpt.opt._slots[var_name]["v"].variables, 2) saved_dir = self.get_temp_dir() ckpt_prefix = f"{saved_dir}/ckpt" ckpt.save(ckpt_prefix) # Run once more to alter slot variables and ensure checkpoint restores # the earlier values. self.strategy.run(train_step) changed_ckpt_slots = [] for slot in opt.get_slot_names(): changed_ckpt_slots.extend( [v.numpy() for v in opt.get_slot(var, slot)] ) self.assertNotAllClose(pre_ckpt_slots, changed_ckpt_slots) ckpt.restore(tf.train.latest_checkpoint(saved_dir)) post_ckpt_slots = [] for slot in opt.get_slot_names(): post_ckpt_slots.extend([v.numpy() for v in opt.get_slot(var, slot)]) self.assertAllClose(pre_ckpt_slots, post_ckpt_slots) def test_slot_variable_checkpoint_load_with_diff_shards(self): with self.strategy.scope(): # Set a name so the ShardedVariable is well-named for slot var # keying var = tf.Variable([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="test") opt = keras.optimizers.legacy.adam.Adam() # Run once to trigger apply_gradients to populate optimizer slot # variables. def train_step(): with tf.GradientTape() as tape: loss = sum(var) opt.minimize(loss, var.variables, tape=tape) self.strategy.run(train_step) # Check that we can call get_slot using each slot, before and after # Checkpointing, and get the same results pre_ckpt_slots = [] for slot in opt.get_slot_names(): pre_ckpt_slots.extend( tf.concat(list(opt.get_slot(var, slot)), axis=0).numpy() ) ckpt = tf.train.Checkpoint(var=var, opt=opt) saved_dir = self.get_temp_dir() ckpt_prefix = f"{saved_dir}/ckpt" ckpt.save(ckpt_prefix) # Create new strategy with different number of shards strategy2 = tf.distribute.experimental.ParameterServerStrategy( multi_worker_testing_utils.make_parameter_server_cluster(3, 2), variable_partitioner=tf.distribute.experimental.partitioners.FixedShardsPartitioner( # noqa: E501 3 ), ) # Create new variable with different values, to be overwritten by ckpt. with strategy2.scope(): var = tf.Variable([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], name="test") opt = keras.optimizers.legacy.adam.Adam() # Run once to trigger apply_gradients to populate optimizer slot # variables. 
strategy2.run(train_step) new_ckpt = tf.train.Checkpoint(var=var, opt=opt) new_ckpt.restore(tf.train.latest_checkpoint(saved_dir)) post_ckpt_slots = [] for slot in new_ckpt.opt.get_slot_names(): post_ckpt_slots.extend( tf.concat( list(new_ckpt.opt.get_slot(var, slot)), axis=0 ).numpy() ) self.assertAllClose(pre_ckpt_slots, post_ckpt_slots) class ShardedVariableMixedPartitioningTest(tf.test.TestCase): def test_saved_model_min_size_partitioner(self): # set min_shard_bytes such that Dense kernel is split into 2 and bias # into 1 partitioner = ( tf.distribute.experimental.partitioners.MinSizePartitioner( min_shard_bytes=(6 * 6 * 4) // 2, max_shards=2 ) ) cluster_resolver = ( multi_worker_testing_utils.make_parameter_server_cluster(3, 2) ) strategy = tf.distribute.experimental.ParameterServerStrategy( cluster_resolver, variable_partitioner=partitioner ) def create_dense_model(): inputs = keras.layers.Input(shape=(6,)) outputs = keras.layers.Dense(6)(inputs) model = keras.Model(inputs, outputs) model.compile(optimizer="adam", loss="mean_squared_error") return model x = tf.cast(tf.expand_dims(tf.range(6), 0), tf.float32) with strategy.scope(): model = create_dense_model() expect = model(x) # 2 kernel variables, 1 bias self.assertLen(model.variables, 3) saved_dir = self.get_temp_dir() model.save(saved_dir) # set min_shard_bytes such that Dense kernel is split into 3 and bias # into 1 partitioner2 = ( tf.distribute.experimental.partitioners.MinSizePartitioner( min_shard_bytes=(6 * 6 * 4) // 3, max_shards=3 ) ) strategy2 = tf.distribute.experimental.ParameterServerStrategy( cluster_resolver, variable_partitioner=partitioner2 ) with strategy2.scope(): loaded_model = keras.models.load_model(saved_dir) got = loaded_model(x) self.assertAllClose(got, expect) # 3 kernel variables, 1 bias self.assertLen(loaded_model.variables, 4) if __name__ == "__main__": tf.compat.v1.enable_v2_behavior() tf.test.main()
tf-keras/tf_keras/distribute/sharded_variable_test.py/0
{ "file_path": "tf-keras/tf_keras/distribute/sharded_variable_test.py", "repo_id": "tf-keras", "token_count": 8635 }
163
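The sharded-variable tests above rely on one invariant: however many fixed shards a variable is partitioned into, concatenating the shards along axis 0 recovers the logical value. The following is not the real `FixedShardsPartitioner` API, just a NumPy illustration of that invariant:

```python
import numpy as np


def fixed_shards_split(value, num_shards):
    # Illustrative stand-in: split axis 0 into `num_shards` nearly equal
    # pieces, mirroring how the tests expect a shape-(6,) variable to land
    # in 2 or 3 shards.
    return np.array_split(value, num_shards, axis=0)


var = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
shards = fixed_shards_split(var, 2)
assert [s.tolist() for s in shards] == [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]

# Concatenation recovers the logical variable -- the invariant the
# checkpoint tests use when reloading with a different shard count.
assert np.concatenate(fixed_shards_split(var, 3)).tolist() == var.tolist()
```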
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """E2E Tests for mnist_model.""" import numpy as np import tensorflow.compat.v2 as tf from tensorflow.compat.v2.experimental import dtensor from tf_keras import backend from tf_keras.dtensor import integration_test_utils from tf_keras.dtensor import layout_map as layout_map_lib from tf_keras.dtensor import test_util from tf_keras.optimizers import adam from tf_keras.utils import tf_utils class MnistTest(test_util.DTensorBaseTest): def setUp(self): super().setUp() backend.enable_tf_random_generator() tf_utils.set_random_seed(1337) global_ids = test_util.create_device_ids_array((2,)) local_device_ids = np.ravel(global_ids).tolist() mesh_dict = { device: tf.experimental.dtensor.Mesh( ["batch"], global_ids, local_device_ids, test_util.create_device_list((2,), device), ) for device in ("CPU", "GPU", "TPU") } self.mesh = self.configTestMesh(mesh_dict) def test_mnist_training(self): layout_map = layout_map_lib.LayoutMap(self.mesh) with layout_map.scope(): model = integration_test_utils.get_model() optimizer = adam.Adam(learning_rate=0.001, mesh=self.mesh) optimizer.build(model.trainable_variables) train_losses = integration_test_utils.train_mnist_model_batch_sharded( model, optimizer, self.mesh, num_epochs=3, steps_per_epoch=20, global_batch_size=64, ) # Make sure the losses are decreasing self.assertEqual(train_losses, sorted(train_losses, reverse=True)) def test_model_fit(self): layout_map = layout_map_lib.LayoutMap(self.mesh) with layout_map.scope(): model = integration_test_utils.get_model() optimizer = adam.Adam(learning_rate=0.001, mesh=self.mesh) global_batch_size = 64 model.compile( loss="CategoricalCrossentropy", optimizer=optimizer, metrics="acc" ) train_ds, eval_ds = integration_test_utils.get_mnist_datasets( integration_test_utils.NUM_CLASS, global_batch_size ) def distribute_ds(dataset): dataset = dataset.unbatch() def _create_batch_layout(tensor_spec): rank = len(tensor_spec.shape) + 1 return dtensor.Layout.batch_sharded( self.mesh, batch_dim="batch", rank=rank ) layouts = tf.nest.map_structure( _create_batch_layout, dataset.element_spec ) return dtensor.DTensorDataset( dataset=dataset, mesh=self.mesh, layouts=layouts, global_batch_size=global_batch_size, dataset_already_batched=False, batch_dim="batch", prefetch=None, tf_data_service_config=None, ) train_ds = distribute_ds(train_ds) eval_ds = distribute_ds(eval_ds) model.fit(train_ds, steps_per_epoch=10) model.evaluate(eval_ds, steps=10) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/dtensor/mnist_model_test.py/0
{ "file_path": "tf-keras/tf_keras/dtensor/mnist_model_test.py", "repo_id": "tf-keras", "token_count": 1755 }
164
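`distribute_ds` in the test above computes each layout's rank as `len(tensor_spec.shape) + 1` because the dataset is unbatched first and `DTensorDataset` re-adds the batch dimension, sharded over `"batch"`. A small sketch of just that rank computation, runnable without a DTensor mesh:

```python
import tensorflow as tf

# Element specs of an *unbatched* dataset describe per-example shapes only.
ds = tf.data.Dataset.from_tensor_slices(
    (tf.zeros([8, 28, 28, 1]), tf.zeros([8, 10]))
)


def batch_layout_rank(tensor_spec):
    # Mirrors `_create_batch_layout` above: +1 accounts for the batch
    # dimension that is re-added and sharded across the "batch" mesh dim.
    return len(tensor_spec.shape) + 1


ranks = tf.nest.map_structure(batch_layout_rank, ds.element_spec)
print(ranks)  # (4, 2): rank-4 image layout, rank-2 label layout.
```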
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for `Model.compile`.""" import copy import tensorflow.compat.v2 as tf from tf_keras import losses as losses_mod from tf_keras import metrics as metrics_mod from tf_keras.saving import saving_lib from tf_keras.utils import generic_utils from tf_keras.utils import losses_utils from tf_keras.utils import tf_utils class Container: """Base Container class.""" def __init__(self, output_names=None, mesh=None): self._output_names = output_names # Used by DTensor layout map use case. Can be removed after DTensor # based distribution strategy. self._mesh = mesh def build(self, y_pred): if self._output_names is None: # In Subclass API, output names like 'output_1' are used for # `Metric` names. self._output_names = create_pseudo_output_names(y_pred) def _conform_to_outputs(self, outputs, struct): """Convenience method to conform `struct` to `outputs` structure. Mappings performed: (1) Map a dict to a list of outputs, using the output names. (2) Fill missing keys in a dict w/ `None`s. (3) Map a single item to all outputs. Args: outputs: Model predictions. struct: Arbitrary nested structure (e.g. of labels, sample_weights, losses, or metrics). Returns: Mapping of `struct` to `outputs` structure. """ struct = map_to_output_names(outputs, self._output_names, struct) struct = map_missing_dict_keys(outputs, struct) # Allow passing one object that applies to all outputs. if not tf.nest.is_nested(struct) and tf.nest.is_nested(outputs): struct = tf.nest.map_structure(lambda _: struct, outputs) return struct def _maybe_broadcast_to_outputs(self, outputs, objects): """Determines if losses / metrics should be applied to all outputs. NOTE: This method should only be called for Metrics / Losses, not for y_true / sample_weight. Args: outputs: Model predictions. objects: Arbitrary nested structure (e.g. of losses or metrics) Returns: Arbitrary nested structure of objects, maybe copied to each output. Applies a Loss / Metric to all outputs. """ if not self._should_broadcast(objects): return objects # When there is more than one Model output, this is needed to keep # each Metric / Loss separate. When there is only one Model output, # the user-supplied object should be used. should_copy_objects = len(tf.nest.flatten(outputs)) > 1 def _broadcast_fn(): if should_copy_objects: return tf.nest.map_structure(self._copy_object, objects) return objects return tf.nest.map_structure(lambda _: _broadcast_fn(), outputs) def _should_broadcast(self, objects): raise NotImplementedError def _copy_object(self, obj): raise NotImplementedError class LossesContainer(Container): """A container class for losses passed to `Model.compile()`. Args: losses: Struct of loss function(s). See `Model.compile()` doc for more information. loss_weights: Weights of the losses contributions of different model outputs. See `Model.compile()` doc for more information. 
output_names: List of string. Per-output metric names. total_loss_mean: A `keras.metrics.Mean` instance that is used to track the mean of all losses (including compiled and regularization losses). """ def __init__( self, losses, loss_weights=None, output_names=None, total_loss_mean=None, mesh=None, ): super(LossesContainer, self).__init__( output_names=output_names, mesh=mesh ) # Keep user-supplied values untouched for recompiling and serialization. self._user_losses = losses self._user_loss_weights = loss_weights self._losses = losses self._loss_weights = loss_weights self._per_output_metrics = None # Per-output losses become metrics. # Mean of the total loss. self._total_loss_mean = total_loss_mean or metrics_mod.Mean( name="loss", mesh=self._mesh ) self._built = False def get_config(self): # In case `self._losses` is a single string where we convert it to a # list. self._losses = tf.nest.flatten(self._losses) return { "losses": [ saving_lib.serialize_keras_object(obj) for obj in self._losses if obj is not None ], "total_loss_mean": saving_lib.serialize_keras_object( self._total_loss_mean ), } @classmethod def from_config(cls, config): """Returns the `LossesContainer` instance given the `config`.""" deserialized_config = {} for key, value in config.items(): if isinstance(value, list): deserialized_config[key] = [ saving_lib.deserialize_keras_object(item) for item in value ] else: deserialized_config[key] = saving_lib.deserialize_keras_object( value ) return cls(**deserialized_config) @property def metrics(self): """Per-output loss metrics.""" if not self._built: return [] per_output_metrics = [ metric_obj for metric_obj in tf.nest.flatten(self._per_output_metrics) if metric_obj is not None ] return [self._total_loss_mean] + per_output_metrics def build(self, y_pred): """One-time setup of loss objects.""" super(LossesContainer, self).build(y_pred) self._losses = self._maybe_broadcast_to_outputs(y_pred, self._losses) self._losses = self._conform_to_outputs(y_pred, self._losses) self._losses = tf.nest.map_structure( self._get_loss_object, self._losses ) self._losses = tf.nest.flatten(self._losses) self._loss_weights = self._maybe_broadcast_to_outputs( y_pred, self._loss_weights ) self._loss_weights = self._conform_to_outputs( y_pred, self._loss_weights ) self._loss_weights = tf.nest.flatten(self._loss_weights) self._create_metrics() self._built = True @property def built(self): return self._built def _create_metrics(self): """Creates per-output loss metrics, but only for multi-output Models.""" if len(self._output_names) == 1: self._per_output_metrics = [None] else: self._per_output_metrics = [] for loss_obj, output_name in zip(self._losses, self._output_names): if loss_obj is None: self._per_output_metrics.append(None) else: self._per_output_metrics.append( metrics_mod.Mean(output_name + "_loss", mesh=self._mesh) ) def __call__( self, y_true, y_pred, sample_weight=None, regularization_losses=None ): """Computes the overall loss. Args: y_true: An arbitrary structure of Tensors representing the ground truth. y_pred: An arbitrary structure of Tensors representing a Model's outputs. sample_weight: An arbitrary structure of Tensors representing the per-sample loss weights. If one Tensor is passed, it is used for all losses. If multiple Tensors are passed, the structure should match `y_pred`. regularization_losses: Additional losses to be added to the total loss. Returns: The total loss as a `tf.Tensor`, or `None` if no loss results. 
""" y_true = self._conform_to_outputs(y_pred, y_true) sample_weight = self._conform_to_outputs(y_pred, sample_weight) if not self._built: self.build(y_pred) y_pred = tf.nest.flatten(y_pred) y_true = tf.nest.flatten(y_true) sample_weight = tf.nest.flatten(sample_weight) loss_values = [] # Used for gradient calculation. total_loss_mean_values = [] # Used for loss metric calculation. batch_dim = None zip_args = ( y_true, y_pred, sample_weight, self._losses, self._loss_weights, self._per_output_metrics, ) for y_t, y_p, sw, loss_obj, loss_weight, metric_obj in zip(*zip_args): if ( y_t is None or loss_obj is None ): # Ok to have no loss for an output. continue y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw) sw = losses_utils.apply_mask(y_p, sw, losses_utils.get_mask(y_p)) loss_value = loss_obj(y_t, y_p, sample_weight=sw) total_loss_mean_value = loss_value # Correct for the `Mean` loss metrics counting each replica as a # batch. if loss_obj.reduction == losses_utils.ReductionV2.SUM: total_loss_mean_value *= ( tf.distribute.get_strategy().num_replicas_in_sync ) if batch_dim is None: if tf_utils.is_ragged(y_t): batch_dim = y_t.nrows() else: batch_dim = tf.shape(y_t)[0] if metric_obj is not None: metric_obj.update_state( total_loss_mean_value, sample_weight=batch_dim ) if loss_weight is not None: loss_value *= loss_weight total_loss_mean_value *= loss_weight if ( loss_obj.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE or loss_obj.reduction == losses_utils.ReductionV2.AUTO ): loss_value = losses_utils.scale_loss_for_distribution( loss_value ) loss_values.append(loss_value) total_loss_mean_values.append(total_loss_mean_value) if regularization_losses: regularization_losses = losses_utils.cast_losses_to_common_dtype( regularization_losses ) reg_loss = tf.add_n(regularization_losses) total_loss_mean_values.append(reg_loss) loss_values.append( losses_utils.scale_loss_for_distribution(reg_loss) ) if loss_values: total_loss_mean_values = losses_utils.cast_losses_to_common_dtype( total_loss_mean_values ) total_total_loss_mean_value = tf.add_n(total_loss_mean_values) self._total_loss_mean.update_state( total_total_loss_mean_value, sample_weight=batch_dim ) loss_values = losses_utils.cast_losses_to_common_dtype(loss_values) total_loss = tf.add_n(loss_values) return total_loss else: return None def reset_state(self): """Resets the state of loss metrics.""" if not self._built: return metrics = [self._total_loss_mean] + tf.nest.flatten( self._per_output_metrics ) for metric_obj in metrics: if metric_obj is not None: metric_obj.reset_state() def _get_loss_object(self, loss): """Returns a `Loss` object. Converts the user-supplied loss to a `Loss` object. Also allows `SUM_OVER_BATCH_SIZE` reduction to be used for this loss. Args: loss: A string, function, or `Loss` object. Returns: A `Loss` object. """ if loss is None: return None # Ok to have no loss for an output. loss = losses_mod.get(loss) if not isinstance(loss, losses_mod.Loss): loss_name = get_custom_object_name(loss) if loss_name is None: raise ValueError(f"Loss should be a callable, received: {loss}") loss = losses_mod.LossFunctionWrapper(loss, name=loss_name) loss._allow_sum_over_batch_size = True return loss def _should_broadcast(self, obj): return not tf.nest.is_nested(obj) def _copy_object(self, obj): return obj # Losses don't need to be copied. 
class MetricsContainer(Container):
    """A container class for metrics passed to `Model.compile`."""

    def __init__(
        self,
        metrics=None,
        weighted_metrics=None,
        output_names=None,
        from_serialized=False,
        mesh=None,
    ):
        """Initializes a container for metrics.

        Arguments:
          metrics: see the `metrics` argument from `tf.keras.Model.compile`.
          weighted_metrics: see the `weighted_metrics` argument from
            `tf.keras.Model.compile`.
          output_names: A list of strings of names of outputs for the model.
          from_serialized: Whether the model being compiled is from a
            serialized model. Used to avoid redundantly applying
            pre-processing renaming steps.
        """
        super(MetricsContainer, self).__init__(
            output_names=output_names, mesh=mesh
        )

        self._check_duplicated_metrics(metrics, weighted_metrics)
        # Keep user-supplied values untouched for recompiling and
        # serialization.
        self._user_metrics = metrics
        self._user_weighted_metrics = weighted_metrics

        self._metrics = metrics
        self._weighted_metrics = weighted_metrics
        self._built = False

        self._from_serialized = from_serialized

    def _check_duplicated_metrics(self, metrics, weighted_metrics):
        """Raises an error when user-provided metrics have any duplications.

        Note that metrics are stateful containers; a metric instance shared
        between model.metric and model.weighted_metric would cause the same
        instance to be updated twice, and report a wrong value.

        Args:
          metrics: User-provided metrics list.
          weighted_metrics: User-provided weighted metrics list.

        Raises:
          ValueError: When a duplicated metric instance is discovered in the
            user-provided metrics and weighted metrics.
        """
        seen = set()
        duplicated = []
        for x in tf.nest.flatten(metrics) + tf.nest.flatten(weighted_metrics):
            # We only check Metric objects. String and function objects
            # will be converted to unique Metric instances.
            if not isinstance(x, metrics_mod.Metric):
                continue
            if x in seen:
                duplicated.append(x)
            seen.add(x)

        if duplicated:
            raise ValueError(
                "Found duplicated metrics object in the user provided "
                "metrics and weighted metrics. This will cause the same "
                "metric object to be updated multiple times, and report "
                "wrong results. \n"
                f"Duplicated items: {duplicated}"
            )

    @property
    def metrics(self):
        """All metrics in this container."""
        if not self._built:
            return []
        return self._metrics_in_order

    @property
    def unweighted_metrics(self):
        """Metrics in the container that should not be passed sample_weight."""
        if not self._built:
            return None
        return tf.nest.flatten(self._metrics)

    @property
    def weighted_metrics(self):
        """Metrics in this container that should be passed `sample_weight`."""
        if not self._built:
            return None
        return tf.nest.flatten(self._weighted_metrics)

    def build(self, y_pred, y_true):
        """One-time setup of metric objects."""
        super(MetricsContainer, self).build(y_pred)

        self._metrics = self._maybe_broadcast_to_outputs(y_pred, self._metrics)
        self._metrics = self._conform_to_outputs(y_pred, self._metrics)

        self._weighted_metrics = self._maybe_broadcast_to_outputs(
            y_pred, self._weighted_metrics
        )
        self._weighted_metrics = self._conform_to_outputs(
            y_pred, self._weighted_metrics
        )

        # Standardize on tuple since `tf.data` turns lists into `Tensor`s.
        y_pred = tf.__internal__.nest.list_to_tuple(y_pred)
        y_true = tf.__internal__.nest.list_to_tuple(y_true)
        self._metrics = tf.__internal__.nest.list_to_tuple(self._metrics)
        self._weighted_metrics = tf.__internal__.nest.list_to_tuple(
            self._weighted_metrics
        )

        # Convert to `Metric` objects, potentially disambiguating based on
        # output properties.
        self._metrics = tf.__internal__.nest.map_structure_up_to(
            y_pred, self._get_metric_objects, self._metrics, y_true, y_pred
        )
        self._weighted_metrics = tf.__internal__.nest.map_structure_up_to(
            y_pred,
            self._get_metric_objects,
            self._weighted_metrics,
            y_true,
            y_pred,
        )

        self._metrics = tf.__internal__.nest.flatten_up_to(
            y_pred, self._metrics, check_types=False
        )
        self._weighted_metrics = tf.__internal__.nest.flatten_up_to(
            y_pred, self._weighted_metrics, check_types=False
        )

        # Assumes metrics, weighted_metrics have been flattened up to outputs.
        #
        # If we are loading a model that has already been serialized, we do
        # not want to re-apply any pre-processing metric renaming steps.
        if not self._from_serialized:
            self._set_metric_names()
        self._create_ordered_metrics()
        self._built = True

    @property
    def built(self):
        return self._built

    def _set_metric_names(self):
        """Sets unique metric names."""
        # For multi-output models, prepend the output name to the metric name.
        # For weighted metrics, prepend "weighted_" if the name would be
        # non-unique.

        metric_names = set()
        is_multi_output = len(self._output_names) > 1
        zip_args = (self._output_names, self._metrics, self._weighted_metrics)
        for output_name, output_metrics, weighted_output_metrics in zip(
            *zip_args
        ):
            for m in output_metrics:
                if m is None:
                    continue
                if is_multi_output:
                    m._name = output_name + "_" + m._name
                if m._name in metric_names:
                    raise ValueError(
                        f"Found two metrics with the same name: {m._name}. "
                        "All the metrics added to the model need to have "
                        "unique names."
                    )
                metric_names.add(m._name)

            for wm in weighted_output_metrics:
                if wm is None:
                    continue
                if is_multi_output:
                    if output_name + "_" + wm._name in metric_names:
                        wm._name = output_name + "_weighted_" + wm._name
                    else:
                        wm._name = output_name + "_" + wm._name
                elif wm._name in metric_names:
                    wm._name = "weighted_" + wm._name

                if wm._name in metric_names:
                    raise ValueError(
                        "Found two weighted metrics with the same name: "
                        f"{wm._name}. All the metrics added to the model "
                        "need to have unique names."
                    )
                metric_names.add(wm._name)

    def _create_ordered_metrics(self):
        """Caches the flat order needed when returning metrics, for
        backwards compatibility."""
        self._metrics_in_order = []
        for output_metrics, output_weighted_metrics in zip(
            self._metrics, self._weighted_metrics
        ):
            for m in tf.nest.flatten(output_metrics):
                if m is not None:
                    self._metrics_in_order.append(m)
            for wm in tf.nest.flatten(output_weighted_metrics):
                if wm is not None:
                    self._metrics_in_order.append(wm)

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Updates the state of per-output metrics."""
        y_true = self._conform_to_outputs(y_pred, y_true)
        sample_weight = self._conform_to_outputs(y_pred, sample_weight)

        if not self._built:
            self.build(y_pred, y_true)

        y_pred = tf.nest.flatten(y_pred)
        y_true = tf.nest.flatten(y_true) if y_true is not None else []
        sample_weight = tf.nest.flatten(sample_weight)

        zip_args = (
            y_true,
            y_pred,
            sample_weight,
            self._metrics,
            self._weighted_metrics,
        )
        for y_t, y_p, sw, metric_objs, weighted_metric_objs in zip(*zip_args):
            # Ok to have no metrics for an output.
if y_t is None or ( all(m is None for m in metric_objs) and all(wm is None for wm in weighted_metric_objs) ): continue y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw) mask = losses_utils.get_mask(y_p) sw = losses_utils.apply_mask(y_p, sw, mask) for metric_obj in metric_objs: if metric_obj is None: continue metric_obj.update_state(y_t, y_p, sample_weight=mask) for weighted_metric_obj in weighted_metric_objs: if weighted_metric_obj is None: continue weighted_metric_obj.update_state(y_t, y_p, sample_weight=sw) def reset_state(self): """Resets the state of all `Metric`s in this container.""" if self._built: metrics = self._metrics_in_order else: # If the user supplied `Metric` objects directly, we should # reset those. This could also contain `str`s or `function`s # though. metrics = tf.nest.flatten(self._user_metrics) + tf.nest.flatten( self._user_weighted_metrics ) for metric_obj in metrics: if isinstance(metric_obj, metrics_mod.Metric): metric_obj.reset_state() def _get_metric_objects(self, metrics, y_t, y_p): """Convert user-supplied metrics to `Metric` objects.""" metrics = tf.nest.flatten(metrics) return [self._get_metric_object(m, y_t, y_p) for m in metrics] def _get_metric_object(self, metric, y_t, y_p): """Converts user-supplied metric to a `Metric` object. Args: metric: A string, function, or `Metric` object. y_t: Sample of label. y_p: Sample of output. Returns: A `Metric` object. """ if metric is None: return None # Ok to have no metric for an output. # Convenience feature for selecting b/t binary, categorical, # and sparse categorical. if str(metric).lower() not in ["accuracy", "acc", "crossentropy", "ce"]: metric_obj = metrics_mod.get(metric) else: y_t_rank = len(y_t.shape.as_list()) y_p_rank = len(y_p.shape.as_list()) y_t_last_dim = y_t.shape.as_list()[-1] y_p_last_dim = y_p.shape.as_list()[-1] is_binary = y_p_last_dim == 1 is_sparse_categorical = ( y_t_rank < y_p_rank or y_t_last_dim == 1 and y_p_last_dim > 1 ) if str(metric).lower() in ["accuracy", "acc"]: if is_binary: metric_obj = metrics_mod.binary_accuracy elif is_sparse_categorical: metric_obj = metrics_mod.sparse_categorical_accuracy else: metric_obj = metrics_mod.categorical_accuracy else: if is_binary: metric_obj = metrics_mod.binary_crossentropy elif is_sparse_categorical: metric_obj = metrics_mod.sparse_categorical_crossentropy else: metric_obj = metrics_mod.categorical_crossentropy if isinstance(metric_obj, losses_mod.Loss): metric_obj._allow_sum_over_batch_size = True if not isinstance(metric_obj, metrics_mod.Metric): if isinstance(metric, str): metric_name = metric else: metric_name = get_custom_object_name(metric) if metric_name is None: raise ValueError( f"Metric should be a callable, received: {metric}" ) metric_obj = metrics_mod.MeanMetricWrapper( metric_obj, name=metric_name, mesh=self._mesh ) return metric_obj def _should_broadcast(self, obj): # e.g. 'mse'. if not tf.nest.is_nested(obj): return True # e.g. ['mse'] or ['mse', 'mae']. return isinstance(obj, (list, tuple)) and not any( tf.nest.is_nested(o) for o in obj ) def _copy_object(self, obj): if isinstance(obj, metrics_mod.Metric): return obj.__class__.from_config(obj.get_config()) return obj # Can be a function or `None`. 
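# Editor's note (sketch, hypothetical model): `_get_metric_object` above
# resolves the strings "accuracy"/"acc" and "crossentropy"/"ce" per output
# from the label/prediction shapes: a last prediction dim of 1 selects the
# binary variant; labels with smaller rank (or a last dim of 1 against a
# wider prediction) select the sparse-categorical variant; anything else
# selects the categorical variant.
def _example_accuracy_disambiguation():  # pragma: no cover
    import tensorflow as tf

    inputs = tf.keras.Input(shape=(8,))
    # Predictions with last dim 1 -> "accuracy" resolves to binary_accuracy.
    binary_head = tf.keras.layers.Dense(1, activation="sigmoid")(inputs)
    model = tf.keras.Model(inputs, binary_head)
    model.compile("sgd", "binary_crossentropy", metrics=["accuracy"])
    # The reported metric name stays "accuracy"; only the update function
    # wrapped by `MeanMetricWrapper` differs.
    return model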
def create_pseudo_output_names(outputs):
    """Create pseudo output names for a subclassed Model."""
    return _create_pseudo_names(outputs, prefix="output_")


def create_pseudo_input_names(inputs):
    """Create pseudo input names for a subclassed Model."""
    return _create_pseudo_names(inputs, prefix="input_")


def _create_pseudo_names(tensors, prefix):
    """Creates pseudo {input | output} names for subclassed Models.

    Warning: this function should only be used to define default
    names for `Metrics` and `SavedModel`. No other use cases should
    rely on a `Model`'s input or output names.

    Example with dict:

    `{'a': [x1, x2], 'b': x3}` becomes:
    `['a_1', 'a_2', 'b']`

    Example with list:

    `[x, y]` becomes:
    `['output_1', 'output_2']`

    Args:
      tensors: `Model`'s outputs or inputs.
      prefix: 'output_' for outputs, 'input_' for inputs.

    Returns:
      Flattened list of pseudo names.
    """

    def one_index(ele):
        # Start with "output_1" instead of "output_0".
        if isinstance(ele, int):
            return ele + 1
        return ele

    flat_paths = list(tf.__internal__.nest.yield_flat_paths(tensors))
    flat_paths = tf.nest.map_structure(one_index, flat_paths)
    names = []
    for path in flat_paths:
        if not path:
            name = prefix + "1"  # Single output.
        else:
            name = "_".join(str(p) for p in path)
            if isinstance(path[0], int):
                name = prefix + name
        names.append(name)
    return names


def map_to_output_names(y_pred, output_names, struct):
    """Maps a dict to a list using `output_names` as keys.

    This is a convenience feature only. When a `Model`'s outputs
    are a list, you can specify per-output losses and metrics as
    a dict, where the keys are the output names. If you specify
    per-output losses and metrics via the same structure as the
    `Model`'s outputs (recommended), no mapping is performed.

    For the Functional API, the output names are the names of the
    last layer of each output. For the Subclass API, the output names
    are determined by `create_pseudo_output_names` (For example:
    `['output_1', 'output_2']` for a list of outputs).

    This mapping preserves backwards compatibility for `compile` and
    `fit`.

    Args:
      y_pred: Sample outputs of the Model, to determine if this convenience
        feature should be applied (`struct` is returned unmodified if `y_pred`
        isn't a flat list).
      output_names: List. The names of the outputs of the Model.
      struct: The structure to map.

    Returns:
      `struct` mapped to a list in same order as `output_names`.
    """
    single_output = not tf.nest.is_nested(y_pred)
    outputs_are_flat_list = (
        not single_output
        and isinstance(y_pred, (list, tuple))
        and not any(tf.nest.is_nested(y_p) for y_p in y_pred)
    )

    if (single_output or outputs_are_flat_list) and isinstance(struct, dict):
        output_names = output_names or create_pseudo_output_names(y_pred)
        struct = copy.copy(struct)
        new_struct = [struct.pop(name, None) for name in output_names]
        if struct:
            raise ValueError(
                "Found unexpected losses or metrics that do not correspond "
                f"to any Model output: {struct.keys()}. "
                f"Valid model output names: {output_names}. "
                f"Received struct is: {struct}."
            )
        if len(new_struct) == 1:
            return new_struct[0]
        return new_struct
    else:
        return struct


def map_missing_dict_keys(y_pred, struct):
    """Replaces missing dict keys in `struct` with `None` placeholders."""
    if not isinstance(y_pred, dict) or not isinstance(struct, dict):
        return struct
    struct = copy.copy(struct)
    for k in y_pred.keys():
        if k not in struct:
            struct[k] = None
    return struct


def match_dtype_and_rank(y_t, y_p, sw):
    """Matches the dtype and rank of `y_t`/`sw` to those of `y_p`."""
    if y_t.shape.rank == 1 and y_p.shape.rank == 2:
        y_t = tf.expand_dims(y_t, axis=-1)
    if sw is not None:
        if sw.shape.rank == 1 and y_p.shape.rank == 2:
            sw = tf.expand_dims(sw, axis=-1)

    # Dtype.
    # This is required mainly for custom loss functions which do not take
    # care of casting dtypes.
    if (y_t.dtype.is_floating and y_p.dtype.is_floating) or (
        y_t.dtype.is_integer and y_p.dtype.is_integer
    ):
        y_t = tf.cast(y_t, y_p.dtype)

    if sw is not None:
        sw = tf.cast(sw, y_p.dtype)
    return y_t, y_p, sw


def get_custom_object_name(obj):
    """Returns the name to use for a custom loss or metric callable.

    Args:
      obj: Custom loss or metric callable.

    Returns:
      Name to use, or `None` if the object was not recognized.
    """
    if hasattr(obj, "name"):  # Accept `Loss` instance as `Metric`.
        return obj.name
    elif hasattr(obj, "__name__"):  # Function.
        return obj.__name__
    elif hasattr(obj, "__class__"):  # Class instance.
        return generic_utils.to_snake_case(obj.__class__.__name__)
    else:  # Unrecognized object.
        return None
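# Editor's sketch of `get_custom_object_name` on the three accepted callable
# kinds. `my_loss` and `MyLoss` are hypothetical stand-ins for user code.
def _example_custom_object_names():  # pragma: no cover
    import tensorflow as tf

    def my_loss(y_true, y_pred):
        return tf.reduce_mean(tf.abs(y_true - y_pred))

    class MyLoss:
        def __call__(self, y_true, y_pred):
            return tf.reduce_mean(tf.square(y_true - y_pred))

    # Plain function: resolved via `__name__`.
    assert get_custom_object_name(my_loss) == "my_loss"
    # Class instance without `name`/`__name__`: snake-cased class name.
    assert get_custom_object_name(MyLoss()) == "my_loss"
    # `Loss`/`Metric` instances: resolved via their `name` attribute.
    assert (
        get_custom_object_name(tf.keras.losses.MeanSquaredError())
        == "mean_squared_error"
    )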
tf-keras/tf_keras/engine/compile_utils.py/0
{ "file_path": "tf-keras/tf_keras/engine/compile_utils.py", "repo_id": "tf-keras", "token_count": 14668 }
165
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Input Tensor used to track functional API Topology."""

import tensorflow.compat.v2 as tf

from tf_keras.utils import object_identity

# isort: off
from tensorflow.python.data.util import structure
from tensorflow.python.util.tf_export import keras_export

# Tensorflow tensors have a maximum rank of 254
# (See `MaxDimensions()` in //tensorflow/core/framework/tensor_shape.h )
# so we do not try to infer values for int32 tensors larger than this,
# as they cannot represent shapes.
_MAX_TENSOR_RANK = 254


@keras_export("keras.__internal__.KerasTensor", v1=[])
class KerasTensor:
    """A representation of a TF-Keras in/output during Functional API
    construction.

    `KerasTensor`s are tensor-like objects that represent the symbolic inputs
    and outputs of TF-Keras layers during Functional model construction. They
    are composed of the `tf.TypeSpec` of the (Composite)Tensor that will be
    consumed/produced in the corresponding location of the Functional model.

    KerasTensors are intended as a private API, so users should never need to
    directly instantiate `KerasTensor`s.

    **Building Functional Models with KerasTensors**
    `tf.keras.Input` produces `KerasTensor`s that represent the symbolic
    inputs to your model. Passing a `KerasTensor` to a `tf.keras.Layer`
    `__call__` lets the layer know that you are building a Functional model.
    The layer __call__ will infer the output signature and return
    `KerasTensor`s with `tf.TypeSpec`s corresponding to the symbolic outputs
    of that layer call. These output `KerasTensor`s will have all of the
    internal KerasHistory metadata attached to them that TF-Keras needs to
    construct a Functional Model.

    Currently, layers infer the output signature by:
      * creating a scratch `FuncGraph`
      * making placeholders in the scratch graph that match the input
        typespecs
      * calling `layer.call` on these placeholders
      * extracting the signatures of the outputs before clearing the scratch
        graph

    (Note: names assigned to KerasTensors by this process are not guaranteed
    to be unique, and are subject to implementation details).

    `tf.nest` methods are used to ensure all of the input/output data
    structures get maintained, with elements swapped between KerasTensors and
    placeholders.

    In rare cases (such as when directly manipulating shapes using Keras
    layers), the layer may be able to partially infer the value of the output
    in addition to just inferring the signature.
    When this happens, the returned KerasTensor will also contain the
    inferred value information. Follow-on layers can use this information
    during their own output signature inference.
    E.g. if one layer produces a symbolic `KerasTensor` that the next layer
    uses as the shape of its outputs, partially knowing the value helps infer
    the output shape.
    **Automatically converting TF APIs to layers**:
    If you pass a `KerasTensor` to a TF API that supports dispatching,
    TF-Keras will automatically turn that API call into a lambda layer in the
    Functional model, and return KerasTensors representing the symbolic
    outputs.

    Most TF APIs that take only tensors as input and produce output tensors
    will support dispatching.

    Calling a `tf.function` does not support dispatching, so you cannot pass
    `KerasTensor`s as inputs to a `tf.function`.

    Higher-order APIs that take methods which produce tensors (e.g.
    `tf.while`, `tf.map_fn`, `tf.cond`) also do not currently support
    dispatching. So, you cannot directly pass KerasTensors as inputs to these
    APIs either. If you want to use these APIs inside of a Functional model,
    you must put them inside of a custom layer.

    Args:
      type_spec: The `tf.TypeSpec` for the symbolic input created by
        `tf.keras.Input`, or symbolically inferred for the output
        during a symbolic layer `__call__`.
      inferred_value: (Optional) a non-symbolic static value, possibly
        partially specified, that could be symbolically inferred for the
        outputs during a symbolic layer `__call__`. This will generally only
        happen when grabbing and manipulating `tf.int32` shapes directly as
        tensors. Statically inferring values in this way and storing them in
        the KerasTensor allows follow-on layers to infer output signatures
        more effectively. (e.g. when using a symbolic shape tensor to later
        construct a tensor with that shape).
      name: (optional) string name for this KerasTensor. Names automatically
        generated by symbolic layer `__call__`s are not guaranteed to be
        unique, and are subject to implementation details.
    """

    def __init__(self, type_spec, inferred_value=None, name=None):
        """Constructs a KerasTensor."""
        if not isinstance(type_spec, tf.TypeSpec):
            raise ValueError(
                "KerasTensors must be constructed with a `tf.TypeSpec`."
            )

        self._type_spec = type_spec
        self._inferred_value = inferred_value
        self._name = name

        if not isinstance(type_spec, structure.NoneTensorSpec):
            if not hasattr(type_spec, "shape"):
                raise ValueError(
                    "KerasTensor only supports TypeSpecs that have a shape "
                    f"field; got {type(type_spec).__qualname__}, "
                    "which does not have a shape."
                )
            if not isinstance(type_spec.shape, tf.TensorShape):
                raise TypeError(
                    "KerasTensor requires that wrapped TypeSpec's shape is a "
                    f"TensorShape; got TypeSpec {type(type_spec).__qualname__}"
                    ", whose shape field has unexpected type "
                    f"{type(type_spec.shape).__qualname__}."
                )

    @property
    def type_spec(self):
        """Returns the `tf.TypeSpec` symbolically inferred for TF-Keras
        output.
        """
        return self._type_spec

    @property
    def shape(self):
        """Returns the `TensorShape` symbolically inferred for TF-Keras
        output.
        """
        return self._type_spec.shape

    @classmethod
    def from_tensor(cls, tensor):
        """Convert a traced (composite)tensor to a representative
        KerasTensor."""
        if isinstance(tensor, tf.Tensor):
            name = getattr(tensor, "name", None)
            type_spec = tf.type_spec_from_value(tensor)
            inferred_value = None
            if (
                type_spec.dtype == tf.int32
                and type_spec.shape.rank is not None
                and type_spec.shape.rank < 2
            ):
                # If this tensor might be representing shape information,
                # (dtype=int32, rank of 0 or 1, not too large to represent a
                # shape) we attempt to capture any value information
                # tensorflow's shape handling can extract from the current
                # scratch graph.
# # Even though keras layers each trace in their own scratch # graph, this shape value info extraction allows us to capture a # sizable and useful subset of the C++ shape value inference TF # can do if all tf ops appear in the same graph when using shape # ops. # # Examples of things this cannot infer concrete dimensions for # that the full single-graph C++ shape inference sometimes can # are: # * cases where the shape tensor is cast out of int32 before # being manipulated w/ floating point numbers then converted # back # * cases where int32 tensors w/ rank >= 2 are manipulated # before being used as a shape tensor # * cases where int32 tensors too large to represent shapes are # manipulated to a smaller size before being used as a shape # tensor inferred_value = tf.ones(shape=tensor).shape if inferred_value.dims: inferred_value = inferred_value.as_list() if len(inferred_value) > _MAX_TENSOR_RANK: inferred_value = None else: inferred_value = None return KerasTensor( type_spec, inferred_value=inferred_value, name=name ) else: # Fallback to the generic arbitrary-typespec KerasTensor name = getattr(tensor, "name", None) type_spec = tf.type_spec_from_value(tensor) return cls(type_spec, name=name) @classmethod def from_type_spec(cls, type_spec, name=None): return cls(type_spec=type_spec, name=name) def _to_placeholder(self): """Convert this KerasTensor to a placeholder in a graph.""" # If there is an inferred value for this tensor, inject the inferred # value if self._inferred_value is not None: # If we suspect this KerasTensor might be representing a shape # tensor, and we were able to extract value information with # TensorFlow's shape handling when making the KerasTensor, we # construct the placeholder by re-injecting the inferred value # information into the graph. We do this injection through the shape # of a placeholder, because that allows us to specify # partially-unspecified shape values. # # See the comment on value extraction inside `from_tensor` for more # info. inferred_value = tf.shape( tf.compat.v1.placeholder( shape=self._inferred_value, dtype=tf.int32 ) ) if self.type_spec.shape.rank == 0: # `tf.shape` always returns a rank-1, we may need to turn it # back to a scalar. inferred_value = inferred_value[0] return inferred_value # Use the generic conversion from typespec to a placeholder. def component_to_placeholder(component): return tf.compat.v1.placeholder(component.dtype, component.shape) return tf.nest.map_structure( component_to_placeholder, self.type_spec, expand_composites=True ) def get_shape(self): return self.shape def __len__(self): raise TypeError( "Keras symbolic inputs/outputs do not " "implement `__len__`. You may be " "trying to pass TF-Keras symbolic inputs/outputs " "to a TF API that does not register dispatching, " "preventing TF-Keras from automatically " "converting the API call to a lambda layer " "in the Functional Model. This error will also get raised " "if you try asserting a symbolic input/output directly." ) @property def op(self): raise TypeError( "Keras symbolic inputs/outputs do not " "implement `op`. You may be " "trying to pass TF-Keras symbolic inputs/outputs " "to a TF API that does not register dispatching, " "preventing TF-Keras from automatically " "converting the API call to a lambda layer " "in the Functional Model." ) def __hash__(self): raise TypeError( f"Tensors are unhashable (this tensor: {self}). " "Instead, use tensor.ref() as the key." 
        )

    # Note: This enables the KerasTensor's overloaded "right" binary
    # operators to run when the left operand is an ndarray, because it
    # accords the Tensor class higher priority than an ndarray, or a
    # numpy matrix.
    # In the future explore changing this to using numpy's __numpy_ufunc__
    # mechanism, which allows more control over how Tensors interact
    # with ndarrays.
    __array_priority__ = 100

    def __array__(self, dtype=None):
        raise TypeError(
            f"You are passing {self}, an intermediate TF-Keras symbolic "
            "input/output, to a TF API that does not allow registering custom "
            "dispatchers, such as `tf.cond`, `tf.function`, gradient tapes, "
            "or `tf.map_fn`. TF-Keras Functional model construction only "
            "supports TF API calls that *do* support dispatching, such as "
            "`tf.math.add` or `tf.reshape`. "
            "Other APIs cannot be called directly on symbolic Keras "
            "inputs/outputs. You can work around "
            "this limitation by putting the operation in a custom TF-Keras "
            "layer `call` and calling that layer "
            "on this symbolic input/output."
        )

    @property
    def is_tensor_like(self):
        return True

    def set_shape(self, shape):
        """Updates the shape of this KerasTensor. Mimics
        `tf.Tensor.set_shape()`."""
        if not isinstance(shape, tf.TensorShape):
            shape = tf.TensorShape(shape)
        if not self.shape.is_compatible_with(shape):
            raise ValueError(
                f"Keras symbolic input/output's shape {self.shape} is not "
                f"compatible with supplied shape {shape}."
            )
        else:
            shape = self.shape.merge_with(shape)
        self._type_spec = type_spec_with_shape(self._type_spec, shape)

    def __str__(self):
        symbolic_description = ""
        inferred_value_string = ""
        name_string = ""

        if hasattr(self, "_keras_history"):
            layer = self._keras_history.layer
            symbolic_description = (
                ", description=\"created by layer '%s'\"" % (layer.name,)
            )
        if self._inferred_value is not None:
            inferred_value_string = f", inferred_value={self._inferred_value}"
        if self.name is not None:
            name_string = f", name='{self._name}'"
        return "KerasTensor(type_spec=%s%s%s%s)" % (
            self.type_spec,
            inferred_value_string,
            name_string,
            symbolic_description,
        )

    def __repr__(self):
        symbolic_description = ""
        inferred_value_string = ""
        if isinstance(self.type_spec, tf.TensorSpec):
            type_spec_string = f"shape={self.shape} dtype={self.dtype.name}"
        else:
            type_spec_string = f"type_spec={self.type_spec}"

        if hasattr(self, "_keras_history"):
            layer = self._keras_history.layer
            symbolic_description = f" (created by layer '{layer.name}')"
        if self._inferred_value is not None:
            inferred_value_string = f" inferred_value={self._inferred_value}"
        return "<KerasTensor: %s%s%s>" % (
            type_spec_string,
            inferred_value_string,
            symbolic_description,
        )

    @property
    def dtype(self):
        """Returns the `dtype` symbolically inferred for this TF-Keras
        output.
        """
        type_spec = self._type_spec
        if not hasattr(type_spec, "dtype"):
            raise AttributeError(
                f"KerasTensor wraps TypeSpec {type(type_spec).__qualname__}, "
                "which does not have a dtype."
            )
        if not isinstance(type_spec.dtype, tf.DType):
            raise TypeError(
                "KerasTensor requires that wrapped TypeSpec's dtype is a "
                f"DType; got TypeSpec {type(type_spec).__qualname__}, whose "
                "dtype field has unexpected type "
                f"{type(type_spec.dtype).__qualname__}."
            )
        return type_spec.dtype

    def ref(self):
        """Returns a hashable reference object to this KerasTensor.

        The primary use case for this API is to put KerasTensors in a
        set/dictionary. We can't put tensors in a set/dictionary as
        `tensor.__hash__()` is not available and tensor equality (`==`) is
        supposed to produce a tensor representing if the two inputs are equal.
See the documentation of `tf.Tensor.ref()` for more info. """ return object_identity.Reference(self) @property def node(self): """Find the corresponding `Node` that produce this keras_tensor. During functional model construction, TF-Keras will attach `KerasHistory` to keras tensor to track the connectivity between calls of layers. Return None if there isn't any KerasHistory attached to this tensor. """ if hasattr(self, "_keras_history"): layer, node_index, _ = self._keras_history return layer.inbound_nodes[node_index] return None def __iter__(self): shape = None if self.shape.ndims is not None: shape = [dim.value for dim in self.shape.dims] if shape is None: raise TypeError("Cannot iterate over a Tensor with unknown shape.") if not shape: raise TypeError("Cannot iterate over a scalar.") if shape[0] is None: raise TypeError( "Cannot iterate over a Tensor with unknown first dimension." ) return _KerasTensorIterator(self, shape[0]) @property def name(self): """Returns the (non-unique, optional) name of this symbolic Keras value.""" return self._name @classmethod def _overload_all_operators(cls, tensor_class): """Register overloads for all operators.""" for operator in tf.Tensor.OVERLOADABLE_OPERATORS: cls._overload_operator(tensor_class, operator) # We include `experimental_ref` for versions of TensorFlow that # still include the deprecated method in Tensors. if hasattr(tensor_class, "experimental_ref"): cls._overload_operator(tensor_class, "experimental_ref") @classmethod def _overload_operator(cls, tensor_class, operator): """Overload operator with the same implementation as the Tensor class. We pull the operator out of the class dynamically to avoid ordering issues. Args: tensor_class: The (Composite)Tensor to get the method from. operator: string. The operator name. """ tensor_oper = getattr(tensor_class, operator) # Compatibility with Python 2: # Python 2 unbound methods have type checks for the first arg, # so we need to extract the underlying function tensor_oper = getattr(tensor_oper, "__func__", tensor_oper) setattr(cls, operator, tensor_oper) KerasTensor._overload_all_operators(tf.Tensor) @keras_export("keras.__internal__.SparseKerasTensor", v1=[]) class SparseKerasTensor(KerasTensor): """A specialized KerasTensor representation for `tf.sparse.SparseTensor`s. Specifically, it specializes the conversion to a placeholder in order to maintain dense shape information. """ def _to_placeholder(self): spec = self.type_spec # nest.map_structure loses dense shape information for sparse tensors. # So, we special-case sparse placeholder creation. # This only preserves shape information for top-level sparse tensors; # not for sparse tensors that are nested inside another composite # tensor. return tf.compat.v1.sparse_placeholder( dtype=spec.dtype, shape=spec.shape ) @keras_export("keras.__internal__.RaggedKerasTensor", v1=[]) class RaggedKerasTensor(KerasTensor): """A specialized KerasTensor representation for `tf.RaggedTensor`s. Specifically, it: 1. Specializes the conversion to a placeholder in order to maintain shape information for non-ragged dimensions. 2. Overloads the KerasTensor's operators with the RaggedTensor versions when they don't match the `tf.Tensor` versions 3. Exposes some of the instance method/attribute that are unique to the RaggedTensor API (such as ragged_rank). 
""" def _to_placeholder(self): ragged_spec = self.type_spec if ragged_spec.ragged_rank == 0 or ragged_spec.shape.rank is None: return super()._to_placeholder() flat_shape = ragged_spec.shape[ragged_spec.ragged_rank :] result = tf.compat.v1.placeholder(ragged_spec.dtype, flat_shape) known_num_splits = [] prod = 1 for axis_size in ragged_spec.shape: if prod is not None: if axis_size is None or ( getattr(axis_size, "value", True) is None ): prod = None else: prod = prod * axis_size known_num_splits.append(prod) for axis in range(ragged_spec.ragged_rank, 0, -1): axis_size = ragged_spec.shape[axis] if axis_size is None or (getattr(axis_size, "value", True) is None): num_splits = known_num_splits[axis - 1] if num_splits is not None: num_splits = num_splits + 1 splits = tf.compat.v1.placeholder( ragged_spec.row_splits_dtype, [num_splits] ) result = tf.RaggedTensor.from_row_splits( result, splits, validate=False ) else: rowlen = tf.constant(axis_size, ragged_spec.row_splits_dtype) result = tf.RaggedTensor.from_uniform_row_length( result, rowlen, validate=False ) return result @property def ragged_rank(self): return self.type_spec.ragged_rank # Overload slicing RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__getitem__") # Overload math ops RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__add__") RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__radd__") RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__mul__") RaggedKerasTensor._overload_operator(tf.RaggedTensor, "__rmul__") # TODO(b/161487382): # Special-case user-registered symbolic objects (registered by the # private `register_symbolic_tensor_type` method) by passing them between # scratch graphs directly. # This is needed to not break Tensorflow probability # while they finish migrating to composite tensors. class UserRegisteredSpec(tf.TypeSpec): """TypeSpec to represent user-registered symbolic objects.""" def __init__(self, shape, dtype): self.shape = shape self._dtype = dtype self.dtype = dtype def _component_specs(self): raise NotImplementedError def _from_components(self, components): raise NotImplementedError def _serialize(self): raise NotImplementedError def _to_components(self, value): raise NotImplementedError def value_type(self): raise NotImplementedError # TODO(b/161487382): # Special-case user-registered symbolic objects (registered by the # private `register_symbolic_tensor_type` method) by passing them between # scratch graphs directly. # This is needed to not break Tensorflow probability # while they finish migrating to composite tensors. class UserRegisteredTypeKerasTensor(KerasTensor): """KerasTensor that represents legacy register_symbolic_tensor_type.""" def __init__(self, user_registered_symbolic_object): x = user_registered_symbolic_object self._user_registered_symbolic_object = x type_spec = UserRegisteredSpec(x.shape, x.dtype) name = getattr(x, "name", None) super().__init__(type_spec, name) @classmethod def from_tensor(cls, tensor): return cls(tensor) @classmethod def from_type_spec(cls, type_spec, name=None): raise NotImplementedError( "You cannot instantiate a KerasTensor directly from TypeSpec: %s" % type_spec ) def _to_placeholder(self): return self._user_registered_symbolic_object class _KerasTensorIterator: """Iterates over the leading dim of a KerasTensor. 
Performs 0 error checks.""" def __init__(self, tensor, dim0): self._tensor = tensor self._index = 0 self._limit = dim0 def __iter__(self): return self def __next__(self): if self._index == self._limit: raise StopIteration result = self._tensor[self._index] self._index += 1 return result # Specify the mappings of tensor class to KerasTensor class. # This is specifically a list instead of a dict for now because # 1. we do a check w/ isinstance because a key lookup based on class # would miss subclasses # 2. a list allows us to control lookup ordering # We include ops.Tensor -> KerasTensor in the first position as a fastpath, # *and* include object -> KerasTensor at the end as a catch-all. # We can re-visit these choices in the future as needed. keras_tensor_classes = [ (tf.Tensor, KerasTensor), (tf.SparseTensor, SparseKerasTensor), (tf.RaggedTensor, RaggedKerasTensor), (object, KerasTensor), ] def register_keras_tensor_specialization(cls, keras_tensor_subclass): """Register a specialized KerasTensor subclass for a Tensor type.""" # We always leave (object, KerasTensor) at the end as a generic fallback keras_tensor_classes.insert(-1, (cls, keras_tensor_subclass)) def keras_tensor_to_placeholder(x): """Construct a graph placeholder to represent a KerasTensor when tracing.""" if isinstance(x, KerasTensor): return x._to_placeholder() else: return x def keras_tensor_from_tensor(tensor): """Convert a traced (composite)tensor to a representative KerasTensor.""" # Create a specialized KerasTensor that supports instance methods, # operators, and additional value inference if possible keras_tensor_cls = None for tensor_type, cls in keras_tensor_classes: if isinstance(tensor, tensor_type): keras_tensor_cls = cls break out = keras_tensor_cls.from_tensor(tensor) if getattr(tensor, "_keras_mask", None) is not None: out._keras_mask = keras_tensor_from_tensor(tensor._keras_mask) return out def keras_tensor_from_type_spec(type_spec, name=None): """Convert a TypeSpec to a representative KerasTensor.""" # Create a specialized KerasTensor that supports instance methods, # operators, and additional value inference if possible keras_tensor_cls = None value_type = type_spec.value_type for tensor_type, cls in keras_tensor_classes: if issubclass(value_type, tensor_type): keras_tensor_cls = cls break return keras_tensor_cls.from_type_spec(type_spec, name=name) def type_spec_with_shape(spec, shape): """Returns a copy of TypeSpec `spec` with its shape set to `shape`.""" if isinstance(spec, tf.TensorSpec): # TODO(b/203201161) Figure out why mutation is needed here, and remove # it. (TensorSpec objects should be immutable; and we should not be # modifying private fields.) shape = tf.TensorShape(shape) spec._shape = shape return spec elif isinstance(spec, tf.RaggedTensorSpec): return tf.RaggedTensorSpec( shape, spec.dtype, spec.ragged_rank, spec.row_splits_dtype, spec.flat_values_spec, ) elif isinstance(spec, tf.SparseTensorSpec): return tf.SparseTensorSpec(shape, spec.dtype) elif hasattr(spec, "with_shape"): # TODO(edloper): Consider adding .with_shape method to TensorSpec, # RaggedTensorSpec, and SparseTensorSpec. return spec.with_shape(shape) else: # TODO(edloper): Consider moving this check to the KerasTensor # constructor. raise ValueError( "Keras requires TypeSpec to have a `with_shape` method " "that returns a copy of `self` with an updated shape." )
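# Editor's illustrative sketch (hypothetical usage): the `inferred_value`
# machinery above is what lets a shape grabbed from a symbolic input flow
# into a later op. During functional tracing, `tf.shape(inputs)` yields an
# int32 rank-1 KerasTensor carrying partially-known value info (here roughly
# [None, None, 8]), which the traced `tf.reshape` can use to infer its
# output signature. This assumes both ops dispatch to lambda layers, per the
# class docstring above.
def _example_inferred_shape_value():  # pragma: no cover
    import tensorflow as tf

    inputs = tf.keras.Input(shape=(None, 8))
    shape = tf.shape(inputs)
    # Collapse batch and time dimensions using the symbolic shape tensor.
    flat = tf.reshape(inputs, [shape[0] * shape[1], 8])
    return tf.keras.Model(inputs, flat)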
tf-keras/tf_keras/engine/keras_tensor.py/0
{ "file_path": "tf-keras/tf_keras/engine/keras_tensor.py", "repo_id": "tf-keras", "token_count": 11697 }
166
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training routines.""" import itertools import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized from tf_keras import layers as layers_module from tf_keras import losses from tf_keras import metrics as metrics_module from tf_keras.engine import input_layer from tf_keras.engine import training from tf_keras.engine import training_generator_v1 from tf_keras.optimizers.legacy import rmsprop from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils from tf_keras.utils import data_utils def custom_generator(mode=2): batch_size = 10 num_samples = 50 arr_data = np.random.random((num_samples, 2)) arr_labels = np.random.random((num_samples, 4)) arr_weights = np.random.random((num_samples,)) i = 0 while True: batch_index = i * batch_size % num_samples i += 1 start = batch_index end = start + batch_size x = arr_data[start:end] y = arr_labels[start:end] w = arr_weights[start:end] if mode == 1: yield x elif mode == 2: yield x, y else: yield x, y, w def custom_generator_changing_batch_size(mode=2): batch_size = 10 cur_batch_size = 11 num_samples = 50 arr_data = np.random.random((num_samples, 2)) arr_labels = np.random.random((num_samples, 4)) arr_weights = np.random.random((num_samples,)) i = 0 while True: if cur_batch_size > 1: cur_batch_size -= 1 batch_index = i * batch_size % num_samples i += 1 start = batch_index end = start + cur_batch_size x = arr_data[start:end] y = arr_labels[start:end] w = arr_weights[start:end] if mode == 1: yield x elif mode == 2: yield x, y else: yield x, y, w custom_generator_threads = data_utils.threadsafe_generator(custom_generator) class TestGeneratorMethods(test_combinations.TestCase): @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_fit_generator_method(self): model = test_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2 ) model.compile( loss="mse", optimizer=rmsprop.RMSprop(1e-3), metrics=["mae", metrics_module.CategoricalAccuracy()], ) model.fit_generator( custom_generator_threads(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, workers=4, use_multiprocessing=True, ) model.fit_generator( custom_generator(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, ) model.fit_generator( custom_generator(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, validation_data=custom_generator(), validation_steps=10, ) model.fit_generator( custom_generator(), steps_per_epoch=5, validation_data=custom_generator(), validation_steps=1, workers=1, ) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_evaluate_generator_method(self): model = test_utils.get_small_mlp( 
num_hidden=3, num_classes=4, input_dim=2 ) model.compile( loss="mse", optimizer=rmsprop.RMSprop(1e-3), metrics=["mae", metrics_module.CategoricalAccuracy()], run_eagerly=test_utils.should_run_eagerly(), ) model.evaluate_generator( custom_generator_threads(), steps=5, max_queue_size=10, workers=2, verbose=1, use_multiprocessing=True, ) model.evaluate_generator( custom_generator(), steps=5, max_queue_size=10, use_multiprocessing=False, ) model.evaluate_generator( custom_generator(), steps=5, max_queue_size=10, use_multiprocessing=False, workers=1, ) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_predict_generator_method(self): model = test_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2 ) model.run_eagerly = test_utils.should_run_eagerly() model.predict_generator( custom_generator_threads(), steps=5, max_queue_size=10, workers=2, use_multiprocessing=True, ) model.predict_generator( custom_generator(), steps=5, max_queue_size=10, use_multiprocessing=False, ) model.predict_generator( custom_generator(), steps=5, max_queue_size=10, workers=1 ) # Test generator with just inputs (no targets) model.predict_generator( custom_generator_threads(mode=1), steps=5, max_queue_size=10, workers=2, use_multiprocessing=True, ) model.predict_generator( custom_generator(mode=1), steps=5, max_queue_size=10, use_multiprocessing=False, ) model.predict_generator( custom_generator(mode=1), steps=5, max_queue_size=10, workers=1 ) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes def test_generator_methods_with_sample_weights(self): model = test_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2 ) model.compile( loss="mse", optimizer=rmsprop.RMSprop(1e-3), metrics=["mae", metrics_module.CategoricalAccuracy()], run_eagerly=test_utils.should_run_eagerly(), ) model.fit_generator( custom_generator(mode=3), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, ) model.fit_generator( custom_generator(mode=3), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, validation_data=custom_generator(mode=3), validation_steps=10, ) model.predict_generator( custom_generator(mode=3), steps=5, max_queue_size=10, use_multiprocessing=False, ) model.evaluate_generator( custom_generator(mode=3), steps=5, max_queue_size=10, use_multiprocessing=False, ) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes def test_generator_methods_invalid_use_case(self): def invalid_generator(): while 1: yield (0, 0, 0, 0) model = test_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2 ) model.compile( loss="mse", optimizer=rmsprop.RMSprop(1e-3), run_eagerly=test_utils.should_run_eagerly(), ) with self.assertRaises(ValueError): model.fit_generator( invalid_generator(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, ) with self.assertRaises(ValueError): model.fit_generator( custom_generator(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, validation_data=invalid_generator(), validation_steps=10, ) with self.assertRaises(ValueError): model.predict_generator( invalid_generator(), steps=5, max_queue_size=10, use_multiprocessing=False, ) with self.assertRaises(ValueError): model.evaluate_generator( invalid_generator(), steps=5, max_queue_size=10, use_multiprocessing=False, ) @test_combinations.run_with_all_model_types 
@test_combinations.run_all_keras_modes def test_generator_input_to_fit_eval_predict(self): val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32) def ones_generator(): while True: yield np.ones([10, 10], np.float32), np.ones( [10, 1], np.float32 ) model = test_utils.get_small_mlp( num_hidden=10, num_classes=1, input_dim=10 ) model.compile( rmsprop.RMSprop(0.001), "binary_crossentropy", run_eagerly=test_utils.should_run_eagerly(), ) model.fit( ones_generator(), steps_per_epoch=2, validation_data=val_data, epochs=2, ) model.evaluate(ones_generator(), steps=2) model.predict(ones_generator(), steps=2) # Test with a changing batch size model = test_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2 ) model.compile( loss="mse", optimizer=rmsprop.RMSprop(1e-3), metrics=["mae", metrics_module.CategoricalAccuracy()], ) model.fit_generator( custom_generator_changing_batch_size(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, ) model.fit_generator( custom_generator_changing_batch_size(), steps_per_epoch=5, epochs=1, verbose=1, max_queue_size=10, use_multiprocessing=False, validation_data=custom_generator_changing_batch_size(), validation_steps=10, ) model.fit( custom_generator_changing_batch_size(), steps_per_epoch=5, validation_data=custom_generator_changing_batch_size(), validation_steps=10, epochs=2, ) model.evaluate(custom_generator_changing_batch_size(), steps=5) model.predict(custom_generator_changing_batch_size(), steps=5) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_generator_dynamic_shapes(self): x = [ "I think juice is great", "unknown is the best language since slicedbread", "a a a a a a a", "matmul", "Yaks are also quite nice", ] y = [1, 0, 0, 1, 1] vocab = { word: i + 1 for i, word in enumerate( sorted(set(itertools.chain(*[i.split() for i in x]))) ) } def data_gen(batch_size=2): np.random.seed(0) data = list(zip(x, y)) * 10 np.random.shuffle(data) def pack_and_pad(queue): x = [[vocab[j] for j in i[0].split()] for i in queue] pad_len = max(len(i) for i in x) x = np.array([i + [0] * (pad_len - len(i)) for i in x]) y = np.array([i[1] for i in queue]) del queue[:] return x, y[:, np.newaxis] queue = [] for i, element in enumerate(data): queue.append(element) if not (i + 1) % batch_size: yield pack_and_pad(queue) if queue: # Last partial batch yield pack_and_pad(queue) model = test_utils.get_model_from_layers( [ layers_module.Embedding(input_dim=len(vocab) + 1, output_dim=4), layers_module.SimpleRNN(units=1), layers_module.Activation("sigmoid"), ], input_shape=(None,), ) model.compile(loss=losses.binary_crossentropy, optimizer="sgd") model.fit(data_gen(), epochs=1, steps_per_epoch=5) class TestGeneratorMethodsWithSequences(test_combinations.TestCase): @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_training_with_sequences(self): class DummySequence(data_utils.Sequence): def __getitem__(self, idx): return np.zeros([10, 2]), np.ones([10, 4]) def __len__(self): return 10 model = test_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2 ) model.compile(loss="mse", optimizer=rmsprop.RMSprop(1e-3)) model.fit_generator( DummySequence(), steps_per_epoch=10, validation_data=custom_generator(), validation_steps=1, max_queue_size=10, workers=1, use_multiprocessing=True, ) model.fit_generator( DummySequence(), steps_per_epoch=10, 
validation_data=custom_generator(), validation_steps=1, max_queue_size=10, workers=1, use_multiprocessing=False, ) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @data_utils.dont_use_multiprocessing_pool def test_sequence_input_to_fit_eval_predict(self): val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32) class CustomSequence(data_utils.Sequence): def __getitem__(self, idx): return np.ones([10, 10], np.float32), np.ones( [10, 1], np.float32 ) def __len__(self): return 2 class CustomSequenceChangingBatchSize(data_utils.Sequence): def __getitem__(self, idx): batch_size = 10 - idx return ( np.ones([batch_size, 10], np.float32), np.ones([batch_size, 1], np.float32), ) def __len__(self): return 2 model = test_utils.get_small_mlp( num_hidden=10, num_classes=1, input_dim=10 ) model.compile(rmsprop.RMSprop(0.001), "binary_crossentropy") model.fit(CustomSequence(), validation_data=val_data, epochs=2) model.evaluate(CustomSequence()) model.predict(CustomSequence()) with self.assertRaisesRegex( ValueError, "`y` argument is not supported" ): model.fit(CustomSequence(), y=np.ones([10, 1])) with self.assertRaisesRegex( ValueError, "`sample_weight` argument is not supported" ): model.fit(CustomSequence(), sample_weight=np.ones([10, 1])) model.compile(rmsprop.RMSprop(0.001), "binary_crossentropy") model.fit( CustomSequenceChangingBatchSize(), validation_data=val_data, epochs=2, ) model.evaluate(CustomSequenceChangingBatchSize()) model.predict(CustomSequenceChangingBatchSize()) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_sequence_on_epoch_end(self): class MySequence(data_utils.Sequence): def __init__(self): self.epochs = 0 def __getitem__(self, idx): return np.ones([10, 10], np.float32), np.ones( [10, 1], np.float32 ) def __len__(self): return 2 def on_epoch_end(self): self.epochs += 1 inputs = input_layer.Input(10) outputs = layers_module.Dense(1)(inputs) model = training.Model(inputs, outputs) model.compile("sgd", "mse") my_seq = MySequence() model.fit(my_seq, epochs=2) self.assertEqual(my_seq.epochs, 2) @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class TestConvertToGeneratorLike(tf.test.TestCase, parameterized.TestCase): simple_inputs = (np.ones((10, 10)), np.ones((10, 1))) nested_inputs = ( (np.ones((10, 10)), np.ones((10, 20))), (np.ones((10, 1)), np.ones((10, 3))), ) def _make_dataset(self, inputs, batches): return tf.data.Dataset.from_tensors(inputs).repeat(batches) def _make_iterator(self, inputs, batches): return tf.compat.v1.data.make_one_shot_iterator( self._make_dataset(inputs, batches) ) def _make_generator(self, inputs, batches): def _gen(): for _ in range(batches): yield inputs return _gen() def _make_numpy(self, inputs, _): return inputs @parameterized.named_parameters( ("simple_dataset", _make_dataset, simple_inputs), ("simple_iterator", _make_iterator, simple_inputs), ("simple_generator", _make_generator, simple_inputs), ("simple_numpy", _make_numpy, simple_inputs), ("nested_dataset", _make_dataset, nested_inputs), ("nested_iterator", _make_iterator, nested_inputs), ("nested_generator", _make_generator, nested_inputs), ("nested_numpy", _make_numpy, nested_inputs), ) def test_convert_to_generator_like(self, input_fn, inputs): expected_batches = 5 data = input_fn(self, inputs, expected_batches) # Dataset and Iterator not supported in Legacy Graph mode. 
if not tf.executing_eagerly() and isinstance( data, (tf.data.Dataset, tf.compat.v1.data.Iterator) ): return generator, steps = training_generator_v1.convert_to_generator_like( data, batch_size=2, steps_per_epoch=expected_batches ) self.assertEqual(steps, expected_batches) for _ in range(expected_batches): outputs = next(generator) tf.nest.assert_same_structure(outputs, inputs) if __name__ == "__main__": tf.test.main()
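# Editor's note (sketch): `data_utils.threadsafe_generator`, used above to
# feed one generator to several workers, essentially serializes `next()`
# calls behind a lock. A minimal hand-rolled equivalent, for illustration
# only:
import threading


class _ThreadSafeIterator:
    def __init__(self, iterator):
        self._iterator = iterator
        self._lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        # Only one worker thread may advance the wrapped generator at a time.
        with self._lock:
            return next(self._iterator)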
tf-keras/tf_keras/engine/training_generator_test.py/0
{ "file_path": "tf-keras/tf_keras/engine/training_generator_test.py", "repo_id": "tf-keras", "token_count": 10149 }
167
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for KPL + CentralStorageStrategy.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized # isort: off from tensorflow.compat.v2.__internal__.distribute import combinations from tf_keras.utils import kpl_test_utils # TODO(b/182278926): Combine this test with other strategies. @combinations.generate( tf.__internal__.test.combinations.combine( distribution=[combinations.central_storage_strategy_with_gpu_and_cpu], mode=["eager"], ) ) class CentralStorageStrategyTest(tf.test.TestCase, parameterized.TestCase): def testTrainAndServeWithKPL(self, distribution): use_adapt = False test_utils_obj = kpl_test_utils.DistributeKplTestUtils() with distribution.scope(): ( feature_mapper, label_mapper, ) = test_utils_obj.define_kpls_for_training(use_adapt) model = test_utils_obj.define_model() optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1) accuracy = tf.keras.metrics.Accuracy() def dataset_fn(_): return test_utils_obj.dataset_fn(feature_mapper, label_mapper) @tf.function def train_step(iterator): """The step function for one training step.""" def step_fn(inputs): """The computation to run on each replica.""" features, labels = inputs with tf.GradientTape() as tape: pred = model(features, training=True) loss = tf.keras.losses.binary_crossentropy(labels, pred) loss = tf.nn.compute_average_loss(loss) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients( list(zip(grads, model.trainable_variables)) ) actual_pred = tf.cast( tf.math.greater(pred, 0.5), tf.dtypes.int64 ) accuracy.update_state(labels, actual_pred) distribution.run(step_fn, args=(next(iterator),)) distributed_dataset = ( distribution.distribute_datasets_from_function(dataset_fn) ) distributed_iterator = iter(distributed_dataset) num_epochs = 4 num_steps = 7 for _ in range(num_epochs): accuracy.reset_state() for _ in range(num_steps): train_step(distributed_iterator) self.assertGreater(accuracy.result().numpy(), 0.5) self.assertEqual( optimizer.iterations.numpy(), num_epochs * num_steps ) # Test save/load/serving the trained model. test_utils_obj.test_save_load_serving_model( model, feature_mapper, test_utils_obj.define_reverse_lookup_layer() ) if __name__ == "__main__": tf.test.main()
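# Editor's note (sketch): `tf.nn.compute_average_loss` in the step function
# above divides the per-example loss by the *global* batch size, so that
# summing gradients across replicas reproduces the single-worker average.
# The batch sizes below are hypothetical.
def _example_loss_scaling():  # pragma: no cover
    per_example_loss = tf.random.uniform((16,))  # one replica's shard
    global_batch_size = 64  # e.g. 4 replicas x 16 examples each
    manual = tf.reduce_sum(per_example_loss) / global_batch_size
    reference = tf.nn.compute_average_loss(
        per_example_loss, global_batch_size=global_batch_size
    )
    tf.debugging.assert_near(manual, reference)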
tf-keras/tf_keras/integration_test/central_storage_strategy_test.py/0
{ "file_path": "tf-keras/tf_keras/integration_test/central_storage_strategy_test.py", "repo_id": "tf-keras", "token_count": 1660 }
168
import tensorflow as tf from tensorflow import keras from tf_keras.integration_test.models.input_spec import InputSpec from tf_keras.saving import serialization_lib IMG_SIZE = (64, 64) LATENT_DIM = 128 def get_data_spec(batch_size): return InputSpec((batch_size,) + IMG_SIZE + (3,)) def get_input_preprocessor(): return None class GAN(keras.Model): def __init__(self, discriminator, generator, latent_dim): super(GAN, self).__init__() self.discriminator = discriminator self.generator = generator self.latent_dim = latent_dim def compile(self, d_optimizer, g_optimizer, loss_fn, jit_compile=False): super(GAN, self).compile(jit_compile=jit_compile) self.d_optimizer = d_optimizer self.g_optimizer = g_optimizer self.loss_fn = loss_fn self.d_loss_metric = keras.metrics.Mean(name="d_loss") self.g_loss_metric = keras.metrics.Mean(name="g_loss") @property def metrics(self): return [self.d_loss_metric, self.g_loss_metric] def train_step(self, real_images): batch_size = tf.shape(real_images)[0] random_latent_vectors = tf.random.normal( shape=(batch_size, self.latent_dim) ) generated_images = self.generator(random_latent_vectors) combined_images = tf.concat([generated_images, real_images], axis=0) labels = tf.concat( [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0 ) labels += 0.05 * tf.random.uniform(tf.shape(labels)) with tf.GradientTape() as tape: predictions = self.discriminator(combined_images) d_loss = self.loss_fn(labels, predictions) grads = tape.gradient(d_loss, self.discriminator.trainable_weights) self.d_optimizer.apply_gradients( zip(grads, self.discriminator.trainable_weights) ) random_latent_vectors = tf.random.normal( shape=(batch_size, self.latent_dim) ) misleading_labels = tf.zeros((batch_size, 1)) with tf.GradientTape() as tape: predictions = self.discriminator( self.generator(random_latent_vectors) ) g_loss = self.loss_fn(misleading_labels, predictions) grads = tape.gradient(g_loss, self.generator.trainable_weights) self.g_optimizer.apply_gradients( zip(grads, self.generator.trainable_weights) ) self.d_loss_metric.update_state(d_loss) self.g_loss_metric.update_state(g_loss) return { "d_loss": self.d_loss_metric.result(), "g_loss": self.g_loss_metric.result(), } def get_config(self): return { "discriminator": self.discriminator, "generator": self.generator, "latent_dim": self.latent_dim, } @classmethod def from_config(cls, config): discriminator = serialization_lib.deserialize_keras_object( config["discriminator"] ) generator = serialization_lib.deserialize_keras_object( config["generator"] ) latent_dim = config["latent_dim"] return cls(discriminator, generator, latent_dim) def get_compile_config(self): return { "loss_fn": self.loss_fn, "d_optimizer": self.d_optimizer, "g_optimizer": self.g_optimizer, "jit_compile": self.jit_compile, } def compile_from_config(self, config): loss_fn = serialization_lib.deserialize_keras_object(config["loss_fn"]) d_optimizer = serialization_lib.deserialize_keras_object( config["d_optimizer"] ) g_optimizer = serialization_lib.deserialize_keras_object( config["g_optimizer"] ) jit_compile = config["jit_compile"] self.compile( loss_fn=loss_fn, d_optimizer=d_optimizer, g_optimizer=g_optimizer, jit_compile=jit_compile, ) def get_model( build=False, compile=False, jit_compile=False, include_preprocessing=True ): discriminator = keras.Sequential( [ keras.Input(shape=IMG_SIZE + (3,)), keras.layers.Conv2D(64, kernel_size=4, strides=2, padding="same"), keras.layers.LeakyReLU(alpha=0.2), keras.layers.Conv2D(128, kernel_size=4, strides=2, padding="same"), 
keras.layers.LeakyReLU(alpha=0.2), keras.layers.Conv2D(128, kernel_size=4, strides=2, padding="same"), keras.layers.LeakyReLU(alpha=0.2), keras.layers.Flatten(), keras.layers.Dropout(0.2), keras.layers.Dense(1, activation="sigmoid"), ], name="discriminator", ) generator = keras.Sequential( [ keras.Input(shape=(LATENT_DIM,)), keras.layers.Dense(8 * 8 * 128), keras.layers.Reshape((8, 8, 128)), keras.layers.Conv2DTranspose( 128, kernel_size=4, strides=2, padding="same" ), keras.layers.LeakyReLU(alpha=0.2), keras.layers.Conv2DTranspose( 256, kernel_size=4, strides=2, padding="same" ), keras.layers.LeakyReLU(alpha=0.2), keras.layers.Conv2DTranspose( 512, kernel_size=4, strides=2, padding="same" ), keras.layers.LeakyReLU(alpha=0.2), keras.layers.Conv2D( 3, kernel_size=5, padding="same", activation="sigmoid" ), ], name="generator", ) gan = GAN( discriminator=discriminator, generator=generator, latent_dim=LATENT_DIM ) if compile: gan.compile( d_optimizer=keras.optimizers.Adam(learning_rate=0.0001), g_optimizer=keras.optimizers.Adam(learning_rate=0.0001), loss_fn=keras.losses.BinaryCrossentropy(), jit_compile=jit_compile, ) return gan def get_custom_objects(): return {"GAN": GAN}
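# Editor's usage sketch (not part of the original module): driving the GAN
# above end to end on random images, the way an integration harness might.
def _example_train_dcgan():  # pragma: no cover
    import numpy as np

    gan = get_model(compile=True)
    # `train_step` consumes batches of real images directly; no labels.
    fake_images = np.random.random((32,) + IMG_SIZE + (3,)).astype("float32")
    gan.fit(fake_images, batch_size=8, epochs=1, verbose=0)
    return gan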
tf-keras/tf_keras/integration_test/models/dcgan.py/0
{ "file_path": "tf-keras/tf_keras/integration_test/models/dcgan.py", "repo_id": "tf-keras", "token_count": 3024 }
169
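A minimal smoke test of the factory above, for illustration only (it is not part of the dataset file and assumes `get_model` and `IMG_SIZE` from dcgan.py are in scope in a TF 2.x environment). Because the custom `train_step` consumes only images, `fit` is called with `x` alone:

import numpy as np

gan = get_model(compile=True)
# Fake "real" images in [0, 1] with shape IMG_SIZE + (3,).
real_images = np.random.uniform(size=(8,) + IMG_SIZE + (3,)).astype("float32")
history = gan.fit(real_images, batch_size=4, epochs=1, verbose=0)
print(sorted(history.history.keys()))  # ['d_loss', 'g_loss']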
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test to demonstrate custom training loop with ParameterServerStrategy."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import multiprocessing

import portpicker
import tensorflow.compat.v2 as tf
from absl import logging

NUM_EPOCHS = 10
NUM_STEPS = 100
STEPS_PER_EXECUTION = 10


class ParameterServerCustomTrainingLoopTest(tf.test.TestCase):
    """Test to demonstrate custom training loop with
    ParameterServerStrategy."""

    def create_in_process_cluster(self, num_workers, num_ps):
        """Creates and starts local servers and returns the cluster spec."""
        worker_ports = [
            portpicker.pick_unused_port() for _ in range(num_workers)
        ]
        ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]

        cluster_dict = {}
        cluster_dict["worker"] = [f"localhost:{port}" for port in worker_ports]
        if num_ps > 0:
            cluster_dict["ps"] = [f"localhost:{port}" for port in ps_ports]

        cluster_spec = tf.train.ClusterSpec(cluster_dict)

        # Workers need some inter_op threads to work properly.
        worker_config = tf.compat.v1.ConfigProto()
        if multiprocessing.cpu_count() < num_workers + 1:
            worker_config.inter_op_parallelism_threads = num_workers + 1

        for i in range(num_workers):
            tf.distribute.Server(
                cluster_spec,
                job_name="worker",
                task_index=i,
                config=worker_config,
                protocol="grpc",
            )

        for i in range(num_ps):
            tf.distribute.Server(
                cluster_spec, job_name="ps", task_index=i, protocol="grpc"
            )

        return cluster_spec

    def setUp(self):
        super().setUp()
        cluster_spec = self.create_in_process_cluster(num_workers=3, num_ps=2)
        cluster_resolver = (
            tf.distribute.cluster_resolver.SimpleClusterResolver(
                cluster_spec, rpc_layer="grpc"
            )
        )
        self.strategy = tf.distribute.experimental.ParameterServerStrategy(
            cluster_resolver
        )
        self.coordinator = (
            tf.distribute.experimental.coordinator.ClusterCoordinator(
                self.strategy
            )
        )

    def testCustomTrainingLoop(self):
        coordinator, strategy = self.coordinator, self.strategy

        def per_worker_dataset_fn():
            def dataset_fn(_):
                return (
                    tf.data.Dataset.from_tensor_slices(
                        (tf.random.uniform((6, 10)), tf.random.uniform((6, 10)))
                    )
                    .batch(2)
                    .repeat()
                )

            return strategy.distribute_datasets_from_function(dataset_fn)

        per_worker_dataset = coordinator.create_per_worker_dataset(
            per_worker_dataset_fn
        )

        with strategy.scope():
            model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
            optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001)
            train_accuracy = tf.keras.metrics.CategoricalAccuracy(
                name="train_accuracy"
            )

        @tf.function
        def worker_train_fn(iterator):
            def replica_fn(inputs):
                """Training loop function."""
                batch_data, labels = inputs
                with tf.GradientTape() as tape:
                    predictions = model(batch_data, training=True)
                    loss = tf.keras.losses.CategoricalCrossentropy(
                        reduction=tf.keras.losses.Reduction.NONE
                    )(labels, predictions)
                gradients = tape.gradient(loss, model.trainable_variables)
                optimizer.apply_gradients(
                    zip(gradients, model.trainable_variables)
                )
                train_accuracy.update_state(labels, predictions)

            for _ in tf.range(STEPS_PER_EXECUTION):
                strategy.run(replica_fn, args=(next(iterator),))

        for epoch in range(NUM_EPOCHS):
            distributed_iterator = iter(per_worker_dataset)
            for step in range(0, NUM_STEPS, STEPS_PER_EXECUTION):
                coordinator.schedule(
                    worker_train_fn, args=(distributed_iterator,)
                )
                logging.info("Epoch %d, step %d scheduled.", epoch, step)
            logging.info("Now joining at epoch %d.", epoch)
            coordinator.join()
            logging.info(
                "Finished joining at epoch %d. Training accuracy: %f. "
                "Total iterations: %d",
                epoch,
                train_accuracy.result(),
                optimizer.iterations.value(),
            )
            if epoch < NUM_EPOCHS - 1:
                train_accuracy.reset_states()


if __name__ == "__main__":
    if tf.__internal__.tf2.enabled():
        tf.__internal__.distribute.multi_process_runner.test_main()
tf-keras/tf_keras/integration_test/parameter_server_custom_training_loop_test.py/0
{ "file_path": "tf-keras/tf_keras/integration_test/parameter_server_custom_training_loop_test.py", "repo_id": "tf-keras", "token_count": 2593 }
170
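A side note on the coordinator API exercised above: `ClusterCoordinator.schedule` returns a `RemoteValue` whose result can be fetched once `join` completes. A hedged sketch, assuming `tf` is imported and `coordinator` was built as in `setUp` above:

@tf.function
def one_step():
    # Any traced function can be dispatched to a remote worker.
    return tf.constant(1.0)

remote_value = coordinator.schedule(one_step)
coordinator.join()  # Blocks until all scheduled functions have executed.
print(remote_value.fetch())  # 1.0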
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras abstract base for depthwise convolutions."""

import tensorflow.compat.v2 as tf

from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.convolutional.base_conv import Conv


class DepthwiseConv(Conv):
    """Depthwise convolution.

    Depthwise convolution is a type of convolution in which each input channel
    is convolved with a different kernel (called a depthwise kernel). You can
    understand depthwise convolution as the first step in a depthwise
    separable convolution.

    It is implemented via the following steps:

    - Split the input into individual channels.
    - Convolve each channel with an individual depthwise kernel with
      `depth_multiplier` output channels.
    - Concatenate the convolved outputs along the channels axis.

    Unlike a regular convolution, depthwise convolution does not mix
    information across different input channels.

    The `depth_multiplier` argument determines how many filters are applied to
    one input channel. As such, it controls the amount of output channels that
    are generated per input channel in the depthwise step.

    Args:
      kernel_size: A tuple or list of integers specifying the spatial
        dimensions of the filters. Can be a single integer to specify the same
        value for all spatial dimensions.
      strides: A tuple or list of integers specifying the strides of the
        convolution. Can be a single integer to specify the same value for all
        spatial dimensions. Specifying any `stride` value != 1 is incompatible
        with specifying any `dilation_rate` value != 1.
      padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"`
        means no padding. `"same"` results in padding with zeros evenly to
        the left/right or up/down of the input such that output has the same
        height/width dimension as the input.
      depth_multiplier: The number of depthwise convolution output channels
        for each input channel. The total number of depthwise convolution
        output channels will be equal to `filters_in * depth_multiplier`.
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape `(batch_size, height,
        width, channels)` while `channels_first` corresponds to inputs with
        shape `(batch_size, channels, height, width)`. If left unspecified,
        uses `image_data_format` value found in your TF-Keras config file at
        `~/.keras/keras.json` (if exists) else 'channels_last'. Defaults to
        'channels_last'.
      dilation_rate: An integer or tuple/list of 2 integers, specifying the
        dilation rate to use for dilated convolution. Currently, specifying
        any `dilation_rate` value != 1 is incompatible with specifying any
        `strides` value != 1.
      activation: Activation function to use. If you don't specify anything,
        no activation is applied (see `keras.activations`).
      use_bias: Boolean, whether the layer uses a bias vector.
      depthwise_initializer: Initializer for the depthwise kernel matrix (see
        `keras.initializers`). If None, the default initializer
        ('glorot_uniform') will be used.
      bias_initializer: Initializer for the bias vector (see
        `keras.initializers`). If None, the default initializer ('zeros')
        will be used.
      depthwise_regularizer: Regularizer function applied to the depthwise
        kernel matrix (see `keras.regularizers`).
      bias_regularizer: Regularizer function applied to the bias vector (see
        `keras.regularizers`).
      activity_regularizer: Regularizer function applied to the output of the
        layer (its 'activation') (see `keras.regularizers`).
      depthwise_constraint: Constraint function applied to the depthwise
        kernel matrix (see `keras.constraints`).
      bias_constraint: Constraint function applied to the bias vector (see
        `keras.constraints`).

    Input shape:
      4D tensor with shape: `[batch_size, channels, rows, cols]` if
        data_format='channels_first'
      or 4D tensor with shape: `[batch_size, rows, cols, channels]` if
        data_format='channels_last'.

    Output shape:
      4D tensor with shape: `[batch_size, channels * depth_multiplier,
        new_rows, new_cols]` if `data_format='channels_first'`
      or 4D tensor with shape: `[batch_size, new_rows, new_cols, channels *
        depth_multiplier]` if `data_format='channels_last'`. `rows` and
        `cols` values might have changed due to padding.

    Returns:
      A tensor of rank 4 representing
      `activation(depthwiseconv2d(inputs, kernel) + bias)`.

    Raises:
      ValueError: if `padding` is "causal".
      ValueError: when both `strides` > 1 and `dilation_rate` > 1.
    """

    def __init__(
        self,
        rank,
        kernel_size,
        strides=1,
        padding="valid",
        depth_multiplier=1,
        data_format=None,
        dilation_rate=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            rank,
            filters=None,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.depth_multiplier = depth_multiplier
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
        self.bias_initializer = initializers.get(bias_initializer)

    def build(self, input_shape):
        if len(input_shape) != self.rank + 2:
            raise ValueError(
                "Inputs to `DepthwiseConv` should have "
                f"rank {self.rank + 2}. "
                f"Received input_shape={input_shape}."
            )
        input_shape = tf.TensorShape(input_shape)
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError(
                "The channel dimension of the inputs to `DepthwiseConv` "
                "should be defined. "
                f"The input_shape received is {input_shape}, "
                f"where axis {channel_axis} (0-based) "
                "is the channel dimension, which is found to be `None`."
            )
        input_dim = int(input_shape[channel_axis])
        depthwise_kernel_shape = self.kernel_size + (
            input_dim,
            self.depth_multiplier,
        )

        self.depthwise_kernel = self.add_weight(
            shape=depthwise_kernel_shape,
            initializer=self.depthwise_initializer,
            name="depthwise_kernel",
            regularizer=self.depthwise_regularizer,
            constraint=self.depthwise_constraint,
        )

        if self.use_bias:
            self.bias = self.add_weight(
                shape=(input_dim * self.depth_multiplier,),
                initializer=self.bias_initializer,
                name="bias",
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(
            min_ndim=self.rank + 2, axes={channel_axis: input_dim}
        )
        self.built = True

    def call(self, inputs):
        raise NotImplementedError

    def get_config(self):
        config = super().get_config()
        config.pop("filters")
        config.pop("kernel_initializer")
        config.pop("kernel_regularizer")
        config.pop("kernel_constraint")
        config["depth_multiplier"] = self.depth_multiplier
        config["depthwise_initializer"] = initializers.serialize(
            self.depthwise_initializer
        )
        config["depthwise_regularizer"] = regularizers.serialize(
            self.depthwise_regularizer
        )
        config["depthwise_constraint"] = constraints.serialize(
            self.depthwise_constraint
        )
        return config
tf-keras/tf_keras/layers/convolutional/base_depthwise_conv.py/0
{ "file_path": "tf-keras/tf_keras/layers/convolutional/base_depthwise_conv.py", "repo_id": "tf-keras", "token_count": 3717 }
171
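The channel arithmetic described in the docstring above is easiest to verify through the concrete public subclass `tf.keras.layers.DepthwiseConv2D`; a short illustrative sketch:

import tensorflow as tf

layer = tf.keras.layers.DepthwiseConv2D(
    kernel_size=3, depth_multiplier=2, padding="same"
)
x = tf.random.normal((1, 8, 8, 4))  # 4 input channels
# Output channels = filters_in * depth_multiplier = 4 * 2 = 8.
print(layer(x).shape)  # (1, 8, 8, 8)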
# Placeholder: load unaliased py_library # Description: # Contains the TF-Keras core layers. load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test") # buildifier: disable=same-origin-load load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test") package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], default_visibility = [ "//tf_keras:friends", "//third_party/py/tensorflow_gnn:__subpackages__", "//third_party/tensorflow/python/distribute:__pkg__", "//third_party/tensorflow/python/feature_column:__pkg__", "//third_party/tensorflow/python/trackable:__pkg__", "//third_party/tensorflow/tools/pip_package:__pkg__", "//third_party/tensorflow_models/official/projects/residual_mobilenet/modeling/backbones:__pkg__", ], licenses = ["notice"], ) py_library( name = "core", srcs = [ "__init__.py", ], srcs_version = "PY3", deps = [ ":activation", ":dense", ":einsum_dense", ":embedding", ":identity", ":lambda", ":masking", ":tf_op_layer", "//tf_keras/layers/regularization:activity_regularization", "//tf_keras/layers/regularization:dropout", "//tf_keras/layers/regularization:spatial_dropout1d", "//tf_keras/layers/regularization:spatial_dropout2d", "//tf_keras/layers/regularization:spatial_dropout3d", "//tf_keras/layers/reshaping:flatten", "//tf_keras/layers/reshaping:permute", "//tf_keras/layers/reshaping:repeat_vector", "//tf_keras/layers/reshaping:reshape", ], ) py_library( name = "activation", srcs = ["activation.py"], deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", ], ) py_library( name = "dense", srcs = ["dense.py"], deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras/dtensor:utils", ], ) py_library( name = "einsum_dense", srcs = ["einsum_dense.py"], srcs_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras:activations", "//tf_keras:constraints", "//tf_keras:regularizers", "//tf_keras/engine:base_layer", "//tf_keras/initializers", ], ) py_library( name = "embedding", srcs = ["embedding.py"], srcs_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras:constraints", "//tf_keras:regularizers", "//tf_keras/dtensor:utils", "//tf_keras/engine:base_layer", "//tf_keras/engine:base_layer_utils", "//tf_keras/initializers", "//tf_keras/utils:tf_utils", ], ) py_library( name = "lambda", srcs = ["lambda_layer.py"], deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", ], ) py_library( name = "masking", srcs = ["masking.py"], deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", ], ) py_library( name = "tf_op_layer", srcs = ["tf_op_layer.py"], srcs_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras:constraints", "//tf_keras:regularizers", "//tf_keras/engine:base_layer", "//tf_keras/initializers", "//tf_keras/utils:tf_utils", ], ) py_library( name = "identity", srcs = ["identity.py"], srcs_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras/engine:base_layer", ], ) tf_py_test( name = "core_test", size = "medium", srcs = ["core_test.py"], python_version = "PY3", shard_count = 3, deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "einsum_dense_test", srcs = ["einsum_dense_test.py"], python_version = "PY3", deps = [ ":einsum_dense", "//:expect_absl_installed", # absl/testing:parameterized "//:expect_numpy_installed", "//:expect_tensorflow_installed", 
"//tf_keras", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], ) cuda_py_test( name = "embedding_test", size = "medium", srcs = ["embedding_test.py"], python_version = "PY3", deps = [ "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/mixed_precision:policy", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], )
tf-keras/tf_keras/layers/core/BUILD/0
{ "file_path": "tf-keras/tf_keras/layers/core/BUILD", "repo_id": "tf-keras", "token_count": 2442 }
172
"""Test DynamicEmbedding with Parameter server strategy.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.callbacks import UpdateEmbeddingCallback from tf_keras.layers.experimental import dynamic_embedding from tf_keras.testing_infra import test_utils ds_combinations = tf.__internal__.distribute.combinations @test_utils.run_v2_only class DistributedDynamicEmbeddingTest(tf.test.TestCase, parameterized.TestCase): @ds_combinations.generate( tf.__internal__.test.combinations.combine( strategy=[ ds_combinations.parameter_server_strategy_3worker_2ps_cpu ], mode="eager", ) ) def test_dynamic_embedding_with_pss(self, strategy): # Generate dummy data train_data = np.array( [ ["a", "j", "c", "d", "e"], ["a", "h", "i", "j", "b"], ["i", "h", "c", "j", "e"], ] ) train_labels = np.array([0, 1, 2]) vocab = tf.constant(["a", "b", "c", "d", "e"]) eviction_policy = "LFU" with strategy.scope(): # Define the model model = keras.models.Sequential( [ dynamic_embedding.DynamicEmbedding( input_dim=5, output_dim=2, input_length=5, eviction_policy=eviction_policy, initial_vocabulary=vocab, ), keras.layers.Flatten(), keras.layers.Dense(3, activation="softmax"), ] ) update_embedding_callback = UpdateEmbeddingCallback( model.layers[0], interval=1, ) with update_embedding_callback: # Compile the model model.compile( optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"], ) result = model.fit( train_data, train_labels, epochs=100, batch_size=1, steps_per_epoch=2, callbacks=[update_embedding_callback], ) # Assert model trains self.assertEqual(result.history["loss"][0] > 0, True) self.assertTrue( tf.reduce_all( tf.not_equal( model.layers[0].dynamic_lookup_layer.vocabulary, vocab, ) ) ) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
tf-keras/tf_keras/layers/experimental/dynamic_embedding_distributed_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/experimental/dynamic_embedding_distributed_test.py", "repo_id": "tf-keras", "token_count": 1657 }
173
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer that adds several inputs."""

from tf_keras.layers.merging.base_merge import _Merge

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.layers.Add")
class Add(_Merge):
    """Layer that adds a list of inputs.

    It takes as input a list of tensors, all of the same shape, and returns
    a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = tf.random.normal(input_shape)
    >>> x2 = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.Add()([x1, x2])
    >>> print(y.shape)
    (2, 3, 4)

    Used in a functional model:

    >>> input1 = tf.keras.layers.Input(shape=(16,))
    >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = tf.keras.layers.Input(shape=(32,))
    >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `added = tf.keras.layers.add([x1, x2])`
    >>> added = tf.keras.layers.Add()([x1, x2])
    >>> out = tf.keras.layers.Dense(4)(added)
    >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)

    """

    def _merge_function(self, inputs):
        output = inputs[0]
        for i in range(1, len(inputs)):
            output += inputs[i]
        return output


@keras_export("keras.layers.add")
def add(inputs, **kwargs):
    """Functional interface to the `tf.keras.layers.Add` layer.

    Args:
        inputs: A list of input tensors with the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the sum of the inputs. It has the same shape as the
        inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = tf.random.normal(input_shape)
    >>> x2 = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.add([x1, x2])
    >>> print(y.shape)
    (2, 3, 4)

    Used in a functional model:

    >>> input1 = tf.keras.layers.Input(shape=(16,))
    >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)
    >>> input2 = tf.keras.layers.Input(shape=(32,))
    >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)
    >>> added = tf.keras.layers.add([x1, x2])
    >>> out = tf.keras.layers.Dense(4)(added)
    >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)

    """
    return Add(**kwargs)(inputs)
tf-keras/tf_keras/layers/merging/add.py/0
{ "file_path": "tf-keras/tf_keras/layers/merging/add.py", "repo_id": "tf-keras", "token_count": 1140 }
174
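A quick numeric check of the merge semantics documented above (the layer sums any number of same-shape inputs); illustrative only:

import numpy as np
import tensorflow as tf

x1, x2, x3 = (tf.ones((2, 3)) for _ in range(3))
y = tf.keras.layers.add([x1, x2, x3])
# Element-wise sum of three all-ones tensors is all threes.
np.testing.assert_allclose(y.numpy(), np.full((2, 3), 3.0))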
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Private base class for global pooling 2D layers."""

import tensorflow.compat.v2 as tf

from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.utils import conv_utils


class GlobalPooling2D(Layer):
    """Abstract class for different global pooling 2D layers."""

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(**kwargs)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=4)
        self.keepdims = keepdims

    def _validate_reduction_axis(self, input_shape, axes):
        for axis in axes:
            if input_shape[axis] == 0:
                raise ValueError(
                    f"Incorrect input shape {input_shape} "
                    f"with dimension 0 at reduction axis {axis}."
                )

    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape).as_list()
        if self.data_format == "channels_last":
            self._validate_reduction_axis(input_shape, [1, 2])
        else:
            self._validate_reduction_axis(input_shape, [2, 3])

    def compute_output_shape(self, input_shape):
        input_shape = tf.TensorShape(input_shape).as_list()
        if self.data_format == "channels_last":
            if self.keepdims:
                return tf.TensorShape([input_shape[0], 1, 1, input_shape[3]])
            else:
                return tf.TensorShape([input_shape[0], input_shape[3]])
        else:
            if self.keepdims:
                return tf.TensorShape([input_shape[0], input_shape[1], 1, 1])
            else:
                return tf.TensorShape([input_shape[0], input_shape[1]])

    def call(self, inputs):
        raise NotImplementedError

    def get_config(self):
        config = {"data_format": self.data_format, "keepdims": self.keepdims}
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
tf-keras/tf_keras/layers/pooling/base_global_pooling2d.py/0
{ "file_path": "tf-keras/tf_keras/layers/pooling/base_global_pooling2d.py", "repo_id": "tf-keras", "token_count": 1066 }
175
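The `keepdims` branch in `compute_output_shape` above is visible through the concrete public subclasses; a short sketch:

import tensorflow as tf

x = tf.random.normal((2, 5, 5, 3))
# Spatial axes reduced away entirely...
print(tf.keras.layers.GlobalAveragePooling2D()(x).shape)  # (2, 3)
# ...or kept as size-1 dimensions with keepdims=True.
print(
    tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(x).shape
)  # (2, 1, 1, 3)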
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for max pooling layers."""

import tensorflow.compat.v2 as tf
from absl.testing import parameterized

import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils


@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MaxPoolingTest(tf.test.TestCase, parameterized.TestCase):
    def test_max_pooling_1d(self):
        for padding in ["valid", "same"]:
            for stride in [1, 2]:
                test_utils.layer_test(
                    keras.layers.MaxPooling1D,
                    kwargs={"strides": stride, "padding": padding},
                    input_shape=(3, 5, 4),
                )
        test_utils.layer_test(
            keras.layers.MaxPooling1D,
            kwargs={"data_format": "channels_first"},
            input_shape=(3, 2, 6),
        )

    def test_max_pooling_2d(self):
        pool_size = (3, 3)
        for strides in [(1, 1), (2, 2)]:
            test_utils.layer_test(
                keras.layers.MaxPooling2D,
                kwargs={
                    "strides": strides,
                    "padding": "valid",
                    "pool_size": pool_size,
                },
                input_shape=(3, 5, 6, 4),
            )

    def test_max_pooling_3d(self):
        pool_size = (3, 3, 3)
        test_utils.layer_test(
            keras.layers.MaxPooling3D,
            kwargs={"strides": 2, "padding": "valid", "pool_size": pool_size},
            input_shape=(3, 11, 12, 10, 4),
        )
        test_utils.layer_test(
            keras.layers.MaxPooling3D,
            kwargs={
                "strides": 3,
                "padding": "valid",
                "data_format": "channels_first",
                "pool_size": pool_size,
            },
            input_shape=(3, 4, 11, 12, 10),
        )


if __name__ == "__main__":
    tf.test.main()
tf-keras/tf_keras/layers/pooling/max_pooling_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/pooling/max_pooling_test.py", "repo_id": "tf-keras", "token_count": 1209 }
176
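For reference, the shapes these tests exercise follow the usual `valid`-padding rule `out = floor((in - pool) / stride) + 1`; a sketch:

import tensorflow as tf

layer = tf.keras.layers.MaxPooling2D(
    pool_size=(3, 3), strides=(2, 2), padding="valid"
)
x = tf.random.normal((3, 5, 6, 4))
# floor((5 - 3) / 2) + 1 = 2 and floor((6 - 3) / 2) + 1 = 2.
print(layer(x).shape)  # (3, 2, 2, 4)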
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras hashed crossing preprocessing layer."""

import tensorflow.compat.v2 as tf

from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.layers.preprocessing import preprocessing_utils as utils
from tf_keras.utils import layer_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export

INT = utils.INT
ONE_HOT = utils.ONE_HOT


@keras_export(
    "keras.layers.HashedCrossing",
    "keras.layers.experimental.preprocessing.HashedCrossing",
    v1=[],
)
class HashedCrossing(base_layer.Layer):
    """A preprocessing layer which crosses features using the "hashing trick".

    This layer performs crosses of categorical features using the "hashing
    trick". Conceptually, the transformation can be thought of as:
    `hash(concatenate(features)) % num_bins`.

    This layer currently only performs crosses of scalar inputs and batches
    of scalar inputs. Valid input shapes are `(batch_size, 1)`,
    `(batch_size,)` and `()`.

    For an overview and full list of preprocessing layers, see the
    preprocessing [guide](
    https://www.tensorflow.org/guide/keras/preprocessing_layers).

    Args:
        num_bins: Number of hash bins.
        output_mode: Specification for the output of the layer. Values can be
            `"int"`, or `"one_hot"` configuring the layer as follows:
            - `"int"`: Return the integer bin indices directly.
            - `"one_hot"`: Encodes each individual element in the input into
                an array the same size as `num_bins`, containing a 1 at the
                input's bin index.
            Defaults to `"int"`.
        sparse: Boolean. Only applicable to `"one_hot"` mode. If `True`,
            returns a `SparseTensor` instead of a dense `Tensor`. Defaults to
            `False`.
        **kwargs: Keyword arguments to construct a layer.

    Examples:

    **Crossing two scalar features.**

    >>> layer = tf.keras.layers.HashedCrossing(
    ...     num_bins=5)
    >>> feat1 = tf.constant(['A', 'B', 'A', 'B', 'A'])
    >>> feat2 = tf.constant([101, 101, 101, 102, 102])
    >>> layer((feat1, feat2))
    <tf.Tensor: shape=(5,), dtype=int64, numpy=array([1, 4, 1, 1, 3])>

    **Crossing and one-hotting two scalar features.**

    >>> layer = tf.keras.layers.HashedCrossing(
    ...     num_bins=5, output_mode='one_hot')
    >>> feat1 = tf.constant(['A', 'B', 'A', 'B', 'A'])
    >>> feat2 = tf.constant([101, 101, 101, 102, 102])
    >>> layer((feat1, feat2))
    <tf.Tensor: shape=(5, 5), dtype=float32, numpy=
      array([[0., 1., 0., 0., 0.],
             [0., 0., 0., 0., 1.],
             [0., 1., 0., 0., 0.],
             [0., 1., 0., 0., 0.],
             [0., 0., 0., 1., 0.]], dtype=float32)>
    """

    def __init__(self, num_bins, output_mode="int", sparse=False, **kwargs):
        # By default, output int64 when output_mode="int" and floats
        # otherwise.
        if "dtype" not in kwargs or kwargs["dtype"] is None:
            kwargs["dtype"] = (
                tf.int64 if output_mode == INT else backend.floatx()
            )

        super().__init__(**kwargs)

        # Check dtype only after base layer parses it; dtype parsing is
        # complex.
        if (
            output_mode == INT
            and not tf.as_dtype(self.compute_dtype).is_integer
        ):
            input_dtype = kwargs["dtype"]
            raise ValueError(
                "When `output_mode='int'`, `dtype` should be an integer "
                f"type. Received: dtype={input_dtype}"
            )

        # "output_mode" must be one of (INT, ONE_HOT)
        layer_utils.validate_string_arg(
            output_mode,
            allowable_strings=(INT, ONE_HOT),
            layer_name=self.__class__.__name__,
            arg_name="output_mode",
        )

        self.num_bins = num_bins
        self.output_mode = output_mode
        self.sparse = sparse

    def call(self, inputs):
        # Convert all inputs to tensors and check shape. This layer only
        # supports scalars and batches of scalars for the initial version.
        self._check_at_least_two_inputs(inputs)
        inputs = [utils.ensure_tensor(x) for x in inputs]
        self._check_input_shape_and_type(inputs)

        # Uprank to rank 2 for the cross_hashed op.
        rank = inputs[0].shape.rank
        if rank < 2:
            inputs = [utils.expand_dims(x, -1) for x in inputs]
        if rank < 1:
            inputs = [utils.expand_dims(x, -1) for x in inputs]

        # Perform the cross and convert to dense
        outputs = tf.sparse.cross_hashed(inputs, self.num_bins)
        outputs = tf.sparse.to_dense(outputs)

        # Fix output shape and downrank to match input rank.
        if rank == 2:
            # tf.sparse.cross_hashed output shape will always be None on the
            # last dimension. Given our input shape restrictions, we want to
            # force shape 1 instead.
            outputs = tf.reshape(outputs, [-1, 1])
        elif rank == 1:
            outputs = tf.reshape(outputs, [-1])
        elif rank == 0:
            outputs = tf.reshape(outputs, [])

        # Encode outputs.
        return utils.encode_categorical_inputs(
            outputs,
            output_mode=self.output_mode,
            depth=self.num_bins,
            sparse=self.sparse,
            dtype=self.compute_dtype,
        )

    def compute_output_shape(self, input_shapes):
        self._check_at_least_two_inputs(input_shapes)
        return utils.compute_shape_for_encode_categorical(input_shapes[0])

    def compute_output_signature(self, input_specs):
        input_shapes = [x.shape.as_list() for x in input_specs]
        output_shape = self.compute_output_shape(input_shapes)
        if self.sparse or any(
            isinstance(x, tf.SparseTensorSpec) for x in input_specs
        ):
            return tf.SparseTensorSpec(
                shape=output_shape, dtype=self.compute_dtype
            )
        return tf.TensorSpec(shape=output_shape, dtype=self.compute_dtype)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "num_bins": self.num_bins,
                "output_mode": self.output_mode,
                "sparse": self.sparse,
            }
        )
        return config

    def _check_at_least_two_inputs(self, inputs):
        if not isinstance(inputs, (list, tuple)):
            raise ValueError(
                "`HashedCrossing` should be called on a list or tuple of "
                f"inputs. Received: inputs={inputs}"
            )
        if len(inputs) < 2:
            raise ValueError(
                "`HashedCrossing` should be called on at least two inputs. "
                f"Received: inputs={inputs}"
            )

    def _check_input_shape_and_type(self, inputs):
        first_shape = inputs[0].shape.as_list()
        rank = len(first_shape)
        if rank > 2 or (rank == 2 and first_shape[-1] != 1):
            raise ValueError(
                "All `HashedCrossing` inputs should have shape `[]`, "
                "`[batch_size]` or `[batch_size, 1]`. "
                f"Received: inputs={inputs}"
            )
        if not all(x.shape.as_list() == first_shape for x in inputs[1:]):
            raise ValueError(
                "All `HashedCrossing` inputs should have equal shape. "
                f"Received: inputs={inputs}"
            )
        if any(
            isinstance(x, (tf.RaggedTensor, tf.SparseTensor)) for x in inputs
        ):
            raise ValueError(
                "All `HashedCrossing` inputs should be dense tensors. "
                f"Received: inputs={inputs}"
            )
        if not all(x.dtype.is_integer or x.dtype == tf.string for x in inputs):
            raise ValueError(
                "All `HashedCrossing` inputs should have an integer or "
                f"string dtype. Received: inputs={inputs}"
            )
tf-keras/tf_keras/layers/preprocessing/hashed_crossing.py/0
{ "file_path": "tf-keras/tf_keras/layers/preprocessing/hashed_crossing.py", "repo_id": "tf-keras", "token_count": 3821 }
177
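A sketch of wiring the layer into a functional model, extrapolating from the docstring's accepted `(batch_size, 1)` input shape; the exact one-hot output shape here is an assumption, so treat this as illustrative rather than definitive:

import tensorflow as tf

feat_a = tf.keras.Input(shape=(1,), dtype=tf.string)
feat_b = tf.keras.Input(shape=(1,), dtype=tf.int64)
# One-hot the crossed bin so it can feed a downstream Dense layer.
crossed = tf.keras.layers.HashedCrossing(num_bins=8, output_mode="one_hot")(
    (feat_a, feat_b)
)
output = tf.keras.layers.Dense(1)(crossed)
model = tf.keras.Model([feat_a, feat_b], output)
print(model.output_shape)  # expected: (None, 1)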
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Preprocessing stage.""" import numpy as np import tensorflow.compat.v2 as tf from tf_keras.engine import base_preprocessing_layer from tf_keras.engine import functional from tf_keras.engine import sequential from tf_keras.utils import tf_utils # Sequential methods should take precedence. class PreprocessingStage( sequential.Sequential, base_preprocessing_layer.PreprocessingLayer ): """A sequential preprocessing stage. This preprocessing stage wraps a list of preprocessing layers into a Sequential-like object that enables you to `adapt()` the whole list via a single `adapt()` call on the preprocessing stage. Args: layers: List of layers. Can include layers that aren't preprocessing layers. name: String. Optional name for the preprocessing stage object. """ def adapt(self, data, reset_state=True): """Adapt the state of the layers of the preprocessing stage to the data. Args: data: A batched Dataset object, or a NumPy array, or an EagerTensor. Data to be iterated over to adapt the state of the layers in this preprocessing stage. reset_state: Whether this call to `adapt` should reset the state of the layers in this preprocessing stage. """ if not isinstance( data, (tf.data.Dataset, np.ndarray, tf.__internal__.EagerTensor) ): raise ValueError( "`adapt()` requires a batched Dataset, an EagerTensor, or a " f"Numpy array as input. Received data={data}" ) if isinstance(data, tf.data.Dataset): # Validate the datasets to try and ensure we haven't been passed one # with infinite size. That would cause an infinite loop here. if tf_utils.dataset_is_infinite(data): raise ValueError( "The dataset passed to `adapt()` has an infinite number of " "elements. Please use dataset.take(...) to make the number " "of elements finite." ) for current_layer_index in range(0, len(self.layers)): if not hasattr(self.layers[current_layer_index], "adapt"): # Skip any layer that does not need adapting. continue def map_fn(x): """Maps this object's inputs to those at current_layer_index. Args: x: Batch of inputs seen in entry of the `PreprocessingStage` instance. Returns: Batch of inputs to be processed by layer `self.layers[current_layer_index]` """ if current_layer_index == 0: return x for i in range(current_layer_index): x = self.layers[i](x) return x if isinstance(data, tf.data.Dataset): current_layer_data = data.map(map_fn) else: current_layer_data = map_fn(data) self.layers[current_layer_index].adapt( current_layer_data, reset_state=reset_state ) # Functional methods should take precedence. class FunctionalPreprocessingStage( functional.Functional, base_preprocessing_layer.PreprocessingLayer ): """A functional preprocessing stage. This preprocessing stage wraps a graph of preprocessing layers into a Functional-like object that enables you to `adapt()` the whole graph via a single `adapt()` call on the preprocessing stage. 
Preprocessing stage is not a complete model, so it cannot be called with `fit()`. However, it is possible to add regular layers that may be trainable to a preprocessing stage. A functional preprocessing stage is created in the same way as `Functional` models. A stage can be instantiated by passing two arguments to `__init__`. The first argument is the `keras.Input` Tensors that represent the inputs to the stage. The second argument specifies the output tensors that represent the outputs of this stage. Both arguments can be a nested structure of tensors. Example: >>> inputs = {'x2': tf.keras.Input(shape=(5,)), ... 'x1': tf.keras.Input(shape=(1,))} >>> norm_layer = tf.keras.layers.Normalization() >>> y = norm_layer(inputs['x2']) >>> y, z = tf.keras.layers.Lambda(lambda x: (x, x))(inputs['x1']) >>> outputs = [inputs['x1'], [y, z]] >>> stage = FunctionalPreprocessingStage(inputs, outputs) Args: inputs: An input tensor (must be created via `tf.keras.Input()`), or a list, a dict, or a nested structure of input tensors. outputs: An output tensor, or a list, a dict or a nested structure of output tensors. name: String, optional. Name of the preprocessing stage. """ def fit(self, *args, **kwargs): raise ValueError( "Preprocessing stage is not a complete model, and hence should not " "be `fit`. Instead, you may feed data to `adapt` the stage to set " "appropriate states of the layers in the stage." ) def adapt(self, data, reset_state=True): """Adapt the state of the layers of the preprocessing stage to the data. Args: data: A batched Dataset object, a NumPy array, an EagerTensor, or a list, dict or nested structure of Numpy Arrays or EagerTensors. The elements of Dataset object need to conform with inputs of the stage. The first dimension of NumPy arrays or EagerTensors are understood to be batch dimension. Data to be iterated over to adapt the state of the layers in this preprocessing stage. reset_state: Whether this call to `adapt` should reset the state of the layers in this preprocessing stage. Examples: >>> # For a stage with dict input >>> inputs = {'x2': tf.keras.Input(shape=(5,)), ... 'x1': tf.keras.Input(shape=(1,))} >>> outputs = [inputs['x1'], inputs['x2']] >>> stage = FunctionalPreprocessingStage(inputs, outputs) >>> ds = tf.data.Dataset.from_tensor_slices({'x1': tf.ones((4,5)), ... 'x2': tf.ones((4,1))}) >>> sorted(ds.element_spec.items()) # Check element_spec [('x1', TensorSpec(shape=(5,), dtype=tf.float32, name=None)), ('x2', TensorSpec(shape=(1,), dtype=tf.float32, name=None))] >>> stage.adapt(ds) >>> data_np = {'x1': np.ones((4, 5)), 'x2': np.ones((4, 1))} >>> stage.adapt(data_np) """ if not isinstance(data, tf.data.Dataset): data = self._flatten_to_reference_inputs(data) if any( not isinstance(datum, (np.ndarray, tf.__internal__.EagerTensor)) for datum in data ): raise ValueError( "`adapt()` requires a batched Dataset, a list of " f"EagerTensors or Numpy arrays as input, got {type(data)}" ) ds_input = [ tf.data.Dataset.from_tensor_slices(x).batch(1) for x in data ] if isinstance(data, tf.data.Dataset): # Validate the datasets to try and ensure we haven't been passed one # with infinite size. That would cause an infinite loop here. if tf_utils.dataset_is_infinite(data): raise ValueError( "The dataset passed to `adapt()` has an infinite number of " "elements. Please use dataset.take(...) to make the number " "of elements finite." ) # Unzip dataset object to a list of single input dataset. 
ds_input = _unzip_dataset(data) # Dictionary mapping reference tensors to datasets ds_dict = {} tensor_usage_count = self._tensor_usage_count for x, y in zip(self.inputs, ds_input): x_id = str(id(x)) ds_dict[x_id] = [y] * tensor_usage_count[x_id] nodes_by_depth = self._nodes_by_depth depth_keys = sorted(nodes_by_depth.keys(), reverse=True) def build_map_fn(node, args, kwargs): if not isinstance(args.element_spec, tuple): def map_fn(*x): return tf.nest.flatten(node.layer(*x, **kwargs)) else: def map_fn(*x): return tf.nest.flatten(node.layer(x, **kwargs)) return map_fn for depth in depth_keys: for node in nodes_by_depth[depth]: # Input node if node.is_input: continue # Node with input not computed yet if any(t_id not in ds_dict for t_id in node.flat_input_ids): continue args, kwargs = node.map_arguments(ds_dict) args = tf.data.Dataset.zip( tf.__internal__.nest.list_to_tuple(*args) ) if node.layer.stateful and hasattr(node.layer, "adapt"): node.layer.adapt(args, reset_state=reset_state) map_fn = build_map_fn(node, args, kwargs) outputs = args.map(map_fn) outputs = _unzip_dataset(outputs) # Update ds_dict. for x_id, y in zip(node.flat_output_ids, outputs): ds_dict[x_id] = [y] * tensor_usage_count[x_id] def _unzip_dataset(ds): """Unzip dataset into a list of single element datasets. Args: ds: A Dataset object. Returns: A list of Dataset object, each correspond to one of the `element_spec` of the input Dataset object. Example: >>> ds1 = tf.data.Dataset.from_tensor_slices([1, 2, 3]) >>> ds2 = tf.data.Dataset.from_tensor_slices([4, 5, 6]) >>> ds_zipped_tuple = tf.data.Dataset.zip((ds1, ds2)) >>> ds_unzipped_tuple = _unzip_dataset(ds_zipped_tuple) >>> ds_zipped_dict = tf.data.Dataset.zip({'ds1': ds1, 'ds2': ds2}) >>> ds_unzipped_dict = _unzip_dataset(ds_zipped_dict) Then the two elements of `ds_unzipped_tuple` and `ds_unzipped_dict` are both the same as `ds1` and `ds2`. """ element_count = len(tf.nest.flatten(ds.element_spec)) ds_unzipped = [] for i in range(element_count): def map_fn(*x, j=i): return tf.nest.flatten(x)[j] ds_unzipped.append(ds.map(map_fn)) return ds_unzipped
tf-keras/tf_keras/layers/preprocessing/preprocessing_stage.py/0
{ "file_path": "tf-keras/tf_keras/layers/preprocessing/preprocessing_stage.py", "repo_id": "tf-keras", "token_count": 4977 }
178
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for cropping layers.""" import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils @test_combinations.run_all_keras_modes class CroppingTest(test_combinations.TestCase): def test_cropping_1d(self): num_samples = 2 time_length = 4 input_len_dim1 = 2 inputs = np.random.rand(num_samples, time_length, input_len_dim1) with self.cached_session(): test_utils.layer_test( keras.layers.Cropping1D, kwargs={"cropping": (1, 1)}, input_shape=inputs.shape, ) # test incorrect use with self.assertRaises(ValueError): keras.layers.Cropping1D(cropping=(1, 1, 1)) with self.assertRaises(ValueError): keras.layers.Cropping1D(cropping=None) with self.assertRaises(ValueError): input_layer = keras.layers.Input( shape=(num_samples, time_length, input_len_dim1) ) keras.layers.Cropping1D(cropping=(2, 3))(input_layer) def test_cropping_2d(self): num_samples = 2 stack_size = 2 input_len_dim1 = 9 input_len_dim2 = 9 cropping = ((2, 2), (3, 3)) for data_format in ["channels_first", "channels_last"]: if data_format == "channels_first": inputs = np.random.rand( num_samples, stack_size, input_len_dim1, input_len_dim2 ) else: inputs = np.random.rand( num_samples, input_len_dim1, input_len_dim2, stack_size ) with self.cached_session(): # basic test test_utils.layer_test( keras.layers.Cropping2D, kwargs={"cropping": cropping, "data_format": data_format}, input_shape=inputs.shape, ) # correctness test layer = keras.layers.Cropping2D( cropping=cropping, data_format=data_format ) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if tf.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) # compare with numpy if data_format == "channels_first": expected_out = inputs[ :, :, cropping[0][0] : -cropping[0][1], cropping[1][0] : -cropping[1][1], ] else: expected_out = inputs[ :, cropping[0][0] : -cropping[0][1], cropping[1][0] : -cropping[1][1], :, ] np.testing.assert_allclose(np_output, expected_out) for data_format in ["channels_first", "channels_last"]: if data_format == "channels_first": inputs = np.random.rand( num_samples, stack_size, input_len_dim1, input_len_dim2 ) else: inputs = np.random.rand( num_samples, input_len_dim1, input_len_dim2, stack_size ) # another correctness test (no cropping) with self.cached_session(): cropping = ((0, 0), (0, 0)) layer = keras.layers.Cropping2D( cropping=cropping, data_format=data_format ) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if tf.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) # compare with input np.testing.assert_allclose(np_output, inputs) # test incorrect use with self.assertRaises(ValueError): keras.layers.Cropping2D(cropping=(1, 1, 1)) with 
self.assertRaises(ValueError): keras.layers.Cropping2D(cropping=None) with self.assertRaises(ValueError): input_layer = keras.layers.Input( shape=(num_samples, input_len_dim1, input_len_dim2, stack_size) ) keras.layers.Cropping2D(cropping=((5, 4), (3, 4)))(input_layer) def test_cropping_3d(self): num_samples = 2 stack_size = 2 input_len_dim1 = 8 input_len_dim2 = 8 input_len_dim3 = 8 croppings = [((2, 2), (1, 1), (2, 3)), 3, (0, 1, 1)] for cropping in croppings: for data_format in ["channels_last", "channels_first"]: if data_format == "channels_first": inputs = np.random.rand( num_samples, stack_size, input_len_dim1, input_len_dim2, input_len_dim3, ) else: inputs = np.random.rand( num_samples, input_len_dim1, input_len_dim2, input_len_dim3, stack_size, ) # basic test with self.cached_session(): test_utils.layer_test( keras.layers.Cropping3D, kwargs={ "cropping": cropping, "data_format": data_format, }, input_shape=inputs.shape, ) if len(croppings) == 3 and len(croppings[0]) == 2: # correctness test with self.cached_session(): layer = keras.layers.Cropping3D( cropping=cropping, data_format=data_format ) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if tf.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) # compare with numpy if data_format == "channels_first": expected_out = inputs[ :, :, cropping[0][0] : -cropping[0][1], cropping[1][0] : -cropping[1][1], cropping[2][0] : -cropping[2][1], ] else: expected_out = inputs[ :, cropping[0][0] : -cropping[0][1], cropping[1][0] : -cropping[1][1], cropping[2][0] : -cropping[2][1], :, ] np.testing.assert_allclose(np_output, expected_out) # test incorrect use with self.assertRaises(ValueError): keras.layers.Cropping3D(cropping=(1, 1)) with self.assertRaises(ValueError): keras.layers.Cropping3D(cropping=None) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/reshaping/cropping_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/reshaping/cropping_test.py", "repo_id": "tf-keras", "token_count": 4883 }
179
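The correctness checks above reduce to plain slicing, so the output size is `dim - crop_before - crop_after` per spatial axis; a sketch:

import tensorflow as tf

layer = tf.keras.layers.Cropping2D(cropping=((2, 2), (3, 3)))
x = tf.random.normal((1, 9, 9, 2))
# Rows: 9 - 2 - 2 = 5; cols: 9 - 3 - 3 = 3.
print(layer(x).shape)  # (1, 5, 3, 2)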
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for zero-padding layers.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils @test_combinations.run_all_keras_modes class ZeroPaddingTest(test_combinations.TestCase): def test_zero_padding_1d(self): num_samples = 2 input_dim = 2 num_steps = 5 shape = (num_samples, num_steps, input_dim) inputs = np.ones(shape) with self.cached_session(): # basic test test_utils.layer_test( keras.layers.ZeroPadding1D, kwargs={"padding": 2}, input_shape=inputs.shape, ) test_utils.layer_test( keras.layers.ZeroPadding1D, kwargs={"padding": (1, 2)}, input_shape=inputs.shape, ) # correctness test layer = keras.layers.ZeroPadding1D(padding=2) layer.build(shape) output = layer(keras.backend.variable(inputs)) if tf.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) for offset in [0, 1, -1, -2]: np.testing.assert_allclose(np_output[:, offset, :], 0.0) np.testing.assert_allclose(np_output[:, 2:-2, :], 1.0) layer = keras.layers.ZeroPadding1D(padding=(1, 2)) layer.build(shape) output = layer(keras.backend.variable(inputs)) if tf.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) for left_offset in [0]: np.testing.assert_allclose(np_output[:, left_offset, :], 0.0) for right_offset in [-1, -2]: np.testing.assert_allclose(np_output[:, right_offset, :], 0.0) np.testing.assert_allclose(np_output[:, 1:-2, :], 1.0) layer.get_config() # test incorrect use with self.assertRaises(ValueError): keras.layers.ZeroPadding1D(padding=(1, 1, 1)) with self.assertRaises(ValueError): keras.layers.ZeroPadding1D(padding=None) @parameterized.named_parameters( ("channels_first", "channels_first"), ("channels_last", "channels_last") ) def test_zero_padding_2d(self, data_format): num_samples = 2 stack_size = 2 input_num_row = 4 input_num_col = 5 if data_format == "channels_first": inputs = np.ones( (num_samples, stack_size, input_num_row, input_num_col) ) elif data_format == "channels_last": inputs = np.ones( (num_samples, input_num_row, input_num_col, stack_size) ) # basic test with self.cached_session(): test_utils.layer_test( keras.layers.ZeroPadding2D, kwargs={"padding": (2, 2), "data_format": data_format}, input_shape=inputs.shape, ) test_utils.layer_test( keras.layers.ZeroPadding2D, kwargs={ "padding": ((1, 2), (3, 4)), "data_format": data_format, }, input_shape=inputs.shape, ) # correctness test with self.cached_session(): layer = keras.layers.ZeroPadding2D( padding=(2, 2), data_format=data_format ) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if tf.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) if data_format == "channels_last": for offset 
in [0, 1, -1, -2]: np.testing.assert_allclose(np_output[:, offset, :, :], 0.0) np.testing.assert_allclose(np_output[:, :, offset, :], 0.0) np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.0) elif data_format == "channels_first": for offset in [0, 1, -1, -2]: np.testing.assert_allclose(np_output[:, :, offset, :], 0.0) np.testing.assert_allclose(np_output[:, :, :, offset], 0.0) np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.0) layer = keras.layers.ZeroPadding2D( padding=((1, 2), (3, 4)), data_format=data_format ) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if tf.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) if data_format == "channels_last": for top_offset in [0]: np.testing.assert_allclose( np_output[:, top_offset, :, :], 0.0 ) for bottom_offset in [-1, -2]: np.testing.assert_allclose( np_output[:, bottom_offset, :, :], 0.0 ) for left_offset in [0, 1, 2]: np.testing.assert_allclose( np_output[:, :, left_offset, :], 0.0 ) for right_offset in [-1, -2, -3, -4]: np.testing.assert_allclose( np_output[:, :, right_offset, :], 0.0 ) np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.0) elif data_format == "channels_first": for top_offset in [0]: np.testing.assert_allclose( np_output[:, :, top_offset, :], 0.0 ) for bottom_offset in [-1, -2]: np.testing.assert_allclose( np_output[:, :, bottom_offset, :], 0.0 ) for left_offset in [0, 1, 2]: np.testing.assert_allclose( np_output[:, :, :, left_offset], 0.0 ) for right_offset in [-1, -2, -3, -4]: np.testing.assert_allclose( np_output[:, :, :, right_offset], 0.0 ) np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.0) # test incorrect use with self.assertRaises(ValueError): keras.layers.ZeroPadding2D(padding=(1, 1, 1)) with self.assertRaises(ValueError): keras.layers.ZeroPadding2D(padding=None) @parameterized.named_parameters( ("channels_first", "channels_first"), ("channels_last", "channels_last") ) def test_zero_padding_3d(self, data_format): num_samples = 2 stack_size = 2 input_len_dim1 = 4 input_len_dim2 = 5 input_len_dim3 = 3 if data_format == "channels_first": inputs = np.ones( ( num_samples, stack_size, input_len_dim1, input_len_dim2, input_len_dim3, ) ) elif data_format == "channels_last": inputs = np.ones( ( num_samples, input_len_dim1, input_len_dim2, input_len_dim3, stack_size, ) ) with self.cached_session(): # basic test test_utils.layer_test( keras.layers.ZeroPadding3D, kwargs={"padding": (2, 2, 2), "data_format": data_format}, input_shape=inputs.shape, ) test_utils.layer_test( keras.layers.ZeroPadding3D, kwargs={ "padding": ((1, 2), (3, 4), (0, 2)), "data_format": data_format, }, input_shape=inputs.shape, ) with self.cached_session(): # correctness test layer = keras.layers.ZeroPadding3D( padding=(2, 2, 2), data_format=data_format ) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if tf.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) if data_format == "channels_last": for offset in [0, 1, -1, -2]: np.testing.assert_allclose( np_output[:, offset, :, :, :], 0.0 ) np.testing.assert_allclose( np_output[:, :, offset, :, :], 0.0 ) np.testing.assert_allclose( np_output[:, :, :, offset, :], 0.0 ) np.testing.assert_allclose( np_output[:, 2:-2, 2:-2, 2:-2, :], 1.0 ) elif data_format == "channels_first": for offset in [0, 1, -1, -2]: np.testing.assert_allclose( np_output[:, :, offset, :, :], 0.0 ) np.testing.assert_allclose( np_output[:, :, :, offset, :], 0.0 ) 
np.testing.assert_allclose( np_output[:, :, :, :, offset], 0.0 ) np.testing.assert_allclose( np_output[:, :, 2:-2, 2:-2, 2:-2], 1.0 ) layer = keras.layers.ZeroPadding3D( padding=((1, 2), (3, 4), (0, 2)), data_format=data_format ) layer.build(inputs.shape) output = layer(keras.backend.variable(inputs)) if tf.executing_eagerly(): np_output = output.numpy() else: np_output = keras.backend.eval(output) if data_format == "channels_last": for offset in [0]: np.testing.assert_allclose( np_output[:, offset, :, :, :], 0.0 ) for offset in [-1, -2]: np.testing.assert_allclose( np_output[:, offset, :, :, :], 0.0 ) for offset in [0, 1, 2]: np.testing.assert_allclose( np_output[:, :, offset, :, :], 0.0 ) for offset in [-1, -2, -3, -4]: np.testing.assert_allclose( np_output[:, :, offset, :, :], 0.0 ) for offset in [-1, -2]: np.testing.assert_allclose( np_output[:, :, :, offset, :], 0.0 ) np.testing.assert_allclose( np_output[:, 1:-2, 3:-4, 0:-2, :], 1.0 ) elif data_format == "channels_first": for offset in [0]: np.testing.assert_allclose( np_output[:, :, offset, :, :], 0.0 ) for offset in [-1, -2]: np.testing.assert_allclose( np_output[:, :, offset, :, :], 0.0 ) for offset in [0, 1, 2]: np.testing.assert_allclose( np_output[:, :, :, offset, :], 0.0 ) for offset in [-1, -2, -3, -4]: np.testing.assert_allclose( np_output[:, :, :, offset, :], 0.0 ) for offset in [-1, -2]: np.testing.assert_allclose( np_output[:, :, :, :, offset], 0.0 ) np.testing.assert_allclose( np_output[:, :, 1:-2, 3:-4, 0:-2], 1.0 ) # test incorrect use with self.assertRaises(ValueError): keras.layers.ZeroPadding3D(padding=(1, 1)) with self.assertRaises(ValueError): keras.layers.ZeroPadding3D(padding=None) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/reshaping/zero_padding_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/reshaping/zero_padding_test.py", "repo_id": "tf-keras", "token_count": 7701 }
180
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Long Short-Term Memory layer.""" import uuid import tensorflow.compat.v2 as tf from tf_keras import activations from tf_keras import backend from tf_keras import constraints from tf_keras import initializers from tf_keras import regularizers from tf_keras.engine import base_layer from tf_keras.engine.input_spec import InputSpec from tf_keras.layers.rnn import gru_lstm_utils from tf_keras.layers.rnn import rnn_utils from tf_keras.layers.rnn.base_rnn import RNN from tf_keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin from tf_keras.utils import tf_utils # isort: off from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export RECURRENT_DROPOUT_WARNING_MSG = ( "RNN `implementation=2` is not supported when `recurrent_dropout` is set. " "Using `implementation=1`." ) @keras_export("keras.layers.LSTMCell", v1=[]) class LSTMCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer): """Cell class for the LSTM layer. See [the TF-Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. This class processes one step within the whole time sequence input, whereas `tf.keras.layer.LSTM` processes the whole sequence. For example: >>> inputs = tf.random.normal([32, 10, 8]) >>> rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4)) >>> output = rnn(inputs) >>> print(output.shape) (32, 4) >>> rnn = tf.keras.layers.RNN( ... tf.keras.layers.LSTMCell(4), ... return_sequences=True, ... return_state=True) >>> whole_seq_output, final_memory_state, final_carry_state = rnn(inputs) >>> print(whole_seq_output.shape) (32, 10, 4) >>> print(final_memory_state.shape) (32, 4) >>> print(final_carry_state.shape) (32, 4) Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`. unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force `bias_initializer="zeros"`. 
            This is recommended in [Jozefowicz et
            al.](https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf).
        kernel_regularizer: Regularizer function applied to the `kernel`
            weights matrix. Default: `None`.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_regularizer: Regularizer function applied to the bias vector.
            Default: `None`.
        kernel_constraint: Constraint function applied to the `kernel`
            weights matrix. Default: `None`.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_constraint: Constraint function applied to the bias vector.
            Default: `None`.
        dropout: Float between 0 and 1. Fraction of the units to drop for the
            linear transformation of the inputs. Default: 0.
        recurrent_dropout: Float between 0 and 1. Fraction of the units to
            drop for the linear transformation of the recurrent state.
            Default: 0.

    Call arguments:
        inputs: A 2D tensor, with shape `[batch, feature]`.
        states: List of 2 tensors that correspond to the cell's units. Both
            of them have shape `[batch, units]`; the first tensor is the
            memory state from the previous time step, the second tensor is
            the carry state from the previous time step. For timestep 0, the
            initial state provided by the user will be fed to the cell.
        training: Python boolean indicating whether the layer should behave
            in training mode or in inference mode. Only relevant when
            `dropout` or `recurrent_dropout` is used.
    """

    def __init__(
        self,
        units,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        **kwargs,
    ):
        if units <= 0:
            raise ValueError(
                "Received an invalid value for argument `units`, "
                f"expected a positive integer, got {units}."
            )
        # By default use cached variable under v2 mode, see b/143699808.
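        # Note: caching the variable read on first use lets the RNN's inner
        # while_loop reuse one cached copy of the weights per step instead of
        # fetching the variable at every timestep; `build()` below wires this
        # up via `rnn_utils.caching_device`.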
if tf.compat.v1.executing_eagerly_outside_functions(): self._enable_caching_device = kwargs.pop( "enable_caching_device", True ) else: self._enable_caching_device = kwargs.pop( "enable_caching_device", False ) super().__init__(**kwargs) self.units = units self.activation = activations.get(activation) self.recurrent_activation = activations.get(recurrent_activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.recurrent_initializer = initializers.get(recurrent_initializer) self.bias_initializer = initializers.get(bias_initializer) self.unit_forget_bias = unit_forget_bias self.kernel_regularizer = regularizers.get(kernel_regularizer) self.recurrent_regularizer = regularizers.get(recurrent_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.recurrent_constraint = constraints.get(recurrent_constraint) self.bias_constraint = constraints.get(bias_constraint) self.dropout = min(1.0, max(0.0, dropout)) self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout)) implementation = kwargs.pop("implementation", 2) if self.recurrent_dropout != 0 and implementation != 1: logging.debug(RECURRENT_DROPOUT_WARNING_MSG) self.implementation = 1 else: self.implementation = implementation self.state_size = [self.units, self.units] self.output_size = self.units @tf_utils.shape_type_conversion def build(self, input_shape): super().build(input_shape) default_caching_device = rnn_utils.caching_device(self) input_dim = input_shape[-1] self.kernel = self.add_weight( shape=(input_dim, self.units * 4), name="kernel", initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, caching_device=default_caching_device, ) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units * 4), name="recurrent_kernel", initializer=self.recurrent_initializer, regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint, caching_device=default_caching_device, ) if self.use_bias: if self.unit_forget_bias: def bias_initializer(_, *args, **kwargs): return backend.concatenate( [ self.bias_initializer( (self.units,), *args, **kwargs ), initializers.get("ones")( (self.units,), *args, **kwargs ), self.bias_initializer( (self.units * 2,), *args, **kwargs ), ] ) else: bias_initializer = self.bias_initializer self.bias = self.add_weight( shape=(self.units * 4,), name="bias", initializer=bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, caching_device=default_caching_device, ) else: self.bias = None self.built = True def _compute_carry_and_output(self, x, h_tm1, c_tm1): """Computes carry and output using split kernels.""" x_i, x_f, x_c, x_o = x h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1 i = self.recurrent_activation( x_i + backend.dot(h_tm1_i, self.recurrent_kernel[:, : self.units]) ) f = self.recurrent_activation( x_f + backend.dot( h_tm1_f, self.recurrent_kernel[:, self.units : self.units * 2] ) ) c = f * c_tm1 + i * self.activation( x_c + backend.dot( h_tm1_c, self.recurrent_kernel[:, self.units * 2 : self.units * 3], ) ) o = self.recurrent_activation( x_o + backend.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3 :]) ) return c, o def _compute_carry_and_output_fused(self, z, c_tm1): """Computes carry and output using fused kernels.""" z0, z1, z2, z3 = z i = self.recurrent_activation(z0) f = self.recurrent_activation(z1) c = f * c_tm1 + i * self.activation(z2) o = self.recurrent_activation(z3) 
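        # Here z0..z3 are the fused pre-activations of the input, forget,
        # candidate and output gates, so c_t = f * c_{t-1} + i * act(z2);
        # the caller forms h = o * act(c_t).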
return c, o def call(self, inputs, states, training=None): h_tm1 = states[0] # previous memory state c_tm1 = states[1] # previous carry state dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4) rec_dp_mask = self.get_recurrent_dropout_mask_for_cell( h_tm1, training, count=4 ) if self.implementation == 1: if 0 < self.dropout < 1.0: inputs_i = inputs * dp_mask[0] inputs_f = inputs * dp_mask[1] inputs_c = inputs * dp_mask[2] inputs_o = inputs * dp_mask[3] else: inputs_i = inputs inputs_f = inputs inputs_c = inputs inputs_o = inputs k_i, k_f, k_c, k_o = tf.split( self.kernel, num_or_size_splits=4, axis=1 ) x_i = backend.dot(inputs_i, k_i) x_f = backend.dot(inputs_f, k_f) x_c = backend.dot(inputs_c, k_c) x_o = backend.dot(inputs_o, k_o) if self.use_bias: b_i, b_f, b_c, b_o = tf.split( self.bias, num_or_size_splits=4, axis=0 ) x_i = backend.bias_add(x_i, b_i) x_f = backend.bias_add(x_f, b_f) x_c = backend.bias_add(x_c, b_c) x_o = backend.bias_add(x_o, b_o) if 0 < self.recurrent_dropout < 1.0: h_tm1_i = h_tm1 * rec_dp_mask[0] h_tm1_f = h_tm1 * rec_dp_mask[1] h_tm1_c = h_tm1 * rec_dp_mask[2] h_tm1_o = h_tm1 * rec_dp_mask[3] else: h_tm1_i = h_tm1 h_tm1_f = h_tm1 h_tm1_c = h_tm1 h_tm1_o = h_tm1 x = (x_i, x_f, x_c, x_o) h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o) c, o = self._compute_carry_and_output(x, h_tm1, c_tm1) else: if 0.0 < self.dropout < 1.0: inputs = inputs * dp_mask[0] z = backend.dot(inputs, self.kernel) z += backend.dot(h_tm1, self.recurrent_kernel) if self.use_bias: z = backend.bias_add(z, self.bias) z = tf.split(z, num_or_size_splits=4, axis=1) c, o = self._compute_carry_and_output_fused(z, c_tm1) h = o * self.activation(c) return h, [h, c] def get_config(self): config = { "units": self.units, "activation": activations.serialize(self.activation), "recurrent_activation": activations.serialize( self.recurrent_activation ), "use_bias": self.use_bias, "kernel_initializer": initializers.serialize( self.kernel_initializer ), "recurrent_initializer": initializers.serialize( self.recurrent_initializer ), "bias_initializer": initializers.serialize(self.bias_initializer), "unit_forget_bias": self.unit_forget_bias, "kernel_regularizer": regularizers.serialize( self.kernel_regularizer ), "recurrent_regularizer": regularizers.serialize( self.recurrent_regularizer ), "bias_regularizer": regularizers.serialize(self.bias_regularizer), "kernel_constraint": constraints.serialize(self.kernel_constraint), "recurrent_constraint": constraints.serialize( self.recurrent_constraint ), "bias_constraint": constraints.serialize(self.bias_constraint), "dropout": self.dropout, "recurrent_dropout": self.recurrent_dropout, "implementation": self.implementation, } config.update(rnn_utils.config_for_enable_caching_device(self)) base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def get_initial_state(self, inputs=None, batch_size=None, dtype=None): return list( rnn_utils.generate_zero_filled_state_for_cell( self, inputs, batch_size, dtype ) ) @keras_export("keras.layers.LSTM", v1=[]) class LSTM(DropoutRNNCellMixin, RNN, base_layer.BaseRandomLayer): """Long Short-Term Memory layer - Hochreiter 1997. See [the TF-Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. Based on available runtime hardware and constraints, this layer will choose different implementations (cuDNN-based or pure-TensorFlow) to maximize the performance. 
    If a GPU is available and all the arguments to the layer meet the
    requirements of the cuDNN kernel (see below for details), the layer will
    use a fast cuDNN implementation.

    The requirements to use the cuDNN implementation are:

    1. `activation` == `tanh`
    2. `recurrent_activation` == `sigmoid`
    3. `recurrent_dropout` == 0
    4. `unroll` is `False`
    5. `use_bias` is `True`
    6. Inputs, if masking is used, are strictly right-padded.
    7. Eager execution is enabled in the outermost context.

    For example:

    >>> inputs = tf.random.normal([32, 10, 8])
    >>> lstm = tf.keras.layers.LSTM(4)
    >>> output = lstm(inputs)
    >>> print(output.shape)
    (32, 4)
    >>> lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True)
    >>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs)
    >>> print(whole_seq_output.shape)
    (32, 10, 4)
    >>> print(final_memory_state.shape)
    (32, 4)
    >>> print(final_carry_state.shape)
    (32, 4)

    Args:
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use. Default: hyperbolic tangent
            (`tanh`). If you pass `None`, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use for the recurrent
            step. Default: sigmoid (`sigmoid`). If you pass `None`, no
            activation is applied (ie. "linear" activation: `a(x) = x`).
        use_bias: Boolean (default `True`), whether the layer uses a bias
            vector.
        kernel_initializer: Initializer for the `kernel` weights matrix, used
            for the linear transformation of the inputs. Default:
            `glorot_uniform`.
        recurrent_initializer: Initializer for the `recurrent_kernel` weights
            matrix, used for the linear transformation of the recurrent
            state. Default: `orthogonal`.
        bias_initializer: Initializer for the bias vector. Default: `zeros`.
        unit_forget_bias: Boolean (default `True`). If True, add 1 to the
            bias of the forget gate at initialization. Setting it to true
            will also force `bias_initializer="zeros"`. This is recommended
            in [Jozefowicz et
            al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
        kernel_regularizer: Regularizer function applied to the `kernel`
            weights matrix. Default: `None`.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_regularizer: Regularizer function applied to the bias vector.
            Default: `None`.
        activity_regularizer: Regularizer function applied to the output of
            the layer (its "activation"). Default: `None`.
        kernel_constraint: Constraint function applied to the `kernel`
            weights matrix. Default: `None`.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_constraint: Constraint function applied to the bias vector.
            Default: `None`.
        dropout: Float between 0 and 1. Fraction of the units to drop for the
            linear transformation of the inputs. Default: 0.
        recurrent_dropout: Float between 0 and 1. Fraction of the units to
            drop for the linear transformation of the recurrent state.
            Default: 0.
        return_sequences: Boolean. Whether to return the last output in the
            output sequence, or the full sequence. Default: `False`.
        return_state: Boolean. Whether to return the last state in addition
            to the output. Default: `False`.
        go_backwards: Boolean (default `False`). If True, process the input
            sequence backwards and return the reversed sequence.
        stateful: Boolean (default `False`). If True, the last state for each
            sample at index i in a batch will be used as initial state for
            the sample of index i in the following batch.
        time_major: The shape format of the `inputs` and `outputs` tensors.
            If True, the inputs and outputs will be in shape
            `[timesteps, batch, feature]`, whereas in the False case, it will
            be `[batch, timesteps, feature]`. Using `time_major = True` is a
            bit more efficient because it avoids transposes at the beginning
            and end of the RNN calculation. However, most TensorFlow data is
            batch-major, so by default this layer accepts input and emits
            output in batch-major form.
        unroll: Boolean (default `False`). If True, the network will be
            unrolled, else a symbolic loop will be used. Unrolling can speed
            up an RNN, although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.

    Call arguments:
        inputs: A 3D tensor with shape `[batch, timesteps, feature]`.
        mask: Binary tensor of shape `[batch, timesteps]` indicating whether
            a given timestep should be masked (optional). An individual
            `True` entry indicates that the corresponding timestep should be
            utilized, while a `False` entry indicates that the corresponding
            timestep should be ignored. Defaults to `None`.
        training: Python boolean indicating whether the layer should behave
            in training mode or in inference mode. This argument is passed to
            the cell when calling it. This is only relevant if `dropout` or
            `recurrent_dropout` is used (optional). Defaults to `None`.
        initial_state: List of initial state tensors to be passed to the
            first call of the cell (optional, `None` causes creation of
            zero-filled initial state tensors). Defaults to `None`.
    """

    def __init__(
        self,
        units,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        time_major=False,
        unroll=False,
        **kwargs,
    ):
        # return_runtime is a flag for testing, which shows the real backend
        # implementation chosen by grappler in graph mode.
        self.return_runtime = kwargs.pop("return_runtime", False)
        implementation = kwargs.pop("implementation", 2)
        if implementation == 0:
            logging.warning(
                "`implementation=0` has been deprecated, "
                "and now defaults to `implementation=1`. "
                "Please update your layer call."
) if "enable_caching_device" in kwargs: cell_kwargs = { "enable_caching_device": kwargs.pop("enable_caching_device") } else: cell_kwargs = {} cell = LSTMCell( units, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, unit_forget_bias=unit_forget_bias, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, implementation=implementation, dtype=kwargs.get("dtype"), trainable=kwargs.get("trainable", True), name="lstm_cell", **cell_kwargs, ) super().__init__( cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, time_major=time_major, unroll=unroll, **kwargs, ) self.activity_regularizer = regularizers.get(activity_regularizer) self.input_spec = [InputSpec(ndim=3)] self.state_spec = [ InputSpec(shape=(None, dim)) for dim in (self.units, self.units) ] self._could_use_gpu_kernel = ( self.activation in (activations.tanh, tf.tanh) and self.recurrent_activation in (activations.sigmoid, tf.sigmoid) and recurrent_dropout == 0 and not unroll and use_bias and tf.compat.v1.executing_eagerly_outside_functions() ) if tf.config.list_logical_devices("GPU"): # Only show the message when there is GPU available, user will not # care about the cuDNN if there isn't any GPU. if self._could_use_gpu_kernel: logging.debug(gru_lstm_utils.CUDNN_AVAILABLE_MSG % self.name) else: logging.warning( gru_lstm_utils.CUDNN_NOT_AVAILABLE_MSG % self.name ) if gru_lstm_utils.use_new_gru_lstm_impl(): self._defun_wrapper = gru_lstm_utils.DefunWrapper( time_major, go_backwards, "lstm" ) def call(self, inputs, mask=None, training=None, initial_state=None): # The input should be dense, padded with zeros. If a ragged input is fed # into the layer, it is padded and the row lengths are used for masking. inputs, row_lengths = backend.convert_inputs_if_ragged(inputs) is_ragged_input = row_lengths is not None self._validate_args_if_ragged(is_ragged_input, mask) # LSTM does not support constants. Ignore it during process. inputs, initial_state, _ = self._process_inputs( inputs, initial_state, None ) if isinstance(mask, list): mask = mask[0] input_shape = backend.int_shape(inputs) timesteps = input_shape[0] if self.time_major else input_shape[1] if not self._could_use_gpu_kernel: # Fall back to use the normal LSTM. kwargs = {"training": training} self._maybe_reset_cell_dropout_mask(self.cell) def step(inputs, states): return self.cell(inputs, states, **kwargs) last_output, outputs, states = backend.rnn( step, inputs, initial_state, constants=None, go_backwards=self.go_backwards, mask=mask, unroll=self.unroll, input_length=row_lengths if row_lengths is not None else timesteps, time_major=self.time_major, zero_output_for_mask=self.zero_output_for_mask, return_all_outputs=self.return_sequences, ) runtime = gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_UNKNOWN) else: # Use the new defun approach for backend implementation swap. # Note that different implementations need to have same function # signature, eg, the tensor parameters need to have same shape and # dtypes. Since the cuDNN has an extra set of bias, those bias will # be passed to both normal and cuDNN implementations. 
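            # (On this branch the layer bypasses `self.cell` and calls the
            # module-level `standard_lstm` / `gpu_lstm` functions directly,
            # with the cell's weights passed as plain tensors.)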
self.reset_dropout_mask() dropout_mask = self.get_dropout_mask_for_cell( inputs, training, count=4 ) if dropout_mask is not None: inputs = inputs * dropout_mask[0] if gru_lstm_utils.use_new_gru_lstm_impl(): lstm_kwargs = { "inputs": inputs, "init_h": gru_lstm_utils.read_variable_value( initial_state[0] ), "init_c": gru_lstm_utils.read_variable_value( initial_state[1] ), "kernel": gru_lstm_utils.read_variable_value( self.cell.kernel ), "recurrent_kernel": gru_lstm_utils.read_variable_value( self.cell.recurrent_kernel ), "bias": gru_lstm_utils.read_variable_value(self.cell.bias), "mask": mask, "time_major": self.time_major, "go_backwards": self.go_backwards, "sequence_lengths": row_lengths, "zero_output_for_mask": self.zero_output_for_mask, } ( last_output, outputs, new_h, new_c, runtime, ) = self._defun_wrapper.defun_layer(**lstm_kwargs) else: gpu_lstm_kwargs = { "inputs": inputs, "init_h": gru_lstm_utils.read_variable_value( initial_state[0] ), "init_c": gru_lstm_utils.read_variable_value( initial_state[1] ), "kernel": gru_lstm_utils.read_variable_value( self.cell.kernel ), "recurrent_kernel": gru_lstm_utils.read_variable_value( self.cell.recurrent_kernel ), "bias": gru_lstm_utils.read_variable_value(self.cell.bias), "mask": mask, "time_major": self.time_major, "go_backwards": self.go_backwards, "sequence_lengths": row_lengths, "return_sequences": self.return_sequences, } normal_lstm_kwargs = gpu_lstm_kwargs.copy() normal_lstm_kwargs.update( { "zero_output_for_mask": self.zero_output_for_mask, } ) if tf.executing_eagerly(): device_type = gru_lstm_utils.get_context_device_type() can_use_gpu = ( # Either user specified GPU or unspecified but GPU is # available. ( device_type == gru_lstm_utils.GPU_DEVICE_NAME or ( device_type is None and tf.config.list_logical_devices("GPU") ) ) and gru_lstm_utils.is_cudnn_supported_inputs( mask, self.time_major, row_lengths ) ) # Under eager context, check the device placement and prefer # the GPU implementation when GPU is available. 
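                # `is_cudnn_supported_inputs` also requires any mask to be
                # strictly right-padded, since the cuDNN kernel consumes
                # sequence lengths rather than arbitrary per-step masks.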
if can_use_gpu: last_output, outputs, new_h, new_c, runtime = gpu_lstm( **gpu_lstm_kwargs ) else: ( last_output, outputs, new_h, new_c, runtime, ) = standard_lstm(**normal_lstm_kwargs) else: ( last_output, outputs, new_h, new_c, runtime, ) = lstm_with_backend_selection(**normal_lstm_kwargs) states = [new_h, new_c] if self.stateful: updates = [ tf.compat.v1.assign( self_state, tf.cast(state, self_state.dtype) ) for self_state, state in zip(self.states, states) ] self.add_update(updates) if self.return_sequences: output = backend.maybe_convert_to_ragged( is_ragged_input, outputs, row_lengths, go_backwards=self.go_backwards, ) else: output = last_output if self.return_state: return [output] + list(states) elif self.return_runtime: return output, runtime else: return output @property def units(self): return self.cell.units @property def activation(self): return self.cell.activation @property def recurrent_activation(self): return self.cell.recurrent_activation @property def use_bias(self): return self.cell.use_bias @property def kernel_initializer(self): return self.cell.kernel_initializer @property def recurrent_initializer(self): return self.cell.recurrent_initializer @property def bias_initializer(self): return self.cell.bias_initializer @property def unit_forget_bias(self): return self.cell.unit_forget_bias @property def kernel_regularizer(self): return self.cell.kernel_regularizer @property def recurrent_regularizer(self): return self.cell.recurrent_regularizer @property def bias_regularizer(self): return self.cell.bias_regularizer @property def kernel_constraint(self): return self.cell.kernel_constraint @property def recurrent_constraint(self): return self.cell.recurrent_constraint @property def bias_constraint(self): return self.cell.bias_constraint @property def dropout(self): return self.cell.dropout @property def recurrent_dropout(self): return self.cell.recurrent_dropout @property def implementation(self): return self.cell.implementation def get_config(self): config = { "units": self.units, "activation": activations.serialize(self.activation), "recurrent_activation": activations.serialize( self.recurrent_activation ), "use_bias": self.use_bias, "kernel_initializer": initializers.serialize( self.kernel_initializer ), "recurrent_initializer": initializers.serialize( self.recurrent_initializer ), "bias_initializer": initializers.serialize(self.bias_initializer), "unit_forget_bias": self.unit_forget_bias, "kernel_regularizer": regularizers.serialize( self.kernel_regularizer ), "recurrent_regularizer": regularizers.serialize( self.recurrent_regularizer ), "bias_regularizer": regularizers.serialize(self.bias_regularizer), "activity_regularizer": regularizers.serialize( self.activity_regularizer ), "kernel_constraint": constraints.serialize(self.kernel_constraint), "recurrent_constraint": constraints.serialize( self.recurrent_constraint ), "bias_constraint": constraints.serialize(self.bias_constraint), "dropout": self.dropout, "recurrent_dropout": self.recurrent_dropout, "implementation": self.implementation, } config.update(rnn_utils.config_for_enable_caching_device(self.cell)) base_config = super().get_config() del base_config["cell"] return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): if "implementation" in config and config["implementation"] == 0: config["implementation"] = 1 return cls(**config) def standard_lstm( inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask, time_major, go_backwards, sequence_lengths, 
    zero_output_for_mask,
    return_sequences,
):
    """LSTM with standard kernel implementation.

    This implementation can be run on all types of hardware.

    This implementation lifts out all the layer weights and makes them
    function parameters. It has the same number of tensor input params as the
    cuDNN counterpart. The RNN step logic has been simplified, e.g. dropout
    and mask are removed since the cuDNN implementation does not support
    them.

    Note that the first half of the bias tensor should be ignored by this
    impl. The cuDNN impl needs an extra set of input gate bias. In order to
    make both functions take parameters of the same shape, that extra set of
    bias is also fed here.

    Args:
        inputs: input tensor of LSTM layer.
        init_h: initial state tensor for the cell output.
        init_c: initial state tensor for the cell hidden state.
        kernel: weights for cell kernel.
        recurrent_kernel: weights for cell recurrent kernel.
        bias: weights for cell kernel bias and recurrent bias. Only recurrent
            bias is used in this case.
        mask: Boolean tensor for masking out the steps within sequence. An
            individual `True` entry indicates that the corresponding timestep
            should be utilized, while a `False` entry indicates that the
            corresponding timestep should be ignored.
        time_major: boolean, whether the inputs are in the format of
            [time, batch, feature] or [batch, time, feature].
        go_backwards: Boolean (default False). If True, process the input
            sequence backwards and return the reversed sequence.
        sequence_lengths: The lengths of all sequences coming from a variable
            length input, such as ragged tensors. If the input has a fixed
            timestep size, this should be None.
        zero_output_for_mask: Boolean, whether to output zero for masked
            timestep.
        return_sequences: Boolean. If True, return the recurrent outputs for
            all timesteps in the sequence. If False, only return the output
            for the last timestep (which consumes less memory).

    Returns:
        last_output: output tensor for the last timestep, which has shape
            [batch, units].
        outputs:
            - If `return_sequences=True`: output tensor for all timesteps,
              which has shape [batch, time, units].
            - Else, a tensor equal to `last_output` with shape
              [batch, 1, units]
        state_0: the cell output, which has same shape as init_h.
        state_1: the cell hidden state, which has same shape as init_c.
        runtime: constant string tensor which indicates the real runtime
            hardware. This value is for testing purposes and should not be
            used by the user.
    """
    input_shape = backend.int_shape(inputs)
    timesteps = input_shape[0] if time_major else input_shape[1]

    def step(cell_inputs, cell_states):
        """Step function that will be used by TF-Keras RNN backend."""
        h_tm1 = cell_states[0]  # previous memory state
        c_tm1 = cell_states[1]  # previous carry state

        z = backend.dot(cell_inputs, kernel)
        z += backend.dot(h_tm1, recurrent_kernel)
        z = backend.bias_add(z, bias)

        z0, z1, z2, z3 = tf.split(z, 4, axis=1)

        i = tf.sigmoid(z0)
        f = tf.sigmoid(z1)
        c = f * c_tm1 + i * tf.tanh(z2)
        o = tf.sigmoid(z3)

        h = o * tf.tanh(c)
        return h, [h, c]

    last_output, outputs, new_states = backend.rnn(
        step,
        inputs,
        [init_h, init_c],
        constants=None,
        unroll=False,
        time_major=time_major,
        mask=mask,
        go_backwards=go_backwards,
        input_length=(
            sequence_lengths if sequence_lengths is not None else timesteps
        ),
        zero_output_for_mask=zero_output_for_mask,
        return_all_outputs=return_sequences,
    )
    return (
        last_output,
        outputs,
        new_states[0],
        new_states[1],
        gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_CPU),
    )


def gpu_lstm(
    inputs,
    init_h,
    init_c,
    kernel,
    recurrent_kernel,
    bias,
    mask,
    time_major,
    go_backwards,
    sequence_lengths,
    return_sequences,
):
    """LSTM with either cuDNN or ROCm implementation, which is only
    available for GPU.

    Note that currently only right-padded data is supported; otherwise the
    result will be polluted by the unmasked data, which should be filtered
    out.

    Args:
        inputs: Input tensor of LSTM layer.
        init_h: Initial state tensor for the cell output.
        init_c: Initial state tensor for the cell hidden state.
        kernel: Weights for cell kernel.
        recurrent_kernel: Weights for cell recurrent kernel.
        bias: Weights for cell kernel bias and recurrent bias. Only recurrent
            bias is used in this case.
        mask: Boolean tensor for masking out the steps within sequence. An
            individual `True` entry indicates that the corresponding timestep
            should be utilized, while a `False` entry indicates that the
            corresponding timestep should be ignored.
        time_major: Boolean, whether the inputs are in the format of
            [time, batch, feature] or [batch, time, feature].
        go_backwards: Boolean (default False). If True, process the input
            sequence backwards and return the reversed sequence.
        sequence_lengths: The lengths of all sequences coming from a variable
            length input, such as ragged tensors. If the input has a fixed
            timestep size, this should be None.
        return_sequences: Boolean. If True, return the recurrent outputs for
            all timesteps in the sequence. If False, only return the output
            for the last timestep, matching the CPU function output format.

    Returns:
        last_output: Output tensor for the last timestep, which has shape
            [batch, units].
        outputs:
            - If `return_sequences=True`: output tensor for all timesteps,
              which has shape [batch, time, units].
            - Else, a tensor equal to `last_output` with shape
              [batch, 1, units]
        state_0: The cell output, which has same shape as init_h.
        state_1: The cell hidden state, which has same shape as init_c.
        runtime: Constant string tensor which indicates the real runtime
            hardware. This value is for testing purposes and should not be
            used by the user.
""" if mask is not None: sequence_lengths = gru_lstm_utils.calculate_sequence_by_mask( mask, time_major ) if not time_major and sequence_lengths is None: inputs = tf.transpose(inputs, perm=(1, 0, 2)) seq_axis, batch_axis = (0, 1) else: seq_axis, batch_axis = (0, 1) if time_major else (1, 0) # For init_h and init_c, cuDNN expects one more dim of num_layers before or # after batch dim for time major or batch major inputs respectively init_h = tf.expand_dims(init_h, axis=seq_axis) init_c = tf.expand_dims(init_c, axis=seq_axis) weights = tf.split(kernel, 4, axis=1) weights += tf.split(recurrent_kernel, 4, axis=1) # cuDNN has an extra set of bias for inputs, we disable them (setting to 0), # so that mathematically it is same as the canonical LSTM implementation. full_bias = tf.concat((tf.zeros_like(bias), bias), 0) if tf.sysconfig.get_build_info()["is_rocm_build"]: # ROCm MIOpen's weight sequence for LSTM is different from both # canonical and Cudnn format # MIOpen: [i, f, o, c] Cudnn/Canonical: [i, f, c, o] # i is input gate weights. # f is forget gate weights. # o is output gate weights. # c is cell gate weights. weights = [weights[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)] # full_bias is a tensor of shape (8*n,) full_bias = tf.split(full_bias, 8, axis=0) full_bias = [full_bias[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)] params = gru_lstm_utils.canonical_to_params( weights=weights, biases=tf.split(full_bias, 8), shape=tf.constant([-1]), transpose_weights=True, ) if sequence_lengths is not None: if go_backwards: # Three reversals are required. E.g., # normal input = [1, 2, 3, 0, 0] # where 0 need to be masked # reversed_input_to_cudnn = [3, 2, 1, 0, 0] # output_from_cudnn = [6, 5, 4, 0, 0] # expected_output = [0, 0, 6, 5 ,4] inputs = tf.reverse_sequence( inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis, ) outputs, h, c, _, _ = tf.raw_ops.CudnnRNNV3( input=inputs, input_h=init_h, input_c=init_c, params=params, is_training=True, rnn_mode="lstm", sequence_lengths=sequence_lengths, time_major=time_major, ) if go_backwards: outputs = tf.reverse_sequence( outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis, ) outputs = tf.reverse(outputs, axis=[seq_axis]) else: # # Fill the array with shape [batch] with value of max timesteps. # sequence_length = array_ops.fill([array_ops.shape(inputs)[1]], # array_ops.shape(inputs)[0]) if go_backwards: # Reverse axis 0 since the input is already convert to time major. inputs = tf.reverse(inputs, axis=[0]) outputs, h, c, _ = tf.raw_ops.CudnnRNN( input=inputs, input_h=init_h, input_c=init_c, params=params, is_training=True, rnn_mode="lstm", ) last_output = outputs[-1] if not time_major and sequence_lengths is None and return_sequences: outputs = tf.transpose(outputs, perm=[1, 0, 2]) h = tf.squeeze(h, axis=seq_axis) c = tf.squeeze(c, axis=seq_axis) # In the case of variable length input, the cudnn kernel will fill zeros for # the output, whereas the default keras behavior is to bring over the # previous output for t-1, so that in the return_sequence=False case, user # can quickly get the final effect output instead just 0s at the last # timestep. In order to mimic the default keras behavior, we copy the final # h state as the last_output, since it is numerically same as the output. 
    if sequence_lengths is not None:
        last_output = h

    # Match CPU return format
    if not return_sequences:
        outputs = tf.expand_dims(last_output, axis=0 if time_major else 1)

    return (
        last_output,
        outputs,
        h,
        c,
        gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_GPU),
    )


def lstm_with_backend_selection(
    inputs,
    init_h,
    init_c,
    kernel,
    recurrent_kernel,
    bias,
    mask,
    time_major,
    go_backwards,
    sequence_lengths,
    zero_output_for_mask,
    return_sequences,
):
    """Call the LSTM with optimized backend kernel selection.

    Under the hood, this function will create two TF functions: one with the
    most generic kernel, which can run under all device conditions, and a
    second one with the cuDNN-specific kernel, which can only run on GPU.

    The first function will be called with normal_lstm_params, while the
    second function is not called, but only registered in the graph. The
    Grappler will do the proper graph rewrite and swap in the optimized TF
    function based on the device placement.

    Args:
        inputs: Input tensor of LSTM layer.
        init_h: Initial state tensor for the cell output.
        init_c: Initial state tensor for the cell hidden state.
        kernel: Weights for cell kernel.
        recurrent_kernel: Weights for cell recurrent kernel.
        bias: Weights for cell kernel bias and recurrent bias. Only recurrent
            bias is used in this case.
        mask: Boolean tensor for masking out the steps within sequence. An
            individual `True` entry indicates that the corresponding timestep
            should be utilized, while a `False` entry indicates that the
            corresponding timestep should be ignored.
        time_major: Boolean, whether the inputs are in the format of
            [time, batch, feature] or [batch, time, feature].
        go_backwards: Boolean (default False). If True, process the input
            sequence backwards and return the reversed sequence.
        sequence_lengths: The lengths of all sequences coming from a variable
            length input, such as ragged tensors. If the input has a fixed
            timestep size, this should be None.
        zero_output_for_mask: Boolean, whether to output zero for masked
            timestep.
        return_sequences: Boolean. If True, return the recurrent outputs for
            all timesteps in the sequence. If False, only return the output
            for the last timestep (which consumes less memory).

    Returns:
        List of output tensors, same as standard_lstm.
    """
    params = {
        "inputs": inputs,
        "init_h": init_h,
        "init_c": init_c,
        "kernel": kernel,
        "recurrent_kernel": recurrent_kernel,
        "bias": bias,
        "mask": mask,
        "time_major": time_major,
        "go_backwards": go_backwards,
        "sequence_lengths": sequence_lengths,
        "zero_output_for_mask": zero_output_for_mask,
        "return_sequences": return_sequences,
    }

    def gpu_lstm_with_fallback(
        inputs,
        init_h,
        init_c,
        kernel,
        recurrent_kernel,
        bias,
        mask,
        time_major,
        go_backwards,
        sequence_lengths,
        zero_output_for_mask,
        return_sequences,
    ):
        """Use cuDNN kernel when mask is none or strictly right padded."""

        def cudnn_lstm_fn():
            return gpu_lstm(
                inputs=inputs,
                init_h=init_h,
                init_c=init_c,
                kernel=kernel,
                recurrent_kernel=recurrent_kernel,
                bias=bias,
                mask=mask,
                time_major=time_major,
                go_backwards=go_backwards,
                sequence_lengths=sequence_lengths,
                return_sequences=return_sequences,
            )

        def standard_lstm_fn():
            return standard_lstm(
                inputs=inputs,
                init_h=init_h,
                init_c=init_c,
                kernel=kernel,
                recurrent_kernel=recurrent_kernel,
                bias=bias,
                mask=mask,
                time_major=time_major,
                go_backwards=go_backwards,
                sequence_lengths=sequence_lengths,
                zero_output_for_mask=zero_output_for_mask,
                return_sequences=return_sequences,
            )

        return tf.__internal__.smart_cond.smart_cond(
            gru_lstm_utils.is_cudnn_supported_inputs(
                mask, time_major, sequence_lengths
            ),
            true_fn=cudnn_lstm_fn,
            false_fn=standard_lstm_fn,
        )

    if gru_lstm_utils.use_new_gru_lstm_impl():
        # Chooses the implementation dynamically based on the running device.
        (
            last_output,
            outputs,
            new_h,
            new_c,
            runtime,
        ) = tf.__internal__.execute_fn_for_device(
            {
                gru_lstm_utils.CPU_DEVICE_NAME: lambda: standard_lstm(**params),
                gru_lstm_utils.GPU_DEVICE_NAME: lambda: gpu_lstm_with_fallback(
                    **params
                ),
            },
            lambda: standard_lstm(**params),
        )
    else:
        # Each time a `tf.function` is called, we will give it a unique
        # identifiable API name, so that Grappler won't get confused when it
        # sees multiple LSTM layers added into the same graph, and it will be
        # able to pair up the different implementations across them.
        api_name = "lstm_" + str(uuid.uuid4())
        supportive_attribute = {
            "time_major": time_major,
            "go_backwards": go_backwards,
        }
        defun_standard_lstm = gru_lstm_utils.generate_defun_backend(
            api_name,
            gru_lstm_utils.CPU_DEVICE_NAME,
            standard_lstm,
            supportive_attribute,
        )
        defun_gpu_lstm = gru_lstm_utils.generate_defun_backend(
            api_name,
            gru_lstm_utils.GPU_DEVICE_NAME,
            gpu_lstm_with_fallback,
            supportive_attribute,
        )

        # Call the normal LSTM impl and register the cuDNN impl function. The
        # grappler will kick in during session execution to optimize the
        # graph.
        last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(
            **params
        )
        gru_lstm_utils.function_register(defun_gpu_lstm, **params)

    return last_output, outputs, new_h, new_c, runtime
tf-keras/tf_keras/layers/rnn/lstm.py/0
{ "file_path": "tf-keras/tf_keras/layers/rnn/lstm.py", "repo_id": "tf-keras", "token_count": 24696 }
181
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import copy
import warnings

import tensorflow.compat.v2 as tf

from tf_keras import backend
from tf_keras.engine import base_layer_utils
from tf_keras.engine import base_layer_v1 as base_layer
from tf_keras.legacy_tf_layers import variable_scope_shim
from tf_keras.mixed_precision import policy
from tf_keras.utils import tf_contextlib

# isort: off
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util.tf_export import keras_export

_KERAS_STYLE_SCOPE = False


@keras_export(
    v1=["keras.__internal__.legacy.layers.experimental.keras_style_scope"]
)
@tf_contextlib.contextmanager
def keras_style_scope():
    """Use Keras-style variable management.

    All tf.layers and tf RNN cells created in this scope use Keras-style
    variable management. Creating such layers with a scope= argument is
    disallowed, and reuse=True is disallowed.

    The purpose of this scope is to allow users of existing layers to slowly
    transition to a TF-Keras layers API without breaking existing
    functionality.

    One example of this is when using TensorFlow's RNN classes with Keras
    Models or Networks. Because TF-Keras models do not properly set variable
    scopes, users of RNNs may either accidentally share scopes between two
    different models, or get errors about variables that already exist.

    Example:

    ```python
    class RNNModel(tf.keras.Model):

      def __init__(self, name):
        super(RNNModel, self).__init__(name=name)
        self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
          [tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)])

      def call(self, input, state):
        return self.rnn(input, state)

    model_1 = RNNModel("model_1")
    model_2 = RNNModel("model_2")

    # OK
    output_1, next_state_1 = model_1(input, state)
    # Raises an error about trying to create an already existing variable.
    output_2, next_state_2 = model_2(input, state)
    ```

    The solution is to wrap the model construction and execution in a
    keras-style scope:

    ```python
    with keras_style_scope():
      model_1 = RNNModel("model_1")
      model_2 = RNNModel("model_2")

      # model_1 and model_2 are guaranteed to create their own variables.
      output_1, next_state_1 = model_1(input, state)
      output_2, next_state_2 = model_2(input, state)

      assert len(model_1.weights) > 0
      assert len(model_2.weights) > 0
      assert(model_1.weights != model_2.weights)
    ```

    Yields:
      A keras layer style scope.
    """
    global _KERAS_STYLE_SCOPE
    stack = _KERAS_STYLE_SCOPE
    _KERAS_STYLE_SCOPE = True
    try:
        yield
    finally:
        _KERAS_STYLE_SCOPE = stack


@keras_export(
    v1=["keras.__internal__.legacy.layers.experimental.set_keras_style"]
)
def set_keras_style():
    """Use Keras-style variable management.

    All tf.layers and tf RNN cells created after keras style has been enabled
    use Keras-style variable management.
Creating such layers with a scope= argument is disallowed, and reuse=True is disallowed. The purpose of this function is to allow users of existing layers to slowly transition to TF-Keras layers API without breaking existing functionality. For more details, see the documentation for `keras_style_scope`. Note, once keras style has been set, it is set globally for the entire program and cannot be unset. Example: ```python set_keras_style() model_1 = RNNModel(name="model_1") model_2 = RNNModel(name="model_2") # model_1 and model_2 are guaranteed to create their own variables. output_1, next_state_1 = model_1(input, state) output_2, next_state_2 = model_2(input, state) assert len(model_1.weights) > 0 assert len(model_2.weights) > 0 assert(model_1.weights != model_2.weights) ``` """ global _KERAS_STYLE_SCOPE _KERAS_STYLE_SCOPE = True def _is_in_keras_style_scope(): global _KERAS_STYLE_SCOPE return _KERAS_STYLE_SCOPE @keras_export(v1=["keras.__internal__.legacy.layers.Layer"]) class Layer(base_layer.Layer): """Base layer class. It is considered legacy, and we recommend the use of `tf.keras.layers.Layer` instead. Args: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: Default dtype of the layer's weights (default of `None` means use the type of the first input). Read-only properties: name: The name of the layer (string). dtype: Default dtype of the layer's weights (default of `None` means use the type of the first input). trainable_variables: List of trainable variables. non_trainable_variables: List of non-trainable variables. variables: List of all variables of this layer, trainable and non-trainable. updates: List of update ops of this layer. losses: List of losses added by this layer. trainable_weights: List of variables to be included in backprop. non_trainable_weights: List of variables that should not be included in backprop. weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order). Mutable properties: trainable: Whether the layer should be trained (boolean). input_spec: Optional (list of) `InputSpec` object(s) specifying the constraints on inputs that can be accepted by the layer. """ def __init__(self, trainable=True, name=None, dtype=None, **kwargs): # For backwards compatibility, legacy layers do not use # `ResourceVariable` by default. self._use_resource_variables = False scope = kwargs.pop("_scope", None) self._reuse = kwargs.pop("_reuse", None) # Avoid an incorrect lint error self._trainable_weights = [] self.built = False if dtype is None: # Indicates to infer dtype from inputs. When the V2 dtype behavior # is enabled, TF-Keras layers default their dtype to floatx instead, # so we pass an "_infer" policy to keep the old V1 behavior. 
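            # ("_infer" makes the layer adopt the dtype of its first input,
            # matching pre-2.0 tf.layers behavior; autocast is likewise
            # disabled below because legacy layers never cast their inputs.)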
dtype = policy.Policy("_infer") if "autocast" not in kwargs: kwargs["autocast"] = False # Mark that legacy layers should not be instrumented as TF-Keras usage self._disable_keras_instrumentation = True super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs) if _is_in_keras_style_scope(): if scope is not None: raise ValueError( "scope argument not allowed when keras style layers are " "enabled, but saw: {}".format(scope) ) if self._reuse is not None: raise ValueError( "reuse argument not allowed when keras style layers are " "enabled, but saw: {}".format(self._reuse) ) self._keras_style = True else: self._keras_style = False self._call_has_scope_arg = "scope" in self._call_spec.arg_names if scope: with tf.compat.v1.variable_scope(scope) as captured_scope: self._scope = captured_scope else: self._scope = None self._current_scope = None def apply(self, *args, **kwargs): return self(*args, **kwargs) # We no longer track graph in tf.layers layers. This property is only kept # to maintain API backward compatibility. @property def graph(self): warnings.warn( "`Layer.graph` is deprecated and " "will be removed in a future version. " "Please stop using this property because tf.layers layers no " "longer track their graph.", stacklevel=2, ) if tf.executing_eagerly(): raise RuntimeError( "Layer.graph not supported when executing eagerly." ) return None def _init_set_name(self, name): # Determine layer name (non-unique). if isinstance(name, tf.compat.v1.VariableScope): base_name = name.name self._name, _ = self._make_unique_name() else: base_name = name self._name = name if not name: self._name, base_name = self._make_unique_name() self._base_name = base_name def _make_unique_name( self, name_uid_map=None, avoid_names=None, namespace="", zero_based=False, ): base_name = base_layer.to_snake_case(self.__class__.__name__) name = backend.unique_object_name( base_name, name_uid_map=name_uid_map, avoid_names=avoid_names, namespace=namespace, zero_based=zero_based, ) return (name, base_name) @property def scope_name(self): if not self._scope: raise ValueError( 'No name available for layer scope because the layer "' + self._name + '" has not been used yet. The scope name ' + " is determined the first time the layer instance is " + "called. You must therefore call the layer before " + "querying `scope_name`." ) return self._scope.name def add_loss(self, losses, inputs=None): previous_losses_length = len(self._losses) previous_callable_losses_length = len(self._callable_losses) super().add_loss(losses, inputs=inputs) if not tf.executing_eagerly(): # TODO(fchollet): deprecate collection below. new_losses = self._losses[previous_losses_length:] new_callable_losses = self._callable_losses[ previous_callable_losses_length: ] for regularizer in new_callable_losses: loss_tensor = regularizer() if loss_tensor is not None: new_losses.append(loss_tensor) _add_elements_to_collection( new_losses, tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES ) def _name_scope(self): """Determines op naming for the Layer.""" if self._keras_style: return super()._name_scope() return self._current_scope.original_name_scope def _set_scope(self, scope=None): if self._scope is None: # If constructed with _scope=None, lazy setting of scope. 
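            # Capturing the scope at first use rather than at construction
            # places the layer's variables under whatever variable scope is
            # active when the layer is first called.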
            if self._reuse:
                with tf.compat.v1.variable_scope(
                    scope if scope is not None else self._base_name
                ) as captured_scope:
                    self._scope = captured_scope
            else:
                with tf.compat.v1.variable_scope(
                    scope, default_name=self._base_name
                ) as captured_scope:
                    self._scope = captured_scope

    def add_weight(
        self,
        name,
        shape,
        dtype=None,
        initializer=None,
        regularizer=None,
        trainable=None,
        constraint=None,
        use_resource=None,
        synchronization=tf.VariableSynchronization.AUTO,
        aggregation=tf.compat.v1.VariableAggregation.NONE,
        partitioner=None,
        **kwargs
    ):
        """Adds a new variable to the layer, or gets an existing one;
        returns it.

        Args:
            name: variable name.
            shape: variable shape.
            dtype: The type of the variable. Defaults to `self.dtype` or
                `float32`.
            initializer: initializer instance (callable).
            regularizer: regularizer instance (callable).
            trainable: whether the variable should be part of the layer's
                "trainable_variables" (e.g. variables, biases) or
                "non_trainable_variables" (e.g. BatchNorm mean, stddev).
                Note, if the current variable scope is marked as
                non-trainable then this parameter is ignored and any added
                variables are also marked as non-trainable. `trainable`
                becomes `True` unless `synchronization` is set to `ON_READ`.
                Defaults to `True`.
            constraint: constraint instance (callable).
            use_resource: Whether to use `ResourceVariable`.
            synchronization: Indicates when a distributed variable will be
                aggregated. Accepted values are constants defined in the
                class `tf.VariableSynchronization`. By default the
                synchronization is set to `AUTO` and the current
                `DistributionStrategy` chooses when to synchronize. If
                `synchronization` is set to `ON_READ`, `trainable` must not
                be set to `True`.
            aggregation: Indicates how a distributed variable will be
                aggregated. Accepted values are constants defined in the
                class `tf.VariableAggregation`.
            partitioner: (optional) partitioner instance (callable). If
                provided, when the requested variable is created it will be
                split into multiple partitions according to `partitioner`.
                In this case, an instance of `PartitionedVariable` is
                returned. Available partitioners include
                `tf.compat.v1.fixed_size_partitioner` and
                `tf.compat.v1.variable_axis_size_partitioner`. For more
                details, see the documentation of
                `tf.compat.v1.get_variable` and the "Variable Partitioners
                and Sharding" section of the API guide.
            **kwargs: Additional keyword arguments.

        Returns:
            The created variable. Usually either a `Variable` or
            `ResourceVariable` instance. If `partitioner` is not `None`, a
            `PartitionedVariable` instance is returned.

        Raises:
            RuntimeError: If called with partitioned variable regularization
                and eager execution is enabled.
            ValueError: When trainable has been set to True with
                synchronization set as `ON_READ`.
        """
        for kwarg in kwargs:
            if kwarg != "experimental_autocast":
                raise TypeError("Unknown keyword argument:", kwarg)
        if self._keras_style:
            return super().add_weight(
                name=name,
                shape=shape,
                dtype=dtype,
                initializer=initializer,
                regularizer=regularizer,
                trainable=trainable and self.trainable,
                constraint=constraint,
                use_resource=use_resource,
                synchronization=tf.VariableSynchronization.AUTO,
                aggregation=tf.compat.v1.VariableAggregation.NONE,
                partitioner=partitioner,
                **kwargs
            )
        if synchronization == tf.VariableSynchronization.ON_READ:
            if trainable:
                raise ValueError(
                    "Synchronization value can be set to "
                    "VariableSynchronization.ON_READ only for non-trainable "
                    "variables. You have specified trainable=True and "
                    "synchronization=VariableSynchronization.ON_READ."
) else: # Set trainable to be false when variable is to be synced on # read. trainable = False elif trainable is None: trainable = True def _should_add_regularizer(variable, existing_variable_set): if base_layer_utils.is_split_variable(variable): for var in variable: if var in existing_variable_set: return False return True else: return variable not in existing_variable_set init_graph = None if not tf.executing_eagerly(): default_graph = tf.compat.v1.get_default_graph() if default_graph.building_function: with tf.init_scope(): # Retrieve the variables from the graph into which variables # will be lifted; if initialization ops will be lifted into # the eager context, then there is nothing to retrieve, # since variable collections are not supported when eager # execution is enabled. if not tf.executing_eagerly(): init_graph = tf.compat.v1.get_default_graph() existing_variables = set( tf.compat.v1.global_variables() ) else: # Initialization ops will not be lifted out of the default # graph. init_graph = default_graph existing_variables = set(tf.compat.v1.global_variables()) if dtype is None: dtype = self.dtype or tf.float32 self._set_scope(None) reuse = self.built or self._reuse prev_len_trainable = len(self._trainable_weights) with tf.compat.v1.variable_scope( self._scope, reuse=reuse, auxiliary_name_scope=False ) as scope: self._current_scope = scope with backend.name_scope(self._name_scope()): use_resource = ( use_resource or self._use_resource_variables or scope.use_resource ) if initializer is None: initializer = scope.initializer variable = super().add_weight( name, shape, dtype=tf.as_dtype(dtype), initializer=initializer, trainable=trainable and self.trainable, constraint=constraint, partitioner=partitioner, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation, getter=tf.compat.v1.get_variable, **kwargs ) if regularizer: if ( tf.compat.v1.executing_eagerly_outside_functions() or _should_add_regularizer(variable, existing_variables) ): self._handle_weight_regularization( name, variable, regularizer ) var_store = vs._get_default_variable_store() # When the shim to get variable scope working in TF2 is # used, We need to explicitly make the shim track the # regularization losses as the collections will not be # accessible. if hasattr(var_store, "add_regularizer"): var_store.add_regularizer(variable, regularizer) if init_graph is not None: # Handle edge case where a custom getter has overridden # `trainable`. There is one known occurrence of this, in # unit test testBasicRNNCellNotTrainable in # contrib.rnn.python.kernel_tests.core_rnn_cell_test with init_graph.as_default(): trainable_variables = tf.compat.v1.trainable_variables() if ( trainable and self.trainable and variable not in trainable_variables ): # A custom getter / variable scope overrode the # trainable flag. extra_trainable_vars = self._trainable_weights[ prev_len_trainable: ] self._trainable_weights = self._trainable_weights[ :prev_len_trainable ] self._non_trainable_weights += extra_trainable_vars return variable def __call__(self, inputs, *args, **kwargs): """Wraps `call`, applying pre- and post-processing steps. Args: inputs: input tensor(s). *args: additional positional arguments to be passed to `self.call`. **kwargs: additional keyword arguments to be passed to `self.call`. **Note**: kwarg `scope` is reserved for use by the layer. Returns: Output tensor(s). 
Note: - If the layer's `call` method takes a `scope` keyword argument, this argument will be automatically set to the current variable scope. - If the layer's `call` method takes a `mask` argument (as some Keras layers do), its default value will be set to the mask generated for `inputs` by the previous layer (if `input` did come from a layer that generated a corresponding mask, i.e. if it came from a TF-Keras layer with masking support. Raises: ValueError: if the layer's `call` method returns None (an invalid value). """ scope = kwargs.pop("scope", None) if self._keras_style: if scope is not None: raise ValueError( "scope argument not allowed when keras style layers are " "enabled, but saw: {}".format(scope) ) return super().__call__(inputs, *args, **kwargs) self._set_scope(scope) if self.built: try: # Some classes which inherit from Layer do not use its # constructor, so rather than initializing to None we check for # an AttributeError. scope_context_manager = self._always_reuse_variable_scope except AttributeError: scope_context_manager = None if scope_context_manager is None: # From this point we will always set reuse=True, so create a # "final" variable scope with this setting. We avoid re-creating # variable scopes after this point as an optimization. scope_context_manager = tf.compat.v1.variable_scope( self._scope, reuse=True, auxiliary_name_scope=False ) # Do not cache variable scopes if Eager mode is enabled. If # Eager mode is enabled then we don't want to reuse scopes # because the cached scope might be from a FuncGraph or Eager # scope we are no longer in. if not tf.compat.v1.executing_eagerly_outside_functions(): self._always_reuse_variable_scope = scope_context_manager else: scope_context_manager = tf.compat.v1.variable_scope( self._scope, reuse=self._reuse, auxiliary_name_scope=False ) with scope_context_manager as scope: self._current_scope = scope try: call_has_scope_arg = self._call_has_scope_arg except AttributeError: self._call_spec.arg_names = variable_scope_shim.fn_args( self.call ) self._call_has_scope_arg = "scope" in self._call_spec.arg_names call_has_scope_arg = self._call_has_scope_arg if call_has_scope_arg: kwargs["scope"] = scope # Actually call layer outputs = super().__call__(inputs, *args, **kwargs) if not tf.executing_eagerly(): # Update global default collections. _add_elements_to_collection( self.updates, tf.compat.v1.GraphKeys.UPDATE_OPS ) return outputs def __deepcopy__(self, memo): no_copy = set(["_graph", "_thread_local", "_metrics_lock"]) shallow_copy = set(["_scope", "_always_reuse_variable_scope"]) cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): if k in no_copy: setattr(result, k, v) elif k in shallow_copy: setattr(result, k, copy.copy(v)) elif base_layer.is_tensor_or_tensor_list(v): setattr(result, k, v) else: setattr(result, k, copy.deepcopy(v, memo)) return result def __setattr__(self, value, name): # By-pass the automatic dependency tracking performed by the parent # Layer. super(tf.__internal__.tracking.Trackable, self).__setattr__(value, name) @property def _is_legacy_layer(self): """Used by keras to check compatibility. This should not be overridden.""" return True def _add_elements_to_collection(elements, collection_list): if tf.executing_eagerly(): raise RuntimeError( "Using collections from Layers not supported in Eager " "mode. 
Tried to add %s to %s" % (elements, collection_list) ) elements = tf.nest.flatten(elements) collection_list = tf.nest.flatten(collection_list) for name in collection_list: collection = tf.compat.v1.get_collection_ref(name) collection_set = {id(e) for e in collection} for element in elements: if id(element) not in collection_set: collection.append(element)
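
# --- Hedged usage sketch (editorial addendum, not part of the original
# module) --- A minimal illustration of the `add_weight`/`__call__` machinery
# above: a legacy-style layer creates a variable in `build`, and calling the
# same instance twice routes through the cached reuse=True variable scope.
# `MyLegacyDense` is a hypothetical name used only for this sketch, which
# assumes a TF1-style graph workflow.
if __name__ == "__main__":
    import tensorflow.compat.v1 as tf1

    class MyLegacyDense(Layer):
        def build(self, input_shape):
            # One kernel variable, created under this layer's variable scope.
            self.kernel = self.add_weight(
                "kernel", shape=[int(input_shape[-1]), 4]
            )
            self.built = True

        def call(self, inputs):
            return tf1.matmul(inputs, self.kernel)

    with tf1.Graph().as_default():
        layer = MyLegacyDense(name="my_dense")
        x = tf1.placeholder(tf1.float32, [None, 3])
        y1 = layer(x)  # Creates "my_dense/kernel".
        y2 = layer(x)  # Reuses the cached, reuse=True variable scope.
        assert y1.shape.as_list() == [None, 4]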
tf-keras/tf_keras/legacy_tf_layers/base.py/0
{ "file_path": "tf-keras/tf_keras/legacy_tf_layers/base.py", "repo_id": "tf-keras", "token_count": 11943 }
182
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Description: # Contains the TF-Keras metrics submodule. # Placeholder: load unaliased py_library load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test") load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test") # buildifier: disable=same-origin-load package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], default_visibility = [ "//tf_keras:friends", "//third_party/tensorflow/python/feature_column:__subpackages__", "//third_party/tensorflow/python/tpu:__subpackages__", "//third_party/tensorflow_estimator:__subpackages__", ], licenses = ["notice"], ) py_library( name = "metrics", srcs = [ "__init__.py", "accuracy_metrics.py", "base_metric.py", "confusion_metrics.py", "f_score_metrics.py", "hinge_metrics.py", "iou_metrics.py", "probabilistic_metrics.py", "py_metric.py", "regression_metrics.py", ], srcs_version = "PY3", deps = [ "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras:activations", "//tf_keras:backend", "//tf_keras:losses", "//tf_keras/distribute", "//tf_keras/dtensor", "//tf_keras/dtensor:utils", "//tf_keras/engine:base_layer", "//tf_keras/engine:base_layer_utils", "//tf_keras/utils:generic_utils", "//tf_keras/utils:metrics_utils", "//tf_keras/utils:tf_utils", ], ) tf_py_test( name = "metrics_functional_test", size = "small", srcs = ["metrics_functional_test.py"], python_version = "PY3", deps = [ "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "accuracy_metrics_test", size = "medium", srcs = ["accuracy_metrics_test.py"], python_version = "PY3", shard_count = 4, deps = [ ":metrics", "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/layers", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], ) tf_py_test( name = "confusion_metrics_test", size = "medium", srcs = ["confusion_metrics_test.py"], python_version = "PY3", shard_count = 4, deps = [ ":metrics", "//:expect_absl_installed", # absl/testing:parameterized "//:expect_numpy_installed", "//:expect_scipy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/layers", "//tf_keras/models", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", "//tf_keras/utils:metrics_utils", ], ) tf_py_test( name = "f_score_metrics_test", size = "medium", srcs = ["f_score_metrics_test.py"], python_version = "PY3", shard_count = 4, deps = [ ":metrics", "//:expect_absl_installed", # absl/testing:parameterized "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], ) tf_py_test( name = "hinge_metrics_test", size = "medium", srcs = ["hinge_metrics_test.py"], python_version = "PY3", shard_count = 4, deps = [ ":metrics", 
"//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/layers", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], ) tf_py_test( name = "iou_metrics_test", size = "medium", srcs = ["iou_metrics_test.py"], python_version = "PY3", shard_count = 4, deps = [ ":metrics", "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/layers", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], ) tf_py_test( name = "probabilistic_metrics_test", size = "medium", srcs = ["probabilistic_metrics_test.py"], python_version = "PY3", shard_count = 4, deps = [ ":metrics", "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/layers", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], ) tf_py_test( name = "regression_metrics_test", size = "medium", srcs = ["regression_metrics_test.py"], python_version = "PY3", shard_count = 4, deps = [ ":metrics", "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "base_metric_test", size = "medium", srcs = ["base_metric_test.py"], python_version = "PY3", shard_count = 4, deps = [ ":metrics", "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/layers", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], ) tf_py_test( name = "metrics_correctness_test", size = "medium", srcs = ["metrics_correctness_test.py"], python_version = "PY3", shard_count = 4, deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/testing_infra:test_combinations", ], ) cuda_py_test( name = "py_metric_test", size = "medium", srcs = ["py_metric_test.py"], shard_count = 2, tags = [ "no_windows", ], deps = [ ":metrics", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/layers", "//tf_keras/testing_infra:test_combinations", "//tf_keras/testing_infra:test_utils", ], )
tf-keras/tf_keras/metrics/BUILD/0
{ "file_path": "tf-keras/tf_keras/metrics/BUILD", "repo_id": "tf-keras", "token_count": 3305 }
183
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Probabilistic metrics (based on Entropy).""" from typing import Optional from typing import Union import tensorflow.compat.v2 as tf from tf_keras.dtensor import utils as dtensor_utils from tf_keras.losses import binary_crossentropy from tf_keras.losses import categorical_crossentropy from tf_keras.losses import kullback_leibler_divergence from tf_keras.losses import poisson from tf_keras.losses import sparse_categorical_crossentropy from tf_keras.metrics import base_metric # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.metrics.Poisson") class Poisson(base_metric.MeanMetricWrapper): """Computes the Poisson score between `y_true` and `y_pred`. 🐟 🐟 🐟 It is defined as: `poisson_score = y_pred - y_true * log(y_pred)`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Poisson() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.49999997 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.99999994 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=[tf.keras.metrics.Poisson()]) ``` """ @dtensor_utils.inject_mesh def __init__(self, name="poisson", dtype=None): super().__init__(poisson, name, dtype=dtype) @keras_export("keras.metrics.KLDivergence") class KLDivergence(base_metric.MeanMetricWrapper): """Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`. `metric = y_true * log(y_true / y_pred)` Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.KLDivergence() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 0.45814306 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.9162892 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=[tf.keras.metrics.KLDivergence()]) ``` """ @dtensor_utils.inject_mesh def __init__(self, name="kullback_leibler_divergence", dtype=None): super().__init__(kullback_leibler_divergence, name, dtype=dtype) @keras_export("keras.metrics.BinaryCrossentropy") class BinaryCrossentropy(base_metric.MeanMetricWrapper): """Computes the crossentropy metric between the labels and predictions. This is the crossentropy metric class to be used when there are only two label classes (0 and 1). Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. from_logits: (Optional) Whether output is expected to be a logits tensor. 
        By default, we consider that output encodes a probability
        distribution.
      label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
        smoothed, meaning the confidence on label values is relaxed.
        e.g. `label_smoothing=0.2` means that we will use a value of `0.1`
        for label `0` and `0.9` for label `1`.

    Standalone usage:

    >>> m = tf.keras.metrics.BinaryCrossentropy()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
    >>> m.result().numpy()
    0.81492424

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.9162905

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[tf.keras.metrics.BinaryCrossentropy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self,
        name="binary_crossentropy",
        dtype=None,
        from_logits=False,
        label_smoothing=0,
    ):
        super().__init__(
            binary_crossentropy,
            name,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
        )


@keras_export("keras.metrics.CategoricalCrossentropy")
class CategoricalCrossentropy(base_metric.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    This is the crossentropy metric class to be used when there are multiple
    label classes (2 or more). Here we assume that labels are given as a
    `one_hot` representation. e.g., when label values are [2, 0, 1],
    `y_true` = [[0, 0, 1], [1, 0, 0], [0, 1, 0]].

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      from_logits: (Optional) Whether output is expected to be a logits tensor.
        By default, we consider that output encodes a probability
        distribution.
      label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
        smoothed, meaning the confidence on label values is relaxed. e.g.
        `label_smoothing=0.2` means that we will use a value of `0.1` for
        label `0` and `0.9` for label `1`.
      axis: (Optional) The dimension along which entropy is computed.
        Defaults to `-1`.

    Standalone usage:

    >>> # EPSILON = 1e-7, y = y_true, y` = y_pred
    >>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    >>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    >>> # xent = -sum(y * log(y'), axis = -1)
    >>> #      = -((log 0.95), (log 0.1))
    >>> #      = [0.051, 2.302]
    >>> # Reduced xent = (0.051 + 2.302) / 2
    >>> m = tf.keras.metrics.CategoricalCrossentropy()
    >>> m.update_state([[0, 1, 0], [0, 0, 1]],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> m.result().numpy()
    1.1769392

    >>> m.reset_state()
    >>> m.update_state([[0, 1, 0], [0, 0, 1]],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
    ...                sample_weight=tf.constant([0.3, 0.7]))
    >>> m.result().numpy()
    1.6271976

    Usage with `compile()` API:

    ```python
    model.compile(
      optimizer='sgd',
      loss='categorical_crossentropy',
      metrics=[tf.keras.metrics.CategoricalCrossentropy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self,
        name="categorical_crossentropy",
        dtype=None,
        from_logits=False,
        label_smoothing=0,
        axis=-1,
    ):
        super().__init__(
            categorical_crossentropy,
            name,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
            axis=axis,
        )


@keras_export("keras.metrics.SparseCategoricalCrossentropy")
class SparseCategoricalCrossentropy(base_metric.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    Use this crossentropy metric when there are two or more label classes.
    We expect labels to be provided as integers. If you want to provide
    labels using `one-hot` representation, please use
    `CategoricalCrossentropy` metric. There should be `# classes` floating
    point values per feature for `y_pred` and a single floating point value
    per feature for `y_true`.

    In the snippet below, there is a single floating point value per example
    for `y_true` and `# classes` floating point values per example for
    `y_pred`. The shape of `y_true` is `[batch_size]` and the shape of
    `y_pred` is `[batch_size, num_classes]`.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      from_logits: (Optional) Whether output is expected to be a logits tensor.
        By default, we consider that output encodes a probability
        distribution.
      ignore_class: Optional integer. The ID of a class to be ignored during
        metric computation. This is useful, for example, in segmentation
        problems featuring a "void" class (commonly -1 or 255) in
        segmentation maps. By default (`ignore_class=None`), all classes are
        considered.
      axis: (Optional) The dimension along which entropy is computed.
        Defaults to `-1`.

    Standalone usage:

    >>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
    >>> # logits = log(y_pred)
    >>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
    >>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    >>> # xent = -sum(y * log(softmax), 1)
    >>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
    >>> #                 [-2.3026, -0.2231, -2.3026]]
    >>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
    >>> # xent = [0.0513, 2.3026]
    >>> # Reduced xent = (0.0513 + 2.3026) / 2
    >>> m = tf.keras.metrics.SparseCategoricalCrossentropy()
    >>> m.update_state([1, 2],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> m.result().numpy()
    1.1769392

    >>> m.reset_state()
    >>> m.update_state([1, 2],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
    ...                sample_weight=tf.constant([0.3, 0.7]))
    >>> m.result().numpy()
    1.6271976

    Usage with `compile()` API:

    ```python
    model.compile(
      optimizer='sgd',
      loss='sparse_categorical_crossentropy',
      metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self,
        name: str = "sparse_categorical_crossentropy",
        dtype: Optional[Union[str, tf.dtypes.DType]] = None,
        from_logits: bool = False,
        ignore_class: Optional[int] = None,
        axis: int = -1,
    ):
        super().__init__(
            sparse_categorical_crossentropy,
            name,
            dtype=dtype,
            from_logits=from_logits,
            ignore_class=ignore_class,
            axis=axis,
        )


_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING = """Accumulates metric statistics.

For sparse categorical metrics, the shapes of `y_true` and `y_pred` are
different.

Args:
  y_true: Ground truth label values. shape = `[batch_size, d0, .. dN-1]` or
    shape = `[batch_size, d0, .. dN-1, 1]`.
  y_pred: The predicted probability values. shape = `[batch_size, d0, .. dN]`.
  sample_weight: Optional `sample_weight` acts as a coefficient for the
    metric. If a scalar is provided, then the metric is simply scaled by the
    given value. If `sample_weight` is a tensor of size `[batch_size]`, then
    the metric for each sample of the batch is rescaled by the corresponding
    element in the `sample_weight` vector. If the shape of `sample_weight` is
    `[batch_size, d0, .. dN-1]` (or can be broadcasted to this shape), then
    each metric element of `y_pred` is scaled by the corresponding value of
    `sample_weight`. (Note on `dN-1`: all metric functions reduce by 1
    dimension, usually the last axis (-1)).

Returns:
  Update op.
"""

SparseCategoricalCrossentropy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)
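
# --- Hedged usage sketch (editorial addendum, not part of the original
# module) --- This illustrates the shape contract documented in
# `_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING` above: `y_true` carries one
# integer label per example, `y_pred` carries `num_classes` probabilities per
# example, and `sample_weight` may be given per example. The values mirror
# the doctest in `SparseCategoricalCrossentropy` and are only indicative.
if __name__ == "__main__":
    m = SparseCategoricalCrossentropy()
    y_true = tf.constant([1, 2])  # shape [batch_size]
    y_pred = tf.constant(
        [[0.05, 0.95, 0.0], [0.1, 0.8, 0.1]]
    )  # shape [batch_size, num_classes]
    m.update_state(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7]))
    print(float(m.result()))  # ~1.6272, the weighted mean crossentropy.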
tf-keras/tf_keras/metrics/probabilistic_metrics.py/0
{ "file_path": "tf-keras/tf_keras/metrics/probabilistic_metrics.py", "repo_id": "tf-keras", "token_count": 5081 }
184
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests TF-Keras integration with enable_mixed_precision_graph_rewrite()."""

import os

import tensorflow.compat.v2 as tf

from tf_keras.mixed_precision import (
    loss_scale_optimizer as loss_scale_optimizer_v2,
)
from tf_keras.mixed_precision import policy
from tf_keras.optimizers.legacy import gradient_descent as gradient_descent_v2
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils


class MixedPrecisionTest(test_combinations.TestCase):
    IGNORE_PERF_VAR = "TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE"

    def setUp(self):
        super().setUp()
        # Enable the tests to be run on pre-Volta GPUs by telling the grappler
        # pass to ignore performance and always transform the graph.
        self._original_ignore_perf_value = os.getenv(self.IGNORE_PERF_VAR)
        os.environ[self.IGNORE_PERF_VAR] = "1"

    def tearDown(self):
        # Set the IGNORE_PERF_VAR variable back to its original value.
        if self._original_ignore_perf_value is not None:
            os.environ[self.IGNORE_PERF_VAR] = self._original_ignore_perf_value
        else:
            del os.environ[self.IGNORE_PERF_VAR]

        tf.compat.v1.mixed_precision.disable_mixed_precision_graph_rewrite()
        super().tearDown()

    @test_combinations.generate(
        test_combinations.combine(mode=["graph", "eager"])
    )
    def test_wrap_optimizer_fixed_loss_scale(self):
        opt = gradient_descent_v2.SGD(1.0)
        opt = tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
            opt, 123
        )
        self.assertIsInstance(opt, loss_scale_optimizer_v2.LossScaleOptimizer)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.assertEqual(self.evaluate(opt.loss_scale), 123.0)
        self.assertFalse(opt.dynamic)
        self.assertEqual(opt.initial_scale, 123.0)

        opt = gradient_descent_v2.SGD(1.0)
        opt = tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
            opt, tf.compat.v1.mixed_precision.FixedLossScale(123)
        )
        self.assertIsInstance(opt, loss_scale_optimizer_v2.LossScaleOptimizer)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.assertEqual(self.evaluate(opt.loss_scale), 123.0)
        self.assertFalse(opt.dynamic)
        self.assertEqual(opt.initial_scale, 123.0)

    @test_combinations.generate(
        test_combinations.combine(mode=["graph", "eager"])
    )
    def test_wrap_optimizer_dynamic_loss_scale(self):
        opt = gradient_descent_v2.SGD(1.0)
        opt = tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
            opt, "dynamic"
        )
        self.assertIsInstance(opt, loss_scale_optimizer_v2.LossScaleOptimizer)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.assertEqual(self.evaluate(opt.loss_scale), 2.0**15)
        self.assertTrue(opt.dynamic)
        self.assertEqual(opt.initial_scale, 2.0**15)
        self.assertEqual(opt.dynamic_growth_steps, 2000)

        opt = gradient_descent_v2.SGD(1.0)
        opt = tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
            opt,
            tf.compat.v1.mixed_precision.DynamicLossScale(
                initial_loss_scale=4, increment_period=1000
            ),
        )
        self.assertIsInstance(opt, loss_scale_optimizer_v2.LossScaleOptimizer)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.assertEqual(self.evaluate(opt.loss_scale), 4.0)
        self.assertTrue(opt.dynamic)
        self.assertEqual(opt.initial_scale, 4.0)
        self.assertEqual(opt.dynamic_growth_steps, 1000)

    @test_combinations.generate(
        test_combinations.combine(mode=["graph", "eager"])
    )
    def test_wrap_optimizer_dynamic_loss_scale_errors(self):
        opt = gradient_descent_v2.SGD(1.0)

        with self.assertRaisesRegex(
            ValueError,
            'When passing a DynamicLossScale to "loss_scale", '
            "DynamicLossScale.multiplier must be 2. Got: "
            "DynamicLossScale",
        ):
            tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
                opt,
                tf.compat.v1.mixed_precision.DynamicLossScale(multiplier=4.0),
            )

        class MyLossScale(tf.compat.v1.mixed_precision.LossScale):
            def __call__(self):
                return 1.0

            def update(self, grads):
                return None, True

            def get_config(self):
                return {}

        with self.assertRaisesRegex(
            TypeError,
            "Passing a LossScale that is not a FixedLossScale or a "
            "DynamicLossScale is not supported. Got:",
        ):
            tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
                opt, MyLossScale()
            )

    @test_combinations.generate(
        test_combinations.combine(mode=["graph", "eager"])
    )
    def test_optimizer_errors(self):
        opt = gradient_descent_v2.SGD(1.0)
        opt = loss_scale_optimizer_v2.LossScaleOptimizer(opt)
        with self.assertRaisesRegex(
            ValueError,
            '"opt" must not already be an instance of a LossScaleOptimizer.',
        ):
            tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
                opt
            )
        self.assertFalse(
            tf.config.optimizer.get_experimental_options().get(
                "auto_mixed_precision", False
            )
        )

    @test_utils.enable_v2_dtype_behavior
    def test_error_if_policy_is_set(self):
        with policy.policy_scope("mixed_float16"):
            with self.assertRaisesRegex(
                ValueError, "the global Keras dtype Policy has been set"
            ):
                tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(  # noqa: E501
                    gradient_descent_v2.SGD(1.0)
                )
        # Test no error is thrown when the policy is currently the default.
        tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
            gradient_descent_v2.SGD(1.0)
        )
        # Test no error is thrown when the policy is a non-mixed policy.
        with policy.policy_scope("float64"):
            tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
                gradient_descent_v2.SGD(1.0)
            )


if __name__ == "__main__":
    tf.test.main()
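
# --- Hedged usage sketch (editorial addendum, not part of the original
# module) --- A minimal sketch of the API exercised by the tests above,
# outside the test harness. Since `tf.test.main()` already owns this file's
# entry point, the sketch is left commented out. It assumes the legacy SGD
# optimizer imported above and a tf.compat.v1 (graph) training workflow.
#
# opt = gradient_descent_v2.SGD(1.0)
# # Wrap with a fixed loss scale of 128; this returns a LossScaleOptimizer
# # and enables the auto-mixed-precision grappler pass.
# opt = tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
#     opt, loss_scale=128
# )
# # ... build and train a tf.compat.v1 graph with `opt` ...
# tf.compat.v1.mixed_precision.disable_mixed_precision_graph_rewrite()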
tf-keras/tf_keras/mixed_precision/mixed_precision_graph_rewrite_test.py/0
{ "file_path": "tf-keras/tf_keras/mixed_precision/mixed_precision_graph_rewrite_test.py", "repo_id": "tf-keras", "token_count": 3273 }
185
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for model cloning, plus model-related API entries."""

import tensorflow.compat.v2 as tf

from tf_keras import backend
from tf_keras import metrics as metrics_module
from tf_keras.engine import functional
from tf_keras.engine import sequential
from tf_keras.engine import training
from tf_keras.engine import training_v1
from tf_keras.engine.base_layer import AddMetric
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_layer import Input
from tf_keras.engine.input_layer import InputLayer
from tf_keras.optimizers import optimizer_v1
from tf_keras.saving.legacy import serialization
from tf_keras.saving.legacy.saved_model.utils import keras_option_scope
from tf_keras.saving.object_registration import CustomObjectScope
from tf_keras.utils import generic_utils
from tf_keras.utils import version_utils

# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export

# API entries importable from `keras.models`:
Model = training.Model
Sequential = sequential.Sequential


# Callable used to clone a layer with weights preserved.
def share_weights(layer):
    return layer


def _clone_layer(layer):
    return layer.__class__.from_config(layer.get_config())


def _insert_ancillary_layers(model, ancillary_layers, metrics_names, new_nodes):
    """Inserts ancillary layers into the model with the proper order."""
    # Sort `AddMetric` layers so they agree with metrics_names.
    metric_layers = [
        layer for layer in ancillary_layers if isinstance(layer, AddMetric)
    ]
    metric_layers.sort(key=lambda layer: metrics_names.index(layer.metric_name))
    ancillary_layers = [
        layer for layer in ancillary_layers if not isinstance(layer, AddMetric)
    ] + metric_layers
    model._insert_layers(ancillary_layers, relevant_nodes=list(new_nodes))


def _make_new_nodes(nodes_by_depth, layer_fn, layer_map, tensor_map):
    """Make new nodes with the layers in `layer_map` based on `nodes_by_depth`.

    Args:
      nodes_by_depth: Provides structure information to create new nodes.
      layer_fn: Function to clone layers.
      layer_map: Map from layers in `model` to new layers.
      tensor_map: Map from tensors in `model` to newly computed tensors.

    Returns:
      A set of new nodes. `layer_map` and `tensor_map` are updated.
    """
    # Iterate over every node in the reference model, in depth order.
    new_nodes = set()
    depth_keys = list(nodes_by_depth.keys())
    depth_keys.sort(reverse=True)
    for depth in depth_keys:
        nodes = nodes_by_depth[depth]
        for node in nodes:
            # Recover the corresponding layer.
            layer = node.outbound_layer

            # Get or create layer.
            if layer not in layer_map:
                new_layer = layer_fn(layer)
                layer_map[layer] = new_layer
                layer = new_layer
            else:
                # Reuse previously cloned layer.
                layer = layer_map[layer]
                # Don't call InputLayer multiple times.
if isinstance(layer, InputLayer): continue # If all previous input tensors are available in tensor_map, # then call node.inbound_layer on them. if all( tensor in tensor_map for tensor in tf.nest.flatten(node.input_tensors) ): # Call layer. args = tf.nest.map_structure( lambda t: tensor_map.get(t, t), node.call_args ) kwargs = tf.nest.map_structure( lambda t: tensor_map.get(t, t), node.call_kwargs ) output_tensors = layer(*args, **kwargs) # Thread-safe way to keep track of what node was created. first_output_tensor = tf.nest.flatten(output_tensors)[0] new_nodes.add( layer._inbound_nodes[ first_output_tensor._keras_history.node_index ] ) for x, y in zip( tf.nest.flatten(node.output_tensors), tf.nest.flatten(output_tensors), ): tensor_map[x] = y return new_nodes def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer): """Clone a functional `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Input layers are always cloned. Args: model: Instance of `Model`. input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. layer_fn: callable to be applied on non-input layers in the model. By default it clones the layer. Another example is to preserve the layer to share the weights. This is required when we create a per-replica copy of the model with distribution strategy; we want the weights to be shared but still feed inputs separately so we create new input layers. Returns: An instance of `Model` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. Raises: ValueError: in case of invalid `model` argument value or `layer_fn` argument value. """ if layer_fn is None: layer_fn = _clone_layer if not isinstance(model, Model): raise ValueError( "Expected `model` argument " f"to be a `Model` instance. Received: model={model}" ) if isinstance(model, Sequential): raise ValueError( "Expected `model` argument " "to be a functional `Model` instance, " f"got a `Sequential` instance instead: {model}" ) if not model._is_graph_network: raise ValueError( "Expected `model` argument " "to be a functional `Model` instance, " f"but got a subclassed model instead: {model}" ) new_input_layers = {} # Cache for created layers. if input_tensors is not None: # Make sure that all input tensors come from a TF-Keras layer. input_tensors = tf.nest.flatten(input_tensors) for i, input_tensor in enumerate(input_tensors): original_input_layer = model._input_layers[i] # Cache input layer. Create a new layer if the tensor is originally # not from a TF-Keras layer. if not backend.is_keras_tensor(input_tensor): name = original_input_layer.name input_tensor = Input( tensor=input_tensor, name="input_wrapper_for_" + name ) newly_created_input_layer = input_tensor._keras_history.layer new_input_layers[ original_input_layer ] = newly_created_input_layer else: new_input_layers[ original_input_layer ] = input_tensor._keras_history.layer if not callable(layer_fn): raise ValueError( "Expected `layer_fn` argument to be a callable. 
" f"Received: layer_fn={layer_fn}" ) # For affected g3 users who need to default to old serialization in cloning if getattr(model, "use_legacy_config", False): with keras_option_scope( save_traces=False, in_tf_saved_model_scope=True ): model_configs, created_layers = _clone_layers_and_model_config( model, new_input_layers, layer_fn ) else: model_configs, created_layers = _clone_layers_and_model_config( model, new_input_layers, layer_fn ) # Reconstruct model from the config, using the cloned layers. ( input_tensors, output_tensors, created_layers, ) = functional.reconstruct_from_config( model_configs, created_layers=created_layers ) metrics_names = model.metrics_names if functional.has_functional_like_constructor(model.__class__): new_model = model.__class__( input_tensors, output_tensors, name=model.name ) else: # This may be incorrect: the new model will end up having a different # class than the original. However various existing models rely # on this behavior, so we keep it. new_model = Model(input_tensors, output_tensors, name=model.name) # Layers not directly tied to outputs of the Model, such as loss layers # created in `add_loss` and `add_metric`. ancillary_layers = [ layer for layer in created_layers.values() if layer not in new_model.layers ] # TODO(b/162887610): This may need to adjust the inbound node index if the # created layers had already been used to define other models. if ancillary_layers: new_nodes = tf.nest.flatten( [ layer.inbound_nodes[1:] if functional._should_skip_first_node(layer) else layer.inbound_nodes for layer in created_layers.values() ] ) _insert_ancillary_layers( new_model, ancillary_layers, metrics_names, new_nodes ) return new_model def _clone_layers_and_model_config(model, input_layers, layer_fn): """Clones all layers; returns the model config without serializing layers. This function ensures that only the node graph is retrieved when getting the model config. The `layer_fn` used to clone layers might not rely on `layer.get_config()`, so some custom layers do not define `get_config`. Trying to retrieve the config results in errors. Args: model: A Functional model. input_layers: Dictionary mapping input layers in `model` to new input layers. layer_fn: Function used to clone all non-input layers. Returns: Model config object, and a dictionary of newly created layers. """ created_layers = {} def _copy_layer(layer): # Whenever the network config attempts to get the layer serialization, # return a dummy dictionary. if layer in input_layers: created_layers[layer.name] = input_layers[layer] elif layer in model._input_layers: created_layers[layer.name] = InputLayer(**layer.get_config()) else: created_layers[layer.name] = layer_fn(layer) return {} config = functional.get_network_config( model, serialize_layer_fn=_copy_layer ) return config, created_layers def _remove_ancillary_layers(model, layer_map, layers): """Removes and returns any ancillary layers from `layers` based on `model`. Ancillary layers are part of the model topology but not used to compute the model outputs, e.g., layers from `add_loss` and `add_metric`. Args: model: A TF-Keras Model. layer_map: A map to from layers in the `model` to those in `layers`. layers: A list of all layers. Returns: Two lists of layers: (1) `layers` with the ancillary layers removed, and (2) the ancillary layers. """ ancillary_layers = [] # Additional layers for computing losses and metrics. if not model._is_graph_network: return layers, ancillary_layers # Ancillary layers are those with depth < 0. 
depths = [depth for depth in model._nodes_by_depth.keys() if depth < 0] depths.sort(reverse=True) # Order topologically from inputs to outputs. for depth in depths: for node in model._nodes_by_depth[depth]: ancillary_layers.append(layer_map[node.outbound_layer]) return [l for l in layers if l not in ancillary_layers], ancillary_layers def _clone_sequential_model(model, input_tensors=None, layer_fn=_clone_layer): """Clone a `Sequential` model instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Args: model: Instance of `Sequential`. input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. layer_fn: callable to be applied on non-input layers in the model. By default it clones the layer. Another example is to preserve the layer to share the weights. This is required when we create a per-replica copy of the model with distribution strategy; we want the weights to be shared but still feed inputs separately so we create new input layers. Returns: An instance of `Sequential` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. Raises: ValueError: in case of invalid `model` argument value or `layer_fn` argument value. """ if layer_fn is None: layer_fn = _clone_layer if not isinstance(model, Sequential): raise ValueError( "Expected `model` argument " "to be a `Sequential` model instance. " f"Received: model={model}" ) if not callable(layer_fn): raise ValueError( "Expected `layer_fn` argument to be a callable. " f"Received: layer_fn={layer_fn}" ) layers = [] # Layers needed to compute the model's outputs. layer_map = {} # Ensure that all layers are cloned. The model's layers # property will exclude the initial InputLayer (if it exists) in the model, # resulting in a different Sequential model structure. for layer in model._flatten_layers(include_self=False, recursive=False): if isinstance(layer, InputLayer) and input_tensors is not None: # If input tensors are provided, the original model's InputLayer is # overwritten with a different InputLayer. continue cloned_layer = ( _clone_layer(layer) if isinstance(layer, InputLayer) else layer_fn(layer) ) layers.append(cloned_layer) layer_map[layer] = cloned_layer layers, ancillary_layers = _remove_ancillary_layers( model, layer_map, layers ) if input_tensors is None: cloned_model = Sequential(layers=layers, name=model.name) elif len(generic_utils.to_list(input_tensors)) != 1: raise ValueError( "To clone a `Sequential` model, we expect at most one tensor as " f"part of `input_tensors`. Received: input_tensors={input_tensors}" ) else: # Overwrite the original model's input layer. if isinstance(input_tensors, tuple): input_tensors = list(input_tensors) x = generic_utils.to_list(input_tensors)[0] if backend.is_keras_tensor(x): origin_layer = x._keras_history.layer if isinstance(origin_layer, InputLayer): cloned_model = Sequential( layers=[origin_layer] + layers, name=model.name ) else: raise ValueError( "Cannot clone a `Sequential` model on top " "of a tensor that comes from a TF-Keras layer " "other than an `InputLayer`. " "Use the Functional API instead. 
" f"Received: input_tensors={input_tensors}" ) else: input_tensor = Input( tensor=x, name="input_wrapper_for_" + str(x.name) ) input_layer = input_tensor._keras_history.layer cloned_model = Sequential( layers=[input_layer] + layers, name=model.name ) if not ancillary_layers: return cloned_model tensor_map = {} # Maps tensors from `model` to those in `cloned_model`. for depth, cloned_nodes in cloned_model._nodes_by_depth.items(): nodes = model._nodes_by_depth[depth] # This should be safe in a Sequential model. In an arbitrary network, # you need to sort using the outbound layer of the node as a key. for cloned_node, node in zip(cloned_nodes, nodes): if isinstance(cloned_node.output_tensors, list): for j, output_tensor in enumerate(cloned_node.output_tensors): tensor_map[node.output_tensors[j]] = output_tensor else: tensor_map[node.output_tensors] = cloned_node.output_tensors # Ancillary nodes have negative depth. new_nodes = _make_new_nodes( { depth: nodes for depth, nodes in model._nodes_by_depth.items() if depth < 0 }, layer_fn, layer_map, tensor_map, ) _insert_ancillary_layers( cloned_model, ancillary_layers, model.metrics_names, new_nodes ) return cloned_model @keras_export("keras.models.clone_model") def clone_model(model, input_tensors=None, clone_function=None): """Clone a Functional or Sequential `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Note that `clone_model` will not preserve the uniqueness of shared objects within the model (e.g. a single variable attached to two distinct layers will be restored as two separate variables). Args: model: Instance of `Model` (could be a Functional model or a Sequential model). input_tensors: optional list of input tensors or InputLayer objects to build the model upon. If not provided, new `Input` objects will be created. clone_function: Callable to be used to clone each layer in the target model (except `InputLayer` instances). It takes as argument the layer instance to be cloned, and returns the corresponding layer instance to be used in the model copy. If unspecified, this callable becomes the following serialization/deserialization function: `lambda layer: layer.__class__.from_config(layer.get_config())`. By passing a custom callable, you can customize your copy of the model, e.g. by wrapping certain layers of interest (you might want to replace all `LSTM` instances with equivalent `Bidirectional(LSTM(...))` instances, for example). Defaults to `None`. Returns: An instance of `Model` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. The cloned model may behave differently from the original model if a custom `clone_function` modifies the layer. Example: ```python # Create a test Sequential model. model = keras.Sequential([ keras.Input(shape=(728,)), keras.layers.Dense(32, activation='relu'), keras.layers.Dense(1, activation='sigmoid'), ]) # Create a copy of the test model (with freshly initialized weights). new_model = clone_model(model) ``` Note that subclassed models cannot be cloned, since their internal layer structure is not known. 
To achieve equivalent functionality as `clone_model` in the case of a subclassed model, simply make sure that the model class implements `get_config()` (and optionally `from_config()`), and call: ```python new_model = model.__class__.from_config(model.get_config()) ``` """ with serialization.DisableSharedObjectScope(): if isinstance(model, Sequential): return _clone_sequential_model( model, input_tensors=input_tensors, layer_fn=clone_function ) if isinstance(model, functional.Functional): # If the get_config() method is the same as a regular Functional # model, we're safe to use _clone_functional_model (which relies # on a Functional constructor). In the case where the get_config # is custom, this may not necessarily work, but if clone_function # or input_tensors are passed, we attempt it anyway # in order to preserve backwards compatibility. if generic_utils.is_default(model.get_config) or ( clone_function or input_tensors ): return _clone_functional_model( model, input_tensors=input_tensors, layer_fn=clone_function ) # Case of a custom model class if clone_function or input_tensors: raise ValueError( "Arguments clone_function and input_tensors " "are only supported for Sequential models " "or Functional models. Received model of " f"type '{model.__class__.__name__}', with " f"clone_function={clone_function} and " f"input_tensors={input_tensors}" ) # Note that a custom object scope may be required in this case. return model.__class__.from_config(model.get_config()) # "Clone" a subclassed model by resetting all of the attributes. def _in_place_subclassed_model_reset(model): """Substitute for model cloning that works for subclassed models. Subclassed models cannot be cloned because their topology is not serializable. To "instantiate" an identical model in a new TF graph, we reuse the original model object, but we clear its state. After calling this function on a model instance, you can use the model instance as if it were a model clone (in particular you can use it in a new graph). This method clears the state of the input model. It is thus destructive. However the original state can be restored fully by calling `_in_place_subclassed_model_state_restoration`. Args: model: Instance of a TF-Keras model created via subclassing. Raises: ValueError: In case the model uses a subclassed model as inner layer. """ assert ( not model._is_graph_network ) # Only makes sense for subclassed networks # Select correct base class for new Model. version_utils.swap_class( model.__class__, training.Model, training_v1.Model, tf.compat.v1.executing_eagerly_outside_functions(), ) # Retrieve all layers tracked by the model as well as their attribute names attributes_cache = {} for name in dir(model): # Skip attrs that track other trackables. if name == "submodules" or name == "_self_tracked_trackables": continue try: value = getattr(model, name) except (AttributeError, ValueError, TypeError): continue if isinstance(value, Layer): attributes_cache[name] = value assert value in model.layers if hasattr(value, "layers") and value.layers: raise ValueError( "We do not support the use of nested layers " "in `model_to_estimator` at this time. Found nested " f"layer: {value}" ) elif isinstance(value, (list, tuple)) and name not in ( "layers", "_layers", "metrics", "_compile_metric_functions", "_output_loss_metrics", ): # Handle case: list/tuple of layers (also tracked by the Network # API). 
if value and all(isinstance(val, Layer) for val in value): raise ValueError( "We do not support the use of list-of-layers " "attributes in subclassed models used with " "`model_to_estimator` at this time. Found list " f"model: {name}" ) # Replace layers on the model with fresh layers layers_to_names = {value: key for key, value in attributes_cache.items()} original_layers = list( model._flatten_layers(include_self=False, recursive=False) ) setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for layer in original_layers: # We preserve layer order. config = layer.get_config() # This will not work for nested subclassed models used as layers. # This would be theoretically possible to support, but would add # complexity. Only do it if users complain. if isinstance(layer, training.Model) and not layer._is_graph_network: raise ValueError( "We do not support the use of nested subclassed models " "in `model_to_estimator` at this time. Found nested " f"model: {layer}" ) fresh_layer = layer.__class__.from_config(config) name = layers_to_names[layer] setattr(model, name, fresh_layer) model._self_tracked_trackables.append(fresh_layer) # Cache original model build attributes (in addition to layers) if ( not hasattr(model, "_original_attributes_cache") or model._original_attributes_cache is None ): if model.built: attributes_to_cache = [ "inputs", "outputs", "total_loss", "optimizer", "train_function", "test_function", "predict_function", "_training_endpoints", "_collected_trainable_weights", "_feed_inputs", "_feed_input_names", "_feed_input_shapes", ] for name in attributes_to_cache: attributes_cache[name] = getattr(model, name) model._original_attributes_cache = attributes_cache _reset_build_compile_trackers(model) model._setattr_tracking = setattr_tracking def _reset_build_compile_trackers(model): """Reset state trackers for model. Note that we do not actually zero out attributes such as optimizer, but instead rely on the expectation that all of the attrs will be over-written on calling build/compile/etc. This is somewhat fragile, insofar as we check elsewhere for the presence of these attributes as evidence of having been built/compiled/etc. Pending a better way to do this, we reset key attributes here to allow building and compiling. Args: model: the model that is being reset """ # Reset build state model.built = False model.inputs = None model.outputs = None # Reset compile state model._is_compiled = False if not tf.compat.v1.executing_eagerly_outside_functions(): model._v1_compile_was_called = False model.optimizer = None @keras_export( "keras.__internal__.models.in_place_subclassed_model_state_restoration", v1=[], ) def in_place_subclassed_model_state_restoration(model): """Restores the original state of a model after it was "reset". This undoes this action of `_in_place_subclassed_model_reset`, which is called in `clone_and_build_model` if `in_place_reset` is set to True. Args: model: Instance of a TF-Keras model created via subclassing, on which `_in_place_subclassed_model_reset` was previously called. """ assert not model._is_graph_network # Restore layers and build attributes if ( hasattr(model, "_original_attributes_cache") and model._original_attributes_cache is not None ): # Models have sticky attribute assignment, so we want to be careful to # add back the previous attributes and track Layers by their original # names without adding dependencies on "utility" attributes which Models # exempt when they're constructed. 
setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for name, value in model._original_attributes_cache.items(): setattr(model, name, value) if isinstance(value, Layer): model._self_tracked_trackables.append(value) model._original_attributes_cache = None model._setattr_tracking = setattr_tracking else: # Restore to the state of a never-called model. _reset_build_compile_trackers(model) @keras_export("keras.__internal__.models.clone_and_build_model", v1=[]) def clone_and_build_model( model, input_tensors=None, target_tensors=None, custom_objects=None, compile_clone=True, in_place_reset=False, optimizer_iterations=None, optimizer_config=None, ): """Clone a `Model` and build/compile it with the same settings used before. This function can be run in the same graph or in a separate graph from the model. When using a separate graph, `in_place_reset` must be `False`. Note that, currently, the clone produced from this function may not work with TPU DistributionStrategy. Try at your own risk. Args: model: `tf.keras.Model` object. Can be Functional, Sequential, or sub-classed. input_tensors: Optional list or dictionary of input tensors to build the model upon. If not provided, placeholders will be created. target_tensors: Optional list of target tensors for compiling the model. If not provided, placeholders will be created. custom_objects: Optional dictionary mapping string names to custom classes or functions. compile_clone: Boolean, whether to compile model clone (default `True`). in_place_reset: Boolean, whether to reset the model in place. Only used if the model is a subclassed model. In the case of a subclassed model, this argument must be set to `True` (default `False`). To restore the original model, use the function `in_place_subclassed_model_state_restoration(model)`. optimizer_iterations: An iterations variable that will be incremented by the optimizer if the clone is compiled. This argument is used when a TF-Keras model is cloned into an Estimator model function, because Estimators create their own global step variable. optimizer_config: Optimizer config dictionary or list of dictionary returned from `get_config()`. This argument should be defined if `clone_and_build_model` is called in a different graph or session from the original model, and the optimizer is an instance of `OptimizerV2`. Returns: Clone of the model. Raises: ValueError: Cloning fails in the following cases - cloning a subclassed model with `in_place_reset` set to False. - compiling the clone when the original model has not been compiled. """ # Grab optimizer now, as we reset-in-place for subclassed models, but # want to maintain access to the original optimizer. orig_optimizer = model.optimizer if compile_clone and not orig_optimizer: raise ValueError( "Error when cloning model: `compile_clone` was set to True, but " f"the original model has not been compiled. Received: model={model}" ) if compile_clone: compile_args = model._get_compile_args() # Allows this method to be robust to switching graph and eager classes. 
model._get_compile_args = lambda: compile_args with CustomObjectScope(custom_objects or {}): if model._is_graph_network: clone = clone_model(model, input_tensors=input_tensors) elif isinstance(model, Sequential): clone = clone_model(model, input_tensors=input_tensors) if ( not clone._is_graph_network and model._build_input_shape is not None ): if tf.compat.v1.executing_eagerly_outside_functions(): clone.build(model._build_input_shape) else: clone._set_inputs( backend.placeholder( model._build_input_shape, dtype=model.inputs[0].dtype, ) ) else: try: # Prefer cloning the model if serial/deserial logic is # implemented for subclassed model. clone = model.__class__.from_config(model.get_config()) except NotImplementedError: logging.warning( "This model is a subclassed model. Please implement " "`get_config` and `from_config` to better support " "cloning the model." ) if not in_place_reset: raise ValueError( f"This model ({model}) is a subclassed model. " "Such a model cannot be cloned, but there is a " "workaround where the model is reset in-place. " "To use this, please set the " "argument `in_place_reset` to `True`. This will reset " "the attributes in the original model. " "To restore the attributes, call " "`in_place_subclassed_model_state_restoration(model)`." ) clone = model _in_place_subclassed_model_reset(clone) if input_tensors is not None: if ( isinstance(input_tensors, (list, tuple)) and len(input_tensors) == 1 ): input_tensors = input_tensors[0] clone._set_inputs(input_tensors) if compile_clone: if isinstance(orig_optimizer, optimizer_v1.TFOptimizer): optimizer = optimizer_v1.TFOptimizer( orig_optimizer.optimizer, optimizer_iterations ) backend.track_tf_optimizer(optimizer) else: if not isinstance(orig_optimizer, (tuple, list)): orig_optimizer = [orig_optimizer] if optimizer_config is None: optimizer = [ opt.__class__.from_config(opt.get_config()) for opt in orig_optimizer ] elif isinstance(optimizer_config, dict): optimizer = [ orig_optimizer[0].__class__.from_config(optimizer_config) ] else: # optimizer config is list of dict, same order as # orig_optimizer. optimizer = [ opt.__class__.from_config(opt_config) for (opt, opt_config) in zip( orig_optimizer, optimizer_config ) ] if optimizer_iterations is not None: for opt in optimizer: opt.iterations = optimizer_iterations if len(optimizer) == 1: optimizer = optimizer[0] compile_args["optimizer"] = optimizer if target_tensors is not None: compile_args["target_tensors"] = target_tensors # Ensure Metric objects in new model are separate from existing model. compile_args["metrics"] = metrics_module.clone_metrics( compile_args["metrics"] ) compile_args["weighted_metrics"] = metrics_module.clone_metrics( compile_args["weighted_metrics"] ) clone.compile(**compile_args) return clone
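
# --- Hedged usage sketch (editorial addendum, not part of the original
# module) --- The `clone_model` docstring above suggests swapping layer types
# via `clone_function`; this sketch wraps every LSTM in a `Bidirectional`
# wrapper while cloning other layers from their configs. The layer sizes are
# arbitrary, and this is a best-effort illustration rather than a guaranteed
# recipe.
if __name__ == "__main__":
    from tf_keras import layers

    def _wrap_lstms(layer):
        if isinstance(layer, layers.LSTM):
            return layers.Bidirectional(
                layers.LSTM.from_config(layer.get_config())
            )
        return layer.__class__.from_config(layer.get_config())

    inputs = Input(shape=(10, 8))
    outputs = layers.Dense(1)(layers.LSTM(4)(inputs))
    model = Model(inputs, outputs)
    wrapped = clone_model(model, clone_function=_wrap_lstms)
    assert any(isinstance(l, layers.Bidirectional) for l in wrapped.layers)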
tf-keras/tf_keras/models/cloning.py/0
{ "file_path": "tf-keras/tf_keras/models/cloning.py", "repo_id": "tf-keras", "token_count": 15396 }
186
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adadelta optimizer implementation.""" import numpy as np import tensorflow.compat.v2 as tf from tf_keras import backend_config from tf_keras.optimizers.legacy import optimizer_v2 # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export( "keras.optimizers.legacy.Adadelta", v1=["keras.optimizers.Adadelta", "keras.optimizers.legacy.Adadelta"], ) class Adadelta(optimizer_v2.OptimizerV2): r"""Optimizer that implements the Adadelta algorithm. Adadelta optimization is a stochastic gradient descent method that is based on adaptive learning rate per dimension to address two drawbacks: - The continual decay of learning rates throughout training. - The need for a manually selected global learning rate. Adadelta is a more robust extension of Adagrad that adapts learning rates based on a moving window of gradient updates, instead of accumulating all past gradients. This way, Adadelta continues learning even when many updates have been done. Compared to Adagrad, in the original version of Adadelta you don't have to set an initial learning rate. In this version, the initial learning rate can be set, as in most other TF-Keras optimizers. Args: learning_rate: Initial value for the learning rate: either a floating point value, or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance. Note that `Adadelta` tends to benefit from higher initial learning rate values compared to other optimizers. To match the exact form in the original paper, use 1.0. Defaults to `0.001`. rho: A `Tensor` or a floating point value. The decay rate. epsilon: Small floating point value used to maintain numerical stability. name: Optional name prefix for the operations created when applying gradients. Defaults to `"Adadelta"`. **kwargs: keyword arguments. Allowed arguments are `clipvalue`, `clipnorm`, `global_clipnorm`. If `clipvalue` (float) is set, the gradient of each weight is clipped to be no higher than this value. If `clipnorm` (float) is set, the gradient of each weight is individually clipped so that its norm is no higher than this value. If `global_clipnorm` (float) is set the gradient of all weights is clipped so that their global norm is no higher than this value. Reference: - [Zeiler, 2012](http://arxiv.org/abs/1212.5701) """ _HAS_AGGREGATE_GRAD = True def __init__( self, learning_rate=0.001, rho=0.95, epsilon=1e-7, name="Adadelta", **kwargs ): super().__init__(name, **kwargs) self._set_hyper("learning_rate", kwargs.get("lr", learning_rate)) self._set_hyper("decay", self._initial_decay) self._set_hyper("rho", rho) self.epsilon = epsilon or backend_config.epsilon() def _create_slots(self, var_list): # Separate for-loops to respect the ordering of slot variables from v1. 
for v in var_list: self.add_slot(v, "accum_grad") for v in var_list: self.add_slot(v, "accum_var") def _prepare_local(self, var_device, var_dtype, apply_state): super()._prepare_local(var_device, var_dtype, apply_state) apply_state[(var_device, var_dtype)].update( dict( epsilon=tf.convert_to_tensor(self.epsilon, var_dtype), rho=tf.identity(self._get_hyper("rho", var_dtype)), ) ) def set_weights(self, weights): params = self.weights # Override set_weights for backward compatibility of TF-Keras V1 # optimizer since it does not include iteration at head of the weight # list. Set iteration to 0. if len(params) == len(weights) + 1: weights = [np.array(0)] + weights super().set_weights(weights) def _resource_apply_dense(self, grad, var, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = (apply_state or {}).get( (var_device, var_dtype) ) or self._fallback_apply_state(var_device, var_dtype) accum_grad = self.get_slot(var, "accum_grad") accum_var = self.get_slot(var, "accum_var") return tf.raw_ops.ResourceApplyAdadelta( var=var.handle, accum=accum_grad.handle, accum_update=accum_var.handle, lr=coefficients["lr_t"], rho=coefficients["rho"], epsilon=coefficients["epsilon"], grad=grad, use_locking=self._use_locking, ) def _resource_apply_sparse(self, grad, var, indices, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = (apply_state or {}).get( (var_device, var_dtype) ) or self._fallback_apply_state(var_device, var_dtype) accum_grad = self.get_slot(var, "accum_grad") accum_var = self.get_slot(var, "accum_var") return tf.raw_ops.ResourceSparseApplyAdadelta( var=var.handle, accum=accum_grad.handle, accum_update=accum_var.handle, lr=coefficients["lr_t"], rho=coefficients["rho"], epsilon=coefficients["epsilon"], grad=grad, indices=indices, use_locking=self._use_locking, ) def get_config(self): config = super().get_config() config.update( { "learning_rate": self._serialize_hyperparameter( "learning_rate" ), "decay": self._initial_decay, "rho": self._serialize_hyperparameter("rho"), "epsilon": self.epsilon, } ) return config
tf-keras/tf_keras/optimizers/legacy/adadelta.py/0
{ "file_path": "tf-keras/tf_keras/optimizers/legacy/adadelta.py", "repo_id": "tf-keras", "token_count": 2677 }
187
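# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the adadelta.py record
# above): minimizing a toy quadratic with the legacy Adadelta class defined
# above. Per the docstring, learning_rate=1.0 matches the original paper's
# form; the variable and step count are arbitrary.
import tensorflow as tf
from tf_keras.optimizers.legacy import adadelta

opt = adadelta.Adadelta(learning_rate=1.0, rho=0.95, epsilon=1e-7)
var = tf.Variable(10.0)
loss = lambda: (var ** 2) / 2.0  # d(loss)/d(var) = var

for _ in range(100):
    opt.minimize(loss, var_list=[var])

# Each variable carries the two slots created in `_create_slots`: the
# running average of squared gradients and of squared updates.
print(opt.get_slot(var, "accum_grad").numpy(),
      opt.get_slot(var, "accum_var").numpy())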
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RMSprop optimizer implementation.""" import numpy as np import tensorflow.compat.v2 as tf from tf_keras import backend_config from tf_keras.optimizers.legacy import optimizer_v2 # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export( "keras.optimizers.legacy.RMSprop", v1=["keras.optimizers.RMSprop", "keras.optimizers.legacy.RMSprop"], ) class RMSprop(optimizer_v2.OptimizerV2): r"""Optimizer that implements the RMSprop algorithm. The gist of RMSprop is to: - Maintain a moving (discounted) average of the square of gradients - Divide the gradient by the root of this average This implementation of RMSprop uses plain momentum, not Nesterov momentum. The centered version additionally maintains a moving average of the gradients, and uses that average to estimate the variance. Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. rho: Discounting factor for the history/coming gradient. Defaults to `0.9`. momentum: A scalar or a scalar `Tensor`. Defaults to `0.0`. epsilon: A small constant for numerical stability. This epsilon is "epsilon hat" in the Kingma and Ba paper (in the formula just before Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to `1e-7`. centered: Boolean. If `True`, gradients are normalized by the estimated variance of the gradient; if False, by the uncentered second moment. Setting this to `True` may help with training, but is slightly more expensive in terms of computation and memory. Defaults to `False`. name: Optional name prefix for the operations created when applying gradients. Defaults to `"RMSprop"`. **kwargs: keyword arguments. Allowed arguments are `clipvalue`, `clipnorm`, `global_clipnorm`. If `clipvalue` (float) is set, the gradient of each weight is clipped to be no higher than this value. If `clipnorm` (float) is set, the gradient of each weight is individually clipped so that its norm is no higher than this value. If `global_clipnorm` (float) is set the gradient of all weights is clipped so that their global norm is no higher than this value. Note that in the dense implementation of this algorithm, variables and their corresponding accumulators (momentum, gradient moving average, square gradient moving average) will be updated even if the gradient is zero (i.e. accumulators will decay, momentum will be applied). The sparse implementation (used when the gradient is an `IndexedSlices` object, typically because of `tf.gather` or an embedding lookup in the forward pass) will not update variable slices or their accumulators unless those slices were used in the forward pass (nor is there an "eventual" correction to account for these omitted updates). 
This leads to more efficient updates for large embedding lookup tables (where most of the slices are not accessed in a particular graph execution), but differs from the published algorithm. Usage: >>> opt = tf.keras.optimizers.legacy.RMSprop(learning_rate=0.1) >>> var1 = tf.Variable(10.0) >>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1 >>> step_count = opt.minimize(loss, [var1]).numpy() >>> var1.numpy() 9.683772 Reference: - [Hinton, 2012]( http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) """ _HAS_AGGREGATE_GRAD = True def __init__( self, learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-7, centered=False, name="RMSprop", **kwargs, ): """Construct a new RMSprop optimizer. Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to `0.001`. rho: Discounting factor for the history/coming gradient. Defaults to `0.9`. momentum: A scalar or a scalar `Tensor`. Defaults to `0.0`. epsilon: A small constant for numerical stability. This epsilon is "epsilon hat" in the Kingma and Ba paper (in the formula just before Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to 1e-7. centered: Boolean. If `True`, gradients are normalized by the estimated variance of the gradient; if False, by the uncentered second moment. Setting this to `True` may help with training, but is slightly more expensive in terms of computation and memory. Defaults to `False`. name: Optional name prefix for the operations created when applying gradients. Defaults to "RMSprop". **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. @compatibility(eager) When eager execution is enabled, `learning_rate`, `decay`, `momentum`, and `epsilon` can each be a callable that takes no arguments and returns the actual value to use. This can be useful for changing these values across different invocations of optimizer functions. @end_compatibility """ super().__init__(name, **kwargs) self._set_hyper("learning_rate", kwargs.get("lr", learning_rate)) self._set_hyper("decay", self._initial_decay) self._set_hyper("rho", rho) self._momentum = False if ( isinstance(momentum, tf.Tensor) or callable(momentum) or momentum > 0 ): self._momentum = True if isinstance(momentum, (int, float)) and ( momentum < 0 or momentum > 1 ): raise ValueError( "`momentum` must be between [0, 1]. Received: " f"momentum={momentum} (of type {type(momentum)})." 
) self._set_hyper("momentum", momentum) self.epsilon = epsilon or backend_config.epsilon() self.centered = centered def _create_slots(self, var_list): for var in var_list: self.add_slot(var, "rms") if self._momentum: for var in var_list: self.add_slot(var, "momentum") if self.centered: for var in var_list: self.add_slot(var, "mg") def _prepare_local(self, var_device, var_dtype, apply_state): super()._prepare_local(var_device, var_dtype, apply_state) rho = tf.identity(self._get_hyper("rho", var_dtype)) apply_state[(var_device, var_dtype)].update( dict( neg_lr_t=-apply_state[(var_device, var_dtype)]["lr_t"], epsilon=tf.convert_to_tensor(self.epsilon, var_dtype), rho=rho, momentum=tf.identity(self._get_hyper("momentum", var_dtype)), one_minus_rho=1.0 - rho, ) ) def _resource_apply_dense(self, grad, var, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = (apply_state or {}).get( (var_device, var_dtype) ) or self._fallback_apply_state(var_device, var_dtype) rms = self.get_slot(var, "rms") if self._momentum: mom = self.get_slot(var, "momentum") if self.centered: mg = self.get_slot(var, "mg") return tf.raw_ops.ResourceApplyCenteredRMSProp( var=var.handle, mg=mg.handle, ms=rms.handle, mom=mom.handle, lr=coefficients["lr_t"], rho=coefficients["rho"], momentum=coefficients["momentum"], epsilon=coefficients["epsilon"], grad=grad, use_locking=self._use_locking, ) else: return tf.raw_ops.ResourceApplyRMSProp( var=var.handle, ms=rms.handle, mom=mom.handle, lr=coefficients["lr_t"], rho=coefficients["rho"], momentum=coefficients["momentum"], epsilon=coefficients["epsilon"], grad=grad, use_locking=self._use_locking, ) else: rms_t = coefficients["rho"] * rms + coefficients[ "one_minus_rho" ] * tf.square(grad) rms_t = tf.compat.v1.assign( rms, rms_t, use_locking=self._use_locking ) denom_t = rms_t if self.centered: mg = self.get_slot(var, "mg") mg_t = ( coefficients["rho"] * mg + coefficients["one_minus_rho"] * grad ) mg_t = tf.compat.v1.assign( mg, mg_t, use_locking=self._use_locking ) denom_t = rms_t - tf.square(mg_t) var_t = var - coefficients["lr_t"] * grad / ( tf.sqrt(denom_t) + coefficients["epsilon"] ) return tf.compat.v1.assign( var, var_t, use_locking=self._use_locking ).op def _resource_apply_sparse(self, grad, var, indices, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = (apply_state or {}).get( (var_device, var_dtype) ) or self._fallback_apply_state(var_device, var_dtype) rms = self.get_slot(var, "rms") if self._momentum: mom = self.get_slot(var, "momentum") if self.centered: mg = self.get_slot(var, "mg") return tf.raw_ops.ResourceSparseApplyCenteredRMSProp( var=var.handle, mg=mg.handle, ms=rms.handle, mom=mom.handle, lr=coefficients["lr_t"], rho=coefficients["rho"], momentum=coefficients["momentum"], epsilon=coefficients["epsilon"], grad=grad, indices=indices, use_locking=self._use_locking, ) else: return tf.raw_ops.ResourceSparseApplyRMSProp( var=var.handle, ms=rms.handle, mom=mom.handle, lr=coefficients["lr_t"], rho=coefficients["rho"], momentum=coefficients["momentum"], epsilon=coefficients["epsilon"], grad=grad, indices=indices, use_locking=self._use_locking, ) else: rms_scaled_g_values = (grad * grad) * coefficients["one_minus_rho"] rms_t = tf.compat.v1.assign( rms, rms * coefficients["rho"], use_locking=self._use_locking ) with tf.control_dependencies([rms_t]): rms_t = self._resource_scatter_add( rms, indices, rms_scaled_g_values ) rms_slice = tf.gather(rms_t, indices) denom_slice = rms_slice if 
self.centered: mg = self.get_slot(var, "mg") mg_scaled_g_values = grad * coefficients["one_minus_rho"] mg_t = tf.compat.v1.assign( mg, mg * coefficients["rho"], use_locking=self._use_locking ) with tf.control_dependencies([mg_t]): mg_t = self._resource_scatter_add( mg, indices, mg_scaled_g_values ) mg_slice = tf.gather(mg_t, indices) denom_slice = rms_slice - tf.square(mg_slice) var_update = self._resource_scatter_add( var, indices, coefficients["neg_lr_t"] * grad / (tf.sqrt(denom_slice) + coefficients["epsilon"]), ) if self.centered: return tf.group(*[var_update, rms_t, mg_t]) return tf.group(*[var_update, rms_t]) def set_weights(self, weights): params = self.weights # Override set_weights for backward compatibility of TF-Keras V1 # optimizer since it does not include iteration at head of the weight # list. Set iteration to 0. if len(params) == len(weights) + 1: weights = [np.array(0)] + weights super().set_weights(weights) def get_config(self): config = super().get_config() config.update( { "learning_rate": self._serialize_hyperparameter( "learning_rate" ), "decay": self._initial_decay, "rho": self._serialize_hyperparameter("rho"), "momentum": self._serialize_hyperparameter("momentum"), "epsilon": self.epsilon, "centered": self.centered, } ) return config RMSProp = RMSprop
tf-keras/tf_keras/optimizers/legacy/rmsprop.py/0
{ "file_path": "tf-keras/tf_keras/optimizers/legacy/rmsprop.py", "repo_id": "tf-keras", "token_count": 6936 }
188
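# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the rmsprop.py record
# above): with `centered=True` and a nonzero momentum, the optimizer
# maintains the extra "mg" and "momentum" slots alongside "rms", as set up
# in `_create_slots`. Values below are arbitrary; assumes eager execution.
import tensorflow as tf
from tf_keras.optimizers.legacy import rmsprop

opt = rmsprop.RMSprop(learning_rate=0.1, momentum=0.9, centered=True)
var = tf.Variable([1.0, 2.0])
loss = lambda: tf.reduce_sum(var ** 2)

opt.minimize(loss, var_list=[var])
for slot_name in opt.get_slot_names():  # e.g. "rms", "momentum", "mg"
    print(slot_name, opt.get_slot(var, slot_name).numpy())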
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for learning rate schedule API.""" import math import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized from tf_keras.optimizers.legacy import gradient_descent from tf_keras.optimizers.schedules import learning_rate_schedule from tf_keras.testing_infra import test_combinations def _maybe_serialized(lr_decay, serialize_and_deserialize): if serialize_and_deserialize: serialized = learning_rate_schedule.serialize(lr_decay) return learning_rate_schedule.deserialize(serialized) else: return lr_decay @test_combinations.generate( test_combinations.combine(serialize=[False, True], mode=["graph", "eager"]) ) class LRDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testContinuous(self, serialize): self.evaluate(tf.compat.v1.global_variables_initializer()) step = 5 decayed_lr = learning_rate_schedule.ExponentialDecay(0.05, 10, 0.96) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = 0.05 * 0.96 ** (5.0 / 10.0) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testStaircase(self, serialize): if tf.executing_eagerly(): step = tf.Variable(0) self.evaluate(tf.compat.v1.global_variables_initializer()) decayed_lr = learning_rate_schedule.ExponentialDecay( 0.1, 3, 0.96, staircase=True ) decayed_lr = _maybe_serialized(decayed_lr, serialize) # No change to learning rate due to staircase expected = 0.1 self.evaluate(step.assign(1)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) expected = 0.1 self.evaluate(step.assign(2)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) # Decayed learning rate expected = 0.1 * 0.96 ** (100 // 3) self.evaluate(step.assign(100)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testVariables(self, serialize): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): step = tf.Variable(1) assign_1 = step.assign(1) assign_2 = step.assign(2) assign_100 = step.assign(100) decayed_lr = learning_rate_schedule.ExponentialDecay( 0.1, 3, 0.96, staircase=True ) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(tf.compat.v1.global_variables_initializer()) # No change to learning rate self.evaluate(assign_1.op) self.assertAllClose(self.evaluate(decayed_lr(step)), 0.1, 1e-6) self.evaluate(assign_2.op) self.assertAllClose(self.evaluate(decayed_lr(step)), 0.1, 1e-6) # Decayed learning rate self.evaluate(assign_100.op) expected = 0.1 * 0.96 ** (100 // 3) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testPiecewiseConstant(self, serialize): x = tf.Variable(-999) decayed_lr = learning_rate_schedule.PiecewiseConstantDecay( [100, 110, 120], [1.0, 0.1, 0.01, 0.001] ) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(decayed_lr(x)), 1.0, 1e-6) self.evaluate(x.assign(100)) self.assertAllClose(self.evaluate(decayed_lr(x)), 1.0, 1e-6) self.evaluate(x.assign(105)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.1, 1e-6) self.evaluate(x.assign(110)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.1, 1e-6) self.evaluate(x.assign(120)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.01, 1e-6) self.evaluate(x.assign(999)) self.assertAllClose(self.evaluate(decayed_lr(x)), 0.001, 1e-6) def testPiecewiseFunction(self, serialize): if not tf.executing_eagerly(): self.skipTest("Run on eager mode only.") del serialize v = tf.Variable(1.0) def loss_fn(): return v * v learning_rate = learning_rate_schedule.PiecewiseConstantDecay( [1.0], [1.0, 0.1] ) opt = gradient_descent.SGD(learning_rate=learning_rate) @tf.function def minimize(): with tf.GradientTape() as tape: loss = loss_fn() g = tape.gradient(loss, [v]) opt.apply_gradients(list(zip(g, [v]))) minimize() self.assertAllEqual(v.read_value(), -1.0) def testPiecewiseConstantEdgeCases(self, serialize): # Test casting boundaries from int32 to int64. 
x_int64 = tf.Variable(0, dtype=tf.int64) boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7] decayed_lr = learning_rate_schedule.PiecewiseConstantDecay( boundaries, values ) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.4, 1e-6) self.evaluate(x_int64.assign(1)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.4, 1e-6) self.evaluate(x_int64.assign(2)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.5, 1e-6) self.evaluate(x_int64.assign(3)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.6, 1e-6) self.evaluate(x_int64.assign(4)) self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.7, 1e-6) # @parameterized.named_parameters( # ("NotSerialized", False), # ("Serialized", True)) @test_combinations.generate( test_combinations.combine(serialize=[False, True], mode=["graph", "eager"]) ) class LinearDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testHalfWay(self, serialize): step = 5 lr = 0.05 end_lr = 0.0 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = lr * 0.5 self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testEnd(self, serialize): step = 10 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testHalfWayWithEnd(self, serialize): step = 5 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr + end_lr) * 0.5 self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testBeyondEnd(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay(lr, 10, end_lr) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testBeyondEndWithCycle(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, cycle=True ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr - end_lr) * 0.25 + end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) # @parameterized.named_parameters( # ("NotSerialized", False), # ("Serialized", True)) @test_combinations.generate( test_combinations.combine(serialize=[False, True], mode=["graph", "eager"]) ) class SqrtDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testHalfWay(self, serialize): step = 5 lr = 0.05 end_lr = 0.0 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = lr * 0.5**power self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testEnd(self, serialize): step = 10 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testHalfWayWithEnd(self, serialize): step = 5 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power ) decayed_lr = _maybe_serialized(decayed_lr, 
serialize) expected = (lr - end_lr) * 0.5**power + end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testBeyondEnd(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testBeyondEndWithCycle(self, serialize): step = 15 lr = 0.05 end_lr = 0.001 power = 0.5 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, 10, end_lr, power=power, cycle=True ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = (lr - end_lr) * 0.25**power + end_lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) # @parameterized.named_parameters( # ("NotSerialized", False), # ("Serialized", True)) @test_combinations.generate( test_combinations.combine(serialize=[False, True], mode=["graph", "eager"]) ) class PolynomialDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testBeginWithCycle(self, serialize): lr = 0.001 decay_steps = 10 step = 0 decayed_lr = learning_rate_schedule.PolynomialDecay( lr, decay_steps, cycle=True ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = lr self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) # @parameterized.named_parameters( # ("NotSerialized", False), # ("Serialized", True)) @test_combinations.generate( test_combinations.combine(serialize=[False, True], mode=["graph", "eager"]) ) class InverseDecayTestV2(tf.test.TestCase, parameterized.TestCase): def testDecay(self, serialize): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = tf.Variable(0) decayed_lr = learning_rate_schedule.InverseTimeDecay( initial_lr, k, decay_rate ) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(tf.compat.v1.global_variables_initializer()) for i in range(k + 1): expected = initial_lr / (1 + i / k * decay_rate) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) self.evaluate(step.assign_add(1)) def testStaircase(self, serialize): initial_lr = 0.1 k = 10 decay_rate = 0.96 step = tf.Variable(0) decayed_lr = learning_rate_schedule.InverseTimeDecay( initial_lr, k, decay_rate, staircase=True ) decayed_lr = _maybe_serialized(decayed_lr, serialize) self.evaluate(tf.compat.v1.global_variables_initializer()) for i in range(k + 1): expected = initial_lr / (1 + decay_rate * (i // k)) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) self.evaluate(step.assign_add(1)) @test_combinations.generate( test_combinations.combine(serialize=[False, True], mode=["graph", "eager"]) ) class CosineDecayTestV2(tf.test.TestCase, parameterized.TestCase): def np_cosine_decay(self, step, decay_steps, alpha=0.0): step = min(step, decay_steps) completed_fraction = step / decay_steps decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction)) return (1.0 - alpha) * decay + alpha def testDecay(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecay( initial_lr, num_training_steps ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def linear_warmup(self, step, warmup_steps, initial_lr, target_lr): completed_fraction = step / warmup_steps total_delta = target_lr - initial_lr return completed_fraction * total_delta def testWarmup(self, 
serialize): warmup_steps = 1500 initial_lr = 0.0 target_lr = 10.0 for step in range(0, 1500, 250): lr = learning_rate_schedule.CosineDecay( initial_lr, 0, warmup_target=target_lr, warmup_steps=warmup_steps, ) lr = _maybe_serialized(lr, serialize) expected = self.linear_warmup( step, warmup_steps, initial_lr, target_lr ) self.assertAllClose(self.evaluate(lr(step)), expected) def testAlpha(self, serialize): num_training_steps = 1000 initial_lr = 1.0 alpha = 0.1 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecay( initial_lr, num_training_steps, alpha ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay(step, num_training_steps, alpha) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testFloat64InitLearningRate(self, serialize): num_training_steps = 1000 initial_lr = np.float64(1.0) for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecay( initial_lr, num_training_steps ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testWarmupDecay(self, serialize): warmup_steps = 2000 decay_steps = 1000 initial_lr = 0.0 target_lr = 10.0 for step in range(0, 3000, 250): lr = learning_rate_schedule.CosineDecay( initial_lr, decay_steps, warmup_target=target_lr, warmup_steps=warmup_steps, ) lr = _maybe_serialized(lr, serialize) if step < warmup_steps + 1: expected = self.linear_warmup( step, warmup_steps, initial_lr, target_lr ) else: expected = target_lr * self.np_cosine_decay( step - warmup_steps, decay_steps ) self.assertAllClose(self.evaluate(lr(step)), expected) @test_combinations.generate( test_combinations.combine(serialize=[False, True], mode=["graph", "eager"]) ) class CosineDecayRestartsTestV2(tf.test.TestCase, parameterized.TestCase): def np_cosine_decay_restarts( self, step, decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0 ): fac = 1.0 while step >= decay_steps: step -= decay_steps decay_steps *= t_mul fac *= m_mul completed_fraction = step / decay_steps decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction)) return (1.0 - alpha) * decay + alpha def testDecay(self, serialize): num_training_steps = 1000 initial_lr = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testFloat64InitLearningRate(self, serialize): num_training_steps = 1000 initial_lr = np.float64(1.0) for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts(step, num_training_steps) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testAlpha(self, serialize): num_training_steps = 1000 initial_lr = 1.0 alpha = 0.1 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps, alpha=alpha ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts( step, num_training_steps, alpha=alpha ) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testMMul(self, serialize): num_training_steps = 1000 initial_lr = 1.0 m_mul = 0.9 for 
step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps, m_mul=m_mul ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts( step, num_training_steps, m_mul=m_mul ) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) def testTMul(self, serialize): num_training_steps = 1000 initial_lr = 1.0 t_mul = 1.0 for step in range(0, 1500, 250): decayed_lr = learning_rate_schedule.CosineDecayRestarts( initial_lr, num_training_steps, t_mul=t_mul ) decayed_lr = _maybe_serialized(decayed_lr, serialize) expected = self.np_cosine_decay_restarts( step, num_training_steps, t_mul=t_mul ) self.assertAllClose(self.evaluate(decayed_lr(step)), expected, 1e-6) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/optimizers/schedules/learning_rate_schedule_test.py/0
{ "file_path": "tf-keras/tf_keras/optimizers/schedules/learning_rate_schedule_test.py", "repo_id": "tf-keras", "token_count": 9381 }
189
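# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the test-file record above):
# the two behaviors the tests above keep re-checking — the closed-form decay
# value at a given step, and the serialize()/deserialize() round trip used
# by the `serialize=True` parameterization. Assumes eager execution.
from tf_keras.optimizers.schedules import learning_rate_schedule

schedule = learning_rate_schedule.ExponentialDecay(
    initial_learning_rate=0.05, decay_steps=10, decay_rate=0.96
)
# Continuous (non-staircase) decay: lr(step) = 0.05 * 0.96 ** (step / 10)
assert abs(float(schedule(5)) - 0.05 * 0.96 ** 0.5) < 1e-6

restored = learning_rate_schedule.deserialize(
    learning_rate_schedule.serialize(schedule)
)
assert abs(float(restored(5)) - float(schedule(5))) < 1e-9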
# -*- coding: utf-8 -*- # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for text data preprocessing utils.""" import collections import numpy as np import tensorflow.compat.v2 as tf from tf_keras.preprocessing import text class TestText(tf.test.TestCase): def test_one_hot(self): sample_text = "The cat sat on the mat." encoded = text.one_hot(sample_text, 5) self.assertLen(encoded, 6) self.assertLessEqual(np.max(encoded), 4) self.assertGreaterEqual(np.min(encoded), 0) sample_text = "The-cat-sat-on-the-mat" encoded2 = text.one_hot( sample_text, 5, analyzer=lambda t: t.lower().split("-") ) self.assertEqual(encoded, encoded2) self.assertLen(encoded, 6) self.assertLessEqual(np.max(encoded), 4) self.assertGreaterEqual(np.min(encoded), 0) def test_hashing_trick_hash(self): sample_text = "The cat sat on the mat." encoded = text.hashing_trick(sample_text, 5) self.assertLen(encoded, 6) self.assertLessEqual(np.max(encoded), 4) self.assertGreaterEqual(np.min(encoded), 1) def test_hashing_trick_md5(self): sample_text = "The cat sat on the mat." encoded = text.hashing_trick(sample_text, 5, hash_function="md5") self.assertLen(encoded, 6) self.assertLessEqual(np.max(encoded), 4) self.assertGreaterEqual(np.min(encoded), 1) def test_tokenizer(self): sample_texts = [ "The cat sat on the mat.", "The dog sat on the log.", "Dogs and cats living together.", ] tokenizer = text.Tokenizer(num_words=10) tokenizer.fit_on_texts(sample_texts) sequences = [] for seq in tokenizer.texts_to_sequences_generator(sample_texts): sequences.append(seq) self.assertLess(np.max(np.max(np.asarray(sequences, dtype=object))), 10) self.assertEqual(np.min(np.min(np.asarray(sequences, dtype=object))), 1) tokenizer.fit_on_sequences(sequences) for mode in ["binary", "count", "tfidf", "freq"]: tokenizer.texts_to_matrix(sample_texts, mode) def test_tokenizer_serde_no_fitting(self): tokenizer = text.Tokenizer(num_words=100) tokenizer_json = tokenizer.to_json() recovered = text.tokenizer_from_json(tokenizer_json) self.assertEqual(tokenizer.get_config(), recovered.get_config()) self.assertEqual(tokenizer.word_docs, recovered.word_docs) self.assertEqual(tokenizer.word_counts, recovered.word_counts) self.assertEqual(tokenizer.word_index, recovered.word_index) self.assertEqual(tokenizer.index_word, recovered.index_word) self.assertEqual(tokenizer.index_docs, recovered.index_docs) def test_tokenizer_serde_fitting(self): sample_texts = [ "There was a time that the pieces fit, but I watched " "them fall away", "Mildewed and smoldering, strangled by our coveting", "I've done the math enough to know the dangers of our second " "guessing", ] tokenizer = text.Tokenizer(num_words=100) tokenizer.fit_on_texts(sample_texts) seq_generator = tokenizer.texts_to_sequences_generator(sample_texts) sequences = [seq for seq in seq_generator] tokenizer.fit_on_sequences(sequences) tokenizer_json = tokenizer.to_json() recovered = 
text.tokenizer_from_json(tokenizer_json) self.assertEqual(tokenizer.char_level, recovered.char_level) self.assertEqual(tokenizer.document_count, recovered.document_count) self.assertEqual(tokenizer.filters, recovered.filters) self.assertEqual(tokenizer.lower, recovered.lower) self.assertEqual(tokenizer.num_words, recovered.num_words) self.assertEqual(tokenizer.oov_token, recovered.oov_token) self.assertEqual(tokenizer.word_docs, recovered.word_docs) self.assertEqual(tokenizer.word_counts, recovered.word_counts) self.assertEqual(tokenizer.word_index, recovered.word_index) self.assertEqual(tokenizer.index_word, recovered.index_word) self.assertEqual(tokenizer.index_docs, recovered.index_docs) def test_sequential_fit(self): texts = [ "The cat sat on the mat.", "The dog sat on the log.", "Dogs and cats living together.", ] word_sequences = [ ["The", "cat", "is", "sitting"], ["The", "dog", "is", "standing"], ] tokenizer = text.Tokenizer() tokenizer.fit_on_texts(texts) tokenizer.fit_on_texts(word_sequences) self.assertEqual(tokenizer.document_count, 5) tokenizer.texts_to_matrix(texts) tokenizer.texts_to_matrix(word_sequences) def test_text_to_word_sequence(self): sample_text = "hello! ? world!" self.assertEqual( text.text_to_word_sequence(sample_text), ["hello", "world"] ) def test_text_to_word_sequence_multichar_split(self): sample_text = "hello!stop?world!" self.assertEqual( text.text_to_word_sequence(sample_text, split="stop"), ["hello", "world"], ) def test_text_to_word_sequence_unicode(self): sample_text = "ali! veli? kırk dokuz elli" self.assertEqual( text.text_to_word_sequence(sample_text), ["ali", "veli", "kırk", "dokuz", "elli"], ) def test_text_to_word_sequence_unicode_multichar_split(self): sample_text = "ali!stopveli?stopkırkstopdokuzstopelli" self.assertEqual( text.text_to_word_sequence(sample_text, split="stop"), ["ali", "veli", "kırk", "dokuz", "elli"], ) def test_tokenizer_unicode(self): sample_texts = [ "ali veli kırk dokuz elli", "ali veli kırk dokuz elli veli kırk dokuz", ] tokenizer = text.Tokenizer(num_words=5) tokenizer.fit_on_texts(sample_texts) self.assertLen(tokenizer.word_counts, 5) def test_tokenizer_oov_flag(self): """Test of Out of Vocabulary (OOV) flag in text.Tokenizer.""" x_train = ["This text has only known words"] x_test = ["This text has some unknown words"] # 2 OOVs: some, unknown # Default, without OOV flag tokenizer = text.Tokenizer() tokenizer.fit_on_texts(x_train) x_test_seq = tokenizer.texts_to_sequences(x_test) self.assertLen(x_test_seq[0], 4) # discards 2 OOVs # With OOV feature tokenizer = text.Tokenizer(oov_token="<unk>") tokenizer.fit_on_texts(x_train) x_test_seq = tokenizer.texts_to_sequences(x_test) self.assertLen(x_test_seq[0], 6) # OOVs marked in place def test_tokenizer_oov_flag_and_num_words(self): x_train = ["This text has only known words this text"] x_test = ["This text has some unknown words"] tokenizer = text.Tokenizer(num_words=3, oov_token="<unk>") tokenizer.fit_on_texts(x_train) x_test_seq = tokenizer.texts_to_sequences(x_test) trans_text = " ".join(tokenizer.index_word[t] for t in x_test_seq[0]) self.assertLen(x_test_seq[0], 6) self.assertEqual(trans_text, "this <unk> <unk> <unk> <unk> <unk>") def test_sequences_to_texts_with_num_words_and_oov_token(self): x_train = ["This text has only known words this text"] x_test = ["This text has some unknown words"] tokenizer = text.Tokenizer(num_words=3, oov_token="<unk>") tokenizer.fit_on_texts(x_train) x_test_seq = tokenizer.texts_to_sequences(x_test) trans_text = 
tokenizer.sequences_to_texts(x_test_seq) self.assertEqual(trans_text, ["this <unk> <unk> <unk> <unk> <unk>"]) def test_sequences_to_texts_no_num_words(self): x_train = ["This text has only known words this text"] x_test = ["This text has some unknown words"] tokenizer = text.Tokenizer(oov_token="<unk>") tokenizer.fit_on_texts(x_train) x_test_seq = tokenizer.texts_to_sequences(x_test) trans_text = tokenizer.sequences_to_texts(x_test_seq) self.assertEqual(trans_text, ["this text has <unk> <unk> words"]) def test_sequences_to_texts_no_oov_token(self): x_train = ["This text has only known words this text"] x_test = ["This text has some unknown words"] tokenizer = text.Tokenizer(num_words=3) tokenizer.fit_on_texts(x_train) x_test_seq = tokenizer.texts_to_sequences(x_test) trans_text = tokenizer.sequences_to_texts(x_test_seq) self.assertEqual(trans_text, ["this text"]) def test_sequences_to_texts_no_num_words_no_oov_token(self): x_train = ["This text has only known words this text"] x_test = ["This text has some unknown words"] tokenizer = text.Tokenizer() tokenizer.fit_on_texts(x_train) x_test_seq = tokenizer.texts_to_sequences(x_test) trans_text = tokenizer.sequences_to_texts(x_test_seq) self.assertEqual(trans_text, ["this text has words"]) def test_sequences_to_texts(self): texts = [ "The cat sat on the mat.", "The dog sat on the log.", "Dogs and cats living together.", ] tokenizer = text.Tokenizer(num_words=10, oov_token="<unk>") tokenizer.fit_on_texts(texts) tokenized_text = tokenizer.texts_to_sequences(texts) trans_text = tokenizer.sequences_to_texts(tokenized_text) self.assertEqual( trans_text, [ "the cat sat on the mat", "the dog sat on the log", "dogs <unk> <unk> <unk> <unk>", ], ) def test_tokenizer_lower_flag(self): """Tests for `lower` flag in text.Tokenizer.""" # word level tokenizer with sentences as texts word_tokenizer = text.Tokenizer(lower=True) texts = [ "The cat sat on the mat.", "The dog sat on the log.", "Dog and Cat living Together.", ] word_tokenizer.fit_on_texts(texts) expected_word_counts = collections.OrderedDict( [ ("the", 4), ("cat", 2), ("sat", 2), ("on", 2), ("mat", 1), ("dog", 2), ("log", 1), ("and", 1), ("living", 1), ("together", 1), ] ) self.assertEqual(word_tokenizer.word_counts, expected_word_counts) # word level tokenizer with word_sequences as texts word_tokenizer = text.Tokenizer(lower=True) word_sequences = [ ["The", "cat", "is", "sitting"], ["The", "dog", "is", "standing"], ] word_tokenizer.fit_on_texts(word_sequences) expected_word_counts = collections.OrderedDict( [ ("the", 2), ("cat", 1), ("is", 2), ("sitting", 1), ("dog", 1), ("standing", 1), ] ) self.assertEqual(word_tokenizer.word_counts, expected_word_counts) # char level tokenizer with sentences as texts char_tokenizer = text.Tokenizer(lower=True, char_level=True) texts = [ "The cat sat on the mat.", "The dog sat on the log.", "Dog and Cat living Together.", ] char_tokenizer.fit_on_texts(texts) expected_word_counts = collections.OrderedDict( [ ("t", 11), ("h", 5), ("e", 6), (" ", 14), ("c", 2), ("a", 6), ("s", 2), ("o", 6), ("n", 4), ("m", 1), (".", 3), ("d", 3), ("g", 5), ("l", 2), ("i", 2), ("v", 1), ("r", 1), ] ) self.assertEqual(char_tokenizer.word_counts, expected_word_counts) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/preprocessing/text_test.py/0
{ "file_path": "tf-keras/tf_keras/preprocessing/text_test.py", "repo_id": "tf-keras", "token_count": 6110 }
190
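# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the test-file record above):
# the fit/encode/decode round trip the OOV tests above exercise, plus the
# to_json()/tokenizer_from_json() state restoration.
from tf_keras.preprocessing import text

tokenizer = text.Tokenizer(oov_token="<unk>")
tokenizer.fit_on_texts(["This text has only known words"])

seqs = tokenizer.texts_to_sequences(["This text has some unknown words"])
print(tokenizer.sequences_to_texts(seqs))
# -> ["this text has <unk> <unk> words"], as asserted in
# test_sequences_to_texts_no_num_words above.

restored = text.tokenizer_from_json(tokenizer.to_json())
assert restored.word_index == tokenizer.word_index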
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ,============================================================================ """Tests for model saving in the HDF5 format.""" import os import shutil import uuid import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.engine import training from tf_keras.optimizers import optimizer_v1 from tf_keras.saving.legacy import hdf5_format from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils try: import h5py except ImportError: h5py = None @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class TestWeightSavingAndLoading(tf.test.TestCase, parameterized.TestCase): def _save_model_dir(self, dirname="saved_model"): temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) return os.path.join(temp_dir, dirname) @test_combinations.run_with_all_weight_formats def test_weight_loading(self): saved_model_dir = self._save_model_dir() save_format = test_utils.get_save_format() with self.cached_session(): a = keras.layers.Input(shape=(2,)) x = keras.layers.Dense(3)(a) b = keras.layers.Dense(1)(x) model = keras.models.Model(a, b) x = np.random.random((3, 2)) ref_y = model.predict(x) weights = model.get_weights() model.set_weights(weights) y = model.predict(x) self.assertAllClose(ref_y, y) with self.assertRaises(ValueError): model.set_weights(weights[1:]) with self.assertRaises(ValueError): model.set_weights(weights[::-1]) model.save_weights(saved_model_dir, save_format=save_format) model.load_weights(saved_model_dir) y = model.predict(x) self.assertAllClose(ref_y, y) def test_weight_preprocessing(self): input_dim = 3 output_dim = 3 size = 2 cases = [ [ (keras.layers.Bidirectional(keras.layers.SimpleRNN(2))), [np.random.random((2, 1)), np.random.random((2, 1))], (None, 3, 2), ], [ (keras.layers.TimeDistributed(keras.layers.Dense(1))), [np.random.random((2, 1)), np.random.random((1,))], (None, 3, 2), ], [ (keras.layers.Conv1D(output_dim, size, use_bias=False)), [np.random.random((output_dim, input_dim, size, 1))], (None, 4, input_dim), ], [ ( keras.layers.Conv2D( output_dim, size, use_bias=False, data_format="channels_first", ) ), [np.random.random((output_dim, input_dim, size, size))], (None, input_dim, 4, 4), ], [ ( keras.layers.Conv2DTranspose( output_dim, size, use_bias=False, data_format="channels_first", ) ), [np.random.random((output_dim, input_dim, size, size))], (None, input_dim, 4, 4), ], [ ( keras.layers.Conv2DTranspose( output_dim, size, use_bias=False, data_format="channels_last", ) ), [np.random.random((size, size, input_dim, output_dim))], (None, 4, 4, input_dim), ], [ ( keras.layers.Conv3D( output_dim, size, use_bias=False, data_format="channels_first", ) ), [np.random.random((output_dim, input_dim, size, size, size))], (None, input_dim, 4, 4, 4), ], [ (keras.layers.GRUV1(output_dim)), [ np.random.random((input_dim, 
output_dim)), np.random.random((output_dim, output_dim)), np.random.random((output_dim,)), np.random.random((input_dim, output_dim)), np.random.random((output_dim, output_dim)), np.random.random((output_dim,)), np.random.random((input_dim, output_dim)), np.random.random((output_dim, output_dim)), np.random.random((output_dim,)), ], (None, 4, input_dim), ], [ (keras.layers.LSTMV1(output_dim)), [ np.random.random((input_dim, output_dim)), np.random.random((output_dim, output_dim)), np.random.random((output_dim,)), np.random.random((input_dim, output_dim)), np.random.random((output_dim, output_dim)), np.random.random((output_dim,)), np.random.random((input_dim, output_dim)), np.random.random((output_dim, output_dim)), np.random.random((output_dim,)), np.random.random((input_dim, output_dim)), np.random.random((output_dim, output_dim)), np.random.random((output_dim,)), ], (None, 4, input_dim), ], ] for layer, weights, input_shape in cases: layer.build(input_shape) _ = hdf5_format.preprocess_weights_for_loading( layer, weights, original_keras_version="1" ) model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)]) _ = hdf5_format.preprocess_weights_for_loading( model, model.weights, original_keras_version="1" ) x = keras.Input((2,)) y = keras.layers.Dense(2)(x) model = keras.models.Model(x, y) _ = hdf5_format.preprocess_weights_for_loading( model, model.weights, original_keras_version="1" ) @parameterized.named_parameters( ("gru", keras.layers.GRU, {"units": 2, "input_shape": (3, 5)}), ( "gru_with_reset_after", keras.layers.GRU, {"units": 2, "input_shape": (3, 5), "reset_after": True}, ), ("lstm", keras.layers.LSTM, {"units": 2, "input_shape": (3, 5)}), ( "cudnngru", keras.layers.CuDNNGRU, {"units": 2, "input_shape": (3, 5)}, ), ( "cudnnlstm", keras.layers.CuDNNLSTM, {"units": 2, "input_shape": (3, 5)}, ), ) def test_preprocess_weights_for_loading_rnn_should_be_idempotent( self, layer_class, layer_args ): with self.cached_session(): layer = layer_class(**layer_args) layer.build(input_shape=layer_args.get("input_shape")) weights1 = layer.get_weights() weights2 = hdf5_format.preprocess_weights_for_loading( layer, weights1 ) _ = [ self.assertAllClose(x, y, rtol=1e-05) for (x, y) in zip(weights1, weights2) ] def test_sequential_weight_loading(self): if h5py is None: return h5_path = self._save_model_dir("test.h5") num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 with self.cached_session(): model = keras.models.Sequential() model.add(keras.layers.Dense(num_hidden, input_dim=input_dim)) model.add(keras.layers.Dense(num_classes)) x = np.random.random((batch_size, input_dim)) ref_y = model.predict(x) model.save_weights(h5_path) model = keras.models.Sequential() model.add(keras.layers.Dense(num_hidden, input_dim=input_dim)) model.add(keras.layers.Dense(num_classes)) model.load_weights(h5_path) y = model.predict(x) self.assertAllClose(y, ref_y) @test_combinations.run_with_all_saved_model_formats( exclude_formats=["tf_no_traces"] ) def test_nested_model_weight_loading(self): save_format = test_utils.get_save_format() saved_model_dir = self._save_model_dir() batch_size = 5 shape = (None, None, 3) with self.cached_session(): def gen_model(): def seq_model(): model = keras.models.Sequential( [ keras.layers.Conv2D(3, 1, input_shape=shape), keras.layers.BatchNormalization(), ] ) return model x = inner_inputs = keras.layers.Input((None, None, 3)) x = seq_model()(x) x = seq_model()(x) inner_model = keras.models.Model(inner_inputs, x) inputs = keras.layers.Input(shape) return 
keras.models.Model(inputs, inner_model(inputs)) model = gen_model() x = np.random.random((batch_size, 1, 1, 3)) ref_y = model.predict(x) model.save_weights(saved_model_dir, save_format=save_format) model = gen_model() model.load_weights(saved_model_dir) y = model.predict(x) self.assertAllClose(y, ref_y) def test_sequential_weight_loading_group_name_with_incorrect_length(self): if h5py is None: return h5_path = self._save_model_dir("test.h5") num_hidden = 5 input_dim = 3 num_classes = 2 with self.cached_session(): ref_model = keras.models.Sequential() ref_model.add( keras.layers.Dense(num_hidden, input_dim=input_dim, name="d1") ) ref_model.add(keras.layers.Dense(num_classes, name="d2")) ref_model.compile( loss=keras.losses.MSE, optimizer="rmsprop", metrics=[keras.metrics.categorical_accuracy], ) f_ref_model = h5py.File(h5_path, "w") hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model) f_model = h5py.File(h5_path, "r") model = keras.models.Sequential() model.add( keras.layers.Dense( num_hidden, use_bias=False, input_dim=input_dim, name="d1" ) ) model.add(keras.layers.Dense(num_classes, name="d2")) model.compile( loss=keras.losses.MSE, optimizer="rmsprop", metrics=[keras.metrics.categorical_accuracy], ) with self.assertRaises( ValueError, msg=( "Weight count mismatch for layer #0 (named d1). " "Layer expects 1 weight(s). Received 2 saved weight(s)" ), ): hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model) hdf5_format.load_weights_from_hdf5_group_by_name( f_model, model, skip_mismatch=True ) self.assertAllClose( keras.backend.get_value(ref_model.layers[1].kernel), keras.backend.get_value(model.layers[1].kernel), ) def test_sequential_weight_loading_group_name_with_incorrect_shape(self): if h5py is None: return h5_path = self._save_model_dir("test.h5") num_hidden = 5 input_dim = 3 num_classes = 2 with tf.Graph().as_default(), self.cached_session(): ref_model = keras.models.Sequential() ref_model.add( keras.layers.Dense(num_hidden, input_dim=input_dim, name="d1") ) ref_model.add(keras.layers.Dense(num_classes, name="d2")) ref_model.compile( loss=keras.losses.MSE, optimizer=optimizer_v1.RMSprop(lr=0.0001), metrics=[keras.metrics.categorical_accuracy], ) f_ref_model = h5py.File(h5_path, "w") keras.backend.set_value( ref_model.layers[1].bias, [3.5] * num_classes ) hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model) f_model = h5py.File(h5_path, "r") model = keras.models.Sequential() model.add( keras.layers.Dense( num_hidden + 5, input_dim=input_dim, name="d1" ) ) model.add(keras.layers.Dense(num_classes, name="d2")) model.compile( loss=keras.losses.MSE, optimizer=optimizer_v1.RMSprop(lr=0.0001), metrics=[keras.metrics.categorical_accuracy], ) with self.assertRaises( ValueError, msg=( "Shape mismatch in layer #0 (named d1) for weight " "d1_1/kernel:0. Weight expects shape (3, 10). 
" "Received saved weight with shape (3, 5)" ), ): hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model) hdf5_format.load_weights_from_hdf5_group_by_name( f_model, model, skip_mismatch=True ) self.assertAllClose( [3.5] * num_classes, keras.backend.get_value(model.layers[1].bias), ) @test_combinations.run_with_all_saved_model_formats( exclude_formats=["tf_no_traces"] ) @test_combinations.run_with_all_model_types def test_load_weights_from_saved_model(self): save_path = self._save_model_dir() save_format = test_utils.get_save_format() if save_format == "h5" and test_utils.get_model_type() == "subclass": # TODO(b/173646281): HDF5 format currently does not allow saving # subclassed models. return with self.cached_session(): model = test_utils.get_small_mlp(1, 4, input_dim=3) data = np.random.random((1, 3)) labels = np.random.random((1, 4)) model.compile(loss="mse", optimizer="rmsprop") model.fit(data, labels) model.save(save_path, save_format=save_format) new_model = test_utils.get_small_mlp(1, 4, input_dim=3) if test_utils.get_model_type() == "subclass": # Call on test data to build the model. new_model.predict(data) new_model.load_weights(save_path) self.assertAllClose(model.weights, new_model.weights) class SubclassedModel(training.Model): def __init__(self): super().__init__() self.x_layer = keras.layers.Dense(3) self.b_layer = keras.layers.Dense(1) def call(self, a): return self.b_layer(self.x_layer(a)) class TestWeightSavingAndLoadingTFFormat( tf.test.TestCase, parameterized.TestCase ): @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_tensorflow_format_overwrite(self): with self.cached_session() as session: model = SubclassedModel() temp_dir = self.get_temp_dir() prefix = os.path.join(temp_dir, "ckpt") x = tf.constant(np.random.random((3, 2)), dtype=tf.float32) executing_eagerly = tf.executing_eagerly() model(x) if not executing_eagerly: session.run([v.initializer for v in model.variables]) model.save_weights(prefix, save_format="tensorflow") model.save_weights(prefix, save_format="tensorflow", overwrite=True) with self.assertRaises(EOFError): # Indirectly tests that the user is prompted model.save_weights( prefix, save_format="tensorflow", overwrite=False ) def test_no_default_session(self): with tf.Graph().as_default(): self.assertFalse(tf.compat.v1.get_default_session()) data = np.random.random((1000, 32)).astype(np.float32) labels = np.random.random((1000, 10)).astype(np.float32) model = keras.models.Sequential( [ keras.layers.Dense(10, activation="softmax"), keras.layers.Dense(10, activation="softmax"), ] ) model.compile( optimizer=tf.compat.v1.train.RMSPropOptimizer(0.001), loss="categorical_crossentropy", metrics=["accuracy"], ) model.fit(data, labels) fname = os.path.join(self.get_temp_dir(), "weights", "ckpt") model.save_weights(fname) model.load_weights(fname) def test_no_graph_pollution(self): with tf.compat.v1.get_default_graph().as_default(): graph = tf.Graph() with graph.as_default(), self.session(graph) as session: model = SubclassedModel() temp_dir = self.get_temp_dir() prefix = os.path.join(temp_dir, "ckpt") x = tf.constant(np.random.random((3, 2)), dtype=tf.float32) model(x) session.run([v.initializer for v in model.variables]) model.save_weights(prefix, save_format="tensorflow") op_count = len(graph.get_operations()) model.save_weights(prefix, save_format="tensorflow") self.assertLen(graph.get_operations(), op_count) model.load_weights(prefix) op_count = len(graph.get_operations()) model.load_weights(prefix) 
self.assertLen(graph.get_operations(), op_count) def _weight_loading_test_template(self, make_model_fn): with self.cached_session(): model = make_model_fn() model.compile( loss="mse", optimizer=tf.compat.v1.train.RMSPropOptimizer(0.1), metrics=["acc", keras.metrics.CategoricalAccuracy()], ) temp_dir = self.get_temp_dir() prefix = os.path.join(temp_dir, "ckpt") train_x = np.random.random((3, 2)) train_y = np.random.random((3,)) x = tf.constant(train_x, dtype=tf.float32) model.train_on_batch(train_x, train_y) model.save_weights(prefix, save_format="tf") ref_y_before_train = model.predict(train_x) model.train_on_batch(train_x, train_y) ref_y_after_train = model.predict(train_x) for v in model.variables: self.evaluate(v.assign(tf.random.normal(shape=tf.shape(v)))) self.addCleanup(shutil.rmtree, temp_dir) model.load_weights(prefix) self.assertAllClose(ref_y_before_train, self.evaluate(model(x))) # Test restore-on-create if this is a subclassed Model (graph # Networks will have already created their variables). load_model = make_model_fn() load_model.load_weights(prefix) self.assertAllClose( ref_y_before_train, self.evaluate(load_model(x)) ) load_model = make_model_fn() load_model.load_weights(prefix) # We need to run some of the restore ops for predict(), but not all # variables have been created yet (optimizer slot variables). Tests # incremental restore. load_model.predict(train_x) load_model.compile( loss="mse", optimizer=tf.compat.v1.train.RMSPropOptimizer(0.1), metrics=["acc", keras.metrics.CategoricalAccuracy()], ) load_model.train_on_batch(train_x, train_y) self.assertAllClose(ref_y_after_train, self.evaluate(load_model(x))) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_weight_loading_graph_model(self): def _make_graph_model(): a = keras.layers.Input(shape=(2,)) x = keras.layers.Dense(3)(a) b = keras.layers.Dense(1)(x) return keras.models.Model(a, b) self._weight_loading_test_template(_make_graph_model) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_weight_loading_subclassed_model(self): self._weight_loading_test_template(SubclassedModel) def _new_layer_weight_loading_test_template( self, first_model_fn, second_model_fn ): with self.cached_session() as session: model = first_model_fn() temp_dir = self.get_temp_dir() prefix = os.path.join(temp_dir, "ckpt") x = tf.constant(np.random.random((3, 2)), dtype=tf.float32) executing_eagerly = tf.executing_eagerly() ref_y_tensor = model(x) if not executing_eagerly: session.run([v.initializer for v in model.variables]) ref_y = self.evaluate(ref_y_tensor) model.save_weights(prefix) self.assertEqual(prefix, tf.train.latest_checkpoint(temp_dir)) for v in model.variables: self.evaluate(v.assign(tf.random.normal(shape=tf.shape(v)))) self.addCleanup(shutil.rmtree, temp_dir) second_model = second_model_fn() status = second_model.load_weights(prefix) second_model(x) status.run_restore_ops() second_model.save_weights(prefix) # Check that the second model's checkpoint loads into the original # model status = model.load_weights(prefix) status.run_restore_ops(session) y = self.evaluate(model(x)) self.assertAllClose(ref_y, y) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_weight_loading_graph_model_added_layer(self): def _save_graph_model(): a = keras.layers.Input(shape=(2,)) x = keras.layers.Dense(3, name="first")(a) b = keras.layers.Dense(1, name="second")(x) return keras.models.Model(a, b) def _restore_graph_model(): a = 
keras.layers.Input(shape=(2,)) x = keras.layers.Dense(3, name="first")(a) y = keras.layers.Dense(1, name="second")(x) b = keras.layers.Dense(3, name="secondjr")(y) return keras.models.Model(a, b) self._new_layer_weight_loading_test_template( _save_graph_model, _restore_graph_model ) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_weight_loading_graph_model_added_no_weight_layer(self): def _save_graph_model(): a = keras.layers.Input(shape=(2,)) x = keras.layers.Dense(3, name="first")(a) b = keras.layers.Dense(1, name="second")(x) return keras.models.Model(a, b) def _restore_graph_model(): a = keras.layers.Input(shape=(2,)) x = keras.layers.Dense(3, name="first")(a) b = keras.layers.Dense(1, name="second")(x) y = keras.layers.Dropout(rate=0.1)(b) return keras.models.Model(a, y) self._new_layer_weight_loading_test_template( _save_graph_model, _restore_graph_model ) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_weight_loading_subclassed_model_added_layer(self): class SubclassedModelRestore(training.Model): def __init__(self): super().__init__() self.x_layer = keras.layers.Dense(3) self.y_layer = keras.layers.Dense(3) self.b_layer = keras.layers.Dense(1) def call(self, a): return self.b_layer(self.y_layer(self.x_layer(a))) self._new_layer_weight_loading_test_template( SubclassedModel, SubclassedModelRestore ) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_incompatible_checkpoint(self): save_path = tf.train.Checkpoint().save( os.path.join(self.get_temp_dir(), "ckpt") ) m = DummySubclassModel() with self.assertRaisesRegex(AssertionError, "Nothing to load"): m.load_weights(save_path) m.dense = keras.layers.Dense(2) m.dense(tf.constant([[1.0]])) with self.assertRaisesRegex( AssertionError, "Nothing except the root object matched" ): m.load_weights(save_path) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_directory_passed(self): with self.cached_session(): m = DummySubclassModel() v = m.add_weight(name="v", shape=[]) self.evaluate(v.assign(42.0)) prefix = os.path.join( self.get_temp_dir(), str(uuid.uuid4()), "ckpt/" ) m.save_weights(prefix) self.evaluate(v.assign(2.0)) m.load_weights(prefix) self.assertEqual(42.0, self.evaluate(v)) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_relative_path(self): with self.cached_session(): m = DummySubclassModel() v = m.add_weight(name="v", shape=[]) os.chdir(self.get_temp_dir()) prefix = "ackpt" self.evaluate(v.assign(42.0)) m.save_weights(prefix) self.assertTrue(tf.io.gfile.exists("ackpt.index")) self.evaluate(v.assign(1.0)) m.load_weights(prefix) self.assertEqual(42.0, self.evaluate(v)) prefix = "subdir/ackpt" self.evaluate(v.assign(43.0)) m.save_weights(prefix) self.assertTrue(tf.io.gfile.exists("subdir/ackpt.index")) self.evaluate(v.assign(2.0)) m.load_weights(prefix) self.assertEqual(43.0, self.evaluate(v)) prefix = "ackpt/" self.evaluate(v.assign(44.0)) m.save_weights(prefix) self.assertTrue(tf.io.gfile.exists("ackpt/.index")) self.evaluate(v.assign(3.0)) m.load_weights(prefix) self.assertEqual(44.0, self.evaluate(v)) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_nonexistent_prefix_directory(self): with self.cached_session(): m = DummySubclassModel() v = m.add_weight(name="v", shape=[]) self.evaluate(v.assign(42.0)) prefix = os.path.join( self.get_temp_dir(), str(uuid.uuid4()), "bckpt" ) 
m.save_weights(prefix) self.evaluate(v.assign(2.0)) m.load_weights(prefix) self.assertEqual(42.0, self.evaluate(v)) class DummySubclassModel(training.Model): pass if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/saving/legacy/save_weights_test.py/0
{ "file_path": "tf-keras/tf_keras/saving/legacy/save_weights_test.py", "repo_id": "tf-keras", "token_count": 15238 }
191
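For readers skimming the shape-mismatch test above: a minimal, self-contained sketch of the `skip_mismatch` behavior it exercises, using the public `load_weights` API (the layer name, shapes, and temp path below are illustrative assumptions, not taken from the test):

import tf_keras as keras

# Source model: Dense kernel of shape (3, 5), saved in HDF5 format.
src = keras.Sequential([keras.layers.Dense(5, input_shape=(3,), name="d1")])
src.save_weights("/tmp/mismatch_demo.h5")

# Target model: same layer name, incompatible kernel shape (3, 7).
dst = keras.Sequential([keras.layers.Dense(7, input_shape=(3,), name="d1")])

# Without skip_mismatch this raises a ValueError about the shape
# mismatch; with skip_mismatch=True the mismatched layer is skipped with
# a warning and keeps its randomly initialized weights.
dst.load_weights("/tmp/mismatch_demo.h5", by_name=True, skip_mismatch=True)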
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests reviving models from config and SavedModel.

These tests ensure that a model revived from a combination of config and
SavedModel has the expected structure.
"""
# TODO(kathywu): Move relevant tests from saved_model_test to

import shutil

import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized

import tf_keras as keras
from tf_keras import backend
from tf_keras.saving.legacy.saved_model import load as keras_load
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import CustomObjectScope


class SubclassedModelNoConfig(keras.Model):
    def __init__(self, a, b):
        super().__init__()

        self.a = a
        self.b = b
        self.shared = CustomLayerNoConfig(a, b)
        self.all_layers = []

    def build(self, input_shape):
        self.all_layers.extend(
            [
                self.shared,
                CustomLayerWithConfig(self.a + 1, self.b + 2),
                CustomLayerNoConfig(self.a + 3, self.b + 4),
                keras.Sequential(
                    [
                        # TODO(b/145029112): Bug with losses when there are
                        # shared layers. self.shared,  <-- Enable when bug is
                        # fixed.
                        CustomLayerNoConfig(self.a + 5, self.b + 6)
                    ]
                ),
            ]
        )
        super().build(input_shape)

    def call(self, inputs):
        x = inputs
        for layer in self.all_layers:
            x = layer(x)
        return x


class SparseDense(keras.layers.Dense):
    def call(self, inputs):
        input_shape = tf.stack(
            (tf.reduce_prod(tf.shape(inputs)[:-1]), self.kernel.shape[0])
        )
        output_shape = tf.concat(
            (tf.shape(inputs)[:-1], [self.kernel.shape[1]]), -1
        )
        x = tf.sparse.reshape(inputs, input_shape)
        return tf.reshape(
            self.activation(
                tf.sparse.sparse_dense_matmul(x, self.kernel) + self.bias
            ),
            output_shape,
        )


class SubclassedSparseModelNoConfig(keras.Model):
    def __init__(self, a, b):
        super().__init__()
        self.a = a
        self.shared = CustomLayerNoConfig(a, b)
        self.all_layers = [SparseDense(4)]

    def call(self, inputs):
        x = inputs
        for layer in self.all_layers:
            x = layer(x)
        return self.shared(x + self.a)


class SubclassedModelWithConfig(SubclassedModelNoConfig):
    def get_config(self):
        return {"a": self.a, "b": self.b}

    @classmethod
    def from_config(cls, config):
        return cls(**config)


class CustomLayerNoConfig(keras.layers.Layer):
    def __init__(self, a, b, name=None):
        super().__init__(name=name)
        self.a = tf.Variable(a, name="a")
        self.b = b

        def a_regularizer():
            return self.a * 2

        self.add_loss(a_regularizer)
        self.sum_metric = keras.metrics.Sum(name="inputs_sum")
        self.unused_metric = keras.metrics.Sum(name="not_added_to_metrics")

    def build(self, input_shape):
        self.c = tf.Variable(
            tf.constant(1.0, shape=input_shape[1:]), name=self.name + "_c"
        )

    def call(self, inputs):
        self.add_loss(tf.reduce_sum(inputs))
        self.add_metric(self.sum_metric(inputs))
        self.add_metric(inputs, aggregation="mean", name="mean")

        return inputs + self.c


class CustomLayerWithConfig(CustomLayerNoConfig):
    def get_config(self):
        return {"a": backend.get_value(self.a), "b": self.b,
"name": self.name} class CustomNetworkDefaultConfig(keras.Model): def __init__(self, num_classes, name=None): inputs = keras.Input((2, 3), name="inputs") x = keras.layers.Flatten(name="flatten")(inputs) y = keras.layers.Dense(num_classes, name="outputs")(x) super().__init__(inputs, y, name=name) class CustomNetworkWithConfig(CustomNetworkDefaultConfig): def __init__(self, num_classes, name=None): super().__init__(num_classes, name=name) self._config_dict = dict(num_classes=num_classes) def get_config(self): return self._config_dict @classmethod def from_config(cls, config): return cls(config["num_classes"], name=config.get("name")) class CustomNetworkWithConfigName(CustomNetworkWithConfig): def __init__(self, num_classes, name=None): super().__init__(num_classes, name=name) self._config_dict["name"] = self.name class UnregisteredCustomSequentialModel(keras.Sequential): # This class is *not* registered in the CustomObjectScope. def __init__(self, **kwargs): super().__init__(**kwargs) self.add(keras.layers.InputLayer(input_shape=(2, 3))) class FunctionalSubclassModel(keras.Model): def __init__(self, units): self.units = units my_input = keras.Input(shape=(2, 3), name="inputs") dense = keras.layers.Dense(self.units, activation="relu", name="dense") output = dense(my_input) outputs = {"output": output} super().__init__(inputs=[my_input], outputs=outputs) def get_config(self): return {"units": self.units} class FunctionalSubclassModelWrongConfig(FunctionalSubclassModel): def get_config(self): return {} # The WideDeepModel, whose name conflicts with a TF-Keras built-in model, is # registered in these tests. class WideDeepModel(SubclassedModelWithConfig): pass class ReviveTestBase(test_combinations.TestCase): def setUp(self): super().setUp() self.path = self.get_temp_dir() self.addCleanup(shutil.rmtree, self.path, ignore_errors=True) def _assert_revived_correctness(self, model, revived): self.assertAllEqual(model.input_names, revived.input_names) self.assertAllEqual(model.output_names, revived.output_names) if model.inputs is not None: self.assertTrue( all( [ i.shape.as_list() == r.shape.as_list() and i.dtype == r.dtype for (i, r) in zip(model.inputs, revived.inputs) ] ) ) self.assertTrue( all( [ i.shape.as_list() == r.shape.as_list() and i.dtype == r.dtype for (i, r) in zip(model.outputs, revived.outputs) ] ) ) self.assertAllClose( self.evaluate(model.weights), self.evaluate(revived.weights) ) input_arr = tf.constant(np.random.random((2, 2, 3)).astype(np.float32)) if isinstance(revived.save_spec()[0][0], tf.SparseTensorSpec): input_arr = tf.sparse.from_dense(input_arr) self.assertAllClose(model(input_arr), revived(input_arr)) self.assertAllClose(sum(model.losses), sum(revived.losses)) self.assertAllClose(len(model.losses), len(revived.losses)) self.assertEqual(len(model.metrics), len(revived.metrics)) # TODO(b/150403085): Investigate why the metric order changes when # running this test in tf-nightly. 
        self.assertAllClose(
            sorted([m.result() for m in model.metrics]),
            sorted([m.result() for m in revived.metrics]),
        )
        model_layers = {layer.name: layer for layer in model.layers}
        revived_layers = {layer.name: layer for layer in revived.layers}
        self.assertAllEqual(model_layers.keys(), revived_layers.keys())

        for name in model_layers:
            model_layer = model_layers[name]
            revived_layer = revived_layers[name]
            self.assertEqual(model_layer.name, revived_layer.name)
            self.assertEqual(model_layer.dtype, revived_layer.dtype)
            self.assertEqual(model_layer.trainable, revived_layer.trainable)
            if "WithConfig" in type(model_layer).__name__:
                self.assertEqual(type(model_layer), type(revived_layer))
            else:
                # When loading layers from SavedModel, a new class is
                # dynamically created with the same name.
                self.assertEqual(
                    type(model_layer).__name__, type(revived_layer).__name__
                )


# These tests take a while to run, so each should run in a separate shard
# (splitting them into separate TestCases accomplishes this).
class TestBigModelRevive(ReviveTestBase):
    @test_combinations.run_with_all_model_types
    def test_revive(self):
        input_shape = None
        if test_utils.get_model_type() == "functional":
            input_shape = (2, 3)

        layer_with_config = CustomLayerWithConfig(1.0, 2)
        layer_without_config = CustomLayerNoConfig(3.0, 4)
        subclassed_with_config = SubclassedModelWithConfig(4.0, 6.0)
        subclassed_without_config = SubclassedModelNoConfig(7.0, 8.0)

        inputs = keras.Input((2, 3))
        x = CustomLayerWithConfig(1.0, 2)(inputs)
        x = CustomLayerNoConfig(3.0, 4)(x)
        x = SubclassedModelWithConfig(4.0, 6.0)(x)
        x = SubclassedModelNoConfig(7.0, 8.0)(x)
        inner_model_functional = keras.Model(inputs, x)

        inner_model_sequential = keras.Sequential(
            [
                CustomLayerWithConfig(1.0, 2),
                CustomLayerNoConfig(3.0, 4),
                SubclassedModelWithConfig(4.0, 6.0),
                SubclassedModelNoConfig(7.0, 8.0),
            ]
        )

        class SubclassedModel(keras.Model):
            def __init__(self):
                super().__init__()
                self.all_layers = [
                    CustomLayerWithConfig(1.0, 2),
                    CustomLayerNoConfig(3.0, 4),
                    SubclassedModelWithConfig(4.0, 6.0),
                    SubclassedModelNoConfig(7.0, 8.0),
                ]

            def call(self, inputs):
                x = inputs
                for layer in self.all_layers:
                    x = layer(x)
                return x

        inner_model_subclassed = SubclassedModel()

        layers = [
            layer_with_config,
            layer_without_config,
            subclassed_with_config,
            subclassed_without_config,
            inner_model_functional,
            inner_model_sequential,
            inner_model_subclassed,
        ]
        model = test_utils.get_model_from_layers(
            layers, input_shape=input_shape
        )
        # Run data through the Model to create save spec and weights.
        model.predict(np.ones((10, 2, 3)), batch_size=10)

        # Test that the correct checkpointed values are loaded, whether the
        # layer is created from the config or SavedModel.
        layer_with_config.c.assign(2 * layer_with_config.c)
        layer_without_config.c.assign(3 * layer_without_config.c)

        model.save(self.path, save_format="tf")
        revived = keras_load.load(self.path)
        self._assert_revived_correctness(model, revived)


class TestModelRevive(ReviveTestBase):
    def test_revive_subclassed_with_nested_model(self):
        model = SubclassedModelNoConfig(1.0, 2.0)

        # Run data through the Model to create save spec and weights.
        model.predict(np.ones((10, 2, 3)), batch_size=10)
        model.save(self.path, save_format="tf")
        revived = keras_load.load(self.path)
        self._assert_revived_correctness(model, revived)

    def test_revive_subclassed_with_sparse_model(self):
        model = SubclassedSparseModelNoConfig(1.0, 2.0)

        # Run data through the Model to create save spec and weights.
x = tf.sparse.from_dense(np.ones((10, 2, 3), dtype=np.float32)) model.predict(x, batch_size=10) model.save(self.path, save_format="tf") revived = keras_load.load(self.path) self._assert_revived_correctness(model, revived) def test_revive_unregistered_sequential(self): model = UnregisteredCustomSequentialModel() x = np.random.random((2, 2, 3)).astype(np.float32) model(x) model.save(self.path, save_format="tf") revived = keras_load.load(self.path) self._assert_revived_correctness(model, revived) def test_revive_sequential_inputs(self): model = keras.models.Sequential( [ keras.Input((None,), dtype=tf.string), keras.layers.Lambda(tf.strings.lower), ] ) model.save(self.path, save_format="tf") revived = keras_load.load(self.path) revived_layers = list( revived._flatten_layers(include_self=False, recursive=False) ) self.assertEqual(tf.string, revived_layers[0].dtype) @parameterized.named_parameters( ("default_config", CustomNetworkDefaultConfig), ("with_config", CustomNetworkWithConfig), ("with_config_name", CustomNetworkWithConfigName), ) def test_revive_network(self, model_cls): model = model_cls(8) model.save(self.path, include_optimizer=False, save_format="tf") revived = keras_load.load(self.path, compile=False) self._assert_revived_correctness(model, revived) def test_functional_subclass(self): model = FunctionalSubclassModel(32) model.save(self.path, save_format="tf") revived = keras_load.load(self.path, compile=False) self._assert_revived_correctness(model, revived) def test_functional_subclass_wrong_config(self): model = FunctionalSubclassModelWrongConfig(32) model.save(self.path, save_format="tf") with self.assertRaisesRegex(TypeError, "required positional arguments"): keras_load.load(self.path, compile=False) def test_load_compiled_metrics(self): model = test_utils.get_small_sequential_mlp(1, 3) # Compile with dense categorical accuracy model.compile("rmsprop", "mse", "acc") x = np.random.random((5, 10)).astype(np.float32) y_true = np.random.random((5, 3)).astype(np.float32) model.train_on_batch(x, y_true) model.save(self.path, include_optimizer=True, save_format="tf") revived = keras_load.load(self.path, compile=True) self.assertAllClose( model.test_on_batch(x, y_true), revived.test_on_batch(x, y_true) ) # Compile with sparse categorical accuracy model.compile("rmsprop", "mse", "acc") y_true = np.random.randint(0, 3, (5, 1)).astype(np.float32) model.train_on_batch(x, y_true) model.save(self.path, include_optimizer=True, save_format="tf") revived = keras_load.load(self.path, compile=True) self.assertAllClose( model.test_on_batch(x, y_true), revived.test_on_batch(x, y_true) ) def test_revived_model_has_save_spec(self): model = SubclassedModelWithConfig(2, 3) model.predict(np.random.random((5, 10)).astype(np.float32)) model.save(self.path, save_format="tf") revived = keras_load.load(self.path, compile=True) self.assertAllEqual( model._get_save_spec(dynamic_batch=False), revived._get_save_spec(dynamic_batch=False), ) def test_load_model_with_name_conflict_registered_works(self): model = WideDeepModel(2, 3) model(np.random.random((5, 10)).astype(np.float32)) model.save(self.path, save_format="tf") keras_load.load(self.path, compile=True) if __name__ == "__main__": tf.compat.v1.enable_eager_execution() with CustomObjectScope( { "CustomLayerWithConfig": CustomLayerWithConfig, "CustomNetworkWithConfig": CustomNetworkWithConfig, "CustomNetworkWithConfigName": CustomNetworkWithConfigName, "SubclassedModelWithConfig": SubclassedModelWithConfig, "FunctionalSubclassModel": FunctionalSubclassModel, 
"FunctionalSubclassModelWrongConfig": FunctionalSubclassModelWrongConfig, # noqa: E501 "WideDeepModel": WideDeepModel, } ): tf.test.main()
tf-keras/tf_keras/saving/legacy/saved_model/revive_test.py/0
{ "file_path": "tf-keras/tf_keras/saving/legacy/saved_model/revive_test.py", "repo_id": "tf-keras", "token_count": 7682 }
192
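A minimal sketch of the registration-plus-config pattern the revive tests above rely on, which lets a reloaded model come back as its real class rather than a dynamically generated stand-in (the class name, package name, and save path are illustrative assumptions):

import tf_keras as keras


@keras.saving.register_keras_serializable(package="demo")
class ScaleLayer(keras.layers.Layer):
    """Multiplies inputs by a fixed factor."""

    def __init__(self, factor, **kwargs):
        super().__init__(**kwargs)
        self.factor = factor

    def call(self, inputs):
        return inputs * self.factor

    def get_config(self):
        config = super().get_config()
        config.update({"factor": self.factor})
        return config


inputs = keras.Input((4,))
model = keras.Model(inputs, ScaleLayer(2.0)(inputs))
model.save("/tmp/revive_demo", save_format="tf")

# Because ScaleLayer is registered and round-trips through get_config(),
# the reloaded layer is an actual ScaleLayer instance.
revived = keras.models.load_model("/tmp/revive_demo")
assert isinstance(revived.layers[-1], ScaleLayer)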
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Object config serialization and deserialization logic.""" import importlib import inspect import threading import types import warnings import numpy as np import tensorflow.compat.v2 as tf from tf_keras.saving import object_registration from tf_keras.saving.legacy import serialization as legacy_serialization from tf_keras.saving.legacy.saved_model.utils import in_tf_saved_model_scope from tf_keras.utils import generic_utils # isort: off from tensorflow.python.util import tf_export from tensorflow.python.util.tf_export import keras_export PLAIN_TYPES = (str, int, float, bool) SHARED_OBJECTS = threading.local() SAFE_MODE = threading.local() # TODO(nkovela): Debug serialization of decorated functions inside lambdas # to allow for serialization of custom_gradient. NON_SERIALIZABLE_CLASS_MODULES = ("tensorflow.python.ops.custom_gradient",) # List of TF-Keras modules with built-in string representations for defaults BUILTIN_MODULES = ( "activations", "constraints", "initializers", "losses", "metrics", "optimizers", "regularizers", ) class Config: def __init__(self, **config): self.config = config def serialize(self): return serialize_keras_object(self.config) class SafeModeScope: """Scope to propagate safe mode flag to nested deserialization calls.""" def __init__(self, safe_mode=True): self.safe_mode = safe_mode def __enter__(self): self.original_value = in_safe_mode() SAFE_MODE.safe_mode = self.safe_mode def __exit__(self, *args, **kwargs): SAFE_MODE.safe_mode = self.original_value @keras_export("keras.__internal__.enable_unsafe_deserialization") def enable_unsafe_deserialization(): """Disables safe mode globally, allowing deserialization of lambdas.""" SAFE_MODE.safe_mode = False def in_safe_mode(): return getattr(SAFE_MODE, "safe_mode", None) class ObjectSharingScope: """Scope to enable detection and reuse of previously seen objects.""" def __enter__(self): SHARED_OBJECTS.enabled = True SHARED_OBJECTS.id_to_obj_map = {} SHARED_OBJECTS.id_to_config_map = {} def __exit__(self, *args, **kwargs): SHARED_OBJECTS.enabled = False SHARED_OBJECTS.id_to_obj_map = {} SHARED_OBJECTS.id_to_config_map = {} def get_shared_object(obj_id): """Retrieve an object previously seen during deserialization.""" if getattr(SHARED_OBJECTS, "enabled", False): return SHARED_OBJECTS.id_to_obj_map.get(obj_id, None) def record_object_after_serialization(obj, config): """Call after serializing an object, to keep track of its config.""" if config["module"] == "__main__": config["module"] = None # Ensures module is None when no module found if not getattr(SHARED_OBJECTS, "enabled", False): return # Not in a sharing scope obj_id = int(id(obj)) if obj_id not in SHARED_OBJECTS.id_to_config_map: SHARED_OBJECTS.id_to_config_map[obj_id] = config else: config["shared_object_id"] = obj_id prev_config = SHARED_OBJECTS.id_to_config_map[obj_id] 
prev_config["shared_object_id"] = obj_id def record_object_after_deserialization(obj, obj_id): """Call after deserializing an object, to keep track of it in the future.""" if not getattr(SHARED_OBJECTS, "enabled", False): return # Not in a sharing scope SHARED_OBJECTS.id_to_obj_map[obj_id] = obj @keras_export( "keras.saving.serialize_keras_object", "keras.utils.serialize_keras_object" ) def serialize_keras_object(obj): """Retrieve the config dict by serializing the TF-Keras object. `serialize_keras_object()` serializes a TF-Keras object to a python dictionary that represents the object, and is a reciprocal function of `deserialize_keras_object()`. See `deserialize_keras_object()` for more information about the config format. Args: obj: the TF-Keras object to serialize. Returns: A python dict that represents the object. The python dict can be deserialized via `deserialize_keras_object()`. """ # Fall back to legacy serialization for all TF1 users or if # wrapped by in_tf_saved_model_scope() to explicitly use legacy # saved_model logic. if not tf.__internal__.tf2.enabled() or in_tf_saved_model_scope(): return legacy_serialization.serialize_keras_object(obj) if obj is None: return obj if isinstance(obj, PLAIN_TYPES): return obj if isinstance(obj, (list, tuple)): config_arr = [serialize_keras_object(x) for x in obj] return tuple(config_arr) if isinstance(obj, tuple) else config_arr if isinstance(obj, dict): return serialize_dict(obj) # Special cases: if isinstance(obj, bytes): return { "class_name": "__bytes__", "config": {"value": obj.decode("utf-8")}, } if isinstance(obj, tf.TensorShape): return obj.as_list() if obj._dims is not None else None if isinstance(obj, tf.Tensor): return { "class_name": "__tensor__", "config": { "value": obj.numpy().tolist(), "dtype": obj.dtype.name, }, } if type(obj).__module__ == np.__name__: if isinstance(obj, np.ndarray) and obj.ndim > 0: return { "class_name": "__numpy__", "config": { "value": obj.tolist(), "dtype": obj.dtype.name, }, } else: # Treat numpy floats / etc as plain types. return obj.item() if isinstance(obj, tf.DType): return obj.name if isinstance(obj, tf.compat.v1.Dimension): return obj.value if isinstance(obj, types.FunctionType) and obj.__name__ == "<lambda>": warnings.warn( "The object being serialized includes a `lambda`. This is unsafe. " "In order to reload the object, you will have to pass " "`safe_mode=False` to the loading function. " "Please avoid using `lambda` in the " "future, and use named Python functions instead. 
" f"This is the `lambda` being serialized: {inspect.getsource(obj)}", stacklevel=2, ) return { "class_name": "__lambda__", "config": { "value": generic_utils.func_dump(obj), }, } if isinstance(obj, tf.TypeSpec): ts_config = obj._serialize() # TensorShape and tf.DType conversion ts_config = list( map( lambda x: x.as_list() if isinstance(x, tf.TensorShape) else (x.name if isinstance(x, tf.DType) else x), ts_config, ) ) spec_name = obj.__class__.__name__ registered_name = None if hasattr(obj, "_tf_extension_type_fields"): # Special casing for ExtensionType ts_config = tf.experimental.extension_type.as_dict(obj) ts_config = serialize_dict(ts_config) registered_name = object_registration.get_registered_name( obj.__class__ ) return { "class_name": "__typespec__", "spec_name": spec_name, "module": obj.__class__.__module__, "config": ts_config, "registered_name": registered_name, } inner_config = _get_class_or_fn_config(obj) config_with_public_class = serialize_with_public_class( obj.__class__, inner_config ) # TODO(nkovela): Add TF ops dispatch handler serialization for # ops.EagerTensor that contains nested numpy array. # Target: NetworkConstructionTest.test_constant_initializer_with_numpy if isinstance(inner_config, str) and inner_config == "op_dispatch_handler": return obj if config_with_public_class is not None: # Special case for non-serializable class modules if any( mod in config_with_public_class["module"] for mod in NON_SERIALIZABLE_CLASS_MODULES ): return obj get_build_and_compile_config(obj, config_with_public_class) record_object_after_serialization(obj, config_with_public_class) return config_with_public_class # Any custom object or otherwise non-exported object if isinstance(obj, types.FunctionType): module = obj.__module__ else: module = obj.__class__.__module__ class_name = obj.__class__.__name__ if module == "builtins": registered_name = None else: if isinstance(obj, types.FunctionType): registered_name = object_registration.get_registered_name(obj) else: registered_name = object_registration.get_registered_name( obj.__class__ ) config = { "module": module, "class_name": class_name, "config": inner_config, "registered_name": registered_name, } get_build_and_compile_config(obj, config) record_object_after_serialization(obj, config) return config def get_build_and_compile_config(obj, config): if hasattr(obj, "get_build_config"): build_config = obj.get_build_config() if build_config is not None: config["build_config"] = serialize_dict(build_config) if hasattr(obj, "get_compile_config"): compile_config = obj.get_compile_config() if compile_config is not None: config["compile_config"] = serialize_dict(compile_config) return def serialize_with_public_class(cls, inner_config=None): """Serializes classes from public TF-Keras API or object registration. Called to check and retrieve the config of any class that has a public TF-Keras API or has been registered as serializable via `keras.saving.register_keras_serializable()`. """ # This gets the `keras.*` exported name, such as "keras.optimizers.Adam". 
keras_api_name = tf_export.get_canonical_name_for_symbol( cls, api_name="keras" ) # Case of custom or unknown class object if keras_api_name is None: registered_name = object_registration.get_registered_name(cls) if registered_name is None: return None # Return custom object config with corresponding registration name return { "module": cls.__module__, "class_name": cls.__name__, "config": inner_config, "registered_name": registered_name, } # Split canonical TF-Keras API name into a TF-Keras module and class name. parts = keras_api_name.split(".") return { "module": ".".join(parts[:-1]), "class_name": parts[-1], "config": inner_config, "registered_name": None, } def serialize_with_public_fn(fn, config, fn_module_name=None): """Serializes functions from public TF-Keras API or object registration. Called to check and retrieve the config of any function that has a public TF-Keras API or has been registered as serializable via `keras.saving.register_keras_serializable()`. If function's module name is already known, returns corresponding config. """ if fn_module_name: return { "module": fn_module_name, "class_name": "function", "config": config, "registered_name": config, } keras_api_name = tf_export.get_canonical_name_for_symbol( fn, api_name="keras" ) if keras_api_name: parts = keras_api_name.split(".") return { "module": ".".join(parts[:-1]), "class_name": "function", "config": config, "registered_name": config, } else: registered_name = object_registration.get_registered_name(fn) if not registered_name and not fn.__module__ == "builtins": return None return { "module": fn.__module__, "class_name": "function", "config": config, "registered_name": registered_name, } def _get_class_or_fn_config(obj): """Return the object's config depending on its type.""" # Functions / lambdas: if isinstance(obj, types.FunctionType): return obj.__name__ # All classes: if hasattr(obj, "get_config"): config = obj.get_config() if not isinstance(config, dict): raise TypeError( f"The `get_config()` method of {obj} should return " f"a dict. It returned: {config}" ) return serialize_dict(config) elif hasattr(obj, "__name__"): return object_registration.get_registered_name(obj) else: raise TypeError( f"Cannot serialize object {obj} of type {type(obj)}. " "To be serializable, " "a class must implement the `get_config()` method." ) def serialize_dict(obj): return {key: serialize_keras_object(value) for key, value in obj.items()} @keras_export( "keras.saving.deserialize_keras_object", "keras.utils.deserialize_keras_object", ) def deserialize_keras_object( config, custom_objects=None, safe_mode=True, **kwargs ): """Retrieve the object by deserializing the config dict. The config dict is a Python dictionary that consists of a set of key-value pairs, and represents a TF-Keras object, such as an `Optimizer`, `Layer`, `Metrics`, etc. The saving and loading library uses the following keys to record information of a TF-Keras object: - `class_name`: String. This is the name of the class, as exactly defined in the source code, such as "LossesContainer". - `config`: Dict. Library-defined or user-defined key-value pairs that store the configuration of the object, as obtained by `object.get_config()`. - `module`: String. The path of the python module, such as "keras.engine.compile_utils". Built-in TF-Keras classes expect to have prefix `keras`. - `registered_name`: String. The key the class is registered under via `keras.saving.register_keras_serializable(package, name)` API. 
      The key has the format of '{package}>{name}', where `package` and
      `name` are the arguments passed to `register_keras_serializable()`. If
      `name` is not provided, it uses the class name. If `registered_name`
      successfully resolves to a class (that was registered), the
      `class_name` and `config` values in the dict will not be used.
      `registered_name` is only used for non-built-in classes.

    For example, the following dictionary represents the built-in Adam
    optimizer with the relevant config:

    ```python
    dict_structure = {
        "class_name": "Adam",
        "config": {
            "amsgrad": false,
            "beta_1": 0.8999999761581421,
            "beta_2": 0.9990000128746033,
            "decay": 0.0,
            "epsilon": 1e-07,
            "learning_rate": 0.0010000000474974513,
            "name": "Adam"
        },
        "module": "keras.optimizers",
        "registered_name": None
    }
    # Returns an `Adam` instance identical to the original one.
    deserialize_keras_object(dict_structure)
    ```

    If the class does not have an exported TF-Keras namespace, the library
    tracks it by its `module` and `class_name`. For example:

    ```python
    dict_structure = {
        "class_name": "LossesContainer",
        "config": {
            "losses": [...],
            "total_loss_mean": {...},
        },
        "module": "keras.engine.compile_utils",
        "registered_name": "LossesContainer"
    }

    # Returns a `LossesContainer` instance identical to the original one.
    deserialize_keras_object(dict_structure)
    ```

    And the following dictionary represents a user-customized
    `MeanSquaredError` loss:

    ```python
    @keras.saving.register_keras_serializable(package='my_package')
    class ModifiedMeanSquaredError(keras.losses.MeanSquaredError):
        ...

    dict_structure = {
        "class_name": "ModifiedMeanSquaredError",
        "config": {
            "fn": "mean_squared_error",
            "name": "mean_squared_error",
            "reduction": "auto"
        },
        "registered_name": "my_package>ModifiedMeanSquaredError"
    }
    # Returns the `ModifiedMeanSquaredError` object
    deserialize_keras_object(dict_structure)
    ```

    Args:
        config: Python dict describing the object.
        custom_objects: Python dict containing a mapping between custom
            object names and the corresponding classes or functions.
        safe_mode: Boolean, whether to disallow unsafe `lambda`
            deserialization. When `safe_mode=False`, loading an object has
            the potential to trigger arbitrary code execution. This argument
            is only applicable to the TF-Keras v3 model format. Defaults to
            `True`.

    Returns:
        The object described by the `config` dictionary.
    """
    safe_scope_arg = in_safe_mode()  # Enforces SafeModeScope
    safe_mode = safe_scope_arg if safe_scope_arg is not None else safe_mode

    module_objects = kwargs.pop("module_objects", None)
    custom_objects = custom_objects or {}
    tlco = object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__
    gco = object_registration._GLOBAL_CUSTOM_OBJECTS
    custom_objects = {**custom_objects, **tlco, **gco}

    # Optional deprecated argument for legacy deserialization call
    printable_module_name = kwargs.pop("printable_module_name", "object")

    if kwargs:
        raise ValueError(
            "The following argument(s) are not supported: "
            f"{list(kwargs.keys())}"
        )

    # Fall back to legacy deserialization for all TF1 users or if
    # wrapped by in_tf_saved_model_scope() to explicitly use legacy
    # saved_model logic.
    if not tf.__internal__.tf2.enabled() or in_tf_saved_model_scope():
        return legacy_serialization.deserialize_keras_object(
            config, module_objects, custom_objects, printable_module_name
        )

    if config is None:
        return None

    if (
        isinstance(config, str)
        and custom_objects
        and custom_objects.get(config) is not None
    ):
        # This is to deserialize plain functions which are serialized as
        # string names by legacy saving formats.
return custom_objects[config] if isinstance(config, (list, tuple)): return [ deserialize_keras_object( x, custom_objects=custom_objects, safe_mode=safe_mode ) for x in config ] if module_objects is not None: inner_config, fn_module_name, has_custom_object = None, None, False if isinstance(config, dict): if "config" in config: inner_config = config["config"] if "class_name" not in config: raise ValueError( f"Unknown `config` as a `dict`, config={config}" ) # Check case where config is function or class and in custom objects if custom_objects and ( config["class_name"] in custom_objects or config.get("registered_name") in custom_objects or ( isinstance(inner_config, str) and inner_config in custom_objects ) ): has_custom_object = True # Case where config is function but not in custom objects elif config["class_name"] == "function": fn_module_name = config["module"] if fn_module_name == "builtins": config = config["config"] else: config = config["registered_name"] # Case where config is class but not in custom objects else: if config.get("module", "_") is None: raise TypeError( "Cannot deserialize object of type " f"`{config['class_name']}`. If " f"`{config['class_name']}` is a custom class, please " "register it using the " "`@keras.saving.register_keras_serializable()` " "decorator." ) config = config["class_name"] if not has_custom_object: # Return if not found in either module objects or custom objects if config not in module_objects: # Object has already been deserialized return config if isinstance(module_objects[config], types.FunctionType): return deserialize_keras_object( serialize_with_public_fn( module_objects[config], config, fn_module_name ), custom_objects=custom_objects, ) return deserialize_keras_object( serialize_with_public_class( module_objects[config], inner_config=inner_config ), custom_objects=custom_objects, ) if isinstance(config, PLAIN_TYPES): return config if not isinstance(config, dict): raise TypeError(f"Could not parse config: {config}") if "class_name" not in config or "config" not in config: return { key: deserialize_keras_object( value, custom_objects=custom_objects, safe_mode=safe_mode ) for key, value in config.items() } class_name = config["class_name"] inner_config = config["config"] or {} custom_objects = custom_objects or {} # Special cases: if class_name == "__tensor__": return tf.constant(inner_config["value"], dtype=inner_config["dtype"]) if class_name == "__numpy__": return np.array(inner_config["value"], dtype=inner_config["dtype"]) if config["class_name"] == "__bytes__": return inner_config["value"].encode("utf-8") if config["class_name"] == "__lambda__": if safe_mode: raise ValueError( "Requested the deserialization of a `lambda` object. " "This carries a potential risk of arbitrary code execution " "and thus it is disallowed by default. If you trust the " "source of the saved model, you can pass `safe_mode=False` to " "the loading function in order to allow `lambda` loading." 
) return generic_utils.func_load(inner_config["value"]) if config["class_name"] == "__typespec__": cls = _retrieve_class_or_fn( config["spec_name"], config["registered_name"], config["module"], obj_type="class", full_config=config, custom_objects=custom_objects, ) # Special casing for ExtensionType.Spec if hasattr(cls, "_tf_extension_type_fields"): inner_config = { key: deserialize_keras_object( value, custom_objects=custom_objects, safe_mode=safe_mode ) for key, value in inner_config.items() } # Deserialization of dict created by ExtensionType.as_dict() return cls(**inner_config) # Instantiate ExtensionType.Spec if config["registered_name"] is not None: return cls.from_config(inner_config) # Conversion to TensorShape and tf.DType inner_config = map( lambda x: tf.TensorShape(x) if isinstance(x, list) else (getattr(tf, x) if hasattr(tf.dtypes, str(x)) else x), inner_config, ) return cls._deserialize(tuple(inner_config)) # Below: classes and functions. module = config.get("module", None) registered_name = config.get("registered_name", class_name) if class_name == "function": fn_name = inner_config return _retrieve_class_or_fn( fn_name, registered_name, module, obj_type="function", full_config=config, custom_objects=custom_objects, ) # Below, handling of all classes. # First, is it a shared object? if "shared_object_id" in config: obj = get_shared_object(config["shared_object_id"]) if obj is not None: return obj cls = _retrieve_class_or_fn( class_name, registered_name, module, obj_type="class", full_config=config, custom_objects=custom_objects, ) if isinstance(cls, types.FunctionType): return cls if not hasattr(cls, "from_config"): raise TypeError( f"Unable to reconstruct an instance of '{class_name}' because " f"the class is missing a `from_config()` method. " f"Full object config: {config}" ) # Instantiate the class from its config inside a custom object scope # so that we can catch any custom objects that the config refers to. custom_obj_scope = object_registration.custom_object_scope(custom_objects) safe_mode_scope = SafeModeScope(safe_mode) with custom_obj_scope, safe_mode_scope: instance = cls.from_config(inner_config) build_config = config.get("build_config", None) if build_config: instance.build_from_config(build_config) compile_config = config.get("compile_config", None) if compile_config: instance.compile_from_config(compile_config) if "shared_object_id" in config: record_object_after_deserialization( instance, config["shared_object_id"] ) return instance def _retrieve_class_or_fn( name, registered_name, module, obj_type, full_config, custom_objects=None ): # If there is a custom object registered via # `register_keras_serializable()`, that takes precedence. if obj_type == "function": custom_obj = object_registration.get_registered_object( name, custom_objects=custom_objects ) else: custom_obj = object_registration.get_registered_object( registered_name, custom_objects=custom_objects ) if custom_obj is not None: return custom_obj if module: # If it's a TF-Keras built-in object, # we cannot always use direct import, because the exported # module name might not match the package structure # (e.g. experimental symbols). if ( module == "tf_keras" or module == "keras" or module.startswith("keras.") or module.startswith("tf_keras.") ): api_name = module + "." + name # Legacy internal APIs are stored in TF API naming dict # with `compat.v1` prefix if "__internal__.legacy" in api_name: api_name = "compat.v1." 
+ api_name obj = tf_export.get_symbol_from_name(api_name) if obj is not None: return obj # Configs of TF-Keras built-in functions do not contain identifying # information other than their name (e.g. 'acc' or 'tanh'). This special # case searches the TF-Keras modules that contain built-ins to retrieve # the corresponding function from the identifying string. if obj_type == "function" and module == "builtins": for mod in BUILTIN_MODULES: obj = tf_export.get_symbol_from_name( "keras." + mod + "." + name ) if obj is not None: return obj # Retrieval of registered custom function in a package filtered_dict = { k: v for k, v in custom_objects.items() if k.endswith(full_config["config"]) } if filtered_dict: return next(iter(filtered_dict.values())) # Otherwise, attempt to retrieve the class object given the `module` # and `class_name`. Import the module, find the class. try: # Change module name from `keras.` to `tf_keras.` if module.startswith("keras"): module = "tf_" + module mod = importlib.import_module(module) except ModuleNotFoundError: raise TypeError( f"Could not deserialize {obj_type} '{name}' because " f"its parent module {module} cannot be imported. " f"Full object config: {full_config}" ) obj = vars(mod).get(name, None) if obj is None: # Special case for keras.metrics.metrics if registered_name is not None: obj = vars(mod).get(registered_name, None) # Support for `__qualname__` if name.count(".") == 1: outer_name, inner_name = name.split(".") outer_obj = vars(mod).get(outer_name, None) obj = ( getattr(outer_obj, inner_name, None) if outer_obj is not None else None ) if obj is not None: return obj raise TypeError( f"Could not locate {obj_type} '{name}'. " "Make sure custom classes are decorated with " "`@keras.saving.register_keras_serializable()`. " f"Full object config: {full_config}" )
tf-keras/tf_keras/saving/serialization_lib.py/0
{ "file_path": "tf-keras/tf_keras/saving/serialization_lib.py", "repo_id": "tf-keras", "token_count": 13172 }
193
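A minimal round-trip sketch of the serialize/deserialize pair defined in the module above (the optimizer and its hyperparameter are illustrative assumptions):

import tf_keras as keras

opt = keras.optimizers.Adam(learning_rate=3e-4)

# serialize_keras_object() returns a plain dict with "module",
# "class_name", "config", and "registered_name" keys, as documented above.
config = keras.saving.serialize_keras_object(opt)
assert config["class_name"] == "Adam"

# deserialize_keras_object() reconstructs an equivalent instance. Configs
# containing "__lambda__" nodes are refused in the default safe mode and
# require an explicit safe_mode=False opt-in.
restored = keras.saving.deserialize_keras_object(config)
assert isinstance(restored, keras.optimizers.Adam)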
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `get_config` backwards compatibility."""

import tensorflow.compat.v2 as tf

from tf_keras.engine import sequential
from tf_keras.engine import training
from tf_keras.testing_infra import test_combinations
from tf_keras.tests import get_config_samples


@test_combinations.run_all_keras_modes
class TestGetConfigBackwardsCompatible(test_combinations.TestCase):
    def test_functional_dnn(self):
        model = training.Model.from_config(get_config_samples.FUNCTIONAL_DNN)
        self.assertLen(model.layers, 3)

    def test_functional_cnn(self):
        model = training.Model.from_config(get_config_samples.FUNCTIONAL_CNN)
        self.assertLen(model.layers, 4)

    def test_functional_lstm(self):
        model = training.Model.from_config(get_config_samples.FUNCTIONAL_LSTM)
        self.assertLen(model.layers, 3)

    def test_sequential_dnn(self):
        model = sequential.Sequential.from_config(
            get_config_samples.SEQUENTIAL_DNN
        )
        self.assertLen(model.layers, 2)

    def test_sequential_cnn(self):
        model = sequential.Sequential.from_config(
            get_config_samples.SEQUENTIAL_CNN
        )
        self.assertLen(model.layers, 3)

    def test_sequential_lstm(self):
        model = sequential.Sequential.from_config(
            get_config_samples.SEQUENTIAL_LSTM
        )
        self.assertLen(model.layers, 2)


if __name__ == "__main__":
    tf.test.main()
tf-keras/tf_keras/tests/get_config_test.py/0
{ "file_path": "tf-keras/tf_keras/tests/get_config_test.py", "repo_id": "tf-keras", "token_count": 768 }
194
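A minimal sketch of the `get_config()`/`from_config()` round trip those tests pin down against frozen sample configs (the toy architecture below is an illustrative assumption, not one of the stored samples):

import tf_keras as keras

model = keras.Sequential(
    [
        keras.layers.Dense(8, activation="relu", input_shape=(4,)),
        keras.layers.Dense(1),
    ]
)

# get_config() captures the architecture (not the weights); from_config()
# rebuilds an equivalent, freshly initialized model from that dict.
config = model.get_config()
rebuilt = keras.Sequential.from_config(config)
assert len(rebuilt.layers) == len(model.layers)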
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import functools import os import weakref import tensorflow.compat.v2 as tf from tf_keras.engine import input_layer from tf_keras.engine import sequential from tf_keras.engine import training from tf_keras.layers import core from tf_keras.layers import reshaping from tf_keras.optimizers.legacy import adam from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils # isort: off from tensorflow.python.checkpoint import ( checkpoint as trackable_utils, ) from tensorflow.python.eager import context from tensorflow.python.framework import ( test_util as tf_test_utils, ) from tensorflow.python.platform import tf_logging as logging class MyModel(training.Model): """A concrete Model for testing.""" def __init__(self): super().__init__() self._named_dense = core.Dense(1, use_bias=True) self._second = core.Dense(1, use_bias=False) # We can still track Trackables which aren't Layers. self._non_layer = NonLayerTrackable() def call(self, values): ret = self._second(self._named_dense(values)) return ret class NonLayerTrackable(tf.Module): def __init__(self): super().__init__() self.a_variable = trackable_utils.add_variable( self, name="a_variable", shape=[] ) class InterfaceTests(tf.test.TestCase): def testLayerDeduplication(self): model = training.Model() layer_one = core.Dense(1) layer_two = core.Dense(1) model.other_path = [layer_one, layer_two] model.l2 = layer_two model.l1 = layer_one self.assertEqual([layer_one, layer_two], model.layers) def testSaveWithOnlyKerasSession(self): with tf.Graph().as_default(), self.cached_session(): inp = input_layer.Input([1]) dense = core.Dense(1)(inp) model = training.Model(inp, dense) model.compile(optimizer="sgd", loss="mse") model.fit([1.0], [2.0]) checkpoint = tf.train.Checkpoint(model=model) checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt")) class CheckpointingTests(test_combinations.TestCase): @tf_test_utils.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testNamingWithOptimizer(self): input_value = tf.constant([[3.0]]) model = MyModel() # A nuisance Model using the same optimizer. Its slot variables should # not go in the checkpoint, since it is never depended on. 
other_model = MyModel() optimizer = adam.Adam(0.001) step = tf.compat.v1.train.get_or_create_global_step() root_trackable = tf.train.Checkpoint( optimizer=optimizer, model=model, step=step ) with tf.GradientTape() as tape: loss = model(input_value) variables = model.trainable_variables gradients = tape.gradient(loss, variables) train_op = tf.group( optimizer.apply_gradients(zip(gradients, variables)), step.assign_add(1), ) with tf.GradientTape() as tape: loss = other_model(input_value) variables = other_model.trainable_variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables)) self.evaluate(trackable_utils.gather_initializers(root_trackable)) self.evaluate(train_op) ( named_variables, serialized_graph, _, ) = tf.__internal__.tracking.ObjectGraphView( root_trackable ).serialize_object_graph() expected_slot_keys = ( "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m", "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v", "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m", "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v", "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m", "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v", ) expected_checkpoint_names = ( # Created in the root node, so no prefix. "step", "model/_second/kernel", "model/_named_dense/kernel", "model/_named_dense/bias", # non-Layer dependency of the model "model/_non_layer/a_variable", "optimizer/learning_rate", "optimizer/beta_1", "optimizer/beta_2", "optimizer/iter", "optimizer/decay", ) + expected_slot_keys suffix = "/.ATTRIBUTES/VARIABLE_VALUE" expected_checkpoint_names = [ name + suffix for name in expected_checkpoint_names ] named_variables = {v.name: v for v in named_variables} self.assertEqual( len(expected_checkpoint_names), len(named_variables.keys()) ) # Check that we've created the right full_names of objects (not # exhaustive) expected_names = { "step" + suffix: "global_step", "model/_second/kernel" + suffix: "my_model/dense_1/kernel", "model/_named_dense/kernel" + suffix: "my_model/dense/kernel", "optimizer/beta_1" + suffix: "Adam/beta_1", "optimizer/beta_2" + suffix: "Adam/beta_2", } for nodes in serialized_graph.nodes: for attribute in nodes.attributes: expected_name = expected_names.pop( attribute.checkpoint_key, None ) if expected_name is not None: self.assertEqual(expected_name, attribute.full_name) self.assertEmpty(expected_names) # Spot check the generated protocol buffers. 
self.assertEqual( "optimizer", serialized_graph.nodes[0].children[1].local_name ) optimizer_node = serialized_graph.nodes[ serialized_graph.nodes[0].children[1].node_id ] children = [node.local_name for node in optimizer_node.children] self.assertEqual( # hyper variable dependencies len(["beta_1", "beta_2", "iter", "decay", "learning_rate"]), len(children), ) serialized_slot_keys = [] for slot in optimizer_node.slot_variables: for attribute in serialized_graph.nodes[ slot.slot_variable_node_id ].attributes: serialized_slot_keys.append(attribute.checkpoint_key) self.assertEqual( len([key + suffix for key in expected_slot_keys]), len(serialized_slot_keys), ) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def testSaveRestore(self): with self.test_session(): model = MyModel() optimizer = adam.Adam(0.001) root_trackable = tf.train.Checkpoint( optimizer=optimizer, model=model ) input_value = tf.constant([[3.0]]) with tf.GradientTape() as tape: loss = model(input_value) variables = model.trainable_variables gradients = tape.gradient(loss, variables) train_op = optimizer.apply_gradients(zip(gradients, variables)) self.assertFalse(root_trackable.save_counter.trainable) self.evaluate(trackable_utils.gather_initializers(root_trackable)) self.evaluate(train_op) prefix = os.path.join(self.get_temp_dir(), "ckpt") self.evaluate( tf.compat.v1.assign(model._named_dense.variables[1], [42.0]) ) m_bias_slot = optimizer.get_slot( model._named_dense.variables[1], "m" ) self.evaluate(tf.compat.v1.assign(m_bias_slot, [1.5])) save_path = root_trackable.save(file_prefix=prefix) self.evaluate( tf.compat.v1.assign(model._named_dense.variables[1], [43.0]) ) self.evaluate(tf.compat.v1.assign(root_trackable.save_counter, 3)) optimizer_variables = self.evaluate( sorted(optimizer.variables(), key=lambda v: v.name) ) self.evaluate(tf.compat.v1.assign(m_bias_slot, [-2.0])) # Immediate restoration status = root_trackable.restore( save_path=save_path ).assert_consumed() status.run_restore_ops() self.assertAllEqual( [42.0], self.evaluate(model._named_dense.variables[1]) ) self.assertAllEqual(1, self.evaluate(root_trackable.save_counter)) self.assertAllEqual([1.5], self.evaluate(m_bias_slot)) if not tf.executing_eagerly(): # Restore-on-create is only supported when executing eagerly return on_create_model = MyModel() on_create_optimizer = adam.Adam(0.001) on_create_root = tf.train.Checkpoint( optimizer=on_create_optimizer, model=on_create_model ) # Deferred restoration status = on_create_root.restore(save_path=save_path) status.assert_nontrivial_match() status.assert_existing_objects_matched() with self.assertRaises(AssertionError): status.assert_consumed() on_create_model(tf.constant([[3.0]])) # create variables self.assertAllEqual(1, self.evaluate(on_create_root.save_counter)) self.assertAllEqual( [42.0], self.evaluate(on_create_model._named_dense.variables[1]) ) on_create_m_bias_slot = on_create_optimizer.get_slot( on_create_model._named_dense.variables[1], "m" ) status.assert_existing_objects_matched() if not tf.executing_eagerly(): with self.assertRaises(AssertionError): status.assert_consumed() # Optimizer slot variables are created when the original variable is # restored. 
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot)) dummy_var = tf.Variable([1.0]) on_create_optimizer.minimize( loss=dummy_var.read_value, var_list=[dummy_var] ) status.assert_existing_objects_matched() status.assert_consumed() self.assertAllEqual( optimizer_variables, # Creation order is different, so .variables() needs to be # re-sorted. self.evaluate( sorted(optimizer.variables(), key=lambda v: v.name) ), ) # TODO(allenl): Debug garbage created by this test in python3. def testDeferredRestorationUsageEager(self): """An idiomatic eager execution example.""" num_training_steps = 10 checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") for training_continuation in range(3): model = MyModel() optimizer = adam.Adam(0.001) root = tf.train.Checkpoint(optimizer=optimizer, model=model) root.restore(tf.train.latest_checkpoint(checkpoint_directory)) for _ in range(num_training_steps): # TODO(allenl): Use a Dataset and serialize/checkpoint it. input_value = tf.constant([[3.0]]) with tf.GradientTape() as tape: loss = model(input_value) variables = model.trainable_variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables)) root.save(file_prefix=checkpoint_prefix) self.assertEqual( (training_continuation + 1) * num_training_steps, root.optimizer.iterations.numpy(), ) def testUsageGraph(self): """Expected usage when graph building.""" with context.graph_mode(): num_training_steps = 10 checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") for training_continuation in range(3): with tf.Graph().as_default(): model = MyModel() optimizer = adam.Adam(0.001) root = tf.compat.v1.train.Checkpoint( optimizer=optimizer, model=model ) input_value = tf.constant([[3.0]]) with tf.GradientTape() as tape: loss = model(input_value) variables = model.trainable_variables gradients = tape.gradient(loss, variables) train_op = optimizer.apply_gradients( zip(gradients, variables) ) checkpoint_path = tf.train.latest_checkpoint( checkpoint_directory ) with self.session( graph=tf.compat.v1.get_default_graph() ) as session: status = root.restore(save_path=checkpoint_path) status.initialize_or_restore(session=session) if checkpoint_path is None: self.assertEqual(0, training_continuation) with self.assertRaises(AssertionError): status.assert_consumed() with self.assertRaises(AssertionError): status.assert_existing_objects_matched() else: status.assert_consumed() status.assert_existing_objects_matched() for _ in range(num_training_steps): session.run(train_op) root.save( file_prefix=checkpoint_prefix, session=session ) self.assertEqual( (training_continuation + 1) * num_training_steps, session.run(root.optimizer.iterations), ) self.assertEqual( training_continuation + 1, session.run(root.save_counter), ) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def testAgnosticUsage(self): """Graph/eager agnostic usage.""" # Does create garbage when executing eagerly due to ops.Graph() # creation. 
with self.test_session(): num_training_steps = 10 checkpoint_directory = self.get_temp_dir() optimizer = adam.Adam(0.001) def _train_fn(model, input_value): with tf.GradientTape() as tape: loss = model(input_value) variables = model.trainable_variables gradients = tape.gradient(loss, variables) return optimizer.apply_gradients(zip(gradients, variables)) for training_continuation in range(3): with test_utils.device(should_use_gpu=True): model = MyModel() root = tf.train.Checkpoint(optimizer=optimizer, model=model) manager = tf.train.CheckpointManager( root, checkpoint_directory, max_to_keep=1 ) status = root.restore(save_path=manager.latest_checkpoint) input_value = tf.constant([[3.0]]) train_fn = functools.partial(_train_fn, model, input_value) if not tf.executing_eagerly(): train_fn = functools.partial(self.evaluate, train_fn()) status.initialize_or_restore() for _ in range(num_training_steps): train_fn() manager.save() self.assertEqual( (training_continuation + 1) * num_training_steps, self.evaluate(root.optimizer.iterations), ) self.assertEqual( training_continuation + 1, self.evaluate(root.save_counter), ) @test_combinations.generate(test_combinations.combine(mode=["eager"])) def testPartialRestoreWarningObject(self): optimizer = adam.Adam(0.0) original_root = tf.train.Checkpoint( v1=tf.Variable(2.0), v2=tf.Variable(3.0), optimizer=optimizer ) # Create a slot variable to save optimizer.minimize(original_root.v1.read_value, [original_root.v1]) prefix = os.path.join(self.get_temp_dir(), "ckpt") save_path = original_root.save(prefix) partial_root = tf.train.Checkpoint(v1=tf.Variable(0.0)) weak_partial_root = weakref.ref(partial_root) weak_v1 = weakref.ref(partial_root.v1) partial_root.restore(save_path) self.assertEqual(2.0, partial_root.v1.numpy()) with tf.compat.v1.test.mock.patch.object( logging, "warning" ) as mock_log: del partial_root self.assertIsNone(weak_partial_root()) self.assertIsNone(weak_v1()) messages = str(mock_log.call_args_list) self.assertIn("(root).v2'", messages) self.assertIn("(root).optimizer's state 'm' for (root).v1", messages) self.assertNotIn("(root).v1'", messages) self.assertIn("expect_partial()", messages) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def testWithDefun(self): with self.test_session(): num_training_steps = 2 checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") for training_continuation in range(3): with test_utils.device(should_use_gpu=True): model = MyModel() # Don't actually train so we can test variable values optimizer = adam.Adam(0.0) root = tf.train.Checkpoint(optimizer=optimizer, model=model) checkpoint_path = tf.train.latest_checkpoint( checkpoint_directory ) status = root.restore(save_path=checkpoint_path) def train_fn(): @tf.function def _call_model(x): return model(x) with tf.GradientTape() as tape: loss = _call_model(tf.constant([[3.0]])) gradients = tape.gradient(loss, model.variables) return optimizer.apply_gradients( zip(gradients, model.variables) ) if not tf.executing_eagerly(): train_fn = functools.partial(self.evaluate, train_fn()) status.initialize_or_restore() for _ in range(num_training_steps): train_fn() if training_continuation > 0: status.assert_consumed() self.assertAllClose( [[42.0]], self.evaluate(model.variables[0]) ) else: self.evaluate(model.variables[0].assign([[42.0]])) root.save(file_prefix=checkpoint_prefix) self.assertEqual( (training_continuation + 1) * num_training_steps, self.evaluate(optimizer.iterations), ) 
self.assertEqual( training_continuation + 1, self.evaluate(root.save_counter), ) @test_combinations.generate(test_combinations.combine(mode=["eager"])) def testAnonymousVarsInInit(self): class Model(training.Model): def __init__(self): super().__init__() self.w = tf.Variable(0.0) self.b = tf.Variable(0.0) self.vars = [self.w, self.b] def call(self, x): return x * self.w + self.b model = Model() optimizer = adam.Adam(learning_rate=0.05) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) for _ in range(2): checkpoint.save(checkpoint_prefix) with tf.GradientTape() as tape: loss = (tf.constant(1.0) - model(tf.constant(1.0))) ** 2 grad = tape.gradient(loss, model.vars) optimizer.apply_gradients( [(g, v) for g, v in zip(grad, model.vars)] ) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def testDeferredSlotRestoration(self): with self.test_session(): checkpoint_directory = self.get_temp_dir() root = tf.train.Checkpoint() root.var = trackable_utils.add_variable( root, name="var", initializer=0.0 ) optimizer = adam.Adam(0.1) variables = [root.var] gradients = [1.0] train_op = optimizer.apply_gradients(zip(gradients, variables)) # Note that `optimizer` has not been added as a dependency of # `root`. Create a one-off grouping so that slot variables for # `root.var` get initialized too. self.evaluate( trackable_utils.gather_initializers( tf.train.Checkpoint(root=root, optimizer=optimizer) ) ) self.evaluate(train_op) self.evaluate(tf.compat.v1.assign(root.var, 12.0)) no_slots_path = root.save( os.path.join(checkpoint_directory, "no_slots") ) root.optimizer = optimizer self.evaluate(tf.compat.v1.assign(root.var, 13.0)) self.evaluate( tf.compat.v1.assign( optimizer.get_slot(slot_name="m", var=root.var), 14.0 ) ) slots_path = root.save( os.path.join(checkpoint_directory, "with_slots") ) new_root = tf.train.Checkpoint() # Load the slot-containing checkpoint (deferred), then immediately # overwrite the non-slot variable (also deferred). slot_status = new_root.restore(slots_path) no_slot_status = new_root.restore(no_slots_path) with self.assertRaises(AssertionError): no_slot_status.assert_consumed() new_root.var = trackable_utils.add_variable( new_root, name="var", shape=[] ) no_slot_status.assert_consumed() no_slot_status.run_restore_ops() self.assertEqual(12.0, self.evaluate(new_root.var)) new_root.optimizer = adam.Adam(0.1) slot_status.assert_existing_objects_matched() if not tf.executing_eagerly(): with self.assertRaisesRegex( AssertionError, "Unresolved object" ): slot_status.assert_consumed() self.assertEqual(12.0, self.evaluate(new_root.var)) if tf.executing_eagerly(): # Slot variables are only created with restoring initializers # when executing eagerly. self.assertEqual( 14.0, self.evaluate( new_root.optimizer.get_slot( slot_name="m", var=new_root.var ) ), ) else: # Slot variables are not created eagerly when graph building. with self.assertRaises(KeyError): new_root.optimizer.get_slot(slot_name="m", var=new_root.var) variables = [new_root.var] gradients = [1.0] train_op = new_root.optimizer.apply_gradients( zip(gradients, variables) ) # The slot variable now exists; restore() didn't create it, but we # should now have a restore op for it. slot_status.run_restore_ops() if not tf.executing_eagerly(): # The train op hasn't run when graph building, so the slot # variable has its restored value. 
It has run in eager, so the # value will be different. self.assertEqual( 14.0, self.evaluate( new_root.optimizer.get_slot( slot_name="m", var=new_root.var ) ), ) self.evaluate(train_op) slot_status.assert_consumed() def testManySavesGraph(self): """Saves after the first should not modify the graph.""" with context.graph_mode(): graph = tf.Graph() with graph.as_default(), self.session(graph): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") obj = tf.train.Checkpoint() obj.var = tf.Variable(0.0, name="v") obj.opt = adam.Adam(0.1) variables = [obj.var] gradients = [1.0] obj.opt.apply_gradients(zip(gradients, variables)) self.evaluate(trackable_utils.gather_initializers(obj)) obj.save(checkpoint_prefix) graph.finalize() obj.save(checkpoint_prefix) def testManyRestoresGraph(self): """Restores after the first should not modify the graph.""" with context.graph_mode(): graph = tf.Graph() with graph.as_default(), self.session(graph): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") obj = tf.train.Checkpoint() obj.var = tf.Variable(0.0, name="v") obj.opt = adam.Adam(0.1) variables = [obj.var] gradients = [1.0] obj.opt.apply_gradients(zip(gradients, variables)) self.evaluate(trackable_utils.gather_initializers(obj)) save_path = obj.save(checkpoint_prefix) obj.restore(save_path) graph.finalize() obj.restore(save_path) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_sequential(self): with self.test_session(): model = sequential.Sequential() checkpoint = tf.train.Checkpoint(model=model) model.add(core.Dense(4)) second_dense = core.Dense(5) model.add(second_dense) model(tf.constant([[1.0]])) checkpoint.restore(None).initialize_or_restore() self.evaluate( second_dense.bias.assign(tf.constant([1.0, 2.0, 3.0, 4.0, 5.0])) ) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = checkpoint.save(checkpoint_prefix) self.evaluate( second_dense.bias.assign(tf.constant([5.0, 6.0, 7.0, 8.0, 9.0])) ) checkpoint.restore(save_path).assert_consumed().run_restore_ops() self.assertAllEqual( [1.0, 2.0, 3.0, 4.0, 5.0], self.evaluate(second_dense.bias) ) deferred_sequential = sequential.Sequential() deferred_sequential_checkpoint = tf.train.Checkpoint( model=deferred_sequential ) status = deferred_sequential_checkpoint.restore(save_path) deferred_sequential.add(core.Dense(4)) deferred_second_dense = core.Dense(5) deferred_sequential.add(deferred_second_dense) deferred_sequential(tf.constant([[1.0]])) status.run_restore_ops() self.assertAllEqual( [1.0, 2.0, 3.0, 4.0, 5.0], self.evaluate(deferred_second_dense.bias), ) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def test_initialize_if_not_restoring(self): with self.test_session(): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") optimizer_only_prefix = os.path.join(checkpoint_directory, "opt") with test_utils.device(should_use_gpu=True): model = MyModel() optimizer = adam.Adam(0.001) root = tf.train.Checkpoint( model=model ) # Do not save the optimizer with the checkpoint. 
optimizer_checkpoint = tf.train.Checkpoint(optimizer=optimizer) checkpoint_path = tf.train.latest_checkpoint( checkpoint_directory ) status = root.restore(save_path=checkpoint_path) input_value = tf.constant([[3.0]]) def train_fn(): with tf.GradientTape() as tape: loss = model(input_value) variables = model.trainable_variables gradients = tape.gradient(loss, variables) return optimizer.apply_gradients(zip(gradients, variables)) if not tf.executing_eagerly(): train_fn = functools.partial(self.evaluate, train_fn()) status.initialize_or_restore() # TODO(tanzheny): Add hyper variables to .variables(), and set # them with set_weights etc. variables_not_in_the_variables_property = [ obj for obj in optimizer._hyper.values() if isinstance(obj, tf.Variable) ] self.evaluate( [ v.initializer for v in optimizer.variables() + variables_not_in_the_variables_property ] ) train_fn() model_save_path = root.save(file_prefix=checkpoint_prefix) self.evaluate(optimizer.beta_1.assign(42.0)) optimizer_save_path = optimizer_checkpoint.save( optimizer_only_prefix ) del train_fn # Restore into a graph with the optimizer with test_utils.device(should_use_gpu=True): model = MyModel() optimizer = adam.Adam(0.001) root = tf.train.Checkpoint(optimizer=optimizer, model=model) status = root.restore(save_path=model_save_path) input_value = tf.constant([[3.0]]) def train_fn1(): with tf.GradientTape() as tape: loss = model(input_value) variables = model.trainable_variables gradients = tape.gradient(loss, variables) return optimizer.apply_gradients(zip(gradients, variables)) if not tf.executing_eagerly(): train_fn1 = functools.partial(self.evaluate, train_fn1()) status.initialize_or_restore() train_fn1() with self.assertRaises(AssertionError): status.assert_existing_objects_matched() with self.assertRaises(AssertionError): status.assert_consumed() del train_fn1 # Make sure initialization doesn't clobber later restores with test_utils.device(should_use_gpu=True): model = MyModel() optimizer = adam.Adam(0.001, beta_1=1.0) root = tf.train.Checkpoint(optimizer=optimizer, model=model) opt_root = tf.train.Checkpoint(optimizer=optimizer) status = root.restore(save_path=model_save_path) init_only_optimizer_status = opt_root.restore(save_path=None) optimizer_status = opt_root.restore( save_path=optimizer_save_path ) input_value = tf.constant([[3.0]]) def train_fn2(): with tf.GradientTape() as tape: loss = model(input_value) variables = model.trainable_variables gradients = tape.gradient(loss, variables) return optimizer.apply_gradients(zip(gradients, variables)) if not tf.executing_eagerly(): train_fn2 = functools.partial(self.evaluate, train_fn2()) optimizer_status.run_restore_ops() status.initialize_or_restore() init_only_optimizer_status.initialize_or_restore() train_fn2() self.assertEqual(42.0, self.evaluate(optimizer.beta_1)) class _ManualScope(tf.Module): def __call__(self): with tf.compat.v1.variable_scope("ManualScope") as vs: self.variable_scope = vs with trackable_utils.capture_dependencies(template=self): return self._build() def _build(self): return tf.compat.v1.get_variable(name="in_manual_scope", shape=[]) @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class TemplateTests(test_combinations.TestCase): def test_trackable_save_restore(self): with self.test_session(): def _templated(): v = tf.compat.v1.get_variable( "v", shape=[1], initializer=tf.compat.v1.zeros_initializer(), use_resource=True, ) v2 = tf.compat.v1.get_variable( "v2", shape=[1], initializer=tf.compat.v1.zeros_initializer(), 
use_resource=True, ) manual = _ManualScope() return v, v + 1.0, v2, manual, manual() save_template = tf.compat.v1.make_template("s1", _templated) v1_save, _, v2_save, manual_scope, manual_scope_v = save_template() self.assertEqual( set( [ id(v1_save), id(v2_save), id(manual_scope), id(manual_scope_v), id(save_template), ] ), set(map(id, trackable_utils.list_objects(save_template))), ) self.assertDictEqual( {"in_manual_scope": manual_scope_v}, manual_scope._trackable_children(), ) optimizer = adam.Adam(0.0) save_root = tf.train.Checkpoint( my_template=save_template, optimizer=optimizer ) optimizer.minimize(v1_save.read_value, var_list=[v1_save]) self.evaluate([v.initializer for v in save_template.variables]) optimizer_variables = optimizer.variables() + list( optimizer._hyper.values() ) self.evaluate([v.initializer for v in optimizer_variables]) self.evaluate(v1_save.assign([12.0])) self.evaluate(v2_save.assign([14.0])) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = save_root.save(checkpoint_prefix) load_template = tf.compat.v1.make_template("s2", _templated) load_optimizer = adam.Adam(0.0) load_root = tf.train.Checkpoint( my_template=load_template, optimizer=load_optimizer ) status = load_root.restore(save_path) var, var_plus_one, var2, _, _ = load_template() load_optimizer.minimize(var.read_value, var_list=[var]) children = load_template._trackable_children() self.assertEqual({"v", "v2", "ManualScope"}, children.keys()) status.assert_consumed().run_restore_ops() self.assertAllEqual([12.0], self.evaluate(var)) self.assertAllEqual([13.0], self.evaluate(var_plus_one)) self.assertAllEqual([14.0], self.evaluate(var2)) class CheckpointCompatibilityTests(test_combinations.TestCase): def _initialized_model(self): input_value = tf.constant([[3.0]]) model = MyModel() optimizer = adam.Adam(0.001) root_trackable = tf.train.Checkpoint(optimizer=optimizer, model=model) with tf.GradientTape() as tape: loss = model(input_value) variables = model.trainable_variables gradients = tape.gradient(loss, variables) train_op = optimizer.apply_gradients(zip(gradients, variables)) self.evaluate(trackable_utils.gather_initializers(root_trackable)) self.evaluate(train_op) # A regular variable, a slot variable, and a non-slot Optimizer variable # with known values to check when loading. 
self.evaluate(model._named_dense.bias.assign([1.0])) self.evaluate( optimizer.get_slot( var=model._named_dense.bias, slot_name="m" ).assign([2.0]) ) self.evaluate(optimizer.beta_1.assign(3.0)) return root_trackable def _set_sentinels(self, root_trackable): self.evaluate(root_trackable.model._named_dense.bias.assign([101.0])) self.evaluate( root_trackable.optimizer.get_slot( var=root_trackable.model._named_dense.bias, slot_name="m" ).assign([102.0]) ) self.evaluate(root_trackable.optimizer.beta_1.assign(103.0)) def _check_sentinels(self, root_trackable): self.assertAllEqual( [1.0], self.evaluate(root_trackable.model._named_dense.bias) ) self.assertAllEqual( [2.0], self.evaluate( root_trackable.optimizer.get_slot( var=root_trackable.model._named_dense.bias, slot_name="m" ) ), ) self.assertAllEqual(3.0, self.evaluate(root_trackable.optimizer.beta_1)) def _write_name_based_checkpoint(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") with context.graph_mode(): save_graph = tf.Graph() with save_graph.as_default(), self.session( graph=save_graph ) as session: root = self._initialized_model() name_saver = tf.compat.v1.train.Saver() return name_saver.save( sess=session, save_path=checkpoint_prefix, global_step=root.optimizer.iterations, ) @test_combinations.generate( test_combinations.combine(mode=["graph", "eager"]) ) def testLoadFromNameBasedSaver(self): """Save a name-based checkpoint, load it using the object-based API.""" with test_utils.device(should_use_gpu=True): with self.test_session(): save_path = self._write_name_based_checkpoint() root = self._initialized_model() self._set_sentinels(root) with self.assertRaises(AssertionError): self._check_sentinels(root) object_saver = tf.train.Checkpoint(root=root) self._set_sentinels(root) status = object_saver.read(save_path) if tf.executing_eagerly(): self._check_sentinels(root) if tf.executing_eagerly(): status.assert_consumed() status.assert_existing_objects_matched() status.assert_nontrivial_match() else: # When graph building, we haven't read any keys, so we don't # know whether the restore will be complete. with self.assertRaisesRegex(AssertionError, "not restored"): status.assert_consumed() with self.assertRaisesRegex(AssertionError, "not restored"): status.assert_existing_objects_matched() with self.assertRaisesRegex(AssertionError, "not restored"): status.assert_nontrivial_match() status.run_restore_ops() self._check_sentinels(root) self._set_sentinels(root) status = object_saver.read(save_path) status.initialize_or_restore() status.assert_nontrivial_match() self._check_sentinels(root) # Check that there is no error when keys are missing from the # name-based checkpoint. 
root.not_in_name_checkpoint = tf.Variable([1.0]) status = object_saver.read(save_path) with self.assertRaises(AssertionError): status.assert_existing_objects_matched() def testSaveGraphLoadEager(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") with context.graph_mode(): save_graph = tf.Graph() with save_graph.as_default(), self.session(graph=save_graph): root = self._initialized_model() save_path = root.save(file_prefix=checkpoint_prefix) with tf.__internal__.eager_context.eager_mode(): root = self._initialized_model() self._set_sentinels(root) root.restore(save_path).assert_consumed() self._check_sentinels(root) def testSaveEagerLoadGraph(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") with tf.__internal__.eager_context.eager_mode(): root = self._initialized_model() save_path = root.save(file_prefix=checkpoint_prefix) with context.graph_mode(): save_graph = tf.Graph() with save_graph.as_default(), self.session(graph=save_graph): root = self._initialized_model() self._set_sentinels(root) root.restore(save_path).assert_consumed().run_restore_ops() self._check_sentinels(root) def testIgnoreSaveCounter(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") with self.cached_session() as session: # Create and save a model using Saver() before using a Checkpoint. # This generates a snapshot without the Checkpoint's `save_counter`. model = sequential.Sequential() model.add(reshaping.Flatten(input_shape=(1,))) model.add(core.Dense(1)) name_saver = tf.compat.v1.train.Saver(model.trainable_variables) save_path = name_saver.save( sess=session, save_path=checkpoint_prefix, global_step=1 ) # Checkpoint.restore must successfully load that checkpoint. ckpt = tf.train.Checkpoint(model=model) status = ckpt.restore(save_path) status.assert_existing_objects_matched() # It should, however, refuse to load a checkpoint where an unrelated # `save_counter` variable is missing. model.layers[1].var = tf.Variable(0.0, name="save_counter") status = ckpt.restore(save_path) with self.assertRaises(AssertionError): status.assert_existing_objects_matched() if __name__ == "__main__": tf.compat.v1.enable_eager_execution() tf.test.main()
tf-keras/tf_keras/tests/tracking_util_test.py/0
{ "file_path": "tf-keras/tf_keras/tests/tracking_util_test.py", "repo_id": "tf-keras", "token_count": 23447 }
195
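The tracking-util tests above repeatedly exercise one core pattern: bundling a model and optimizer into a `tf.train.Checkpoint`, restoring from the latest checkpoint if one exists, and saving after training (see `testAgnosticUsage`). As a reading aid, here is a minimal standalone sketch of that pattern; the toy model, checkpoint directory, and training data are hypothetical and not taken from the test file.

```python
import tensorflow as tf

# Hypothetical toy model and optimizer; any trackable objects work.
net = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.Adam(0.001)

ckpt = tf.train.Checkpoint(optimizer=optimizer, model=net)
manager = tf.train.CheckpointManager(ckpt, "/tmp/ckpts", max_to_keep=1)

# `latest_checkpoint` is None on a cold start; restore(None) is then a
# no-op, so the same code path handles both fresh and resumed runs.
ckpt.restore(manager.latest_checkpoint)

x = tf.constant([[3.0]])
for _ in range(10):
    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(net(x) ** 2)
    grads = tape.gradient(loss, net.trainable_variables)
    optimizer.apply_gradients(zip(grads, net.trainable_variables))

manager.save()  # increments ckpt.save_counter and prunes old checkpoints
```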
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for TF-Keras composite tensor support.""" import numpy as np import scipy.sparse import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.engine import input_layer from tf_keras.layers import Dense from tf_keras.layers import Embedding from tf_keras.layers import Layer from tf_keras.layers import core from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils # Define test-only Layer classes to validate passing Sparse and Ragged tensors # between layers. class ToDense(Layer): """Create a dense (standard) tensor from the given input tensor.""" def __init__(self, default_value, **kwargs): super().__init__(**kwargs) self._default_value = default_value def call(self, inputs): if isinstance(inputs, dict): # Dicts are no longer flattened. # Always a single element in these tests. inputs = tf.nest.flatten(inputs)[0] if isinstance(inputs, tf.RaggedTensor): output = inputs.to_tensor(default_value=self._default_value) elif isinstance(inputs, tf.SparseTensor): output = tf.sparse.to_dense( inputs, default_value=self._default_value ) elif isinstance(inputs, tf.Tensor): output = inputs else: raise TypeError(f"Unexpected tensor type {type(inputs).__name__}") # Return a float so that we can compile models with this as the final # layer. return tf.cast(output, tf.float32) class ToRagged(Layer): """Create a ragged tensor based on a given dense tensor.""" def __init__(self, padding, ragged_rank=1, **kwargs): super().__init__(**kwargs) self._padding = padding self._ragged_rank = ragged_rank def call(self, inputs): return tf.RaggedTensor.from_tensor( inputs, padding=self._padding, ragged_rank=self._ragged_rank ) class ToSparse(Layer): """Create a sparse tensor based on a given dense tensor.""" def call(self, inputs): indices = tf.where(tf.not_equal(inputs, 0)) values = tf.gather_nd(inputs, indices) shape = tf.shape(inputs, out_type=tf.int64) return tf.SparseTensor(indices, values, dense_shape=shape) class _SubclassModel(keras.Model): """A TF-Keras subclass model.""" def __init__(self, layers, i_layer=None): super().__init__() # Note that clone and build doesn't support lists of layers in # subclassed models. Adding each layer directly here. 
for i, layer in enumerate(layers): setattr(self, self._layer_name_for_i(i), layer) self.num_layers = len(layers) if i_layer is not None: self._set_inputs(i_layer) def _layer_name_for_i(self, i): return f"layer{i}" def call(self, inputs, **kwargs): x = inputs for i in range(self.num_layers): layer = getattr(self, self._layer_name_for_i(i)) x = layer(x) return x def get_model_from_layers_with_input( layers, input_shape=None, input_dtype=None, model_input=None ): """Builds a model from a sequence of layers.""" if model_input is not None and input_shape is not None: raise ValueError("Cannot specify a model_input and an input shape.") model_type = test_utils.get_model_type() if model_type == "subclass": return _SubclassModel(layers, model_input) if model_type == "sequential": model = keras.models.Sequential() if model_input is not None: model.add(model_input) elif input_shape is not None: model.add(keras.Input(shape=input_shape, dtype=input_dtype)) for layer in layers: model.add(layer) return model if model_type == "functional": if model_input is not None: inputs = model_input else: if not input_shape: raise ValueError( "Cannot create a functional model from layers with no " "input shape." ) inputs = keras.Input(shape=input_shape, dtype=input_dtype) outputs = inputs for layer in layers: outputs = layer(outputs) return keras.Model(inputs, outputs) raise ValueError(f"Unknown model type {model_type}") def get_test_mode_kwargs(): run_eagerly = test_utils.should_run_eagerly() return { "run_eagerly": run_eagerly, } @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes class CompositeTensorInternalTest(test_combinations.TestCase): def test_internal_ragged_tensors(self): # Create a model that accepts an input, converts it to Ragged, and # converts the ragged tensor back to a dense tensor. layers = [ToRagged(padding=0), ToDense(default_value=-1)] model = test_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) expected_output = np.array([[1, -1], [2, 3]]) output = model.predict(input_data) self.assertAllEqual(expected_output, output) def test_internal_sparse_tensors(self): # Create a model that accepts an input, converts it to Sparse, and # converts the sparse tensor back to a dense tensor. layers = [ToSparse(), ToDense(default_value=-1)] model = test_utils.get_model_from_layers(layers, input_shape=(None,)) # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.predict(input_data) self.assertAllEqual(expected_output, output) def test_training_internal_ragged_tensors(self): # Create a model that implements y=Mx. This is easy to learn and will # demonstrate appropriate gradient passing. (We have to use # RaggedTensors for this test, as ToSparse() doesn't support gradient # propagation through the layer.) TODO(b/124796939): Investigate this. layers = [core.Dense(2), ToRagged(padding=0), ToDense(default_value=-1)] model = test_utils.get_model_from_layers(layers, input_shape=(1,)) input_data = np.random.rand(1024, 1) expected_data = np.concatenate( (input_data * 3, input_data * 0.5), axis=-1 ) model.compile(loss="mse", optimizer="adam", **get_test_mode_kwargs()) history = model.fit(input_data, expected_data, epochs=10, verbose=0) # If the model trained, the loss stored at history[0] should be # different than the one stored at history[-1]. 
self.assertNotEqual( history.history["loss"][-1], history.history["loss"][0] ) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes class CompositeTensorOutputTest(test_combinations.TestCase): def test_ragged_tensor_outputs(self): # Create a model that accepts an input and converts it to a ragged # tensor. layers = [ToRagged(padding=0)] model = test_utils.get_model_from_layers(layers, input_shape=(None,)) model._run_eagerly = test_utils.should_run_eagerly() # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) output = model.predict(input_data) expected_values = [[1], [2, 3]] self.assertAllEqual(expected_values, output) def test_ragged_tensor_rebatched_outputs(self): # Create a model that accepts an input and converts it to a ragged # tensor. layers = [ToRagged(padding=0)] model = test_utils.get_model_from_layers(layers, input_shape=(None,)) model._run_eagerly = test_utils.should_run_eagerly() # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]]) output = model.predict(input_data, batch_size=2) expected_values = [[1], [2, 3], [4], [5, 6]] self.assertAllEqual(expected_values, output) def test_sparse_tensor_outputs(self): # Create a model that accepts an input and converts it to a sparse # tensor. layers = [ToSparse()] model = test_utils.get_model_from_layers(layers, input_shape=(None,)) model._run_eagerly = test_utils.should_run_eagerly() # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0]]) output = model.predict(input_data) expected_indices = np.array([[0, 0], [1, 0], [1, 1]]) expected_values = np.array([1, 2, 3]) expected_dense_shape = np.array([2, 3]) self.assertAllEqual(output.indices, expected_indices) self.assertAllEqual(output.values, expected_values) self.assertAllEqual(output.dense_shape, expected_dense_shape) def test_sparse_tensor_rebatched_outputs(self): # Create a model that accepts an input and converts it to a sparse # tensor. layers = [ToSparse()] model = test_utils.get_model_from_layers(layers, input_shape=(None,)) model._run_eagerly = test_utils.should_run_eagerly() # Define some input data with additional padding. input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]]) output = model.predict(input_data, batch_size=2) expected_indices = np.array( [[0, 0], [1, 0], [1, 1], [2, 0], [3, 0], [3, 1]] ) expected_values = np.array([1, 2, 3, 4, 5, 6]) expected_dense_shape = np.array([4, 3]) self.assertAllEqual(output.indices, expected_indices) self.assertAllEqual(output.values, expected_values) self.assertAllEqual(output.dense_shape, expected_dense_shape) def get_input_name(use_dict): # Define the input name. if not use_dict: return None # This is the same as not setting 'name'. elif test_utils.get_model_type() == "subclass": return "input_1" # Subclass models don't support input names. else: return "test_input_name" def get_kwargs(use_dataset, action="predict"): if use_dataset or not tf.executing_eagerly(): if action == "fit": return {"steps_per_epoch": 1} return {"steps": 1} else: return {"batch_size": 2} def prepare_inputs(data, use_dict, use_dataset, action, input_name): input_data, expected_output = data batch_size = input_data.shape[0] # Prepare the input data. 
if use_dict: input_data = {input_name: input_data} if use_dataset: if action == "predict": input_data = tf.data.Dataset.from_tensor_slices(input_data).batch( batch_size ) else: input_data = tf.data.Dataset.from_tensor_slices( (input_data, expected_output) ).batch(batch_size) expected_output = None return (input_data, expected_output) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @parameterized.named_parameters( *test_utils.generate_combinations_with_testcase_name( use_dict=[True, False], use_dataset=[True, False], action=["predict", "evaluate", "fit"], ) ) class SparseTensorInputTest(test_combinations.TestCase): def test_sparse_tensors(self, use_dict, use_dataset, action): data = [ ( tf.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3] ), np.array([[[1, -1, -1]], [[2, 3, -1]]]), ), ( tf.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4], ), np.array( [[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]] ), ), ] # Prepare the model to test. input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=(1, None), sparse=True, name=input_name, dtype=tf.int32 ) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input( layers, model_input=model_input ) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs(), ) kwargs = get_kwargs(use_dataset, action) # Prepare the input data for data_element in data: input_data, expected_output = prepare_inputs( data_element, use_dict, use_dataset, action, input_name ) # Perform the action. if action == "predict": result = model.predict(input_data, **kwargs) self.assertAllEqual(expected_output, result) if action == "evaluate": result = model.evaluate(input_data, expected_output, **kwargs) self.assertAllEqual(1.0, result[-1]) if action == "fit": # TODO(momernick): What's the best way of validating that fit # happened? _ = model.fit( input_data, expected_output, shuffle=False, **kwargs ) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes class ScipySparseTensorInputTest(test_combinations.TestCase, tf.test.TestCase): def test_sparse_scipy_predict_inputs_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse # tensor back to a dense tensor. Scipy sparse matrices are limited to # 2D, so use a one-dimensional shape; note also that scipy's default # dtype is int64. model_input = input_layer.Input(shape=(3,), sparse=True, dtype=tf.int64) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input( layers, model_input=model_input ) input_data = scipy.sparse.coo_matrix( ([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3] ) expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.predict(input_data, steps=1) self.assertAllEqual(expected_output, output) input_data_2 = scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3] ) expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.predict(input_data_2, steps=1) self.assertAllEqual(expected_output_2, output_2) def test_sparse_scipy_eval_inputs(self): # Create a model that accepts a sparse input and converts the sparse # tensor back to a dense tensor. Scipy sparse matrices are limited to # 2D, so use a one-dimensional shape; note also that scipy's default # dtype is int64. 
model_input = input_layer.Input(shape=(3,), sparse=True, dtype=tf.int64) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input( layers, model_input=model_input ) model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"]) input_data = scipy.sparse.coo_matrix( ([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3] ) expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.evaluate(input_data, expected_output, steps=1) self.assertAllEqual(1.0, output[-1]) input_data_2 = scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3] ) expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.evaluate(input_data_2, expected_output_2, steps=1) self.assertAllEqual(1.0, output_2[-1]) def test_sparse_scipy_predict_input_dicts_via_input_layer_args(self): # Create a model that accepts a sparse input and converts the sparse # tensor back to a dense tensor. Scipy sparse matrices are limited to # 2D, so use a one-dimensional shape; note also that scipy's default # dtype is int64. if test_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don't support input names. else: input_name = "test_input_name" model_input = input_layer.Input( shape=(3,), sparse=True, name=input_name, dtype=tf.int64 ) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input( layers, model_input=model_input ) input_data = { input_name: scipy.sparse.coo_matrix( ([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3] ) } expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.predict(input_data, steps=1) self.assertAllEqual(expected_output, output) input_data_2 = { input_name: scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3] ) } expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.predict(input_data_2, steps=1) self.assertAllEqual(expected_output_2, output_2) def test_sparse_scipy_eval_input_dicts(self): # Create a model that accepts a sparse input and converts the sparse # tensor back to a dense tensor. Scipy sparse matrices are limited to # 2D, so use a one-dimensional shape; note also that scipy's default # dtype is int64. if test_utils.get_model_type() == "subclass": input_name = "input_1" # Subclass models don't support input names. 
else: input_name = "test_input_name" model_input = input_layer.Input( shape=(3,), sparse=True, name=input_name, dtype=tf.int64 ) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input( layers, model_input=model_input ) model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"]) input_data = { input_name: scipy.sparse.coo_matrix( ([1, 2, 3], ([0, 1, 1], [0, 0, 1])), shape=[2, 3] ) } expected_output = np.array([[1, -1, -1], [2, 3, -1]]) output = model.evaluate(input_data, expected_output, steps=1) self.assertAllEqual(1.0, output[-1]) input_data_2 = { input_name: scipy.sparse.coo_matrix( ([5, 6, 7, 8], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=[3, 3] ) } expected_output_2 = np.array([[5, -1, -1], [6, 7, -1], [-1, 8, -1]]) output_2 = model.evaluate(input_data_2, expected_output_2, steps=1) self.assertAllEqual(1.0, output_2[-1]) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @parameterized.named_parameters( *test_utils.generate_combinations_with_testcase_name( use_dict=[True, False], use_dataset=[True, False], action=["predict", "evaluate", "fit"], ) ) class RaggedTensorInputTest(test_combinations.TestCase, tf.test.TestCase): def test_ragged_input(self, use_dict, use_dataset, action): data = [ ( tf.ragged.constant([[[1]], [[2, 3]]]), np.array([[[1, -1]], [[2, 3]]]), ) ] # Prepare the model to test. input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=(None, None), ragged=True, name=input_name, dtype=tf.int32, batch_size=2, ) self.assertIsInstance(model_input._type_spec, tf.RaggedTensorSpec) self.assertEqual(model_input.shape.as_list(), [2, None, None]) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input( layers, model_input=model_input ) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs(), ) # Prepare the input data for data_element in data: input_data, expected_output = prepare_inputs( data_element, use_dict, use_dataset, action, input_name ) # Perform the action. if action == "predict": result = model.predict(input_data) self.assertAllEqual(expected_output, result) if action == "evaluate": result = model.evaluate(input_data, expected_output) self.assertAllEqual(1.0, result[-1]) if action == "fit": # TODO(momernick): What's the best way of validating that fit # happened? _ = model.fit(input_data, expected_output, shuffle=False) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @parameterized.named_parameters( *test_utils.generate_combinations_with_testcase_name( use_dict=[True, False], use_dataset=[True, False] ) ) class RaggedTensorInputValidationTest( test_combinations.TestCase, tf.test.TestCase ): def test_ragged_tensor_input_with_one_none_dimension( self, use_dict, use_dataset ): # Define some input data. data = [ ( tf.ragged.constant([[[1, 0]], [[2, 3]]], ragged_rank=1), np.array([[[1, 0]], [[2, 3]]]), ) ] # Prepare the model to test. input_shape = (None, 2) # RaggedTensorInputTest uses (None, None). 
input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=input_shape, ragged=True, name=input_name, dtype=tf.int32 ) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input( layers, model_input=model_input ) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs(), ) for data_element in data: input_data, expected_output = prepare_inputs( data_element, use_dict, use_dataset, action="predict", input_name=input_name, ) result = model.predict(input_data) self.assertAllEqual(expected_output, result) def test_ragged_tensor_input_with_no_none_dimension( self, use_dict, use_dataset ): # Define some input data. data = [ ( tf.ragged.constant([[[1, 0]], [[2, 3]]], ragged_rank=0), np.array([[[1, 0]], [[2, 3]]]), ) ] # Prepare the model to test. input_shape = (1, 2) # RaggedTensorInputTest uses (None, None). input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=input_shape, ragged=True, name=input_name, dtype=tf.int32 ) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input( layers, model_input=model_input ) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs(), ) kwargs = get_kwargs(use_dataset) for data_element in data: input_data, expected_output = prepare_inputs( data_element, use_dict, use_dataset, action="predict", input_name=input_name, ) result = model.predict(input_data, **kwargs) self.assertAllEqual(expected_output, result) @test_combinations.run_with_all_model_types() @test_combinations.run_all_keras_modes(always_skip_v1=True) class CompositeTensorModelPredictTest(test_combinations.TestCase): def _normalize_shape(self, shape): if not isinstance(shape, tuple): shape = tuple(shape.as_list()) return shape def test_sparse_tensor_model_predict(self): # Create a model that accepts a sparse input and runs a "Dense" layer on # it. model_input = input_layer.Input( shape=(3,), sparse=True, dtype=tf.float32 ) self.assertEqual([None, 3], model_input.shape.as_list()) layers = [Dense(2)] model = get_model_from_layers_with_input( layers, model_input=model_input ) sparse_input = tf.SparseTensor( # A two-row matrix indices=[(0, 0), (0, 1), (0, 2), (5, 0), (5, 1), (5, 2)], values=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dense_shape=(6, 3), ) shape = model(sparse_input).shape self.assertEqual((6, 2), self._normalize_shape(shape)) shape = model.predict(sparse_input, steps=1).shape self.assertEqual((6, 2), self._normalize_shape(shape)) def test_ragged_tensor_model_predict(self): # Create a model that accepts a ragged input and runs an "Embedding" # layer on it. model_input = input_layer.Input(shape=(None,), ragged=True) self.assertEqual([None, None], model_input.shape.as_list()) layers = [Embedding(input_dim=7, output_dim=5)] model = get_model_from_layers_with_input( layers, model_input=model_input ) ragged_input = tf.ragged.constant( [ [1, 2, 3, 4, 5], [2, 4], ] ) shape = model(ragged_input).shape self.assertEqual((2, None, 5), self._normalize_shape(shape)) shape = model.predict(ragged_input, steps=1).shape self.assertEqual((2, None, 5), self._normalize_shape(shape)) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/utils/composite_tensor_support_test.py/0
{ "file_path": "tf-keras/tf_keras/utils/composite_tensor_support_test.py", "repo_id": "tf-keras", "token_count": 12681 }
196
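The `ToRagged`/`ToSparse`/`ToDense` test layers above are thin wrappers around three TensorFlow conversion calls. A small standalone illustration of those conversions, with made-up values (not taken from the tests):

```python
import tensorflow as tf

dense = tf.constant([[1, 0, 0], [2, 3, 0]])

# Dense -> ragged, treating 0 as padding (what ToRagged does).
rt = tf.RaggedTensor.from_tensor(dense, padding=0)
# rt == [[1], [2, 3]]

# Ragged -> dense with an explicit fill value (what ToDense does).
print(rt.to_tensor(default_value=-1))  # [[ 1 -1], [ 2  3]]

# Dense -> sparse, keeping only the non-zero entries (what ToSparse does).
indices = tf.where(tf.not_equal(dense, 0))
st = tf.SparseTensor(
    indices, tf.gather_nd(dense, indices), tf.shape(dense, out_type=tf.int64)
)
print(tf.sparse.to_dense(st, default_value=-1))  # [[ 1 -1 -1], [ 2  3 -1]]
```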
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities related to image handling.""" import io import pathlib import warnings import numpy as np import tensorflow.compat.v2 as tf from tf_keras import backend # isort: off from tensorflow.python.util.tf_export import keras_export try: from PIL import Image as pil_image try: pil_image_resampling = pil_image.Resampling except AttributeError: pil_image_resampling = pil_image except ImportError: pil_image = None pil_image_resampling = None if pil_image_resampling is not None: _PIL_INTERPOLATION_METHODS = { "nearest": pil_image_resampling.NEAREST, "bilinear": pil_image_resampling.BILINEAR, "bicubic": pil_image_resampling.BICUBIC, "hamming": pil_image_resampling.HAMMING, "box": pil_image_resampling.BOX, "lanczos": pil_image_resampling.LANCZOS, } ResizeMethod = tf.image.ResizeMethod _TF_INTERPOLATION_METHODS = { "bilinear": ResizeMethod.BILINEAR, "nearest": ResizeMethod.NEAREST_NEIGHBOR, "bicubic": ResizeMethod.BICUBIC, "area": ResizeMethod.AREA, "lanczos3": ResizeMethod.LANCZOS3, "lanczos5": ResizeMethod.LANCZOS5, "gaussian": ResizeMethod.GAUSSIAN, "mitchellcubic": ResizeMethod.MITCHELLCUBIC, } @keras_export("keras.preprocessing.image.smart_resize", v1=[]) def smart_resize(x, size, interpolation="bilinear"): """Resize images to a target size without aspect ratio distortion. Warning: `tf.keras.preprocessing.image.smart_resize` is not recommended for new code. Prefer `tf.keras.layers.Resizing`, which provides the same functionality as a preprocessing layer and adds `tf.RaggedTensor` support. See the [preprocessing layer guide]( https://www.tensorflow.org/guide/keras/preprocessing_layers) for an overview of preprocessing layers. TensorFlow image datasets typically yield images that have each a different size. However, these images need to be batched before they can be processed by TF-Keras layers. To be batched, images need to share the same height and width. You could simply do: ```python size = (200, 200) ds = ds.map(lambda img: tf.image.resize(img, size)) ``` However, if you do this, you distort the aspect ratio of your images, since in general they do not all have the same aspect ratio as `size`. This is fine in many cases, but not always (e.g. for GANs this can be a problem). Note that passing the argument `preserve_aspect_ratio=True` to `resize` will preserve the aspect ratio, but at the cost of no longer respecting the provided target size. Because `tf.image.resize` doesn't crop images, your output images will still have different sizes. This calls for: ```python size = (200, 200) ds = ds.map(lambda img: smart_resize(img, size)) ``` Your output images will actually be `(200, 200)`, and will not be distorted. Instead, the parts of the image that do not fit within the target size get cropped out. The resizing process is: 1. 
Take the largest centered crop of the image that has the same aspect ratio as the target size. For instance, if `size=(200, 200)` and the input image has size `(340, 500)`, we take a crop of `(340, 340)` centered along the width. 2. Resize the cropped image to the target size. In the example above, we resize the `(340, 340)` crop to `(200, 200)`. Args: x: Input image or batch of images (as a tensor or NumPy array). Must be in format `(height, width, channels)` or `(batch_size, height, width, channels)`. size: Tuple of `(height, width)` integer. Target size. interpolation: String, interpolation to use for resizing. Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`. Defaults to `'bilinear'`. Returns: Array with shape `(size[0], size[1], channels)`. If the input image was a NumPy array, the output is a NumPy array, and if it was a TF tensor, the output is a TF tensor. """ if len(size) != 2: raise ValueError( f"Expected `size` to be a tuple of 2 integers, but got: {size}." ) img = tf.convert_to_tensor(x) if img.shape.rank is not None: if img.shape.rank < 3 or img.shape.rank > 4: raise ValueError( "Expected an image array with shape `(height, width, " "channels)`, or `(batch_size, height, width, channels)`, but " f"got input with incorrect rank, of shape {img.shape}." ) shape = tf.shape(img) height, width = shape[-3], shape[-2] target_height, target_width = size if img.shape.rank is not None: static_num_channels = img.shape[-1] else: static_num_channels = None crop_height = tf.cast( tf.cast(width * target_height, "float32") / target_width, "int32" ) crop_width = tf.cast( tf.cast(height * target_width, "float32") / target_height, "int32" ) # Set back to input height / width if crop_height / crop_width is not # smaller. crop_height = tf.minimum(height, crop_height) crop_width = tf.minimum(width, crop_width) crop_box_hstart = tf.cast( tf.cast(height - crop_height, "float32") / 2, "int32" ) crop_box_wstart = tf.cast( tf.cast(width - crop_width, "float32") / 2, "int32" ) if img.shape.rank == 4: crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0]) crop_box_size = tf.stack([-1, crop_height, crop_width, -1]) else: crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0]) crop_box_size = tf.stack([crop_height, crop_width, -1]) img = tf.slice(img, crop_box_start, crop_box_size) img = tf.image.resize(images=img, size=size, method=interpolation) # Apparent bug in resize_images_v2 may cause shape to be lost if img.shape.rank is not None: if img.shape.rank == 4: img.set_shape((None, None, None, static_num_channels)) if img.shape.rank == 3: img.set_shape((None, None, static_num_channels)) if isinstance(x, np.ndarray): return img.numpy() return img def get_interpolation(interpolation): interpolation = interpolation.lower() if interpolation not in _TF_INTERPOLATION_METHODS: raise NotImplementedError( "Value not recognized for `interpolation`: {}. Supported values " "are: {}".format(interpolation, _TF_INTERPOLATION_METHODS.keys()) ) return _TF_INTERPOLATION_METHODS[interpolation] @keras_export( "keras.utils.array_to_img", "keras.preprocessing.image.array_to_img" ) def array_to_img(x, data_format=None, scale=True, dtype=None): """Converts a 3D Numpy array to a PIL Image instance. Usage: ```python from PIL import Image img = np.random.random(size=(100, 100, 3)) pil_img = tf.keras.utils.array_to_img(img) ``` Args: x: Input data, in any form that can be converted to a Numpy array. 
data_format: Image data format, can be either `"channels_first"` or `"channels_last"`. None means the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it uses `"channels_last"`). Defaults to `None`. scale: Whether to rescale the image such that minimum and maximum values are 0 and 255 respectively. Defaults to `True`. dtype: Dtype to use. None makes the global setting `tf.keras.backend.floatx()` to be used (unless you changed it, it uses `"float32"`). Defaults to `None`. Returns: A PIL Image instance. Raises: ImportError: if PIL is not available. ValueError: if invalid `x` or `data_format` is passed. """ if data_format is None: data_format = backend.image_data_format() if dtype is None: dtype = backend.floatx() if pil_image is None: raise ImportError( "Could not import PIL.Image. " "The use of `array_to_img` requires PIL." ) x = np.asarray(x, dtype=dtype) if x.ndim != 3: raise ValueError( "Expected image array to have rank 3 (single image). " f"Got array with shape: {x.shape}" ) if data_format not in {"channels_first", "channels_last"}: raise ValueError(f"Invalid data_format: {data_format}") # Original Numpy array x has format (height, width, channel) # or (channel, height, width) # but target PIL image has format (width, height, channel) if data_format == "channels_first": x = x.transpose(1, 2, 0) if scale: x = x - np.min(x) x_max = np.max(x) if x_max != 0: x /= x_max x *= 255 if x.shape[2] == 4: # RGBA return pil_image.fromarray(x.astype("uint8"), "RGBA") elif x.shape[2] == 3: # RGB return pil_image.fromarray(x.astype("uint8"), "RGB") elif x.shape[2] == 1: # grayscale if np.max(x) > 255: # 32-bit signed integer grayscale image. PIL mode "I" return pil_image.fromarray(x[:, :, 0].astype("int32"), "I") return pil_image.fromarray(x[:, :, 0].astype("uint8"), "L") else: raise ValueError(f"Unsupported channel number: {x.shape[2]}") @keras_export( "keras.utils.img_to_array", "keras.preprocessing.image.img_to_array" ) def img_to_array(img, data_format=None, dtype=None): """Converts a PIL Image instance to a Numpy array. Usage: ```python from PIL import Image img_data = np.random.random(size=(100, 100, 3)) img = tf.keras.utils.array_to_img(img_data) array = tf.keras.utils.image.img_to_array(img) ``` Args: img: Input PIL Image instance. data_format: Image data format, can be either `"channels_first"` or `"channels_last"`. None means the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it uses `"channels_last"`). Defaults to `None`. dtype: Dtype to use. None makes the global setting `tf.keras.backend.floatx()` to be used (unless you changed it, it uses `"float32"`). Defaults to `None`. Returns: A 3D Numpy array. Raises: ValueError: if invalid `img` or `data_format` is passed. 
""" if data_format is None: data_format = backend.image_data_format() if dtype is None: dtype = backend.floatx() if data_format not in {"channels_first", "channels_last"}: raise ValueError(f"Unknown data_format: {data_format}") # Numpy array x has format (height, width, channel) # or (channel, height, width) # but original PIL image has format (width, height, channel) x = np.asarray(img, dtype=dtype) if len(x.shape) == 3: if data_format == "channels_first": x = x.transpose(2, 0, 1) elif len(x.shape) == 2: if data_format == "channels_first": x = x.reshape((1, x.shape[0], x.shape[1])) else: x = x.reshape((x.shape[0], x.shape[1], 1)) else: raise ValueError(f"Unsupported image shape: {x.shape}") return x @keras_export("keras.utils.save_img", "keras.preprocessing.image.save_img") def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs): """Saves an image stored as a Numpy array to a path or file object. Args: path: Path or file object. x: Numpy array. data_format: Image data format, either `"channels_first"` or `"channels_last"`. file_format: Optional file format override. If omitted, the format to use is determined from the filename extension. If a file object was used instead of a filename, this parameter should always be used. scale: Whether to rescale image values to be within `[0, 255]`. **kwargs: Additional keyword arguments passed to `PIL.Image.save()`. """ if data_format is None: data_format = backend.image_data_format() img = array_to_img(x, data_format=data_format, scale=scale) if img.mode == "RGBA" and (file_format == "jpg" or file_format == "jpeg"): warnings.warn( "The JPG format does not support RGBA images, converting to RGB." ) img = img.convert("RGB") img.save(path, format=file_format, **kwargs) @keras_export("keras.utils.load_img", "keras.preprocessing.image.load_img") def load_img( path, grayscale=False, color_mode="rgb", target_size=None, interpolation="nearest", keep_aspect_ratio=False, ): """Loads an image into PIL format. Usage: ```python image = tf.keras.utils.load_img(image_path) input_arr = tf.keras.utils.img_to_array(image) input_arr = np.array([input_arr]) # Convert single image to a batch. predictions = model.predict(input_arr) ``` Args: path: Path to image file. grayscale: DEPRECATED use `color_mode="grayscale"`. color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`. Default: `"rgb"`. The desired image format. target_size: Either `None` (default to original size) or tuple of ints `(img_height, img_width)`. interpolation: Interpolation method used to resample the image if the target size is different from that of the loaded image. Supported methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version 1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also supported. By default, `"nearest"` is used. keep_aspect_ratio: Boolean, whether to resize images to a target size without aspect ratio distortion. The image is cropped in the center with target aspect ratio before resizing. Returns: A PIL Image instance. Raises: ImportError: if PIL is not available. ValueError: if interpolation method is not supported. """ if grayscale: warnings.warn( 'grayscale is deprecated. Please use color_mode = "grayscale"' ) color_mode = "grayscale" if pil_image is None: raise ImportError( "Could not import PIL.Image. The use of `load_img` requires PIL." 
) if isinstance(path, io.BytesIO): img = pil_image.open(path) elif isinstance(path, (pathlib.Path, bytes, str)): if isinstance(path, pathlib.Path): path = str(path.resolve()) with open(path, "rb") as f: img = pil_image.open(io.BytesIO(f.read())) else: raise TypeError( f"path should be path-like or io.BytesIO, not {type(path)}" ) if color_mode == "grayscale": # if image is not already an 8-bit, 16-bit or 32-bit grayscale image # convert it to an 8-bit grayscale image. if img.mode not in ("L", "I;16", "I"): img = img.convert("L") elif color_mode == "rgba": if img.mode != "RGBA": img = img.convert("RGBA") elif color_mode == "rgb": if img.mode != "RGB": img = img.convert("RGB") else: raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"') if target_size is not None: width_height_tuple = (target_size[1], target_size[0]) if img.size != width_height_tuple: if interpolation not in _PIL_INTERPOLATION_METHODS: raise ValueError( "Invalid interpolation method {} specified. Supported " "methods are {}".format( interpolation, ", ".join(_PIL_INTERPOLATION_METHODS.keys()), ) ) resample = _PIL_INTERPOLATION_METHODS[interpolation] if keep_aspect_ratio: width, height = img.size target_width, target_height = width_height_tuple crop_height = (width * target_height) // target_width crop_width = (height * target_width) // target_height # Set back to input height / width # if crop_height / crop_width is not smaller. crop_height = min(height, crop_height) crop_width = min(width, crop_width) crop_box_hstart = (height - crop_height) // 2 crop_box_wstart = (width - crop_width) // 2 crop_box_wend = crop_box_wstart + crop_width crop_box_hend = crop_box_hstart + crop_height crop_box = [ crop_box_wstart, crop_box_hstart, crop_box_wend, crop_box_hend, ] img = img.resize(width_height_tuple, resample, box=crop_box) else: img = img.resize(width_height_tuple, resample) return img
tf-keras/tf_keras/utils/image_utils.py/0
{ "file_path": "tf-keras/tf_keras/utils/image_utils.py", "repo_id": "tf-keras", "token_count": 7526 }
197
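Taken together, the utilities in the file above cover a typical load → array → resize → save round trip. A hypothetical usage sketch follows; the file names are placeholders, and `smart_resize` is assumed to be reachable under `tf.keras.preprocessing.image` per the export decorator shown above:

```python
import tensorflow as tf

img = tf.keras.utils.load_img("cat.jpg")         # PIL image, RGB by default
arr = tf.keras.utils.img_to_array(img)           # float32, (height, width, 3)

# Center-crop to the target aspect ratio, then resize: no distortion.
resized = tf.keras.preprocessing.image.smart_resize(arr, (200, 200))
assert resized.shape == (200, 200, 3)            # NumPy in, NumPy out

tf.keras.utils.save_img("cat_200.jpg", resized)  # format inferred from name
```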
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Numpy-related utilities.""" import numpy as np # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.utils.to_categorical") def to_categorical(y, num_classes=None, dtype="float32"): """Converts a class vector (integers) to binary class matrix. E.g. for use with `categorical_crossentropy`. Args: y: Array-like with class values to be converted into a matrix (integers from 0 to `num_classes - 1`). num_classes: Total number of classes. If `None`, this would be inferred as `max(y) + 1`. dtype: The data type expected by the input. Default: `'float32'`. Returns: A binary matrix representation of the input as a NumPy array. The class axis is placed last. Example: >>> a = tf.keras.utils.to_categorical([0, 1, 2, 3], num_classes=4) >>> print(a) [[1. 0. 0. 0.] [0. 1. 0. 0.] [0. 0. 1. 0.] [0. 0. 0. 1.]] >>> b = tf.constant([.9, .04, .03, .03, ... .3, .45, .15, .13, ... .04, .01, .94, .05, ... .12, .21, .5, .17], ... shape=[4, 4]) >>> loss = tf.keras.backend.categorical_crossentropy(a, b) >>> print(np.around(loss, 5)) [0.10536 0.82807 0.1011 1.77196] >>> loss = tf.keras.backend.categorical_crossentropy(a, a) >>> print(np.around(loss, 5)) [0. 0. 0. 0.] """ y = np.array(y, dtype="int") input_shape = y.shape # Shrink the last dimension if the shape is (..., 1). if input_shape and input_shape[-1] == 1 and len(input_shape) > 1: input_shape = tuple(input_shape[:-1]) y = y.reshape(-1) if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes), dtype=dtype) categorical[np.arange(n), y] = 1 output_shape = input_shape + (num_classes,) categorical = np.reshape(categorical, output_shape) return categorical @keras_export("keras.utils.to_ordinal") def to_ordinal(y, num_classes=None, dtype="float32"): """Converts a class vector (integers) to an ordinal regression matrix. This utility encodes class vector to ordinal regression/classification matrix where each sample is indicated by a row and rank of that sample is indicated by number of ones in that row. Args: y: Array-like with class values to be converted into a matrix (integers from 0 to `num_classes - 1`). num_classes: Total number of classes. If `None`, this would be inferred as `max(y) + 1`. dtype: The data type expected by the input. Default: `'float32'`. Returns: An ordinal regression matrix representation of the input as a NumPy array. The class axis is placed last. Example: >>> a = tf.keras.utils.to_ordinal([0, 1, 2, 3], num_classes=4) >>> print(a) [[0. 0. 0.] [1. 0. 0.] [1. 1. 0.] [1. 1. 1.]] """ y = np.array(y, dtype="int") input_shape = y.shape # Shrink the last dimension if the shape is (..., 1). 
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1: input_shape = tuple(input_shape[:-1]) y = y.reshape(-1) if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] range_values = np.arange(num_classes - 1) range_values = np.tile(np.expand_dims(range_values, 0), [n, 1]) ordinal = np.zeros((n, num_classes - 1), dtype=dtype) ordinal[range_values < np.expand_dims(y, -1)] = 1 output_shape = input_shape + (num_classes - 1,) ordinal = np.reshape(ordinal, output_shape) return ordinal @keras_export("keras.utils.normalize") def normalize(x, axis=-1, order=2): """Normalizes a Numpy array. Args: x: Numpy array to normalize. axis: axis along which to normalize. order: Normalization order (e.g. `order=2` for L2 norm). Returns: A normalized copy of the array. """ l2 = np.atleast_1d(np.linalg.norm(x, order, axis)) l2[l2 == 0] = 1 return x / np.expand_dims(l2, axis)
tf-keras/tf_keras/utils/np_utils.py/0
{ "file_path": "tf-keras/tf_keras/utils/np_utils.py", "repo_id": "tf-keras", "token_count": 1956 }
198
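A quick sketch of how `to_categorical` and `to_ordinal` relate on the same labels (pure NumPy, so it runs without TensorFlow once the functions above are in scope):

```python
import numpy as np

labels = np.array([0, 2, 3])

onehot = to_categorical(labels, num_classes=4)  # shape (3, 4): one 1 per row
ordinal = to_ordinal(labels, num_classes=4)  # shape (3, 3): label = count of 1s

# The one-hot argmax recovers the label; the ordinal row sum equals the label.
assert (onehot.argmax(axis=-1) == labels).all()
assert (ordinal.sum(axis=-1) == labels).all()
```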
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for timeseries_dataset.""" import numpy as np import tensorflow.compat.v2 as tf from tf_keras.testing_infra import test_utils from tf_keras.utils import timeseries_dataset @test_utils.run_v2_only class TimeseriesDatasetTest(tf.test.TestCase): def test_basics(self): # Test ordering, targets, sequence length, batch size data = np.arange(100) targets = data * 2 dataset = timeseries_dataset.timeseries_dataset_from_array( data, targets, sequence_length=9, batch_size=5 ) # Expect 19 batches for i, batch in enumerate(dataset): self.assertLen(batch, 2) inputs, targets = batch if i < 18: self.assertEqual(inputs.shape, (5, 9)) if i == 18: # Last batch: size 2 self.assertEqual(inputs.shape, (2, 9)) # Check target values self.assertAllClose(targets, inputs[:, 0] * 2) for j in range(min(5, len(inputs))): # Check each sample in the batch self.assertAllClose( inputs[j], np.arange(i * 5 + j, i * 5 + j + 9) ) def test_timeseries_regression(self): # Test simple timeseries regression use case data = np.arange(10) offset = 3 targets = data[offset:] dataset = timeseries_dataset.timeseries_dataset_from_array( data, targets, sequence_length=offset, batch_size=1 ) i = 0 for batch in dataset: self.assertLen(batch, 2) inputs, targets = batch self.assertEqual(inputs.shape, (1, 3)) # Check values self.assertAllClose(targets[0], data[offset + i]) self.assertAllClose(inputs[0], data[i : i + offset]) i += 1 self.assertEqual(i, 7) # Expect 7 batches def test_no_targets(self): data = np.arange(50) dataset = timeseries_dataset.timeseries_dataset_from_array( data, None, sequence_length=10, batch_size=5 ) # Expect 9 batches i = None for i, batch in enumerate(dataset): if i < 8: self.assertEqual(batch.shape, (5, 10)) elif i == 8: self.assertEqual(batch.shape, (1, 10)) for j in range(min(5, len(batch))): # Check each sample in the batch self.assertAllClose( batch[j], np.arange(i * 5 + j, i * 5 + j + 10) ) self.assertEqual(i, 8) def test_shuffle(self): # Test cross-epoch random order and seed determinism data = np.arange(10) targets = data * 2 dataset = timeseries_dataset.timeseries_dataset_from_array( data, targets, sequence_length=5, batch_size=1, shuffle=True, seed=123, ) first_seq = None for x, y in dataset.take(1): self.assertNotAllClose(x, np.arange(0, 5)) self.assertAllClose(x[:, 0] * 2, y) first_seq = x # Check that a new iteration with the same dataset yields different # results for x, _ in dataset.take(1): self.assertNotAllClose(x, first_seq) # Check determism with same seed dataset = timeseries_dataset.timeseries_dataset_from_array( data, targets, sequence_length=5, batch_size=1, shuffle=True, seed=123, ) for x, _ in dataset.take(1): self.assertAllClose(x, first_seq) def test_sampling_rate(self): data = np.arange(100) targets = data * 2 dataset = timeseries_dataset.timeseries_dataset_from_array( data, targets, sequence_length=9, 
batch_size=5, sampling_rate=2 ) for i, batch in enumerate(dataset): self.assertLen(batch, 2) inputs, targets = batch if i < 16: self.assertEqual(inputs.shape, (5, 9)) if i == 16: # Last batch: size 4 self.assertEqual(inputs.shape, (4, 9)) # Check target values self.assertAllClose(inputs[:, 0] * 2, targets) for j in range(min(5, len(inputs))): # Check each sample in the batch start_index = i * 5 + j end_index = start_index + 9 * 2 self.assertAllClose( inputs[j], np.arange(start_index, end_index, 2) ) def test_sequence_stride(self): data = np.arange(100) targets = data * 2 dataset = timeseries_dataset.timeseries_dataset_from_array( data, targets, sequence_length=9, batch_size=5, sequence_stride=3 ) for i, batch in enumerate(dataset): self.assertLen(batch, 2) inputs, targets = batch if i < 6: self.assertEqual(inputs.shape, (5, 9)) if i == 6: # Last batch: size 1 self.assertEqual(inputs.shape, (1, 9)) # Check target values self.assertAllClose(inputs[:, 0] * 2, targets) for j in range(min(5, len(inputs))): # Check each sample in the batch start_index = i * 5 * 3 + j * 3 end_index = start_index + 9 self.assertAllClose( inputs[j], np.arange(start_index, end_index) ) def test_start_and_end_index(self): data = np.arange(100) dataset = timeseries_dataset.timeseries_dataset_from_array( data, None, sequence_length=9, batch_size=5, sequence_stride=3, sampling_rate=2, start_index=10, end_index=90, ) for batch in dataset: self.assertAllLess(batch[0], 90) self.assertAllGreater(batch[0], 9) def test_errors(self): # bad start index with self.assertRaisesRegex(ValueError, "`start_index` must be "): _ = timeseries_dataset.timeseries_dataset_from_array( np.arange(10), None, 3, start_index=-1 ) with self.assertRaisesRegex(ValueError, "`start_index` must be "): _ = timeseries_dataset.timeseries_dataset_from_array( np.arange(10), None, 3, start_index=11 ) # bad end index with self.assertRaisesRegex(ValueError, "`end_index` must be "): _ = timeseries_dataset.timeseries_dataset_from_array( np.arange(10), None, 3, end_index=-1 ) with self.assertRaisesRegex(ValueError, "`end_index` must be "): _ = timeseries_dataset.timeseries_dataset_from_array( np.arange(10), None, 3, end_index=11 ) # bad sampling_rate with self.assertRaisesRegex(ValueError, "`sampling_rate` must be "): _ = timeseries_dataset.timeseries_dataset_from_array( np.arange(10), None, 3, sampling_rate=0 ) # bad sequence stride with self.assertRaisesRegex(ValueError, "`sequence_stride` must be "): _ = timeseries_dataset.timeseries_dataset_from_array( np.arange(10), None, 3, sequence_stride=0 ) def test_not_batched(self): data = np.arange(100) dataset = timeseries_dataset.timeseries_dataset_from_array( data, None, sequence_length=9, batch_size=None, shuffle=True ) sample = next(iter(dataset)) self.assertEqual(len(sample.shape), 1) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/utils/timeseries_dataset_test.py/0
{ "file_path": "tf-keras/tf_keras/utils/timeseries_dataset_test.py", "repo_id": "tf-keras", "token_count": 4212 }
199
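The batch counts asserted in the tests above follow from a simple formula; a sketch using the public `tf.keras.utils` alias of the same function (assuming TF >= 2.6):

```python
import numpy as np
import tensorflow as tf

data = np.arange(100)
ds = tf.keras.utils.timeseries_dataset_from_array(
    data, targets=None, sequence_length=9, sampling_rate=2, batch_size=5
)
# num_sequences = (len(data) - (sequence_length - 1) * sampling_rate - 1)
#                 // sequence_stride + 1 = (100 - 16 - 1) // 1 + 1 = 84
# 84 sequences -> 16 full batches of 5 plus one batch of 4, i.e. 17 batches.
print(sum(int(batch.shape[0]) for batch in ds))  # 84
```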
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import keras_nlp from autokeras.auto_model import AutoModel from autokeras.blocks import BertBlock from autokeras.blocks import CategoricalToNumerical from autokeras.blocks import ClassificationHead from autokeras.blocks import ConvBlock from autokeras.blocks import DenseBlock from autokeras.blocks import EfficientNetBlock from autokeras.blocks import Embedding from autokeras.blocks import Flatten from autokeras.blocks import ImageAugmentation from autokeras.blocks import ImageBlock from autokeras.blocks import Merge from autokeras.blocks import Normalization from autokeras.blocks import RegressionHead from autokeras.blocks import ResNetBlock from autokeras.blocks import RNNBlock from autokeras.blocks import SpatialReduction from autokeras.blocks import StructuredDataBlock from autokeras.blocks import TemporalReduction from autokeras.blocks import TextBlock from autokeras.blocks import TextToIntSequence from autokeras.blocks import TextToNgramVector from autokeras.blocks import Transformer from autokeras.blocks import XceptionBlock from autokeras.engine.block import Block from autokeras.engine.head import Head from autokeras.engine.node import Node from autokeras.keras_layers import CastToFloat32 from autokeras.keras_layers import ExpandLastDim from autokeras.keras_layers import MultiCategoryEncoding from autokeras.nodes import ImageInput from autokeras.nodes import Input from autokeras.nodes import StructuredDataInput from autokeras.nodes import TextInput from autokeras.nodes import TimeseriesInput from autokeras.tasks import ImageClassifier from autokeras.tasks import ImageRegressor from autokeras.tasks import StructuredDataClassifier from autokeras.tasks import StructuredDataRegressor from autokeras.tasks import TextClassifier from autokeras.tasks import TextRegressor from autokeras.tasks import TimeseriesForecaster from autokeras.tuners import BayesianOptimization from autokeras.tuners import Greedy from autokeras.tuners import Hyperband from autokeras.tuners import RandomSearch from autokeras.utils.io_utils import image_dataset_from_directory from autokeras.utils.io_utils import text_dataset_from_directory from autokeras.utils.utils import check_kt_version from autokeras.utils.utils import check_tf_version __version__ = "1.1.1dev" check_tf_version() check_kt_version() CUSTOM_OBJECTS = { "BertPreprocessor": keras_nlp.models.BertPreprocessor, "BertBackbone": keras_nlp.models.BertBackbone, "CastToFloat32": CastToFloat32, "ExpandLastDim": ExpandLastDim, "MultiCategoryEncoding": MultiCategoryEncoding, }
autokeras/autokeras/__init__.py/0
{ "file_path": "autokeras/autokeras/__init__.py", "repo_id": "autokeras", "token_count": 907 }
0
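One practical consequence of the `CUSTOM_OBJECTS` map above: a model exported from AutoKeras can be reloaded with plain Keras, as in this sketch (the save path is a placeholder):

```python
import tensorflow as tf

import autokeras as ak

# `model_autokeras` is assumed to be a directory produced earlier by
# `clf.export_model()` followed by `model.save(...)`.
loaded_model = tf.keras.models.load_model(
    "model_autokeras",  # placeholder path
    custom_objects=ak.CUSTOM_OBJECTS,
)
```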
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional import tensorflow as tf from tensorflow import keras from tensorflow import nest from tensorflow.keras import activations from tensorflow.keras import layers from tensorflow.keras import losses from autokeras import adapters from autokeras import analysers from autokeras import hyper_preprocessors as hpps_module from autokeras import preprocessors from autokeras.blocks import reduction from autokeras.engine import head as head_module from autokeras.utils import types from autokeras.utils import utils class ClassificationHead(head_module.Head): """Classification Dense layers. Use sigmoid and binary crossentropy for binary classification and multi-label classification. Use softmax and categorical crossentropy for multi-class (more than 2) classification. Use Accuracy as metrics by default. The targets passing to the head would have to be tf.data.Dataset, np.ndarray, pd.DataFrame or pd.Series. It can be raw labels, one-hot encoded if more than two classes, or binary encoded for binary classification. The raw labels will be encoded to one column if two classes were found, or one-hot encoded if more than two classes were found. # Arguments num_classes: Int. Defaults to None. If None, it will be inferred from the data. multi_label: Boolean. Defaults to False. loss: A Keras loss function. Defaults to use `binary_crossentropy` or `categorical_crossentropy` based on the number of classes. metrics: A list of Keras metrics. Defaults to use 'accuracy'. dropout: Float. The dropout rate for the layers. If left unspecified, it will be tuned automatically. """ def __init__( self, num_classes: Optional[int] = None, multi_label: bool = False, loss: Optional[types.LossType] = None, metrics: Optional[types.MetricsType] = None, dropout: Optional[float] = None, **kwargs ): self.num_classes = num_classes self.multi_label = multi_label self.dropout = dropout if metrics is None: metrics = ["accuracy"] if loss is None: loss = self.infer_loss() super().__init__(loss=loss, metrics=metrics, **kwargs) # Infered from analyser. self._encoded = None self._encoded_for_sigmoid = None self._encoded_for_softmax = None self._add_one_dimension = False self._labels = None def infer_loss(self): if not self.num_classes: return None if self.num_classes == 2 or self.multi_label: return losses.BinaryCrossentropy() return losses.CategoricalCrossentropy() def get_config(self): config = super().get_config() config.update( { "num_classes": self.num_classes, "multi_label": self.multi_label, "dropout": self.dropout, } ) return config def build(self, hp, inputs=None): inputs = nest.flatten(inputs) utils.validate_num_inputs(inputs, 1) input_node = inputs[0] output_node = input_node # Reduce the tensor to a vector. 
if len(output_node.shape) > 2: output_node = reduction.SpatialReduction().build(hp, output_node) if self.dropout is not None: dropout = self.dropout else: dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0) if dropout > 0: output_node = layers.Dropout(dropout)(output_node) output_node = layers.Dense(self.shape[-1])(output_node) if isinstance(self.loss, keras.losses.BinaryCrossentropy): output_node = layers.Activation( activations.sigmoid, name=self.name )(output_node) else: output_node = layers.Softmax(name=self.name)(output_node) return output_node def get_adapter(self): return adapters.ClassificationAdapter(name=self.name) def get_analyser(self): return analysers.ClassificationAnalyser( name=self.name, multi_label=self.multi_label ) def config_from_analyser(self, analyser): super().config_from_analyser(analyser) self.num_classes = analyser.num_classes self.loss = self.infer_loss() self._encoded = analyser.encoded self._encoded_for_sigmoid = analyser.encoded_for_sigmoid self._encoded_for_softmax = analyser.encoded_for_softmax self._add_one_dimension = len(analyser.shape) == 1 self._labels = analyser.labels def get_hyper_preprocessors(self): hyper_preprocessors = [] if self._add_one_dimension: hyper_preprocessors.append( hpps_module.DefaultHyperPreprocessor( preprocessors.AddOneDimension() ) ) if self.dtype in [tf.uint8, tf.uint16, tf.uint32, tf.uint64]: hyper_preprocessors.append( hpps_module.DefaultHyperPreprocessor( preprocessors.CastToInt32() ) ) if not self._encoded and self.dtype != tf.string: hyper_preprocessors.append( hpps_module.DefaultHyperPreprocessor( preprocessors.CastToString() ) ) if self._encoded_for_sigmoid: hyper_preprocessors.append( hpps_module.DefaultHyperPreprocessor( preprocessors.SigmoidPostprocessor() ) ) elif self._encoded_for_softmax: hyper_preprocessors.append( hpps_module.DefaultHyperPreprocessor( preprocessors.SoftmaxPostprocessor() ) ) elif self.num_classes == 2: hyper_preprocessors.append( hpps_module.DefaultHyperPreprocessor( preprocessors.LabelEncoder(self._labels) ) ) else: hyper_preprocessors.append( hpps_module.DefaultHyperPreprocessor( preprocessors.OneHotEncoder(self._labels) ) ) return hyper_preprocessors class RegressionHead(head_module.Head): """Regression Dense layers. The targets passing to the head would have to be tf.data.Dataset, np.ndarray, pd.DataFrame or pd.Series. It can be single-column or multi-column. The values should all be numerical. # Arguments output_dim: Int. The number of output dimensions. Defaults to None. If None, it will be inferred from the data. multi_label: Boolean. Defaults to False. loss: A Keras loss function. Defaults to use `mean_squared_error`. metrics: A list of Keras metrics. Defaults to use `mean_squared_error`. dropout: Float. The dropout rate for the layers. If left unspecified, it will be tuned automatically. 
""" def __init__( self, output_dim: Optional[int] = None, loss: types.LossType = "mean_squared_error", metrics: Optional[types.MetricsType] = None, dropout: Optional[float] = None, **kwargs ): if metrics is None: metrics = ["mean_squared_error"] super().__init__(loss=loss, metrics=metrics, **kwargs) self.output_dim = output_dim self.dropout = dropout def get_config(self): config = super().get_config() config.update({"output_dim": self.output_dim, "dropout": self.dropout}) return config def build(self, hp, inputs=None): inputs = nest.flatten(inputs) utils.validate_num_inputs(inputs, 1) input_node = inputs[0] output_node = input_node if self.dropout is not None: dropout = self.dropout else: dropout = hp.Choice("dropout", [0.0, 0.25, 0.5], default=0) if dropout > 0: output_node = layers.Dropout(dropout)(output_node) output_node = reduction.Flatten().build(hp, output_node) output_node = layers.Dense(self.shape[-1], name=self.name)(output_node) return output_node def config_from_analyser(self, analyser): super().config_from_analyser(analyser) self._add_one_dimension = len(analyser.shape) == 1 def get_adapter(self): return adapters.RegressionAdapter(name=self.name) def get_analyser(self): return analysers.RegressionAnalyser( name=self.name, output_dim=self.output_dim ) def get_hyper_preprocessors(self): hyper_preprocessors = [] if self._add_one_dimension: hyper_preprocessors.append( hpps_module.DefaultHyperPreprocessor( preprocessors.AddOneDimension() ) ) return hyper_preprocessors class SegmentationHead(ClassificationHead): """Segmentation layers. Use sigmoid and binary crossentropy for binary element segmentation. Use softmax and categorical crossentropy for multi-class (more than 2) segmentation. Use Accuracy as metrics by default. The targets passing to the head would have to be tf.data.Dataset, np.ndarray, pd.DataFrame or pd.Series. It can be raw labels, one-hot encoded if more than two classes, or binary encoded for binary element segmentation. The raw labels will be encoded to 0s and 1s if two classes were found, or one-hot encoded if more than two classes were found. One pixel only corresponds to one label. # Arguments num_classes: Int. Defaults to None. If None, it will be inferred from the data. loss: A Keras loss function. Defaults to use `binary_crossentropy` or `categorical_crossentropy` based on the number of classes. metrics: A list of Keras metrics. Defaults to use 'accuracy'. dropout: Float. The dropout rate for the layers. If left unspecified, it will be tuned automatically. """ def __init__( self, num_classes: Optional[int] = None, loss: Optional[types.LossType] = None, metrics: Optional[types.MetricsType] = None, dropout: Optional[float] = None, **kwargs ): super().__init__( loss=loss, metrics=metrics, num_classes=num_classes, dropout=dropout, **kwargs ) def build(self, hp, inputs): return inputs def get_adapter(self): return adapters.SegmentationHeadAdapter(name=self.name)
autokeras/autokeras/blocks/heads.py/0
{ "file_path": "autokeras/autokeras/blocks/heads.py", "repo_id": "autokeras", "token_count": 4829 }
1
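The `infer_loss` logic in `ClassificationHead` above can be checked directly; a small sketch (this assumes the head keeps the inferred loss on `self.loss`, as the constructor suggests):

```python
from autokeras.blocks.heads import ClassificationHead

# Binary and multi-label problems get binary crossentropy...
print(type(ClassificationHead(num_classes=2).loss).__name__)
# -> BinaryCrossentropy
print(type(ClassificationHead(num_classes=5, multi_label=True).loss).__name__)
# -> BinaryCrossentropy

# ...while multi-class single-label problems get categorical crossentropy.
print(type(ClassificationHead(num_classes=5).loss).__name__)
# -> CategoricalCrossentropy
```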
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pandas as pd import autokeras as ak from autokeras import test_utils def test_io_api(tmp_path): num_instances = 20 image_x = test_utils.generate_data( num_instances=num_instances, shape=(28, 28) ) text_x = test_utils.generate_text_data(num_instances=num_instances) image_x = image_x[:num_instances] structured_data_x = ( pd.read_csv(test_utils.TRAIN_CSV_PATH) .to_numpy() .astype(str)[:num_instances] ) classification_y = test_utils.generate_one_hot_labels( num_instances=num_instances, num_classes=3 ) regression_y = test_utils.generate_data( num_instances=num_instances, shape=(1,) ) # Build model and train. automodel = ak.AutoModel( inputs=[ak.ImageInput(), ak.TextInput(), ak.StructuredDataInput()], outputs=[ ak.RegressionHead(metrics=["mae"]), ak.ClassificationHead( loss="categorical_crossentropy", metrics=["accuracy"] ), ], directory=tmp_path, max_trials=2, tuner=ak.RandomSearch, seed=test_utils.SEED, ) automodel.fit( [image_x, text_x, structured_data_x], [regression_y, classification_y], epochs=1, validation_split=0.2, batch_size=4, ) automodel.predict([image_x, text_x, structured_data_x])
autokeras/autokeras/integration_tests/io_api_test.py/0
{ "file_path": "autokeras/autokeras/integration_tests/io_api_test.py", "repo_id": "autokeras", "token_count": 816 }
2
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from tensorflow import nest def batched(dataset): shape = nest.flatten(dataset_shape(dataset))[0] return len(shape) > 0 and shape[0] is None def batch_dataset(dataset, batch_size): if batched(dataset): return dataset return dataset.batch(batch_size) def split_dataset(dataset, validation_split): """Split dataset into training and validation. # Arguments dataset: tf.data.Dataset. The entire dataset to be split. validation_split: Float. The split ratio for the validation set. # Raises ValueError: If the dataset provided is too small to be split. # Returns A tuple of two tf.data.Dataset. The training set and the validation set. """ num_instances = dataset.reduce(np.int64(0), lambda x, _: x + 1).numpy() if num_instances < 2: raise ValueError( "The dataset should at least contain 2 batches to be split." ) validation_set_size = min( max(int(num_instances * validation_split), 1), num_instances - 1 ) train_set_size = num_instances - validation_set_size train_dataset = dataset.take(train_set_size) validation_dataset = dataset.skip(train_set_size) return train_dataset, validation_dataset def dataset_shape(dataset): return tf.compat.v1.data.get_output_shapes(dataset) def unzip_dataset(dataset): return nest.flatten( [ dataset.map(lambda *a: nest.flatten(a)[index]) for index in range(len(nest.flatten(dataset_shape(dataset)))) ] ) def cast_to_string(tensor): if tensor.dtype == tf.string: return tensor return tf.strings.as_string(tensor) def cast_to_float32(tensor): if tensor.dtype == tf.float32: return tensor if tensor.dtype == tf.string: return tf.strings.to_number(tensor, tf.float32) return tf.cast(tensor, tf.float32)
autokeras/autokeras/utils/data_utils.py/0
{ "file_path": "autokeras/autokeras/utils/data_utils.py", "repo_id": "autokeras", "token_count": 941 }
3
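A short sketch of `split_dataset` from the module above; note that it counts dataset *elements*, so for a batched dataset the split is over batches:

```python
import numpy as np
import tensorflow as tf

from autokeras.utils import data_utils

dataset = tf.data.Dataset.from_tensor_slices(np.arange(10)).batch(2)  # 5 batches
train, val = data_utils.split_dataset(dataset, validation_split=0.2)

# validation size = min(max(int(5 * 0.2), 1), 5 - 1) = 1 element (batch)
print(len(list(train)), len(list(val)))  # 4 1
```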
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import statistics
import sys

from benchmark import experiments as exp_module


def generate_report(experiments):
    report = [
        ",".join(
            [
                "dataset_name",
                "average_time",
                "metrics_average",
                "metrics_standard_deviation",
            ]
        )
    ]
    for experiment in experiments:
        total_times, metric_values = experiment.run(repeat_times=10)
        mean_time = statistics.mean(total_times)
        mean = statistics.mean(metric_values)
        std = statistics.stdev(metric_values)
        report.append(
            ",".join([experiment.name, str(mean_time), str(mean), str(std)])
        )
    return "\n".join(report)


def main(argv):
    task = argv[1]
    path = argv[2]
    report = generate_report(exp_module.get_experiments(task))
    with open(path, "w") as file:
        file.write(report)


if __name__ == "__main__":
    main(sys.argv)
autokeras/benchmark/run.py/0
{ "file_path": "autokeras/benchmark/run.py", "repo_id": "autokeras", "token_count": 602 }
4
<jupyter_start><jupyter_code>!pip install autokeras

import pandas as pd
import tensorflow as tf

import autokeras as ak<jupyter_output><empty_output><jupyter_text>A Simple Example
The first step is to prepare your data. Here we use the [Titanic
dataset](https://www.kaggle.com/c/titanic) as an example.<jupyter_code>TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"

train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)<jupyter_output><empty_output><jupyter_text>The second step is to run the
[StructuredDataClassifier](/structured_data_classifier).
As a quick demo, we set epochs to 10.
You can also leave the epochs unspecified for an adaptive number of epochs.<jupyter_code># Initialize the structured data classifier.
clf = ak.StructuredDataClassifier(
    overwrite=True, max_trials=3
)  # It tries 3 different models.
# Feed the structured data classifier with training data.
clf.fit(
    # The path to the train.csv file.
    train_file_path,
    # The name of the label column.
    "survived",
    epochs=10,
)
# Predict with the best model.
predicted_y = clf.predict(test_file_path)
# Evaluate the best model with testing data.
print(clf.evaluate(test_file_path, "survived"))<jupyter_output><empty_output><jupyter_text>Data Format
The AutoKeras StructuredDataClassifier is quite flexible for the data format.
The example above shows how to use the CSV files directly. Besides CSV files,
it also supports numpy.ndarray, pandas.DataFrame or [tf.data.Dataset](
https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable). The
data should be two-dimensional with numerical or categorical values.
For the classification labels,
AutoKeras accepts both plain labels, i.e. strings or integers, and
one-hot encoded labels, i.e. vectors of 0s and 1s.
The labels can be numpy.ndarray, pandas.DataFrame, or pandas.Series.
The following examples show how the data can be prepared with numpy.ndarray,
pandas.DataFrame, and tensorflow.data.Dataset.<jupyter_code># x_train as pandas.DataFrame, y_train as pandas.Series
x_train = pd.read_csv(train_file_path)
print(type(x_train))  # pandas.DataFrame
y_train = x_train.pop("survived")
print(type(y_train))  # pandas.Series

# You can also use pandas.DataFrame for y_train.
y_train = pd.DataFrame(y_train)
print(type(y_train))  # pandas.DataFrame

# You can also use numpy.ndarray for x_train and y_train.
x_train = x_train.to_numpy()
y_train = y_train.to_numpy()
print(type(x_train))  # numpy.ndarray
print(type(y_train))  # numpy.ndarray

# Preparing testing data.
x_test = pd.read_csv(test_file_path)
y_test = x_test.pop("survived")

# It tries 3 different models.
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=3)
# Feed the structured data classifier with training data.
clf.fit(x_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(x_test)
# Evaluate the best model with testing data.
print(clf.evaluate(x_test, y_test))<jupyter_output><empty_output><jupyter_text>The following code shows how to convert numpy.ndarray to tf.data.Dataset.<jupyter_code>train_set = tf.data.Dataset.from_tensor_slices((x_train.astype(str), y_train))
test_set = tf.data.Dataset.from_tensor_slices(
    (x_test.to_numpy().astype(str), y_test)
)

clf = ak.StructuredDataClassifier(overwrite=True, max_trials=3)
# Feed the tensorflow Dataset to the classifier.
clf.fit(train_set, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(test_set)
# Evaluate the best model with testing data.
print(clf.evaluate(test_set))<jupyter_output><empty_output><jupyter_text>You can also specify the column names and types for the data as follows. The
`column_names` is optional if the training data already have the column names,
e.g. pandas.DataFrame, CSV file. Any column whose type is not specified will
be inferred from the training data.<jupyter_code># Initialize the structured data classifier.
clf = ak.StructuredDataClassifier(
    column_names=[
        "sex",
        "age",
        "n_siblings_spouses",
        "parch",
        "fare",
        "class",
        "deck",
        "embark_town",
        "alone",
    ],
    column_types={"sex": "categorical", "fare": "numerical"},
    max_trials=10,  # It tries 10 different models.
    overwrite=True,
)<jupyter_output><empty_output><jupyter_text>Validation Data
By default, AutoKeras uses the last 20% of training data as validation data. As
shown in the example below, you can use `validation_split` to specify the
percentage.<jupyter_code>clf.fit(
    x_train,
    y_train,
    # Split the training data and use the last 15% as validation data.
    validation_split=0.15,
    epochs=10,
)<jupyter_output><empty_output><jupyter_text>You can also use your own validation set
instead of splitting it from the training data with `validation_data`.<jupyter_code>split = 500
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
clf.fit(
    x_train,
    y_train,
    # Use your own validation set.
    validation_data=(x_val, y_val),
    epochs=10,
)<jupyter_output><empty_output><jupyter_text>Customized Search Space
For advanced users, you may customize your search space by using
[AutoModel](/auto_model/automodel-class) instead of
[StructuredDataClassifier](/structured_data_classifier). You can configure the
[StructuredDataBlock](/block/structureddatablock-class) for some high-level
configurations, e.g., `categorical_encoding` for whether to use the
[CategoricalToNumerical](/block/categoricaltonumerical-class). You can also
leave these arguments unspecified, which would leave the different choices to be
tuned automatically. See the following example for detail.<jupyter_code>input_node = ak.StructuredDataInput()
output_node = ak.StructuredDataBlock(categorical_encoding=True)(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
    inputs=input_node, outputs=output_node, overwrite=True, max_trials=3
)
clf.fit(x_train, y_train, epochs=10)<jupyter_output><empty_output><jupyter_text>The usage of [AutoModel](/auto_model/automodel-class) is similar to the
[functional API](https://www.tensorflow.org/guide/keras/functional) of Keras.
Basically, you are building a graph, whose edges are blocks and the nodes are
intermediate outputs of blocks.
You can add an edge from `input_node` to `output_node` with
`output_node = ak.[some_block]([block_args])(input_node)`.
You can also use more fine-grained blocks to customize the search space even
further. See the following example.<jupyter_code>input_node = ak.StructuredDataInput()
output_node = ak.CategoricalToNumerical()(input_node)
output_node = ak.DenseBlock()(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
    inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
clf.fit(x_train, y_train, epochs=1)
clf.predict(x_train)<jupyter_output><empty_output><jupyter_text>You can also export the best model found by AutoKeras as a Keras Model.<jupyter_code>model = clf.export_model()
model.summary()
print(x_train.dtype)
# numpy array in object (mixed type) is not supported.
# convert it to unicode.
model.predict(x_train.astype(str))<jupyter_output><empty_output>
autokeras/docs/ipynb/structured_data_classification.ipynb/0
{ "file_path": "autokeras/docs/ipynb/structured_data_classification.ipynb", "repo_id": "autokeras", "token_count": 2580 }
5
"""shell pip install autokeras """ import tensorflow as tf from tensorflow.keras.datasets import mnist import autokeras as ak """ To make this tutorial easy to follow, we just treat MNIST dataset as a regression dataset. It means we will treat prediction targets of MNIST dataset, which are integers ranging from 0 to 9 as numerical values, so that they can be directly used as the regression targets. ## A Simple Example The first step is to prepare your data. Here we use the MNIST dataset as an example """ (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train[:100] y_train = y_train[:100] print(x_train.shape) # (60000, 28, 28) print(y_train.shape) # (60000,) print(y_train[:3]) # array([7, 2, 1], dtype=uint8) """ The second step is to run the ImageRegressor. It is recommended have more trials for more complicated datasets. This is just a quick demo of MNIST, so we set max_trials to 1. For the same reason, we set epochs to 2. You can also leave the epochs unspecified for an adaptive number of epochs. """ # Initialize the image regressor. reg = ak.ImageRegressor(overwrite=True, max_trials=1) # Feed the image regressor with training data. reg.fit(x_train, y_train, epochs=2) # Predict with the best model. predicted_y = reg.predict(x_test) print(predicted_y) # Evaluate the best model with testing data. print(reg.evaluate(x_test, y_test)) """ ## Validation Data By default, AutoKeras use the last 20% of training data as validation data. As shown in the example below, you can use validation_split to specify the percentage. """ reg.fit( x_train, y_train, # Split the training data and use the last 15% as validation data. validation_split=0.15, epochs=2, ) """ You can also use your own validation set instead of splitting it from the training data with validation_data. """ split = 50000 x_val = x_train[split:] y_val = y_train[split:] x_train = x_train[:split] y_train = y_train[:split] reg.fit( x_train, y_train, # Use your own validation set. validation_data=(x_val, y_val), epochs=2, ) """ ## Customized Search Space For advanced users, you may customize your search space by using AutoModel instead of ImageRegressor. You can configure the ImageBlock for some high-level configurations, e.g., block_type for the type of neural network to search, normalize for whether to do data normalization, augment for whether to do data augmentation. You can also do not specify these arguments, which would leave the different choices to be tuned automatically. See the following example for detail. """ input_node = ak.ImageInput() output_node = ak.ImageBlock( # Only search ResNet architectures. block_type="resnet", # Normalize the dataset. normalize=False, # Do not do data augmentation. augment=False, )(input_node) output_node = ak.RegressionHead()(output_node) reg = ak.AutoModel( inputs=input_node, outputs=output_node, overwrite=True, max_trials=1 ) reg.fit(x_train, y_train, epochs=2) """ The usage of AutoModel is similar to the functional API of Keras. Basically, you are building a graph, whose edges are blocks and the nodes are intermediate outputs of blocks. To add an edge from input_node to output_node with output_node = ak.[some_block]([block_args])(input_node). You can even also use more fine grained blocks to customize the search space even further. See the following example. 
""" input_node = ak.ImageInput() output_node = ak.Normalization()(input_node) output_node = ak.ImageAugmentation(horizontal_flip=False)(output_node) output_node = ak.ResNetBlock(version="v2")(output_node) output_node = ak.RegressionHead()(output_node) reg = ak.AutoModel( inputs=input_node, outputs=output_node, overwrite=True, max_trials=1 ) reg.fit(x_train, y_train, epochs=2) """ ## Data Format The AutoKeras ImageRegressor is quite flexible for the data format. For the image, it accepts data formats both with and without the channel dimension. The images in the MNIST dataset do not have the channel dimension. Each image is a matrix with shape (28, 28). AutoKeras also accepts images of three dimensions with the channel dimension at last, e.g., (32, 32, 3), (28, 28, 1). For the regression targets, it should be a vector of numerical values. AutoKeras accepts numpy.ndarray. We also support using tf.data.Dataset format for the training data. In this case, the images would have to be 3-dimentional. """ (x_train, y_train), (x_test, y_test) = mnist.load_data() # Reshape the images to have the channel dimension. x_train = x_train.reshape(x_train.shape + (1,)) x_test = x_test.reshape(x_test.shape + (1,)) y_train = y_train.reshape(y_train.shape + (1,)) y_test = y_test.reshape(y_test.shape + (1,)) print(x_train.shape) # (60000, 28, 28, 1) print(y_train.shape) # (60000, 10) train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,))) test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,))) reg = ak.ImageRegressor(overwrite=True, max_trials=1) # Feed the tensorflow Dataset to the regressor. reg.fit(train_set, epochs=2) # Predict with the best model. predicted_y = reg.predict(test_set) # Evaluate the best model with testing data. print(reg.evaluate(test_set)) """ ## Reference [ImageRegressor](/image_regressor), [AutoModel](/auto_model/#automodel-class), [ImageBlock](/block/#imageblock-class), [Normalization](/block/#normalization-class), [ImageAugmentation](/block/#image-augmentation-class), [ResNetBlock](/block/#resnetblock-class), [ImageInput](/node/#imageinput-class), [RegressionHead](/block/#regressionhead-class). """
autokeras/docs/py/image_regression.py/0
{ "file_path": "autokeras/docs/py/image_regression.py", "repo_id": "autokeras", "token_count": 1856 }
6
"""shell pip install autokeras """ import pandas as pd import autokeras as ak """ ## Social Media Articles Example Regression tasks estimate a numeric variable, such as the price of a house or a person's age. This example estimates the view counts for an article on social media platforms, trained on a [News Popularity]( https://archive.ics.uci.edu/ml/datasets/ News+Popularity+in+Multiple+Social+Media+Platforms) dataset collected from 2015-2016. First, prepare your text data in a `numpy.ndarray` or `tensorflow.Dataset` format. """ # converting from other formats (such as pandas) to numpy df = pd.read_csv("./News_Final.csv") text_inputs = df.Title.to_numpy(dtype="str") media_success_outputs = df.Facebook.to_numpy(dtype="int") """ Next, initialize and train the [TextRegressor](/text_regressor). """ # Initialize the text regressor reg = ak.TextRegressor(max_trials=15) # AutoKeras tries 15 different models. # Find the best model for the given training data reg.fit(text_inputs, media_success_outputs) # Predict with the chosen model: predict_y = reg.predict(text_inputs) """ If your text source has a larger vocabulary (number of distinct words), you may need to create a custom pipeline in AutoKeras to increase the `max_tokens` parameter. """ text_input = (df.Title + " " + df.Headline).to_numpy(dtype="str") # text input and tokenization input_node = ak.TextInput() output_node = ak.TextToIntSequence(max_tokens=20000)(input_node) # regression output output_node = ak.RegressionHead()(output_node) # initialize AutoKeras and find the best model reg = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=15) reg.fit(text_input, media_success_outputs) """ Measure the accuracy of the regressor on an independent test set: """ print(reg.evaluate(text_input, media_success_outputs))
autokeras/examples/new_pop.py/0
{ "file_path": "autokeras/examples/new_pop.py", "repo_id": "autokeras", "token_count": 589 }
7
pytest tests/performance.py | tee perf_output.txt
autokeras/shell/perf.sh/0
{ "file_path": "autokeras/shell/perf.sh", "repo_id": "autokeras", "token_count": 15 }
8
# Keras Preprocessing Layers

| Status        | Accepted                                              |
:-------------- |:----------------------------------------------------- |
| **Author(s)** | Mark Omernick ([email protected]), Stan Bileschi ([email protected]), Kester Tong ([email protected]), Francois Chollet ([email protected]) |
| **Updated**   | 2019-05-21                                            |

## Objective

We aim at providing additional Keras layers to handle
[data preprocessing operations](https://en.wikipedia.org/wiki/Data_pre-processing)
such as text vectorization, data normalization, and data discretization
(binning). These operations are currently handled separately from a Keras
model via utilities such as those from `keras.preprocessing`.

These new layers will allow users to include data preprocessing directly in
their Keras model, so as to create models that map raw data (such as uint8
tensors for images, or string tensors for text) to predictions.

## Key benefits

Including preprocessing layers in the Keras model means that the same
preprocessing steps will be performed when that model is exported and used in
serving. It also means the steps will be part of the model when the model is
saved and loaded as part of another model.

This presents the following advantages:

- Model portability (encapsulation for sharing models). With
  PreprocessingLayers, your Keras Model contains all the preprocessing it
  requires. If another user wishes to use your model in a different workflow,
  there is no risk of incorrect preprocessing. Models will be more end-to-end.
- Serving reliability. The Model object will contain everything you expect to
  be done at serving time.
- Simpler optimization using tf.data and tf.Transform. By providing simple,
  well-defined building blocks for preprocessing, we simplify the process of
  using tf.data and tf.Transform to optimize preprocessing steps. Users can
  offload computation of vocabularies, quantiles and mean and variance, to
  tf.Transform. They can also use tf.data to move data preprocessing in
  training off the critical path. The preprocessing layer API is designed to
  make both of these easy and simple.

In particular, we expect preprocessing layers to make it easier to serve
models in TF.js or in mobile applications. It will also reduce the risk that
benchmarks of Keras applications use incorrect preprocessing and subsequently
publish invalid findings.

## Design overview

### End-to-end workflow overview

Case where a user has a single preprocessing layer to do image normalization.

```python
normalization = keras.layers.Normalization(axis=-1)
normalization.adapt(data_sample)

model = keras.Sequential([
    normalization,
    keras.applications.ResNet50(weights=None),
])
model.fit(data, targets, epochs=10)
```

Case where a user has a single preprocessing layer to do text vectorization
where each input sample is encoded as a sequence of word indices.

```python
vectorization = keras.layers.TextVectorization(mode='int')
vectorization.adapt(data_sample)

model = keras.Sequential([
    vectorization,
    keras.layers.Embedding(128),  # The number of int indices is not specified since it is inferred.
    keras.layers.LSTM(32),
    keras.layers.Dense(10, activation='softmax'),
])
model.fit(data, targets, epochs=10)
```

Case where a user has a single preprocessing layer to do text vectorization
where each input sample is encoded as a dense vector of TF-IDF scores.
```python
vectorization = keras.layers.TextVectorization(mode='tfidf')
vectorization.adapt(data_sample)

model = keras.Sequential([
    vectorization,
    keras.layers.Dense(10, activation='softmax'),
])
model.fit(data, targets, epochs=10)
```

Case where a user chains a normalization step with a discretization step.

```python
normalization = keras.layers.Normalization()
discretization = keras.layers.Discretization()
preprocessing_stage = keras.layers.PreprocessingStage([normalization,
                                                       discretization])
preprocessing_stage.adapt(data_sample)

model = keras.Sequential([
    preprocessing_stage,
    keras.layers.Dense(10, activation='softmax'),
])
model.fit(data, targets, epochs=10)
```

### Base class: `PreprocessingLayer`

All preprocessing layers inherit from a base class: `PreprocessingLayer`,
which itself inherits from `Layer`. This class presents a few key differences
compared to regular layers:

**Separate training mechanism**

The internal state of a `PreprocessingLayer` is not affected by
backpropagation: all of its weights are non-trainable. A `PreprocessingLayer`
has to be trained in a separate step, as follows:

```python
preprocessing_layer.adapt(data_sample)
```

**Possible non-differentiability**

Processing layers extend Keras by allowing preprocessing to be part of the
model. Unlike existing layers, these computations are not always
differentiable, e.g. both `Discretize` and `VectorizeText` are
non-differentiable.

As a result, all preprocessing layers are treated as frozen when used as part
of a model. In addition, if a non-differentiable layer is used in the middle
of a model (rather than at the start), the model will raise an exception
related to differentiability when trying to compute gradients (e.g. as part
of `fit`).

### New layers

- `PreprocessingLayer` base class: implements shared logic, in particular the
  `adapt` method for setting the state of the layer.
- `PreprocessingStage` class: makes it possible to chain multiple
  preprocessing layers together while training them in one single `adapt`
  call (by doing cascading training of the underlying layers).
- `Normalization`: normalizes data feature-wise by subtracting the mean of
  some sample dataset and dividing by the variance.
- `Discretization`: transforms continuous data into one-hot encoded binary
  vectors representing the different "bins" that the continuous data belongs
  to.
- `TextVectorization`: transforms string data into either dense vectors
  (e.g. TF-IDF transform) or sequences of token indices (e.g. to be passed to
  an `Embedding` layer).

## Design details

### Detailed layer signatures

#### PreprocessingLayer

```python
def adapt(self, data, reset_state=True):
    """Fits the state of the preprocessing layer to the data being passed.

    Arguments:
        data: The data to train on. It can be passed either as a tf.data
            Dataset, or as a numpy array (or a dict or list of arrays in
            case of multi-input preprocessing stages).
        reset_state: Optional argument specifying whether to clear the state
            of the layer at the start of the call to `adapt`, or whether to
            start from the existing state. This argument may not be relevant
            to all preprocessing layers: a subclass of PreprocessingLayer may
            choose to only implement `adapt(self, data)`.
    """
```

#### PreprocessingStage

There are two ways to instantiate a `PreprocessingStage` layer: either
`Sequential` style (pass a list of preprocessing layer instances) or
Functional style (pass the inputs and outputs of a DAG of preprocessing
layers).
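A sketch of both styles under the proposed API (names as proposed in this document, not a shipped implementation):

```python
# Sequential style: pass a list of preprocessing layers.
stage = keras.layers.PreprocessingStage([
    keras.layers.Normalization(),
    keras.layers.Discretization(bins=4),
])

# Functional style: pass the inputs and outputs of a DAG of
# preprocessing layers.
inputs = keras.Input(shape=(8,))
x = keras.layers.Normalization()(inputs)
outputs = keras.layers.Discretization(bins=4)(x)
stage = keras.layers.PreprocessingStage(inputs, outputs)

# Either way, the stage is adapted in one call, which cascades through
# the underlying layers.
stage.adapt(data_sample)
```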
If any layer other than `PreprocessingLayer` instances is included in a
`PreprocessingStage`, these layers will be treated as frozen both during
`adapt` and later during `fit`.

#### Normalization

```python
def __init__(self, axis=-1, **kwargs):
    """Feature-wise normalization of the data.

    Arguments:
        axis: Integer or tuple of integers, the axis or axes that should be
            normalized (typically the features axis).

    Input shape and type:
        dtype: floating point.
        shape: any shape with rank >= 2 is accepted.

    Output shape and type:
        dtype: same as input.
        shape: same as input.

    What happens in `adapt`:
        Compute mean and variance of the data and store them as the layer's
        weights.
    """
```

#### Discretization

```python
def __init__(self, bins=None, strategy='quantiles', sparse=False, **kwargs):
    """Maps continuous data into one-hot binary vectors of bin indicators.

    Each non-overlapping bin covers a contiguous portion of the dimension
    considered. Bin boundaries can be provided by the user or learned as
    quantiles.

    Arguments:
        bins: int | List<float>
            If bins is an int, then bin boundaries are to be learned, and
            the width of the output will be exactly bins. For instance,
            setting bins to 4 implies that inputs are to be sorted into
            quantiles, and three boundaries are to be learned, corresponding
            to the 25th, 50th, and 75th percentile value.
            If, instead, bins is a list of floats, then those are the bin
            boundary values and nothing is to be learned. The width of the
            output will in that case be len(bins) + 1.
        strategy: callable | 'quantiles'
            If strategy is the string 'quantiles' (default), then bin
            boundaries will be learned such that each bin receives an
            approximately equal number of sample input values.
            'strategy' may also be a callable that takes
            (float value, list[float] boundaries) and returns an int
            bucket_index which represents which bucket to map 'value' to.
        sparse: If True, the layer will output a SparseTensor. Otherwise it
            will be dense. This does not change the shape or structure of
            the output. Specifically tf.sparse.to_dense(output) will be the
            same for both.

    Input shape and type:
        dtype: floating point.
        shape: [batch_size, ..., features]

    Output shape and type:
        dtype: int
        shape: [batch_size, ..., features, num_bins]
            i.e., the same as the input shape, with an additional dimension
            corresponding to the number of bins, which is equal to either
            the bins constructor argument (if it is an integer), or the
            length of the bins constructor argument plus 1, if it is a list.

    What happens in `adapt`:
        We use a streaming quantile estimator to update the bin boundaries
        so that statistically an element is about equally likely to fall
        into any bin. Multiple calls to update continue to mutate the layer
        based on all data seen so far.
    """
```

#### TextVectorization

This layer has basic options for managing text in a Keras model. It is
expected that more advanced users needing custom control will use
Keras-compatible layers provided by tf.text.

Transform a batch of strings (one sample = one string) into either a list of
token indices (one sample = 1D int tensor), or a dense representation
(1 sample = 1D float vector).

The processing of each sample unfolds as:

- Standardize each sample (usually lowercasing + punctuation stripping)
- Split each sample into substrings (usually words)
- Recombine substrings into tokens (usually ngrams)
- Index tokens (associate a unique int value with each token)
- Transform each sample using this index, either into a vector of ints or a
  dense float vector.
```python
def __init__(self,
             tokens=None,
             standardize='lower_and_strip_punctuation',
             split='whitespace',
             ngrams=1,
             mode='int',
             max_length=None):
    """Transforms text into dense vectors or sequences of word indices.

    Arguments:
        tokens: None (default) | int | list<string>
            If tokens is an int, then this layer will learn an internal
            vocabulary of size (tokens - 2), such that each of the most
            frequent (tokens - 2) words is assigned to one of the values in
            [0, tokens). The output will have a total of tokens possible
            values, once the out-of-vocabulary value (1) and the reserved
            masking value (0) is taken into account.
            If tokens is None, the number of tokens is automatically
            inferred from the training data (the output will have a number
            of possible values equal to the total number of unique tokens
            seen in the data, plus 2).
            If, instead, tokens is a list of strings, then it constitutes an
            exact map from string to integer, and there is nothing to be
            learned. The vocabulary output width will be len(tokens) + 2,
            accounting for the out-of-vocabulary value (1) and the reserved
            masking value (0).
        standardize: 'lower_and_strip_punctuation' (default) | None |
            callable string -> string
            If standardize is the string "lower_and_strip_punctuation",
            each sample is converted to lowercase and the following
            characters are stripped from each sample before splitting:
            '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
            If it is a callable, that callable is used to preprocess each
            input string before splitting.
        split: 'whitespace' (default) | None |
            callable string -> list<string>
            If split is 'whitespace', then the string will be split on
            whitespace characters.
            If split is None, then each string is treated as a single token.
            If, instead, split is a function from strings to lists of
            strings, then that function will be applied to each string in
            the input.
        ngrams: 1 (default) | 2 | 3
            Controls the ngram functionality of this layer. This layer
            performs ngrams by concatenating strings with no separator and
            no begin or end tokens; the ngramming algorithm is not
            configurable.
            If ngrams is an int N = 2 or 3, the substrings returned by the
            split function are combined into N-grams before being indexed.
        mode: 'int' (default) | 'count' | 'binary' | 'tfidf'
            Controls how the integerized words are reduced and packed into
            an output vector.
            If mode is 'count', then the output vector will be of length
            tokens, and the element at position i will summarize how many
            times the string mapping to integer i occurred in the split
            input.
            If, instead, mode is 'binary', then the output vector will be
            the same as for 'count' but will contain a 1 if the count is
            greater than 0.
            If, instead, mode is 'tfidf', then the output vector will be
            the same as for 'count', but instead of counts of tokens, will
            contain the weighted count where weights are determined by the
            'tfidf' algorithm.
            If, instead, mode is 'int', then the output vector is an int
            tensor where each int is the index of one token in the input
            string.
        max_length: None (default) | int. Only used if mode='int'.
            If set to an int, the output int tensors are of shape
            [..., max_length], with longer sequences being truncated at the
            end and shorter sequences being right-padded.
            If set to None, output sequences are of shape
            [..., max_length_in_batch], where max_length_in_batch is the
            length of the longest sequence in the current batch: shorter
            sequences get right-padded.

    Input shape and type:
        dtype: string.
        shape: (batch_size, ..., 1)

    Output shape and type:
        if `mode='int'`:
            dtype: int
            shape: (batch_size, ..., max_length), where max_length is the
                length of the longest token sequence in the current batch,
                or the value of the argument `max_length` if it was passed.
        else:
            dtype: floating point.
            shape: (batch_size, ..., num_tokens)

    What happens in `adapt`:
        We build an index mapping tokens to token indices, and in the case
        of `mode='count'` and `mode='tfidf'`, we keep track of how many
        times each token has appeared.
    """
```

### Writing a subclass of `PreprocessingLayer`

The following four methods should be overridden:

- `__init__`: constructor of the layer, used to configure its behavior.
- `build(self, inputs_shape)`: creates the state variables of the layer.
- `call(self, inputs)`: transforms the inputs (should only be called after
  `adapt` has been called).
- `adapt(self, data, [reset_state=True])`: sets the state of the layer given
  the data provided (either as a tf.data dataset or numpy array(s)). The
  `reset_state` argument is optional and may be ignored.

### Handling of async prefetching

Some preprocessing ops are CPU-only and benefit from being executed
asynchronously on the accelerator host (as opposed to the accelerator itself,
e.g. GPU or TPU), with a batch of data being preprocessed on the host while
the previous batch is being processed by the accelerator. This pattern is
known as "async prefetching". This is normally done as part of a tf.data
pipeline.

The current proposal implies moving some of that preprocessing to inside the
model itself, which is normally executed end-to-end on an accelerator. This
means that we need a way to lift the preprocessing part of the model into a
tf.data pipeline during model training. In `fit`, we can do this
automatically. In custom training loops, we will expect the user to do it
manually (see subsection "Custom training loops").

We propose the addition of two new methods on the `Model` class:

```python
def get_preprocessing_stage(self):
    """Retrieves the preprocessing part of the model.

    This is the part of the model that should be executed asynchronously
    on the device host during training.

    Returns:
        Instance of `PreprocessingLayer` or `PreprocessingStage`. May be
        None if the model does not start with preprocessing layers.
    """
    pass

def get_main_stage(self):
    """Retrieves the main processing part of the model.

    This is the part of the model that should be executed on the
    accelerator device.

    Returns:
        Model instance.
    """
```

Thus, for any model that starts with preprocessing layers, the following:

```python
outputs = model(inputs)
```

is functionally equivalent to:

```python
preprocessed_inputs = model.get_preprocessing_stage()(inputs)
outputs = model.get_main_stage()(preprocessed_inputs)
```

#### Examples:

Sequential model with a preprocessing layer:

```python
vectorization = keras.layers.TextVectorization()
vectorization.adapt(data_sample)

model = keras.Sequential([
    vectorization,
    keras.layers.Dense(10, activation='softmax'),
])

# This is the `vectorization` layer.
preproc_stage = model.get_preprocessing_stage()
# model containing the `Dense` layer only.
main_stage = model.get_main_stage()
```

Functional model with 2 branches, each with a preprocessing layer:

```python
normalization_a = layers.Normalization()
normalization_b = layers.Normalization()
normalization_a.adapt(data_a)
normalization_b.adapt(data_b)

input_a = Input(shape_a)
input_b = Input(shape_b)
normed_a = normalization_a(input_a)
normed_b = normalization_b(input_b)
a = layers.Dense(32)(normed_a)
b = layers.Dense(32)(normed_b)
c = layers.concatenate([a, b])
outputs = layers.Dense(1, activation='sigmoid')(c)
model = Model([input_a, input_b], outputs)

# `PreprocessingStage` instance
# mapping `[input_a, input_b]` to `[normed_a, normed_b]`
preproc_stage = model.get_preprocessing_stage()
# Model instance mapping `[normed_a, normed_b]` to `outputs`.
main_stage = model.get_main_stage()
```

Subclassed model with a preprocessing layer:

```python
class MyModel(Model):

    def __init__(self, **kwargs):
        super(MyModel, self).__init__(**kwargs)
        self.preproc_layer = layers.Normalization()
        self.submodel = MySubmodel()

    def call(self, inputs):
        return self.submodel(self.preproc_layer(inputs))

    def get_preprocessing_stage(self):
        return self.preproc_layer

    def get_main_stage(self):
        return self.submodel
```

#### The case of the built-in `fit` loop

When calling `fit` or `evaluate` on a Dataset with a model that contains preprocessing layers, the lifting happens automatically and the user-facing workflow doesn't change.

```python
model.fit(dataset, epochs=10)
```

#### Custom training loops

When writing custom training loops, the user must manually do the lifting of the preprocessing stage into the data pipeline:

```python
model = Model(...)
preproc_stage = model.get_preprocessing_stage()
main_model = model.get_main_stage()

preproc_dataset = Dataset(...)
preproc_stage.adapt(preproc_dataset)

# Map the preprocessing stage on the dataset.
dataset = Dataset(...)
dataset = dataset.map(preproc_stage)

# Regular training loop (using `main_model`).
for x, y in dataset:
    with GradientTape() as tape:
        y_pred = main_model(x)
        loss = loss_fn(y, y_pred)
        ...
```

In general, you won't have to refer to `get_preprocessing_stage` and `get_main_stage` directly, because you will probably already have direct handles on your preprocessing layer and the rest of the model:

```python
normalization = layers.Normalization()
normalization.adapt(preproc_dataset)

dataset = dataset.map(normalization)

for x, y in dataset:
    with GradientTape() as tape:
        y_pred = model(x)
        loss = loss_fn(y, y_pred)
        ...
```

## Questions and Discussion Topics

### Naming Discussion

#### Naming conventions to follow for preprocessing layers

[RESOLUTION: we will use option A]

We have two possible sets of names for the layers:

##### Option A: Normalization, Discretization, TextVectorization

Pros: consistent with most existing layers, in particular BatchNormalization.
Cons: It's longer.

##### Option B: Normalize, Discretize, VectorizeText

Pros: It's shorter.
Cons: Normalize vs BatchNormalization is jarring.

#### Using the name "preprocessing" or "processing"

[RESOLUTION: we will use option A, "preprocessing"]

It has been proposed that we use the name "processing" throughout the API instead of "preprocessing".

##### Option A: "preprocessing".

Pros:

1) The meaning of "preprocessing" is clear to all users ("data normalization and stuff").
2) We need a clear semantic boundary between the main data processing flow of a model and what goes before it (the preprocessing stage).
3) It replaces the functionality of the `keras.preprocessing` module, and should be consistent with this naming convention. Cons: The `Normalization` layer, being differentiable, can be used in the middle of a model, rather than at the start. However, there's nothing weird about keeping the name "preprocessing" in this specific case: it is widely understood that a `Normalization` layer is doing "data preprocessing", independently of where you use it -- in fact, normalization is the first example that shows up in most definitions of "data preprocessing". ##### Option B: "processing". Pros: The Normalization layer can be used elsewhere in a model than at the start (although it would have to be trained separately). Cons: It's very generic, and does not clearly convey the difference between "preprocessing stage" and "main processing stage" required by the async prefetching API. #### Name to use for `adapt` method [RESOLUTION: decision delayed until implementation] We may want to use the name `fit` instead (other suggestions welcome). Pros of using `fit`: consistency with `model.fit()`, and the `fit` method on `ImageDataGenerator` and `Tokenizer` from the `keras.preprocessing` module. Cons of using `fit`: It may confuse users, since `preprocessing_layer.fit()` would have a different signature. --- [OTHER ADDITIONS FROM DESIGN REVIEW] - We should decouple the user-facing `adapt(data)` method (or `fit(data)`), and the implementer-facing method, so as to make it easier to implement support for different data formats.
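
A minimal sketch of what this decoupling could look like. All names below (`_reset_state`, `_adapt_batch`, the toy `Normalization`) are illustrative assumptions for discussion, not part of the proposal:

```python
import numpy as np


class PreprocessingLayer:
    """User-facing `adapt` handles data formats; implementers only
    override the per-batch hooks below."""

    def adapt(self, data, reset_state=True):
        if reset_state:
            self._reset_state()
        # Accept either a single numpy array or any iterable of numpy
        # batches (e.g. a tf.data dataset iterated as numpy).
        batches = [data] if isinstance(data, np.ndarray) else data
        for batch in batches:
            self._adapt_batch(batch)

    # --- Implementer-facing API ---
    def _reset_state(self):
        raise NotImplementedError

    def _adapt_batch(self, batch):
        raise NotImplementedError


class Normalization(PreprocessingLayer):
    def _reset_state(self):
        self._count, self._sum = 0, 0.0

    def _adapt_batch(self, batch):
        self._count += batch.shape[0]
        self._sum += batch.sum(axis=0)

    @property
    def mean(self):
        return self._sum / self._count


layer = Normalization()
layer.adapt(np.random.normal(size=(64, 3)))
print(layer.mean.shape)  # (3,)
```

With this split, adding support for a new input format only touches the base class's `adapt`, never the per-layer state-update logic.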
governance/rfcs/20190502-preprocessing-layers.md/0
{ "file_path": "governance/rfcs/20190502-preprocessing-layers.md", "repo_id": "governance", "token_count": 7759 }
9
"""Inception-ResNet V2 model for Keras. Model naming and structure follows TF-slim implementation (which has some additional layers and different number of filters from the original arXiv paper): https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_resnet_v2.py Pre-trained ImageNet weights are also converted from TF-slim, which can be found in: https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models # Reference - [Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning](https://arxiv.org/abs/1602.07261) (AAAI 2017) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from . import get_submodules_from_kwargs from . import imagenet_utils from .imagenet_utils import decode_predictions from .imagenet_utils import _obtain_input_shape BASE_WEIGHT_URL = ('https://github.com/fchollet/deep-learning-models/' 'releases/download/v0.7/') backend = None layers = None models = None keras_utils = None def preprocess_input(x, **kwargs): """Preprocesses a numpy array encoding a batch of images. # Arguments x: a 4D numpy array consists of RGB values within [0, 255]. # Returns Preprocessed array. """ return imagenet_utils.preprocess_input(x, mode='tf', **kwargs) def conv2d_bn(x, filters, kernel_size, strides=1, padding='same', activation='relu', use_bias=False, name=None): """Utility function to apply conv + BN. # Arguments x: input tensor. filters: filters in `Conv2D`. kernel_size: kernel size as in `Conv2D`. strides: strides in `Conv2D`. padding: padding mode in `Conv2D`. activation: activation in `Conv2D`. use_bias: whether to use a bias in `Conv2D`. name: name of the ops; will become `name + '_ac'` for the activation and `name + '_bn'` for the batch norm layer. # Returns Output tensor after applying `Conv2D` and `BatchNormalization`. """ x = layers.Conv2D(filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias, name=name)(x) if not use_bias: bn_axis = 1 if backend.image_data_format() == 'channels_first' else 3 bn_name = None if name is None else name + '_bn' x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x) if activation is not None: ac_name = None if name is None else name + '_ac' x = layers.Activation(activation, name=ac_name)(x) return x def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'): """Adds a Inception-ResNet block. This function builds 3 types of Inception-ResNet blocks mentioned in the paper, controlled by the `block_type` argument (which is the block name used in the official TF-slim implementation): - Inception-ResNet-A: `block_type='block35'` - Inception-ResNet-B: `block_type='block17'` - Inception-ResNet-C: `block_type='block8'` # Arguments x: input tensor. scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut branch. Let `r` be the output from the residual branch, the output of this block will be `x + scale * r`. block_type: `'block35'`, `'block17'` or `'block8'`, determines the network structure in the residual branch. block_idx: an `int` used for generating layer names. The Inception-ResNet blocks are repeated many times in this network. We use `block_idx` to identify each of the repetitions. For example, the first Inception-ResNet-A block will have `block_type='block35', block_idx=0`, and the layer names will have a common prefix `'block35_0'`. 
activation: activation function to use at the end of the block (see [activations](../activations.md)). When `activation=None`, no activation is applied (i.e., "linear" activation: `a(x) = x`). # Returns Output tensor for the block. # Raises ValueError: if `block_type` is not one of `'block35'`, `'block17'` or `'block8'`. """ if block_type == 'block35': branch_0 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(branch_1, 32, 3) branch_2 = conv2d_bn(x, 32, 1) branch_2 = conv2d_bn(branch_2, 48, 3) branch_2 = conv2d_bn(branch_2, 64, 3) branches = [branch_0, branch_1, branch_2] elif block_type == 'block17': branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 128, 1) branch_1 = conv2d_bn(branch_1, 160, [1, 7]) branch_1 = conv2d_bn(branch_1, 192, [7, 1]) branches = [branch_0, branch_1] elif block_type == 'block8': branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(branch_1, 224, [1, 3]) branch_1 = conv2d_bn(branch_1, 256, [3, 1]) branches = [branch_0, branch_1] else: raise ValueError('Unknown Inception-ResNet block type. ' 'Expects "block35", "block17" or "block8", ' 'but got: ' + str(block_type)) block_name = block_type + '_' + str(block_idx) channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3 mixed = layers.Concatenate( axis=channel_axis, name=block_name + '_mixed')(branches) up = conv2d_bn(mixed, backend.int_shape(x)[channel_axis], 1, activation=None, use_bias=True, name=block_name + '_conv') x = layers.Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale, output_shape=backend.int_shape(x)[1:], arguments={'scale': scale}, name=block_name)([x, up]) if activation is not None: x = layers.Activation(activation, name=block_name + '_ac')(x) return x def InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs): """Instantiates the Inception-ResNet v2 architecture. Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. # Arguments include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is `False` (otherwise the input shape has to be `(299, 299, 3)` (with `'channels_last'` data format) or `(3, 299, 299)` (with `'channels_first'` data format). It should have exactly 3 inputs channels, and width and height should be no smaller than 75. E.g. `(150, 150, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `'avg'` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `'max'` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is `True`, and if no `weights` argument is specified. # Returns A Keras `Model` instance. # Raises ValueError: in case of invalid argument for `weights`, or invalid input shape. 
""" global backend, layers, models, keras_utils backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs) if not (weights in {'imagenet', None} or os.path.exists(weights)): raise ValueError('The `weights` argument should be either ' '`None` (random initialization), `imagenet` ' '(pre-training on ImageNet), ' 'or the path to the weights file to be loaded.') if weights == 'imagenet' and include_top and classes != 1000: raise ValueError('If using `weights` as `"imagenet"` with `include_top`' ' as true, `classes` should be 1000') # Determine proper input shape input_shape = _obtain_input_shape( input_shape, default_size=299, min_size=75, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights) if input_tensor is None: img_input = layers.Input(shape=input_shape) else: if not backend.is_keras_tensor(input_tensor): img_input = layers.Input(tensor=input_tensor, shape=input_shape) else: img_input = input_tensor # Stem block: 35 x 35 x 192 x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid') x = conv2d_bn(x, 32, 3, padding='valid') x = conv2d_bn(x, 64, 3) x = layers.MaxPooling2D(3, strides=2)(x) x = conv2d_bn(x, 80, 1, padding='valid') x = conv2d_bn(x, 192, 3, padding='valid') x = layers.MaxPooling2D(3, strides=2)(x) # Mixed 5b (Inception-A block): 35 x 35 x 320 branch_0 = conv2d_bn(x, 96, 1) branch_1 = conv2d_bn(x, 48, 1) branch_1 = conv2d_bn(branch_1, 64, 5) branch_2 = conv2d_bn(x, 64, 1) branch_2 = conv2d_bn(branch_2, 96, 3) branch_2 = conv2d_bn(branch_2, 96, 3) branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x) branch_pool = conv2d_bn(branch_pool, 64, 1) branches = [branch_0, branch_1, branch_2, branch_pool] channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3 x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches) # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320 for block_idx in range(1, 11): x = inception_resnet_block(x, scale=0.17, block_type='block35', block_idx=block_idx) # Mixed 6a (Reduction-A block): 17 x 17 x 1088 branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid') branch_1 = conv2d_bn(x, 256, 1) branch_1 = conv2d_bn(branch_1, 256, 3) branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid') branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x) branches = [branch_0, branch_1, branch_pool] x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches) # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088 for block_idx in range(1, 21): x = inception_resnet_block(x, scale=0.1, block_type='block17', block_idx=block_idx) # Mixed 7a (Reduction-B block): 8 x 8 x 2080 branch_0 = conv2d_bn(x, 256, 1) branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid') branch_1 = conv2d_bn(x, 256, 1) branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid') branch_2 = conv2d_bn(x, 256, 1) branch_2 = conv2d_bn(branch_2, 288, 3) branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid') branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x) branches = [branch_0, branch_1, branch_2, branch_pool] x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches) # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080 for block_idx in range(1, 10): x = inception_resnet_block(x, scale=0.2, block_type='block8', block_idx=block_idx) x = inception_resnet_block(x, scale=1., activation=None, block_type='block8', block_idx=10) # Final convolution block: 8 x 8 x 1536 x = conv2d_bn(x, 1536, 1, name='conv_7b') if include_top: # 
Classification block x = layers.GlobalAveragePooling2D(name='avg_pool')(x) x = layers.Dense(classes, activation='softmax', name='predictions')(x) else: if pooling == 'avg': x = layers.GlobalAveragePooling2D()(x) elif pooling == 'max': x = layers.GlobalMaxPooling2D()(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_tensor is not None: inputs = keras_utils.get_source_inputs(input_tensor) else: inputs = img_input # Create model. model = models.Model(inputs, x, name='inception_resnet_v2') # Load weights. if weights == 'imagenet': if include_top: fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5' weights_path = keras_utils.get_file( fname, BASE_WEIGHT_URL + fname, cache_subdir='models', file_hash='e693bd0210a403b3192acc6073ad2e96') else: fname = ('inception_resnet_v2_weights_' 'tf_dim_ordering_tf_kernels_notop.h5') weights_path = keras_utils.get_file( fname, BASE_WEIGHT_URL + fname, cache_subdir='models', file_hash='d19885ff4a710c122648d3b5c3b684e4') model.load_weights(weights_path) elif weights is not None: model.load_weights(weights) return model
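
# Editorial usage sketch (not part of the original module): in practice this
# file is consumed through `keras.applications`, which injects the required
# `backend`/`layers`/`models`/`utils` submodules as keyword arguments. The
# random input below is only there to demonstrate the
# preprocess -> predict round trip and its output shape.
if __name__ == '__main__':
    import numpy as np
    from keras.applications import inception_resnet_v2

    demo_model = inception_resnet_v2.InceptionResNetV2(weights=None)
    demo_x = inception_resnet_v2.preprocess_input(
        np.random.uniform(0, 255, size=(1, 299, 299, 3)))
    print(demo_model.predict(demo_x).shape)  # (1, 1000)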
keras-applications/keras_applications/inception_resnet_v2.py/0
{ "file_path": "keras-applications/keras_applications/inception_resnet_v2.py", "repo_id": "keras-applications", "token_count": 6968 }
10
import pytest import random import six import numpy as np import keras_applications from keras.applications import densenet from keras.applications import inception_resnet_v2 from keras.applications import inception_v3 from keras.applications import mobilenet try: from keras.applications import mobilenet_v2 except ImportError: from keras.applications import mobilenetv2 as mobilenet_v2 from keras.applications import nasnet from keras.applications import resnet50 from keras.applications import vgg16 from keras.applications import vgg19 from keras.applications import xception from keras.preprocessing import image from keras import backend from keras import layers from keras import models from keras import utils from multiprocessing import Process, Queue def keras_modules_injection(base_fun): def wrapper(*args, **kwargs): kwargs['backend'] = backend kwargs['layers'] = layers kwargs['models'] = models kwargs['utils'] = utils return base_fun(*args, **kwargs) return wrapper for (name, module) in [('resnet', keras_applications.resnet), ('resnet_v2', keras_applications.resnet_v2), ('resnext', keras_applications.resnext), ('efficientnet', keras_applications.efficientnet), ('mobilenet_v3', keras_applications.mobilenet_v3)]: module.decode_predictions = keras_modules_injection(module.decode_predictions) module.preprocess_input = keras_modules_injection(module.preprocess_input) for app in dir(module): if app[0].isupper() and callable(getattr(module, app)): setattr(module, app, keras_modules_injection(getattr(module, app))) setattr(keras_applications, name, module) RESNET_LIST = [keras_applications.resnet.ResNet50, keras_applications.resnet.ResNet101, keras_applications.resnet.ResNet152] RESNETV2_LIST = [keras_applications.resnet_v2.ResNet50V2, keras_applications.resnet_v2.ResNet101V2, keras_applications.resnet_v2.ResNet152V2] RESNEXT_LIST = [keras_applications.resnext.ResNeXt50, keras_applications.resnext.ResNeXt101] MOBILENET_LIST = [(mobilenet.MobileNet, mobilenet, 1024), (mobilenet_v2.MobileNetV2, mobilenet_v2, 1280), (keras_applications.mobilenet_v3.MobileNetV3Small, keras_applications.mobilenet_v3, 576), (keras_applications.mobilenet_v3.MobileNetV3Large, keras_applications.mobilenet_v3, 960)] DENSENET_LIST = [(densenet.DenseNet121, 1024), (densenet.DenseNet169, 1664), (densenet.DenseNet201, 1920)] NASNET_LIST = [(nasnet.NASNetMobile, 1056), (nasnet.NASNetLarge, 4032)] EFFICIENTNET_LIST = [(keras_applications.efficientnet.EfficientNetB0, 1280), (keras_applications.efficientnet.EfficientNetB1, 1280), (keras_applications.efficientnet.EfficientNetB2, 1408), (keras_applications.efficientnet.EfficientNetB3, 1536), (keras_applications.efficientnet.EfficientNetB4, 1792), (keras_applications.efficientnet.EfficientNetB5, 2048)] def keras_test(func): """Function wrapper to clean up after TensorFlow tests. # Arguments func: test function to clean up after. # Returns A function wrapping the input function. """ @six.wraps(func) def wrapper(*args, **kwargs): output = func(*args, **kwargs) if backend.backend() == 'tensorflow' or backend.backend() == 'cntk': backend.clear_session() return output return wrapper def _get_elephant(target_size): # For models that don't include a Flatten step, # the default is to accept variable-size inputs # even when loading ImageNet weights (since it is possible). # In this case, default to 299x299. 
if target_size[0] is None: target_size = (299, 299) img = image.load_img('tests/data/elephant.jpg', target_size=tuple(target_size)) x = image.img_to_array(img) return np.expand_dims(x, axis=0) def _get_output_shape(model_fn, preprocess_input=None): if backend.backend() == 'cntk': # Create model in a subprocess so that # the memory consumed by InceptionResNetV2 will be # released back to the system after this test # (to deal with OOM error on CNTK backend). # TODO: remove the use of multiprocessing from these tests # once a memory clearing mechanism # is implemented in the CNTK backend. def target(queue): model = model_fn() if preprocess_input is None: queue.put(model.output_shape) else: x = _get_elephant(model.input_shape[1:3]) x = preprocess_input(x) queue.put((model.output_shape, model.predict(x))) queue = Queue() p = Process(target=target, args=(queue,)) p.start() p.join() # The error in a subprocess won't propagate # to the main process, so we check if the model # is successfully created by checking if the output shape # has been put into the queue assert not queue.empty(), 'Model creation failed.' return queue.get_nowait() else: model = model_fn() if preprocess_input is None: return model.output_shape else: x = _get_elephant(model.input_shape[1:3]) x = preprocess_input(x) return (model.output_shape, model.predict(x)) @keras_test def _test_application_basic(app, last_dim=1000, module=None): if module is None: output_shape = _get_output_shape(lambda: app(weights=None)) assert output_shape == (None, None, None, last_dim) else: output_shape, preds = _get_output_shape( lambda: app(weights='imagenet'), module.preprocess_input) assert output_shape == (None, last_dim) names = [p[1] for p in module.decode_predictions(preds)[0]] # Test correct label is in top 3 (weak correctness test). 
assert 'African_elephant' in names[:3] @keras_test def _test_application_notop(app, last_dim): output_shape = _get_output_shape( lambda: app(weights=None, include_top=False)) assert output_shape == (None, None, None, last_dim) @keras_test def _test_application_variable_input_channels(app, last_dim): if backend.image_data_format() == 'channels_first': input_shape = (1, None, None) else: input_shape = (None, None, 1) output_shape = _get_output_shape( lambda: app(weights=None, include_top=False, input_shape=input_shape)) assert output_shape == (None, None, None, last_dim) if backend.image_data_format() == 'channels_first': input_shape = (4, None, None) else: input_shape = (None, None, 4) output_shape = _get_output_shape( lambda: app(weights=None, include_top=False, input_shape=input_shape)) assert output_shape == (None, None, None, last_dim) @keras_test def _test_app_pooling(app, last_dim): output_shape = _get_output_shape( lambda: app(weights=None, include_top=False, pooling=random.choice(['avg', 'max']))) assert output_shape == (None, last_dim) def test_resnet(): app = random.choice(RESNET_LIST) module = keras_applications.resnet last_dim = 2048 _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_resnetv2(): app = random.choice(RESNETV2_LIST) module = keras_applications.resnet_v2 last_dim = 2048 _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_resnext(): app = random.choice(RESNEXT_LIST) module = keras_applications.resnext last_dim = 2048 _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_vgg(): app = random.choice([vgg16.VGG16, vgg19.VGG19]) module = vgg16 last_dim = 512 _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_xception(): app = xception.Xception module = xception last_dim = 2048 _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_inceptionv3(): app = inception_v3.InceptionV3 module = inception_v3 last_dim = 2048 _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_inceptionresnetv2(): app = inception_resnet_v2.InceptionResNetV2 module = inception_resnet_v2 last_dim = 1536 _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_mobilenet(): app, module, last_dim = random.choice(MOBILENET_LIST) _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_densenet(): app, last_dim = random.choice(DENSENET_LIST) module = densenet _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_nasnet(): app, last_dim = NASNET_LIST[0] # NASNetLarge is too 
heavy to test on Travis module = nasnet _test_application_basic(app, module=module) # _test_application_notop(app, last_dim) # _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) def test_efficientnet(): app, last_dim = random.choice(EFFICIENTNET_LIST) module = keras_applications.efficientnet _test_application_basic(app, module=module) _test_application_notop(app, last_dim) _test_application_variable_input_channels(app, last_dim) _test_app_pooling(app, last_dim) if __name__ == '__main__': pytest.main([__file__])
keras-applications/tests/applications_test.py/0
{ "file_path": "keras-applications/tests/applications_test.py", "repo_id": "keras-applications", "token_count": 4615 }
11
set -e pip install cntk --progress-bar off # open mpi is needed for cntk rm -rf ~/mpi mkdir ~/mpi pushd ~/mpi wget http://cntk.ai/PythonWheel/ForKeras/depends/openmpi_1.10-3.zip unzip ./openmpi_1.10-3.zip sudo dpkg -i openmpi_1.10-3.deb popd
keras-contrib/.travis/install_cntk.sh/0
{ "file_path": "keras-contrib/.travis/install_cntk.sh", "repo_id": "keras-contrib", "token_count": 115 }
12
"""Train CRF and BiLSTM-CRF on CONLL2000 chunking data, similar to https://arxiv.org/pdf/1508.01991v1.pdf. """ from __future__ import absolute_import from __future__ import print_function from __future__ import division import numpy from collections import Counter from keras.models import Sequential from keras.layers import Embedding, Bidirectional, LSTM from keras_contrib.layers import CRF from keras_contrib.losses import crf_loss from keras_contrib.metrics import crf_viterbi_accuracy from keras_contrib.datasets import conll2000 EPOCHS = 10 EMBED_DIM = 200 BiRNN_UNITS = 200 def classification_report(y_true, y_pred, labels): '''Similar to the one in sklearn.metrics, reports per classs recall, precision and F1 score''' y_true = numpy.asarray(y_true).ravel() y_pred = numpy.asarray(y_pred).ravel() corrects = Counter(yt for yt, yp in zip(y_true, y_pred) if yt == yp) y_true_counts = Counter(y_true) y_pred_counts = Counter(y_pred) report = ((lab, # label corrects[i] / max(1, y_true_counts[i]), # recall corrects[i] / max(1, y_pred_counts[i]), # precision y_true_counts[i] # support ) for i, lab in enumerate(labels)) report = [(l, r, p, 2 * r * p / max(1e-9, r + p), s) for l, r, p, s in report] print('{:<15}{:>10}{:>10}{:>10}{:>10}\n'.format('', 'recall', 'precision', 'f1-score', 'support')) formatter = '{:<15}{:>10.2f}{:>10.2f}{:>10.2f}{:>10d}'.format for r in report: print(formatter(*r)) print('') report2 = list(zip(*[(r * s, p * s, f1 * s) for l, r, p, f1, s in report])) N = len(y_true) print(formatter('avg / total', sum(report2[0]) / N, sum(report2[1]) / N, sum(report2[2]) / N, N) + '\n') # ------ # Data # ----- # conll200 has two different targets, here will only use # IBO like chunking as an example train, test, voc = conll2000.load_data() (train_x, _, train_y) = train (test_x, _, test_y) = test (vocab, _, class_labels) = voc # -------------- # 1. Regular CRF # -------------- print('==== training CRF ====') model = Sequential() model.add(Embedding(len(vocab), EMBED_DIM, mask_zero=True)) # Random embedding crf = CRF(len(class_labels), sparse_target=True) model.add(crf) model.summary() # The default `crf_loss` for `learn_mode='join'` is negative log likelihood. model.compile('adam', loss=crf_loss, metrics=[crf_viterbi_accuracy]) model.fit(train_x, train_y, epochs=EPOCHS, validation_data=[test_x, test_y]) test_y_pred = model.predict(test_x).argmax(-1)[test_x > 0] test_y_true = test_y[test_x > 0] print('\n---- Result of CRF ----\n') classification_report(test_y_true, test_y_pred, class_labels) # ------------- # 2. BiLSTM-CRF # ------------- print('==== training BiLSTM-CRF ====') model = Sequential() model.add(Embedding(len(vocab), EMBED_DIM, mask_zero=True)) # Random embedding model.add(Bidirectional(LSTM(BiRNN_UNITS // 2, return_sequences=True))) crf = CRF(len(class_labels), sparse_target=True) model.add(crf) model.summary() model.compile('adam', loss=crf_loss, metrics=[crf_viterbi_accuracy]) model.fit(train_x, train_y, epochs=EPOCHS, validation_data=[test_x, test_y]) test_y_pred = model.predict(test_x).argmax(-1)[test_x > 0] test_y_true = test_y[test_x > 0] print('\n---- Result of BiLSTM-CRF ----\n') classification_report(test_y_true, test_y_pred, class_labels)
keras-contrib/examples/conll2000_chunking_crf.py/0
{ "file_path": "keras-contrib/examples/conll2000_chunking_crf.py", "repo_id": "keras-contrib", "token_count": 1685 }
13
from .snapshot import SnapshotCallbackBuilder, SnapshotModelCheckpoint from .dead_relu_detector import DeadReluDetector from .cyclical_learning_rate import CyclicLR from .tensorboard import TensorBoardGrouped
keras-contrib/keras_contrib/callbacks/__init__.py/0
{ "file_path": "keras-contrib/keras_contrib/callbacks/__init__.py", "repo_id": "keras-contrib", "token_count": 57 }
14
import keras.backend as K
from keras.layers import Layer


class SineReLU(Layer):
    """Sine Rectified Linear Unit to generate oscillations.

    It allows an oscillation in the gradients when the weights are negative.
    The oscillation can be controlled with a parameter, which makes it close
    to or equal to zero. The function is differentiable at any point due to
    its derivative. For instance, at 0, the derivative of 'sin(0) - cos(0)'
    is 'cos(0) + sin(0)' which is 1.

    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    # Output shape
        Same shape as the input.

    # Arguments
        epsilon: float. Hyper-parameter used to control the amplitude of the
            sinusoidal wave when weights are negative.
            The default value is 0.0025, since it works better for CNN layers
            and those are the most used layers nowadays.
            When using Dense Networks, try something around 0.006.

    # References:
        - [SineReLU: An Alternative to the ReLU Activation Function](
           https://medium.com/@wilder.rodrigues/sinerelu-an-alternative-to-the-relu-activation-function-e46a6199997d).

        This function was first introduced at the Codemotion Amsterdam 2018
        and then at the DevDays, in Vilnius, Lithuania. It has been
        extensively tested with Deep Nets, CNNs, LSTMs, Residual Nets and
        GANs, based on the MNIST, Kaggle Toxicity and IMDB datasets.

    # Performance:
        - Fashion MNIST
          * Mean of 6 runs per Activation Function
          * Fully Connected Network
            - SineReLU: loss mean -> 0.3522; accuracy mean -> 89.18;
              mean of std loss -> 0.08375204467435822
            - LeakyReLU: loss mean -> 0.3553; accuracy mean -> 88.98;
              mean of std loss -> 0.0831161868455245
            - ReLU: loss mean -> 0.3519; accuracy mean -> 88.84;
              mean of std loss -> 0.08358816501301362
          * Convolutional Neural Network
            - SineReLU: loss mean -> 0.2180; accuracy mean -> 92.49;
              mean of std loss -> 0.0781155784858847
            - LeakyReLU: loss mean -> 0.2205; accuracy mean -> 92.37;
              mean of std loss -> 0.09273670474788205
            - ReLU: loss mean -> 0.2144; accuracy mean -> 92.45;
              mean of std loss -> 0.09396114585977

        - MNIST
          * Mean of 6 runs per Activation Function
          * Fully Connected Network
            - SineReLU: loss mean -> 0.0623; accuracy mean -> 98.53;
              mean of std loss -> 0.06012015231824904
            - LeakyReLU: loss mean -> 0.0623; accuracy mean -> 98.50;
              mean of std loss -> 0.06052147632835356
            - ReLU: loss mean -> 0.0605; accuracy mean -> 98.49;
              mean of std loss -> 0.059599885665016096
          * Convolutional Neural Network
            - SineReLU: loss mean -> 0.0198; accuracy mean -> 99.51;
              mean of std loss -> 0.0425338329550847
            - LeakyReLU: loss mean -> 0.0216; accuracy mean -> 99.40;
              mean of std loss -> 0.04834468835196667
            - ReLU: loss mean -> 0.0185; accuracy mean -> 99.49;
              mean of std loss -> 0.05503719489690131

    # Jupyter Notebooks
        - https://github.com/ekholabs/DLinK/blob/master/notebooks/keras

    # Examples
        The Advanced Activation function SineReLU has to be imported from
        the keras_contrib.layers package.
To see full source-code of this architecture and other examples, please follow this link: https://github.com/ekholabs/DLinK ```python model = Sequential() model.add(Dense(128, input_shape = (784,))) model.add(SineReLU()) model.add(Dropout(0.2)) model.add(Dense(256)) model.add(SineReLU()) model.add(Dropout(0.3)) model.add(Dense(1024)) model.add(SineReLU()) model.add(Dropout(0.5)) model.add(Dense(10, activation = 'softmax')) ``` """ def __init__(self, epsilon=0.0025, **kwargs): super(SineReLU, self).__init__(**kwargs) self.supports_masking = True self.epsilon = K.cast_to_floatx(epsilon) def call(self, Z): m = self.epsilon * (K.sin(Z) - K.cos(Z)) A = K.maximum(m, Z) return A def get_config(self): config = {'epsilon': float(self.epsilon)} base_config = super(SineReLU, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_output_shape(self, input_shape): return input_shape
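
# Editorial sketch (not part of the original layer): the forward pass above
# in plain NumPy, to make `A = max(epsilon * (sin(Z) - cos(Z)), Z)` concrete.
if __name__ == '__main__':
    import numpy as np

    epsilon = 0.0025
    Z = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    A = np.maximum(epsilon * (np.sin(Z) - np.cos(Z)), Z)
    # Negative inputs land on a small sinusoidal wave instead of a hard zero.
    print(A)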
keras-contrib/keras_contrib/layers/advanced_activations/sinerelu.py/0
{ "file_path": "keras-contrib/keras_contrib/layers/advanced_activations/sinerelu.py", "repo_id": "keras-contrib", "token_count": 2153 }
15
from .crf_accuracies import crf_accuracy, crf_marginal_accuracy from .crf_accuracies import crf_viterbi_accuracy
keras-contrib/keras_contrib/metrics/__init__.py/0
{ "file_path": "keras-contrib/keras_contrib/metrics/__init__.py", "repo_id": "keras-contrib", "token_count": 43 }
16
import warnings

import h5py
import keras.backend as K
from keras import optimizers
from keras.engine import saving


def save_all_weights(model, filepath, include_optimizer=True):
    """Save model weights and optimizer weights, but not the configuration,
    to a HDF5 file. Functionally, this sits between `save` and
    `save_weights`.

    The HDF5 file contains:
        - the model's weights
        - the model's optimizer's state (if any)
    If you have a complicated model or set of models that do not serialize
    to JSON correctly, use this method.

    # Arguments
        model: Keras model instance to be saved.
        filepath: String, path where to save the model.
        include_optimizer: If True, save optimizer's state together.

    # Raises
        ImportError: if h5py is not available.
    """
    if h5py is None:
        raise ImportError('`save_all_weights` requires h5py.')

    with h5py.File(filepath, 'w') as f:
        model_weights_group = f.create_group('model_weights')
        model_layers = model.layers
        saving.save_weights_to_hdf5_group(model_weights_group, model_layers)

        if include_optimizer and hasattr(model, 'optimizer') and model.optimizer:
            if isinstance(model.optimizer, optimizers.TFOptimizer):
                warnings.warn(
                    'TensorFlow optimizers do not '
                    'make it possible to access '
                    'optimizer attributes or optimizer state '
                    'after instantiation. '
                    'As a result, we cannot save the optimizer '
                    'as part of the model save file. '
                    'You will have to compile your model again after loading it. '
                    'Prefer using a Keras optimizer instead '
                    '(see keras.io/optimizers).')
            else:
                # Save optimizer weights.
                symbolic_weights = getattr(model.optimizer, 'weights')
                if symbolic_weights:
                    optimizer_weights_group = f.create_group('optimizer_weights')
                    weight_values = K.batch_get_value(symbolic_weights)
                    weight_names = []
                    for i, (w, val) in enumerate(zip(symbolic_weights,
                                                     weight_values)):
                        # The default name of symbolic_weights is '/variable'
                        # for Theano.
                        if K.backend() == 'theano':
                            if hasattr(w, 'name') and w.name != "/variable":
                                name = str(w.name)
                            else:
                                name = 'param_' + str(i)
                        else:
                            if hasattr(w, 'name') and w.name:
                                name = str(w.name)
                            else:
                                name = 'param_' + str(i)
                        weight_names.append(name.encode('utf8'))
                    optimizer_weights_group.attrs['weight_names'] = weight_names
                    for name, val in zip(weight_names, weight_values):
                        param_dset = optimizer_weights_group.create_dataset(
                            name,
                            val.shape,
                            dtype=val.dtype)
                        if not val.shape:
                            # scalar
                            param_dset[()] = val
                        else:
                            param_dset[:] = val


def load_all_weights(model, filepath, include_optimizer=True):
    """Loads the weights of a model saved via `save_all_weights`.

    If the model has been compiled, optionally load its optimizer's weights.

    # Arguments
        model: instantiated model with architecture matching the saved model.
            Compile the model beforehand if you want to load optimizer
            weights.
        filepath: String, path to the saved model.

    # Returns
        None. The model will have its weights updated.

    # Raises
        ImportError: if h5py is not available.
        ValueError: In case of an invalid savefile.
    """
    if h5py is None:
        raise ImportError('`load_all_weights` requires h5py.')

    with h5py.File(filepath, mode='r') as f:
        # Set weights.
        saving.load_weights_from_hdf5_group(f['model_weights'], model.layers)
        # Set optimizer weights.
        if (include_optimizer and 'optimizer_weights' in f and
                hasattr(model, 'optimizer') and model.optimizer):
            optimizer_weights_group = f['optimizer_weights']
            optimizer_weight_names = [
                n.decode('utf8')
                for n in optimizer_weights_group.attrs['weight_names']]
            optimizer_weight_values = [optimizer_weights_group[n]
                                       for n in optimizer_weight_names]
            model.optimizer.set_weights(optimizer_weight_values)
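
# Editorial usage sketch (not part of the original utilities): round-trip
# the weights and optimizer state of a compiled model. The architecture
# itself is not saved, so the receiving model must be rebuilt and compiled.
if __name__ == '__main__':
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense

    def make_model():
        model = Sequential([Dense(4, input_shape=(8,)), Dense(1)])
        model.compile(optimizer='adam', loss='mse')
        return model

    model = make_model()
    # Optimizer weights only exist after at least one training step.
    model.train_on_batch(np.zeros((2, 8)), np.zeros((2, 1)))
    save_all_weights(model, 'weights_and_optimizer.h5')

    clone = make_model()
    load_all_weights(clone, 'weights_and_optimizer.h5')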
keras-contrib/keras_contrib/utils/save_load_utils.py/0
{ "file_path": "keras-contrib/keras_contrib/utils/save_load_utils.py", "repo_id": "keras-contrib", "token_count": 2413 }
17
import pytest from keras_contrib.utils.test_utils import layer_test from keras_contrib.layers import SineReLU @pytest.mark.parametrize('epsilon', [0.0025, 0.0035, 0.0045]) def test_sine_relu(epsilon): layer_test(SineReLU, kwargs={'epsilon': epsilon}, input_shape=(2, 3, 4)) if __name__ == '__main__': pytest.main([__file__])
keras-contrib/tests/keras_contrib/layers/advanced_activations/test_sinerelu.py/0
{ "file_path": "keras-contrib/tests/keras_contrib/layers/advanced_activations/test_sinerelu.py", "repo_id": "keras-contrib", "token_count": 144 }
18
from __future__ import print_function

import pytest

from keras_contrib.tests import optimizers
from keras_contrib.optimizers import Yogi
from keras_contrib.utils.test_utils import is_tf_keras


def test_yogi():
    optimizers._test_optimizer(Yogi())
    optimizers._test_optimizer(Yogi(beta_1=0.9, beta_2=0.9))
    optimizers._test_optimizer(Yogi(beta_1=0.9, beta_2=0.99))
    optimizers._test_optimizer(Yogi(beta_1=0.9, beta_2=0.999))


@pytest.mark.skipif(is_tf_keras,
                    reason='Sometimes fails; it is random.',
                    strict=True)
def test_yogi_change_lr():
    optimizers._test_optimizer(Yogi(beta_1=0.9, beta_2=0.999, lr=0.001))
keras-contrib/tests/keras_contrib/optimizers/yogi_test.py/0
{ "file_path": "keras-contrib/tests/keras_contrib/optimizers/yogi_test.py", "repo_id": "keras-contrib", "token_count": 295 }
19
""" Title: Simple custom layer example: Antirectifier Author: [fchollet](https://twitter.com/fchollet) Date created: 2016/01/06 Last modified: 2020/04/20 Description: Demonstration of custom layer creation. Accelerator: GPU """ """ ## Introduction This example shows how to create custom layers, using the Antirectifier layer (originally proposed as a Keras example script in January 2016), an alternative to ReLU. Instead of zeroing-out the negative part of the input, it splits the negative and positive parts and returns the concatenation of the absolute value of both. This avoids loss of information, at the cost of an increase in dimensionality. To fix the dimensionality increase, we linearly combine the features back to a space of the original size. """ """ ## Setup """ import tensorflow as tf import keras_core as keras from keras_core import layers """ ## The Antirectifier layer """ class Antirectifier(layers.Layer): def __init__(self, initializer="he_normal", **kwargs): super().__init__(**kwargs) self.initializer = keras.initializers.get(initializer) def build(self, input_shape): output_dim = input_shape[-1] self.kernel = self.add_weight( shape=(output_dim * 2, output_dim), initializer=self.initializer, name="kernel", trainable=True, ) def call(self, inputs): inputs -= tf.reduce_mean(inputs, axis=-1, keepdims=True) pos = tf.nn.relu(inputs) neg = tf.nn.relu(-inputs) concatenated = tf.concat([pos, neg], axis=-1) mixed = tf.matmul(concatenated, self.kernel) return mixed def get_config(self): # Implement get_config to enable serialization. This is optional. base_config = super().get_config() config = {"initializer": keras.initializers.serialize(self.initializer)} return dict(list(base_config.items()) + list(config.items())) """ ## Let's test-drive it on MNIST """ # Training parameters batch_size = 128 num_classes = 10 epochs = 20 # The data, split between train and test sets (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() x_train = x_train.reshape(-1, 784) x_test = x_test.reshape(-1, 784) x_train = x_train.astype("float32") x_test = x_test.astype("float32") x_train /= 255 x_test /= 255 print(x_train.shape[0], "train samples") print(x_test.shape[0], "test samples") # Build the model model = keras.Sequential( [ keras.Input(shape=(784,)), layers.Dense(256), Antirectifier(), layers.Dense(256), Antirectifier(), layers.Dropout(0.5), layers.Dense(10), ] ) # Compile the model model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=keras.optimizers.RMSprop(), metrics=[keras.metrics.SparseCategoricalAccuracy()], ) # Train the model model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.15, ) # Test the model model.evaluate(x_test, y_test)
keras-core/examples/keras_io/tensorflow/keras_recipes/antirectifier.py/0
{ "file_path": "keras-core/examples/keras_io/tensorflow/keras_recipes/antirectifier.py", "repo_id": "keras-core", "token_count": 1183 }
20
import keras_core # isort: skip, keep it on top for torch test import numpy as np from tensorflow import keras NUM_CLASSES = 10 def build_mnist_data(num_classes): (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 # Make sure images have shape (28, 28, 1) x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) print("x_train shape:", x_train.shape) print(x_train.shape[0], "train samples") print(x_test.shape[0], "test samples") return x_train, y_train, x_test, y_test def build_keras_model(keras_module, num_classes): input_shape = (28, 28, 1) model = keras_module.Sequential( [ keras_module.Input(shape=input_shape), keras_module.layers.Conv2D( 32, kernel_size=(3, 3), activation="relu" ), keras_module.layers.MaxPooling2D(pool_size=(2, 2)), keras_module.layers.Conv2D( 64, kernel_size=(3, 3), activation="relu" ), keras_module.layers.MaxPooling2D(pool_size=(2, 2)), keras_module.layers.Flatten(), keras_module.layers.Dense(num_classes, activation="softmax"), ] ) model.summary() return model def train_model(model, x, y): batch_size = 256 epochs = 1 model.compile( loss="mse", optimizer="adam", metrics=["accuracy"], jit_compile=False ) return model.fit( x, y, batch_size=batch_size, epochs=epochs, validation_split=0.1, shuffle=False, ) def eval_model(model, x, y): score = model.evaluate(x, y, verbose=0) print("Test loss:", score[0]) print("Test accuracy:", score[1]) return score def numerical_test(): x_train, y_train, x_test, y_test = build_mnist_data(NUM_CLASSES) keras_model = build_keras_model(keras, NUM_CLASSES) keras_core_model = build_keras_model(keras_core, NUM_CLASSES) # Make sure both model have same weights before training weights = [weight.numpy() for weight in keras_model.weights] keras_core_model.set_weights(weights) for kw, kcw in zip(keras_model.weights, keras_core_model.weights): np.testing.assert_allclose(kw.numpy(), kcw.numpy()) keras_history = train_model(keras_model, x_train, y_train) keras_core_history = train_model(keras_core_model, x_train, y_train) for key in keras_history.history.keys(): np.testing.assert_allclose( keras_history.history[key], keras_core_history.history[key], atol=1e-3, ) if __name__ == "__main__": keras.utils.set_random_seed(1337) keras_core.utils.set_random_seed(1337) numerical_test()
keras-core/integration_tests/numerical_test.py/0
{ "file_path": "keras-core/integration_tests/numerical_test.py", "repo_id": "keras-core", "token_count": 1382 }
21
from keras_core.backend.common import global_state from keras_core.testing import test_case from keras_core.utils.naming import auto_name class GlobalStateTest(test_case.TestCase): def test_clear_session(self): name0 = auto_name("somename") self.assertEqual(name0, "somename") name1 = auto_name("somename") self.assertEqual(name1, "somename_1") global_state.clear_session() name0 = auto_name("somename") self.assertEqual(name0, "somename")
keras-core/keras_core/backend/common/global_state_test.py/0
{ "file_path": "keras-core/keras_core/backend/common/global_state_test.py", "repo_id": "keras-core", "token_count": 210 }
22
from keras_core.backend.tensorflow import core from keras_core.backend.tensorflow import image from keras_core.backend.tensorflow import math from keras_core.backend.tensorflow import nn from keras_core.backend.tensorflow import numpy from keras_core.backend.tensorflow import random from keras_core.backend.tensorflow import tensorboard from keras_core.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS from keras_core.backend.tensorflow.core import Variable from keras_core.backend.tensorflow.core import cast from keras_core.backend.tensorflow.core import compute_output_spec from keras_core.backend.tensorflow.core import cond from keras_core.backend.tensorflow.core import convert_to_numpy from keras_core.backend.tensorflow.core import convert_to_tensor from keras_core.backend.tensorflow.core import is_tensor from keras_core.backend.tensorflow.core import name_scope from keras_core.backend.tensorflow.core import scatter from keras_core.backend.tensorflow.core import shape from keras_core.backend.tensorflow.core import stop_gradient from keras_core.backend.tensorflow.core import vectorized_map from keras_core.backend.tensorflow.rnn import cudnn_ok from keras_core.backend.tensorflow.rnn import gru from keras_core.backend.tensorflow.rnn import lstm from keras_core.backend.tensorflow.rnn import rnn
keras-core/keras_core/backend/tensorflow/__init__.py/0
{ "file_path": "keras-core/keras_core/backend/tensorflow/__init__.py", "repo_id": "keras-core", "token_count": 443 }
23
import unittest from keras_core import backend from keras_core.backend.common.keras_tensor import KerasTensor def single_arg_test_fn(x): return backend.numpy.concatenate([(x + 1) ** 2, x], axis=-1) def three_args_2_kwarg_test_fn(x1, x2, x3=None): x1 = backend.numpy.max(x1, axis=1) x2 = backend.numpy.max(x2, axis=1) if x3 is not None: x1 += backend.numpy.max(x3, axis=1) return x1 + x2 class ComputeOutputSpecTest(unittest.TestCase): def test_dynamic_batch_size(self): x = KerasTensor(shape=(None, 3, 5)) y = backend.compute_output_spec(single_arg_test_fn, x) self.assertEqual(y.shape, (None, 3, 10)) x1 = KerasTensor(shape=(None, 3, 5)) x2 = KerasTensor(shape=(None, 3, 5)) x3 = KerasTensor(shape=(None, 3, 5)) y = backend.compute_output_spec( three_args_2_kwarg_test_fn, x1, x2, x3=x3 ) self.assertEqual(y.shape, (None, 5)) def test_dynamic_everything(self): x = KerasTensor(shape=(2, None, 3)) y = backend.compute_output_spec(single_arg_test_fn, x) self.assertEqual(y.shape, (2, None, 6)) x1 = KerasTensor(shape=(None, None, 5)) x2 = KerasTensor(shape=(None, None, 5)) x3 = KerasTensor(shape=(None, None, 5)) y = backend.compute_output_spec( three_args_2_kwarg_test_fn, x1, x2, x3=x3 ) self.assertEqual(y.shape, (None, 5))
keras-core/keras_core/backend/tests/compute_output_spec_test.py/0
{ "file_path": "keras-core/keras_core/backend/tests/compute_output_spec_test.py", "repo_id": "keras-core", "token_count": 711 }
24
import torch from keras_core import optimizers from keras_core.optimizers.base_optimizer import BaseOptimizer class TorchOptimizer(BaseOptimizer): def __new__(cls, *args, **kwargs): # Import locally to avoid circular imports. from keras_core.backend.torch.optimizers import torch_adadelta from keras_core.backend.torch.optimizers import torch_adagrad from keras_core.backend.torch.optimizers import torch_adam from keras_core.backend.torch.optimizers import torch_adamax from keras_core.backend.torch.optimizers import torch_adamw from keras_core.backend.torch.optimizers import torch_lion from keras_core.backend.torch.optimizers import torch_nadam from keras_core.backend.torch.optimizers import torch_rmsprop from keras_core.backend.torch.optimizers import torch_sgd OPTIMIZERS = { optimizers.Adadelta: torch_adadelta.Adadelta, optimizers.Adagrad: torch_adagrad.Adagrad, optimizers.Adam: torch_adam.Adam, optimizers.Adamax: torch_adamax.Adamax, optimizers.AdamW: torch_adamw.AdamW, optimizers.Lion: torch_lion.Lion, optimizers.Nadam: torch_nadam.Nadam, optimizers.RMSprop: torch_rmsprop.RMSprop, optimizers.SGD: torch_sgd.SGD, } if cls in OPTIMIZERS: return OPTIMIZERS[cls](*args, **kwargs) return super().__new__(cls) def _apply_weight_decay(self, variables): if self.weight_decay is None: return torch._foreach_mul_( [v.value for v in variables if self._use_weight_decay(v)], 1 - self.weight_decay * self._get_current_learning_rate(), )
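
# Editorial sketch (not part of the original module): with the torch backend
# selected (KERAS_BACKEND=torch), constructing a public optimizer class
# dispatches through `TorchOptimizer.__new__` above and returns the
# torch-native subclass.
if __name__ == "__main__":
    from keras_core import optimizers

    opt = optimizers.Adam(learning_rate=1e-3)
    # Expected to print the torch-specific Adam class from
    # keras_core.backend.torch.optimizers.torch_adam.
    print(type(opt))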
keras-core/keras_core/backend/torch/optimizers/torch_optimizer.py/0
{ "file_path": "keras-core/keras_core/backend/torch/optimizers/torch_optimizer.py", "repo_id": "keras-core", "token_count": 798 }
25
import numpy as np import pytest from keras_core import callbacks from keras_core import initializers from keras_core import layers from keras_core import testing from keras_core.models import Sequential from keras_core.utils import numerical_utils class TerminateOnNaNTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_TerminateOnNaN(self): TRAIN_SAMPLES = 10 TEST_SAMPLES = 10 INPUT_DIM = 3 NUM_CLASSES = 2 BATCH_SIZE = 4 np.random.seed(1337) x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM)) y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES) x_test = np.random.random((TEST_SAMPLES, INPUT_DIM)) y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES) y_test = numerical_utils.to_categorical(y_test) y_train = numerical_utils.to_categorical(y_train) model = Sequential() initializer = initializers.Constant(value=1e5) for _ in range(5): model.add( layers.Dense( 2, activation="relu", kernel_initializer=initializer, ) ) model.add(layers.Dense(NUM_CLASSES)) model.compile(loss="mean_squared_error", optimizer="sgd") history = model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=[callbacks.TerminateOnNaN()], epochs=20, ) loss = history.history["loss"] self.assertEqual(len(loss), 1) self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))
keras-core/keras_core/callbacks/terminate_on_nan_test.py/0
{ "file_path": "keras-core/keras_core/callbacks/terminate_on_nan_test.py", "repo_id": "keras-core", "token_count": 851 }
26
"""Test for distribution_lib.py.""" import os from unittest import mock import jax import numpy as np import pytest from keras_core import backend from keras_core import layers from keras_core import models from keras_core import testing from keras_core.backend import distribution_lib as backend_dlib from keras_core.distribution import distribution_lib if backend.backend() == "jax": # Due to https://github.com/google/jax/issues/17188, we can't # override the XLA flag after the JAX back init. We have to # run this at top level to let JAX pick the flag value. xla_flags = os.getenv("XLA_FLAGS") or "" # Don't override user-specified device count, or other XLA flags. if "xla_force_host_platform_device_count" not in xla_flags: os.environ["XLA_FLAGS"] = ( xla_flags + " --xla_force_host_platform_device_count=8" ) class DeviceMeshTest(testing.TestCase): def test_mesh_creation(self): devices = ["CPU:{i}" for i in range(8)] shape = (4, 2) axis_names = ["batch", "model"] mesh = distribution_lib.DeviceMesh(shape, axis_names, devices) self.assertEqual(mesh.shape, shape) self.assertEqual(mesh.axis_names, axis_names) self.assertEqual(mesh.devices.shape, shape) def test_input_validation(self): devices = ["CPU:{i}" for i in range(4)] with self.assertRaisesRegex( ValueError, "Shape and axis_names cannot be empty" ): distribution_lib.DeviceMesh((4,), "", devices) with self.assertRaisesRegex( ValueError, "Shape and axis_names should have same size" ): distribution_lib.DeviceMesh((4, 2), ["batch"], devices) with self.assertRaisesRegex( ValueError, "Shape does not match the number of devices" ): distribution_lib.DeviceMesh((4, 2), ["batch", "model"], devices) class TensorLayoutTest(testing.TestCase): def setUp(self): self.mesh = distribution_lib.DeviceMesh( (4, 2), ["data", "model"], [f"CPU:{i}" for i in range(8)] ) def test_tensor_layout_creation(self): axes = ["data", None] layout = distribution_lib.TensorLayout(axes, self.mesh) self.assertEqual(layout.device_mesh, self.mesh) self.assertEqual(layout.axes, axes) def test_tensor_layout_validation(self): axes = ["data", "unknown", None] with self.assertRaisesRegex( ValueError, "Invalid axis names for Layout" ): distribution_lib.TensorLayout(axes, self.mesh) def test_lazy_device_mesh_injection(self): axes = ["data", None] layout = distribution_lib.TensorLayout(axes, None) self.assertIsNone(layout.device_mesh) self.assertEqual(layout.axes, axes) layout.device_mesh = self.mesh self.assertEqual(layout.device_mesh, self.mesh) self.assertEqual(layout.axes, axes) def test_lazy_device_mesh_validation(self): axes = ["data", "unknown", None] layout = distribution_lib.TensorLayout(axes, None) self.assertIsNone(layout.device_mesh) self.assertEqual(layout.axes, axes) with self.assertRaisesRegex( ValueError, "Invalid axis names for Layout" ): layout.device_mesh = self.mesh class DistributionTest(testing.TestCase): def setUp(self): super().setUp() devices = ["CPU:{i}" for i in range(8)] shape = (4, 2) axis_names = ["batch", "model"] self.device_mesh = distribution_lib.DeviceMesh( shape, axis_names, devices ) def test_init_with_device_mesh(self): distribution = distribution_lib.Distribution(self.device_mesh) self.assertIs(distribution.device_mesh, self.device_mesh) def test_scope(self): distribution_1 = distribution_lib.Distribution(self.device_mesh) distribution_2 = distribution_lib.Distribution(self.device_mesh) self.assertIsNone(distribution_lib.distribution()) with distribution_1.scope(): self.assertIs(distribution_lib.distribution(), distribution_1) with 
distribution_2.scope(): self.assertIs(distribution_lib.distribution(), distribution_2) self.assertIs(distribution_lib.distribution(), distribution_1) self.assertIsNone(distribution_lib.distribution()) class DataParallelDistributionTest(testing.TestCase): def setUp(self): super().setUp() self.devices = ["CPU:{i}" for i in range(8)] shape = (8,) axis_names = ["data"] self.device_mesh = distribution_lib.DeviceMesh( shape, axis_names, self.devices ) def test_create_with_device_mesh(self): distribution = distribution_lib.DataParallel( device_mesh=self.device_mesh ) device_mesh = distribution.device_mesh self.assertEqual(len(device_mesh.devices), 8) self.assertEqual(device_mesh.axis_names, ["data"]) self.assertEqual(distribution._batch_dim_name, "data") def test_create_with_devices(self): distribution = distribution_lib.DataParallel(devices=self.devices) device_mesh = distribution.device_mesh self.assertEqual(len(device_mesh.devices), 8) self.assertEqual(device_mesh.axis_names, ["batch"]) self.assertEqual(distribution._batch_dim_name, "batch") @mock.patch.object( distribution_lib, "list_devices", return_value=["CPU:{i}" for i in range(8)], ) def test_create_with_list_devices(self, mock_list_devices): distribution = distribution_lib.DataParallel() mock_list_devices.assert_called_once() device_mesh = distribution.device_mesh self.assertEqual(len(device_mesh.devices), 8) self.assertEqual(device_mesh.axis_names, ["batch"]) self.assertEqual(distribution._batch_dim_name, "batch") def test_get_data_layout(self): distribution = distribution_lib.DataParallel( device_mesh=self.device_mesh ) data = np.arange(16).reshape((4, 2, 2)) data_layout = distribution.get_data_layout(data.shape) self.assertIs(data_layout.device_mesh, self.device_mesh) self.assertEqual(data_layout.axes, ["data", None, None]) def test_get_variable_layout(self): distribution = distribution_lib.DataParallel( device_mesh=self.device_mesh ) variable = backend.Variable(initializer=[1, 2, 3]) variable_layout = distribution.get_variable_layout(variable) self.assertIs(variable_layout.device_mesh, self.device_mesh) self.assertEqual(variable_layout.axes, [None]) class ModelParallelDistributionTest(testing.TestCase): def setUp(self): super().setUp() self.devices = ["CPU:{i}" for i in range(8)] shape = (2, 4) axis_names = ["data", "model"] self.device_mesh = distribution_lib.DeviceMesh( shape, axis_names, self.devices ) def test_distribute_weights(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"]) layout_map[".*bias"] = distribution_lib.TensorLayout(["model"]) distribution = distribution_lib.ModelParallel( self.device_mesh, layout_map, batch_dim_name="data" ) kernel = backend.Variable(initializer=np.arange(8, 4), name="kernel") bias = backend.Variable(initializer=np.arange(4), name="bias") rng_seed = backend.Variable(initializer=[0, 1], name="seed") kernel_layout = distribution.get_variable_layout(kernel) self.assertIs(kernel_layout.device_mesh, self.device_mesh) self.assertEqual(kernel_layout.axes, [None, "model"]) bias_layout = distribution.get_variable_layout(bias) self.assertIs(bias_layout.device_mesh, self.device_mesh) self.assertEqual(bias_layout.axes, ["model"]) rng_seed_layout = distribution.get_variable_layout(rng_seed) self.assertIs(rng_seed_layout.device_mesh, self.device_mesh) self.assertEqual(rng_seed_layout.axes, [None]) def test_distribute_data(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) distribution = 
distribution_lib.ModelParallel( self.device_mesh, layout_map, batch_dim_name="data" ) data = np.arange(16).reshape((4, 2, 2)) data_layout = distribution.get_data_layout(data.shape) self.assertIs(data_layout.device_mesh, self.device_mesh) self.assertEqual(data_layout.axes, ["data", None, None]) class LayoutMapTest(testing.TestCase): def setUp(self): super().setUp() self.devices = ["CPU:{i}" for i in range(8)] shape = (4, 2) axis_names = ["data", "model"] self.device_mesh = distribution_lib.DeviceMesh( shape, axis_names, self.devices ) self.sharded_2d = distribution_lib.TensorLayout([None, "model"]) self.sharded_1d = distribution_lib.TensorLayout(["model"]) self.replicated_2d = distribution_lib.TensorLayout([None, None]) self.replicated_1d = distribution_lib.TensorLayout([None]) def test_add(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d # Make there are two items in the map, and we access them via the # underlying container at layout_map._layout_map self.assertLen(layout_map, 2) kernel_layout = layout_map["dense/kernel"] self.assertEqual(kernel_layout.axes, [None, "model"]) self.assertIs(kernel_layout.device_mesh, self.device_mesh) bias_layout = layout_map["dense/bias"] self.assertEqual(bias_layout.axes, ["model"]) self.assertIs(bias_layout.device_mesh, self.device_mesh) with self.assertRaisesRegex(ValueError, "dense/kernel already exist"): layout_map["dense/kernel"] = self.sharded_2d with self.assertRaisesRegex(ValueError, "should be a TensorLayout"): layout_map["conv.kernel"] = [1, 2, 3] def test_get(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d layout_map["dense.*kernel"] = self.replicated_2d layout_map["dense.*bias"] = self.replicated_1d layout_map[".*bias"] = self.sharded_1d self.assertEqual(layout_map["dense/kernel"], self.sharded_2d) self.assertEqual(layout_map["dense/bias"], self.sharded_1d) # Map against the wildcard bias rule for dense, and based on the order # of insertion, it will not use .*bias. self.assertEqual(layout_map["dense_2/kernel"], self.replicated_2d) self.assertEqual(layout_map["dense_2/bias"], self.replicated_1d) self.assertIsNone(layout_map["conv2d/kernel"]) self.assertEqual(layout_map["conv2d/bias"], self.sharded_1d) def test_delete(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d self.assertEqual(layout_map.pop("dense/kernel"), self.sharded_2d) # Make sure to match against the exact string, not the regex with self.assertRaises(KeyError): layout_map.pop(".*bias") # Make sure del also works del layout_map["dense/bias"] self.assertLen(layout_map, 0) def test_len(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) self.assertLen(layout_map, 0) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d self.assertLen(layout_map, 2) def test_iter(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d # Make sure the items are ordered based on the insertion order. 
self.assertEqual( list(layout_map.keys()), ["dense/kernel", "dense/bias"] ) keys = [] values = [] for k, v in layout_map.items(): keys.append(k) values.append(v) self.assertEqual(keys, ["dense/kernel", "dense/bias"]) self.assertEqual(values, [self.sharded_2d, self.sharded_1d]) @pytest.mark.skipif( backend.backend() != "jax", reason="Backend specific test", ) class JaxDistributionLibTest(testing.TestCase): def test_list_devices(self): self.assertEqual(len(distribution_lib.list_devices()), 8) self.assertEqual(len(distribution_lib.list_devices("cpu")), 8) self.assertEqual(len(distribution_lib.list_devices("CPU")), 8) def test_to_jax_mesh(self): devices = ["CPU:{i}" for i in range(8)] shape = (4, 2) axis_names = ["batch", "model"] mesh = distribution_lib.DeviceMesh(shape, axis_names, devices) jax_mesh = backend_dlib.to_jax_mesh(mesh) self.assertIsInstance(jax_mesh, jax.sharding.Mesh) self.assertEqual(jax_mesh.devices.shape, shape) self.assertEqual(jax_mesh.axis_names, ("batch", "model")) def test_to_jax_layout(self): axes = ["data", None] mesh = distribution_lib.DeviceMesh( (4, 2), ["data", "model"], [f"CPU:{i}" for i in range(8)] ) layout = distribution_lib.TensorLayout(axes, mesh) jax_sharding = backend_dlib.to_jax_layout(layout) jax_mesh = backend_dlib.to_jax_mesh(mesh) self.assertEqual( jax_sharding, jax.sharding.NamedSharding( jax_mesh, jax.sharding.PartitionSpec("data", None) ), ) def test_validation_for_device_mesh(self): axes = ["data", None] layout = distribution_lib.TensorLayout(axes, device_mesh=None) with self.assertRaisesRegex( ValueError, "Cannot create sharding when device mesh is not set" ): backend_dlib.to_jax_layout(layout) def test_variable_assignment_reuse_layout(self): shape = (4, 2) axis_names = ["batch", "model"] device_mesh = distribution_lib.DeviceMesh( shape, axis_names, backend_dlib.list_devices() ) layout_map = distribution_lib.LayoutMap(device_mesh) layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout( [None, "model"] ) layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"]) distribution = distribution_lib.ModelParallel( device_mesh, layout_map, batch_dim_name="batch" ) with distribution.scope(): dense_layer = layers.Dense(8) dense_layer.build((16, 16)) self.assertEqual( dense_layer.kernel._value.sharding.spec, (None, "model") ) self.assertEqual(dense_layer.bias._value.sharding.spec, ("model",)) # Assign a numpy value to dense layer to mimic the model weight loading new_kernel = np.random.normal(size=(16, 8)) new_bias = np.random.normal(size=(8)) dense_layer.kernel.assign(new_kernel) dense_layer.bias.assign(new_bias) # Make sure the loaded value still use the layout when it is # initialized, even outside of the distribution scope. self.assertEqual( dense_layer.kernel._value.sharding.spec, (None, "model") ) self.assertEqual(dense_layer.bias._value.sharding.spec, ("model",)) def test_e2e_data_parallel_model(self): distribution = distribution_lib.DataParallel( devices=backend_dlib.list_devices() ) with distribution.scope(): inputs = layers.Input(shape=[28, 28, 1]) y = layers.Flatten()(inputs) y = layers.Dense(units=200, use_bias=False, activation="relu")(y) y = layers.Dropout(0.4)(y) y = layers.Dense(units=10, activation="softmax")(y) model = models.Model(inputs=inputs, outputs=y) # Make sure all the weights are properly sharded. 
for weight in model.weights: self.assertTrue(weight._value.sharding.is_fully_replicated) inputs = np.random.normal(size=(32, 28, 28, 1)) labels = np.random.normal(size=(32, 10)) with distribution.scope(): model.compile(loss="mse") model.fit(inputs, labels) def test_e2e_model_parallel_model(self): shape = (4, 2) axis_names = ["batch", "model"] device_mesh = distribution_lib.DeviceMesh( shape, axis_names, backend_dlib.list_devices() ) layout_map = distribution_lib.LayoutMap(device_mesh) layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout( [None, "model"] ) layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"]) distribution = distribution_lib.ModelParallel( device_mesh, layout_map, batch_dim_name="batch" ) with distribution.scope(): inputs = layers.Input(shape=[28, 28, 1]) y = layers.Flatten()(inputs) y = layers.Dense(units=200, use_bias=False, activation="relu")(y) y = layers.Dropout(0.4)(y) y = layers.Dense(units=10, activation="softmax")(y) model = models.Model(inputs=inputs, outputs=y) for weight in model.weights: if "kernel" in weight.name: self.assertEqual(weight._value.sharding.spec, (None, "model")) elif "bias" in weight.name: self.assertEqual(weight._value.sharding.spec, ("model",)) else: self.assertTrue(weight._value.sharding.is_fully_replicated) inputs = np.random.normal(size=(32, 28, 28, 1)) labels = np.random.normal(size=(32, 10)) with distribution.scope(): model.compile(loss="mse") model.fit(inputs, labels)
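

# An illustrative sketch (not part of the original test suite): it wires
# together the same public objects the tests above exercise. The "CPU:i"
# device names are placeholders; nothing here talks to real accelerators,
# and the guard keeps pytest collection unaffected.
if __name__ == "__main__":
    mesh = distribution_lib.DeviceMesh(
        (4, 2), ["data", "model"], [f"CPU:{i}" for i in range(8)]
    )
    layout_map = distribution_lib.LayoutMap(mesh)
    layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"])
    layout_map[".*bias"] = distribution_lib.TensorLayout(["model"])
    model_parallel = distribution_lib.ModelParallel(
        mesh, layout_map, batch_dim_name="data"
    )
    with model_parallel.scope():
        # Inside the scope, the global distribution is the one we entered.
        assert distribution_lib.distribution() is model_parallel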
keras-core/keras_core/distribution/distribution_lib_test.py/0
{ "file_path": "keras-core/keras_core/distribution/distribution_lib_test.py", "repo_id": "keras-core", "token_count": 8058 }
27
import warnings

from keras_core import activations
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer


@keras_core_export("keras_core.layers.LeakyReLU")
class LeakyReLU(Layer):
    """Leaky version of a Rectified Linear Unit activation layer.

    This layer allows a small gradient when the unit is not active.

    Formula:

    ``` python
    f(x) = negative_slope * x if x < 0
    f(x) = x if x >= 0
    ```

    Example:

    ``` python
    leaky_relu_layer = LeakyReLU(negative_slope=0.5)
    input = np.array([-10, -5, 0.0, 5, 10])
    result = leaky_relu_layer(input)
    # result = [-5. , -2.5, 0. , 5. , 10.]
    ```

    Args:
        negative_slope: Float >= 0.0. Negative slope coefficient.
            Defaults to `0.3`.
        **kwargs: Base layer keyword arguments, such as `name` and `dtype`.
    """

    def __init__(self, negative_slope=0.3, **kwargs):
        if "alpha" in kwargs:
            negative_slope = kwargs.pop("alpha")
            warnings.warn(
                "Argument `alpha` is deprecated. "
                "Use `negative_slope` instead."
            )
        super().__init__(**kwargs)
        if negative_slope is None:
            raise ValueError(
                "The negative_slope value of a Leaky ReLU layer "
                "cannot be None. Expected a float. Received: "
                f"negative_slope={negative_slope}"
            )
        self.supports_masking = True
        self.negative_slope = negative_slope

    def call(self, inputs):
        return activations.leaky_relu(
            inputs, negative_slope=self.negative_slope
        )

    def get_config(self):
        config = super().get_config()
        config.update({"negative_slope": self.negative_slope})
        return config

    def compute_output_shape(self, input_shape):
        return input_shape
keras-core/keras_core/layers/activations/leaky_relu.py/0
{ "file_path": "keras-core/keras_core/layers/activations/leaky_relu.py", "repo_id": "keras-core", "token_count": 850 }
28
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.merging.base_merge import Merge


@keras_core_export("keras_core.layers.Subtract")
class Subtract(Merge):
    """Performs elementwise subtraction.

    It takes as input a list of tensors of size 2 both of the same shape, and
    returns a single tensor (inputs[0] - inputs[1]) of same shape.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras_core.layers.Subtract()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras_core.layers.Input(shape=(16,))
    >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras_core.layers.Input(shape=(32,))
    >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `subtracted = keras_core.layers.subtract([x1, x2])`
    >>> subtracted = keras_core.layers.Subtract()([x1, x2])
    >>> out = keras_core.layers.Dense(4)(subtracted)
    >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)

    """

    def build(self, input_shape):
        super().build(input_shape)
        if len(input_shape) != 2:
            raise ValueError(
                "A `Subtract` layer should be called on exactly 2 inputs. "
                f"Received: input_shape={input_shape}"
            )

    def _merge_function(self, inputs):
        if len(inputs) != 2:
            raise ValueError(
                "A `Subtract` layer should be called on exactly 2 inputs. "
                f"Received: inputs={inputs}"
            )
        return ops.subtract(inputs[0], inputs[1])


@keras_core_export("keras_core.layers.subtract")
def subtract(inputs, **kwargs):
    """Functional interface to the `keras_core.layers.Subtract` layer.

    Args:
        inputs: A list of input tensors of size 2, each tensor of the
            same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the difference of the inputs. It has the same shape
        as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras_core.layers.subtract([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras_core.layers.Input(shape=(16,))
    >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras_core.layers.Input(shape=(32,))
    >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
    >>> subtracted = keras_core.layers.subtract([x1, x2])
    >>> out = keras_core.layers.Dense(4)(subtracted)
    >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)
    """
    return Subtract(**kwargs)(inputs)
keras-core/keras_core/layers/merging/subtract.py/0
{ "file_path": "keras-core/keras_core/layers/merging/subtract.py", "repo_id": "keras-core", "token_count": 1201 }
29
import numpy as np
import pytest
from absl.testing import parameterized
from numpy.lib.stride_tricks import as_strided

from keras_core import backend
from keras_core import layers
from keras_core import testing


@pytest.mark.requires_trainable_backend
class AveragePoolingBasicTest(testing.TestCase, parameterized.TestCase):
    @parameterized.parameters(
        (2, 1, "valid", "channels_last", (3, 5, 4), (3, 4, 4)),
        (2, 1, "same", "channels_first", (3, 5, 4), (3, 5, 4)),
        ((2,), (2,), "valid", "channels_last", (3, 5, 4), (3, 2, 4)),
    )
    def test_average_pooling1d(
        self,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.AveragePooling1D,
            init_kwargs={
                "pool_size": pool_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )

    @parameterized.parameters(
        (2, 1, "valid", "channels_last", (3, 5, 5, 4), (3, 4, 4, 4)),
        (2, 1, "same", "channels_first", (3, 5, 5, 4), (3, 5, 5, 4)),
        ((2, 3), (2, 2), "valid", "channels_last", (3, 5, 5, 4), (3, 2, 2, 4)),
    )
    def test_average_pooling2d(
        self,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.AveragePooling2D,
            init_kwargs={
                "pool_size": pool_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )

    @parameterized.parameters(
        (2, 1, "valid", "channels_last", (3, 5, 5, 5, 4), (3, 4, 4, 4, 4)),
        (2, 1, "same", "channels_first", (3, 5, 5, 5, 4), (3, 5, 5, 5, 4)),
        (
            (2, 3, 2),
            (2, 2, 1),
            "valid",
            "channels_last",
            (3, 5, 5, 5, 4),
            (3, 2, 2, 4, 4),
        ),
    )
    def test_average_pooling3d(
        self,
        pool_size,
        strides,
        padding,
        data_format,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.AveragePooling3D,
            init_kwargs={
                "pool_size": pool_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
            # Incomplete op support on tensorflow.
            run_mixed_precision_check=False,
        )


class AveragePoolingCorrectnessTest(testing.TestCase, parameterized.TestCase):
    def _same_padding(self, input_size, pool_size, stride):
        if input_size % stride == 0:
            return max(pool_size - stride, 0)
        else:
            return max(pool_size - (input_size % stride), 0)

    def _np_avgpool1d(self, x, pool_size, strides, padding, data_format):
        if data_format == "channels_first":
            x = x.swapaxes(1, 2)
        if isinstance(pool_size, (tuple, list)):
            pool_size = pool_size[0]
        if isinstance(strides, (tuple, list)):
            h_stride = strides[0]
        else:
            h_stride = strides

        if padding == "same":
            n_batch, h_x, ch_x = x.shape
            pad_value = self._same_padding(h_x, pool_size, h_stride)
            npad = [(0, 0)] * x.ndim
            npad[1] = (0, pad_value)
            x = np.pad(x, pad_width=npad, mode="edge")

        n_batch, h_x, ch_x = x.shape
        out_h = int((h_x - pool_size) / h_stride) + 1

        stride_shape = (n_batch, out_h, ch_x, pool_size)
        strides = (
            x.strides[0],
            h_stride * x.strides[1],
            x.strides[2],
            x.strides[1],
        )
        windows = as_strided(x, shape=stride_shape, strides=strides)
        out = np.mean(windows, axis=(3,))
        if data_format == "channels_first":
            out = out.swapaxes(1, 2)
        return out

    def _np_avgpool2d(self, x, pool_size, strides, padding, data_format):
        if data_format == "channels_first":
            x = x.transpose((0, 2, 3, 1))
        if isinstance(pool_size, int):
            pool_size = (pool_size, pool_size)
        if isinstance(strides, int):
            strides = (strides, strides)

        h_pool_size, w_pool_size = pool_size
        h_stride, w_stride = strides
        if padding == "same":
            n_batch, h_x, w_x, ch_x = x.shape
            h_padding = self._same_padding(h_x, h_pool_size, h_stride)
            w_padding = self._same_padding(w_x, w_pool_size, w_stride)
            npad = [(0, 0)] * x.ndim
            npad[1] = (0, h_padding)
            npad[2] = (0, w_padding)
            x = np.pad(x, pad_width=npad, mode="edge")

        n_batch, h_x, w_x, ch_x = x.shape
        out_h = int((h_x - h_pool_size) / h_stride) + 1
        out_w = int((w_x - w_pool_size) / w_stride) + 1

        stride_shape = (n_batch, out_h, out_w, ch_x, *pool_size)
        strides = (
            x.strides[0],
            h_stride * x.strides[1],
            w_stride * x.strides[2],
            x.strides[3],
            x.strides[1],
            x.strides[2],
        )
        windows = as_strided(x, shape=stride_shape, strides=strides)
        out = np.mean(windows, axis=(4, 5))
        if data_format == "channels_first":
            out = out.transpose((0, 3, 1, 2))
        return out

    def _np_avgpool3d(self, x, pool_size, strides, padding, data_format):
        if data_format == "channels_first":
            x = x.transpose((0, 2, 3, 4, 1))

        if isinstance(pool_size, int):
            pool_size = (pool_size, pool_size, pool_size)
        if isinstance(strides, int):
            strides = (strides, strides, strides)

        h_pool_size, w_pool_size, d_pool_size = pool_size
        h_stride, w_stride, d_stride = strides

        if padding == "same":
            n_batch, h_x, w_x, d_x, ch_x = x.shape
            h_padding = self._same_padding(h_x, h_pool_size, h_stride)
            w_padding = self._same_padding(w_x, w_pool_size, w_stride)
            d_padding = self._same_padding(d_x, d_pool_size, d_stride)
            npad = [(0, 0)] * x.ndim
            npad[1] = (0, h_padding)
            npad[2] = (0, w_padding)
            npad[3] = (0, d_padding)
            x = np.pad(x, pad_width=npad, mode="symmetric")

        n_batch, h_x, w_x, d_x, ch_x = x.shape
        out_h = int((h_x - h_pool_size) / h_stride) + 1
        out_w = int((w_x - w_pool_size) / w_stride) + 1
        out_d = int((d_x - d_pool_size) / d_stride) + 1

        stride_shape = (n_batch, out_h, out_w, out_d, ch_x, *pool_size)
        strides = (
            x.strides[0],
            h_stride * x.strides[1],
            w_stride * x.strides[2],
            d_stride * x.strides[3],
            x.strides[4],
            x.strides[1],
            x.strides[2],
            x.strides[3],
        )
        windows = as_strided(x, shape=stride_shape, strides=strides)
        out = np.mean(windows, axis=(5, 6, 7))
        if data_format == "channels_first":
            out = out.transpose((0, 4, 1, 2, 3))
        return out

    @parameterized.parameters(
        (2, 1, "valid", "channels_last"),
        (2, 1, "valid", "channels_first"),
        ((2,), (2,), "valid", "channels_last"),
        ((2,), (2,), "valid", "channels_first"),
    )
    def test_average_pooling1d(self, pool_size, strides, padding, data_format):
        inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))

        layer = layers.AveragePooling1D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = self._np_avgpool1d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)

    @parameterized.parameters(
        (2, 1, "same", "channels_last"),
        (2, 1, "same", "channels_first"),
        ((2,), (2,), "same", "channels_last"),
        ((2,), (2,), "same", "channels_first"),
    )
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="Same padding in Torch backend produces different results.",
    )
    def test_average_pooling1d_same_padding(
        self, pool_size, strides, padding, data_format
    ):
        inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))

        layer = layers.AveragePooling1D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = self._np_avgpool1d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)

    @parameterized.parameters(
        (2, 1, "valid", "channels_last"),
        ((2, 3), (2, 2), "valid", "channels_last"),
    )
    def test_average_pooling2d(self, pool_size, strides, padding, data_format):
        inputs = np.arange(16, dtype="float32").reshape((1, 4, 4, 1))
        layer = layers.AveragePooling2D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = self._np_avgpool2d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)

    @parameterized.parameters(
        (2, (2, 1), "same", "channels_last"),
        (2, (2, 1), "same", "channels_first"),
        ((2, 2), (2, 2), "same", "channels_last"),
        ((2, 2), (2, 2), "same", "channels_first"),
    )
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="Same padding in Torch backend produces different results.",
    )
    def test_average_pooling2d_same_padding(
        self, pool_size, strides, padding, data_format
    ):
        inputs = np.arange(16, dtype="float32").reshape((1, 4, 4, 1))
        layer = layers.AveragePooling2D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = self._np_avgpool2d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)

    @parameterized.parameters(
        (2, 1, "valid", "channels_last"),
        (2, 1, "valid", "channels_first"),
        ((2, 3, 2), (2, 2, 1), "valid", "channels_last"),
        ((2, 3, 2), (2, 2, 1), "valid", "channels_first"),
    )
    def test_average_pooling3d(self, pool_size, strides, padding, data_format):
        inputs = np.arange(240, dtype="float32").reshape((2, 3, 4, 5, 2))

        layer = layers.AveragePooling3D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = self._np_avgpool3d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)

    @parameterized.parameters(
        (2, 1, "same", "channels_last"),
        (2, 1, "same", "channels_first"),
        ((2, 2, 2), (2, 2, 1), "same", "channels_last"),
        ((2, 2, 2), (2, 2, 1), "same", "channels_first"),
    )
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="Same padding in Torch backend produces different results.",
    )
    def test_average_pooling3d_same_padding(
        self, pool_size, strides, padding, data_format
    ):
        inputs = np.arange(240, dtype="float32").reshape((2, 3, 4, 5, 2))

        layer = layers.AveragePooling3D(
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        outputs = layer(inputs)
        expected = self._np_avgpool3d(
            inputs, pool_size, strides, padding, data_format
        )
        self.assertAllClose(outputs, expected)
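

# A small hand-checkable sketch (not part of the tests above): average
# pooling a 4x4 image of values 0..15 with a 2x2 window and stride 2
# averages each non-overlapping block, e.g. (0 + 1 + 4 + 5) / 4 = 2.5 for
# the top-left block. The guard keeps pytest collection unaffected.
if __name__ == "__main__":
    from keras_core import ops

    x = np.arange(16, dtype="float32").reshape((1, 4, 4, 1))
    layer = layers.AveragePooling2D(pool_size=2, strides=2, padding="valid")
    out = ops.convert_to_numpy(layer(x))
    print(np.squeeze(out))  # expected: [[2.5, 4.5], [10.5, 12.5]]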
keras-core/keras_core/layers/pooling/average_pooling_test.py/0
{ "file_path": "keras-core/keras_core/layers/pooling/average_pooling_test.py", "repo_id": "keras-core", "token_count": 6731 }
30
import numpy as np
import pytest

from keras_core import layers
from keras_core import ops
from keras_core import testing


class RepeatVectorTest(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_repeat_vector(self):
        inputs = np.random.random((2, 5)).astype("float32")
        expected_output = ops.convert_to_tensor(
            np.repeat(np.reshape(inputs, (2, 1, 5)), 3, axis=1)
        )
        self.run_layer_test(
            layers.RepeatVector,
            init_kwargs={"n": 3},
            input_data=inputs,
            expected_output=expected_output,
        )

    def test_repeat_vector_with_dynamic_batch_size(self):
        input_layer = layers.Input(batch_shape=(None, 5))
        repeated = layers.RepeatVector(n=3)(input_layer)
        self.assertEqual(repeated.shape, (None, 3, 5))

    def test_repeat_vector_with_dynamic_dimension(self):
        input_layer = layers.Input(batch_shape=(2, None))
        repeated = layers.RepeatVector(n=3)(input_layer)
        self.assertEqual(repeated.shape, (2, 3, None))
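

# A minimal sketch (not part of the tests above): `RepeatVector(n)` tiles a
# `(batch, features)` input into `(batch, n, features)`.
if __name__ == "__main__":
    x = np.array([[1.0, 2.0]])
    y = layers.RepeatVector(3)(x)
    print(ops.convert_to_numpy(y))  # shape (1, 3, 2), rows all [1., 2.]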
keras-core/keras_core/layers/reshaping/repeat_vector_test.py/0
{ "file_path": "keras-core/keras_core/layers/reshaping/repeat_vector_test.py", "repo_id": "keras-core", "token_count": 462 }
31
import numpy as np
import pytest

from keras_core import backend
from keras_core import layers
from keras_core import losses
from keras_core import models
from keras_core import ops
from keras_core import optimizers
from keras_core import testing
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.ops import core


class CoreOpsStaticShapeTest(testing.TestCase):
    def test_scatter(self):
        indices = KerasTensor((5, 2))
        values = KerasTensor((5,))
        shape = (4, 4)
        self.assertEqual(core.scatter(indices, values, shape).shape, (4, 4))

    def test_scatter_update(self):
        inputs = KerasTensor((4, 4))
        indices = KerasTensor((5, 2))
        updates = KerasTensor((5,))
        self.assertEqual(
            core.scatter_update(inputs, indices, updates).shape, (4, 4)
        )

        inputs = KerasTensor((4, 4, 4))
        indices = KerasTensor((5, 2))
        updates = KerasTensor((5, 4))
        self.assertEqual(
            core.scatter_update(inputs, indices, updates).shape, (4, 4, 4)
        )

    def test_slice_update(self):
        inputs = KerasTensor((4, 4))
        start_indices = KerasTensor((2,))
        updates = KerasTensor((2, 2))
        self.assertEqual(
            core.slice_update(inputs, start_indices, updates).shape, (4, 4)
        )

        inputs = KerasTensor((4, 4, 4))
        start_indices = KerasTensor((3,))
        updates = KerasTensor((2, 2, 2))
        self.assertEqual(
            core.slice_update(inputs, start_indices, updates).shape, (4, 4, 4)
        )

    def test_fori_loop(self):
        def body_fun(i, x):
            return x + i

        initial_value = KerasTensor((3, 5, 7))
        result = core.fori_loop(0, 10, body_fun, initial_value)
        self.assertEqual(result.shape, (3, 5, 7))

    def test_unstack(self):
        x = KerasTensor((2, 3, 4))
        axis = 1
        out = core.unstack(x, axis=axis)
        self.assertEqual(len(out), 3)
        for o in out:
            self.assertEqual(o.shape, (2, 4))

        x = KerasTensor((2, None, None))
        axis, num = 1, 3
        out = core.unstack(x, num=num, axis=axis)
        self.assertEqual(len(out), 3)
        for o in out:
            self.assertEqual(o.shape, (2, None))

        with self.assertRaisesRegex(
            ValueError, r"Cannot infer argument `num` from shape"
        ):
            core.unstack(x, axis=axis)


class CoreOpsCorrectnessTest(testing.TestCase):
    def test_scatter(self):
        # Test 1D
        indices = np.array([[1], [3], [4], [7]])
        values = np.array([9, 10, 11, 12])
        self.assertAllClose(
            core.scatter(indices, values, (8,)),
            [0, 9, 0, 10, 11, 0, 0, 12],
        )
        # Test 2D
        indices = np.array([[0, 1], [2, 0]])
        values = np.array([5, 10])
        self.assertAllClose(
            core.scatter(indices, values, (3, 2)), [[0, 5], [0, 0], [10, 0]]
        )
        # Test 3D
        indices = np.array([[1], [3]])
        values = np.array(
            [
                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
            ]
        )
        self.assertAllClose(
            core.scatter(indices, values, (4, 4, 4)),
            [
                [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
            ],
        )
        # Test slices
        indices = np.array([[2], [4]])
        values = np.array([[1, 2, 3], [4, 5, 6]])
        self.assertAllClose(
            core.scatter(indices, values, (6, 3)),
            [[0, 0, 0], [0, 0, 0], [1, 2, 3], [0, 0, 0], [4, 5, 6], [0, 0, 0]],
        )
        # Duplicate indices
        indices = np.array([[0], [0]])
        values = np.array([1, 1])
        self.assertAllClose(core.scatter(indices, values, (1,)), [2])

    def test_scatter_update(self):
        # Test 1D.
        inputs = np.array([0, 0, 0, 0, 0, 0, 0, 0])
        indices = [[1], [3], [4], [7]]
        updates = np.array([9, 10, 11, 12])
        self.assertAllClose(
            core.scatter_update(inputs, indices, updates),
            [0, 9, 0, 10, 11, 0, 0, 12],
        )

        # Test 2D.
        inputs = np.array([[1, 1], [1, 1], [1, 1]])
        indices = [[0, 1], [2, 0]]
        updates = np.array([5, 10])
        self.assertAllClose(
            core.scatter_update(inputs, indices, updates),
            [[1, 5], [1, 1], [10, 1]],
        )

        # Test updates has multiple dimension.
        inputs = np.ones([4, 4, 4])
        indices = [[1, 1], [2, 2]]
        updates = np.array([[0, 1, 2, 3], [3, 2, 1, 0]], dtype=np.float64)
        outputs = core.scatter_update(inputs, indices, updates)
        self.assertAllClose(outputs[1, 1, :], [0, 1, 2, 3])
        self.assertAllClose(outputs[2, 2, :], [3, 2, 1, 0])

    def test_slice(self):
        # Test 1D.
        inputs = np.arange(10)
        start_indices = np.array([1])
        shape = np.array([4])
        self.assertAllClose(
            core.slice(inputs, start_indices, shape),
            [1, 2, 3, 4],
        )

        # Test 2D.
        inputs = np.broadcast_to(np.arange(10), (4, 10))
        start_indices = np.array([1, 1])
        shape = np.array([2, 4])
        self.assertAllClose(
            core.slice(inputs, start_indices, shape),
            [[1, 2, 3, 4], [1, 2, 3, 4]],
        )

        # Test N-D.
        inputs = np.broadcast_to(np.arange(10), (4, 4, 4, 10))
        start_indices = np.array([1, 1, 1, 1])
        shape = np.array([1, 2, 3, 4])
        outputs = core.slice(inputs, start_indices, shape)
        expected = np.broadcast_to(np.arange(1, 5), (1, 2, 3, 4))
        self.assertAllClose(outputs, expected)

    def test_dynamic_slice(self):
        def cond(index, inputs, sum):
            return index < 10

        def body(index, inputs, sum):
            sum = sum + core.slice(inputs, [index], [1])
            index = index + 1
            return index, inputs, sum

        index, inputs, sum = 0, np.arange(10), np.array([0])
        index, inputs, sum = core.while_loop(cond, body, (index, inputs, sum))
        self.assertAllClose(sum, [45])

    def test_slice_update(self):
        # Test 1D.
        inputs = np.array([0, 0, 0, 0, 0, 0, 0, 0])
        start_indices = np.array([1])
        updates = np.array([9, 10, 11, 12])
        self.assertAllClose(
            core.slice_update(inputs, start_indices, updates),
            [0, 9, 10, 11, 12, 0, 0, 0],
        )

        # Test 2D.
        inputs = np.array([[1, 1], [1, 1], [1, 1]])
        start_indices = [1, 0]
        updates = np.array([[2, 2], [2, 2]])
        self.assertAllClose(
            core.slice_update(inputs, start_indices, updates),
            [[1, 1], [2, 2], [2, 2]],
        )

        # Test N-D.
        inputs = np.ones([4, 4, 4, 4])
        start_indices = [1, 1, 2, 2]
        updates = np.zeros([2, 2, 2, 2])
        outputs = core.slice_update(inputs, start_indices, updates)
        self.assertAllClose(outputs[1:3, 1:3, 2:4, 2:4], np.zeros([2, 2, 2, 2]))

    def test_while_loop(self):
        def cond(x, y):
            return x[0, 0] < 10

        def body(x, y):
            return x + 1, y + 1

        x = np.ones((2, 3))
        y = np.ones((3, 2))
        x, y = core.while_loop(cond, body, (x, y))
        self.assertAllClose(x, np.ones((2, 3)) * 10)
        self.assertAllClose(y, np.ones((3, 2)) * 10)

        x = np.ones((2, 3))
        y = np.ones((3, 2))
        x, y = core.while_loop(cond, body, (x, y), maximum_iterations=5)
        self.assertAllClose(x, np.ones((2, 3)) * 6)
        self.assertAllClose(y, np.ones((3, 2)) * 6)

    def test_fori_loop(self):
        def body_fun(i, x):
            return x + i

        initial_value = np.array(0)
        result = core.fori_loop(0, 10, body_fun, initial_value)
        self.assertAllClose(result, 45)

    @pytest.mark.requires_trainable_backend
    def test_stop_gradient(self):
        class ExampleLayer(layers.Layer):
            def __init__(self):
                super().__init__()
                self.w = self.add_weight(shape=(1,), initializer="zeros")
                self.b = self.add_weight(shape=(1,), initializer="zeros")

            def call(self, x, training=False):
                return x * ops.stop_gradient(self.w.value) + self.b

        model = models.Sequential([ExampleLayer()])
        model.compile(
            optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
        )
        rng = np.random.default_rng(0)
        x = np.ones((2, 4), dtype=np.float32)
        y = rng.standard_normal((2, 4), dtype=np.float32)
        model.fit(x, y, epochs=1, batch_size=2)
        self.assertEqual(model.layers[0].w.numpy(), 0.0)
        self.assertNotEqual(model.layers[0].b.numpy(), 0.0)

    def test_stop_gradient_return(self):
        x = ops.random.uniform(shape=(2, 4), dtype="float32")
        y = ops.stop_gradient(x)
        self.assertAllClose(x, y)

    def test_shape(self):
        x = np.ones((2, 3, 7, 1))
        self.assertAllEqual(core.shape(x), (2, 3, 7, 1))

        x = KerasTensor((None, 3, None, 1))
        self.assertAllEqual(core.shape(x), (None, 3, None, 1))

    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors.",
    )
    def test_shape_sparse(self):
        import tensorflow as tf

        x = tf.SparseTensor(
            indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=(2, 3)
        )
        self.assertAllEqual(core.shape(x), (2, 3))

    def test_convert_to_tensor(self):
        x = np.ones((2,))
        x = ops.convert_to_tensor(x)
        x = ops.convert_to_numpy(x)
        self.assertAllEqual(x, (1, 1))
        self.assertIsInstance(x, np.ndarray)

        # Partially converted.
        x = ops.convert_to_tensor((1, ops.array(2), 3))
        self.assertAllEqual(x, (1, 2, 3))

        with self.assertRaises(ValueError):
            ops.convert_to_numpy(KerasTensor((2,)))

    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors.",
    )
    def test_convert_to_tensor_sparse(self):
        import tensorflow as tf

        x = tf.SparseTensor(
            indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=(2, 3)
        )

        x_default = ops.convert_to_tensor(x)
        self.assertIsInstance(x_default, tf.SparseTensor)
        self.assertAllClose(x, x_default)
        # Note that ops.convert_to_tensor does not expose the 'sparse' arg
        x_sparse = backend.convert_to_tensor(x, sparse=True)
        self.assertIsInstance(x_sparse, tf.SparseTensor)
        self.assertAllClose(x, x_sparse)
        x_dense = backend.convert_to_tensor(x, sparse=False)
        self.assertNotIsInstance(x_dense, tf.SparseTensor)
        self.assertAllClose(x, x_dense)

        x_numpy = ops.convert_to_numpy(x)
        self.assertIsInstance(x_numpy, np.ndarray)
        self.assertAllClose(x_numpy, x_dense)

    def test_cond(self):
        t = ops.cond(True, lambda: 0, lambda: 1)
        self.assertEqual(t, 0)
        f = ops.cond(False, lambda: 0, lambda: 1)
        self.assertEqual(f, 1)
        f = ops.cond(False, lambda: None, lambda: None)
        self.assertEqual(f, None)

        for val in [True, False]:
            out = ops.cond(
                val,
                lambda: KerasTensor((16, 3)),
                lambda: KerasTensor((16, 3)),
            )
            self.assertEqual((16, 3), out.shape)

        out = ops.cond(
            KerasTensor((), dtype="bool"),
            lambda: ops.ones((1, 3)),
            lambda: ops.zeros((1, 3)),
        )
        self.assertEqual((1, 3), out.shape)

        out = ops.cond(
            KerasTensor((), dtype="bool"),
            lambda: KerasTensor((3,)),
            lambda: KerasTensor((3,)),
        )
        self.assertEqual((3,), out.shape)

        with self.assertRaises(ValueError):
            ops.cond(
                KerasTensor((), dtype="bool"),
                lambda: KerasTensor((3,)),
                lambda: KerasTensor((4,)),
            )

    def test_unstack(self):
        rng = np.random.default_rng(0)
        x = rng.uniform(size=(2, 3, 4))
        x_tensor = ops.convert_to_tensor(x)
        axis = 1
        out = ops.unstack(x_tensor, axis=axis)
        out_ex = [x[:, i, :] for i in range(x.shape[axis])]
        self.assertEqual(len(out), len(out_ex))
        for o, o_e in zip(out, out_ex):
            o = ops.convert_to_numpy(o)
            self.assertAllClose(o, o_e)

    def test_cast(self):
        x = ops.ones((2,), dtype="float32")
        y = ops.cast(x, "float16")
        self.assertIn("float16", str(y.dtype))

        x = ops.KerasTensor((2,), dtype="float32")
        y = ops.cast(x, "float16")
        self.assertEqual("float16", y.dtype)
        self.assertEqual(x.shape, y.shape)
        self.assertTrue(hasattr(y, "_keras_history"))
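

# A small sketch (not part of the tests above) combining two of the ops
# exercised here: `fori_loop` accumulates 0 + 1 + ... + 9, then `cond`
# selects between the sum and zero based on a Python boolean.
if __name__ == "__main__":
    total = ops.fori_loop(0, 10, lambda i, acc: acc + i, np.array(0))
    picked = ops.cond(True, lambda: total, lambda: total * 0)
    print(ops.convert_to_numpy(picked))  # 45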
keras-core/keras_core/ops/core_test.py/0
{ "file_path": "keras-core/keras_core/ops/core_test.py", "repo_id": "keras-core", "token_count": 6936 }
32
import math
import time

import numpy as np
import tensorflow as tf
from absl.testing import parameterized

from keras_core import testing
from keras_core.trainers.data_adapters import py_dataset_adapter
from keras_core.utils.rng_utils import set_random_seed


class ExamplePyDataset(py_dataset_adapter.PyDataset):
    def __init__(
        self, x_set, y_set, sample_weight=None, batch_size=32, delay=0, **kwargs
    ):
        super().__init__(**kwargs)
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.sample_weight = sample_weight
        self.delay = delay

    def __len__(self):
        return math.ceil(len(self.x) / self.batch_size)

    def __getitem__(self, idx):
        # Create artificial delay to test multiprocessing
        time.sleep(self.delay)

        # Return x, y for batch idx.
        low = idx * self.batch_size
        # Cap upper bound at array length; the last batch may be smaller
        # if the total number of items is not a multiple of batch size.
        high = min(low + self.batch_size, len(self.x))
        batch_x = self.x[low:high]
        batch_y = self.y[low:high]
        if self.sample_weight is not None:
            return batch_x, batch_y, self.sample_weight[low:high]
        return batch_x, batch_y


class PyDatasetAdapterTest(testing.TestCase, parameterized.TestCase):
    @parameterized.parameters(
        [
            (True, 2, True, 10),
            (False, 2, True, 10),
            (True, 2, False, 10),
            (False, 2, False, 10),
            (True, 0, False, 0),
            (False, 0, False, 0),
        ]
    )
    def test_basic_flow(
        self, shuffle, workers, use_multiprocessing, max_queue_size
    ):
        set_random_seed(1337)
        x = np.random.random((64, 4))
        y = np.array([[i, i] for i in range(64)], dtype="float64")
        py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=16,
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            max_queue_size=max_queue_size,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            py_dataset, shuffle=shuffle
        )

        gen = adapter.get_numpy_iterator()
        sample_order = []
        for batch in gen:
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, np.ndarray)
            self.assertIsInstance(by, np.ndarray)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertEqual(bx.shape, (16, 4))
            self.assertEqual(by.shape, (16, 2))
            for i in range(by.shape[0]):
                sample_order.append(by[i, 0])
        if shuffle:
            self.assertFalse(sample_order == list(range(64)))
        else:
            self.assertAllClose(sample_order, list(range(64)))

        ds = adapter.get_tf_dataset()
        sample_order = []
        for batch in ds:
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, tf.Tensor)
            self.assertIsInstance(by, tf.Tensor)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertEqual(tuple(bx.shape), (16, 4))
            self.assertEqual(tuple(by.shape), (16, 2))
            for i in range(by.shape[0]):
                sample_order.append(by[i, 0])
        if shuffle:
            self.assertFalse(sample_order == list(range(64)))
        else:
            self.assertAllClose(sample_order, list(range(64)))

    # TODO: test class_weight
    # TODO: test sample weights
    # TODO: test inference mode (single output)

    def test_speedup(self):
        x = np.random.random((40, 4))
        y = np.random.random((40, 2))
        py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=4,
            workers=4,
            use_multiprocessing=True,
            max_queue_size=8,
            delay=1.0,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(py_dataset, shuffle=False)
        gen = adapter.get_numpy_iterator()
        t0 = time.time()
        for batch in gen:
            pass
        # With non-parallel iteration it should take at least 10s (+ overhead).
        # We check it took less than 8s.
        self.assertLess(time.time() - t0, 8)
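

# A usage sketch (not part of the tests above). Assumption: `Model.fit`
# accepts a `PyDataset` instance directly, which is the workflow this
# adapter exists to support. Kept under a guard so pytest collection is
# unaffected.
if __name__ == "__main__":
    from keras_core import layers
    from keras_core import models

    x = np.random.random((64, 4))
    y = np.random.random((64, 2))
    dataset = ExamplePyDataset(x, y, batch_size=16, workers=2)
    model = models.Sequential([layers.Dense(2)])
    model.compile(optimizer="sgd", loss="mse")
    model.fit(dataset, epochs=1)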
keras-core/keras_core/trainers/data_adapters/py_dataset_adapter_test.py/0
{ "file_path": "keras-core/keras_core/trainers/data_adapters/py_dataset_adapter_test.py", "repo_id": "keras-core", "token_count": 2146 }
33
set -e
set -x

cd "${KOKORO_ROOT}/"

sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1

PYTHON_BINARY="/usr/bin/python3.9"

"${PYTHON_BINARY}" -m venv venv
source venv/bin/activate
# Check the python version
python --version
python3 --version

export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:"
# Check cuda
nvidia-smi
nvcc --version

cd "src/github/keras-cv"
pip install -U pip setuptools

if [ "${KERAS2:-0}" == "1" ]
then
   echo "Keras2 detected."
   pip install -r requirements-common.txt --progress-bar off
   pip install tensorflow~=2.14
   pip install --extra-index-url https://download.pytorch.org/whl/cpu torch==2.1.0+cpu
   pip install torchvision~=0.16.0
   pip install "jax[cpu]"
   pip install keras-nlp-nightly --no-deps
   pip install tensorflow-text==2.15

elif [ "$KERAS_BACKEND" == "tensorflow" ]
then
   echo "TensorFlow backend detected."
   pip install -r requirements-tensorflow-cuda.txt --progress-bar off
   pip install keras-nlp-nightly

elif [ "$KERAS_BACKEND" == "jax" ]
then
   echo "JAX backend detected."
   pip install -r requirements-jax-cuda.txt --progress-bar off
   pip install keras-nlp-nightly

elif [ "$KERAS_BACKEND" == "torch" ]
then
   echo "PyTorch backend detected."
   pip install -r requirements-torch-cuda.txt --progress-bar off
   pip install keras-nlp-nightly
fi

pip install --no-deps -e "." --progress-bar off

# Run Extra Large Tests for Continuous builds
if [ "${RUN_XLARGE:-0}" == "1" ]
then
   pytest --cache-clear --check_gpu --run_large --run_extra_large --durations 0 \
      keras_cv/bounding_box \
      keras_cv/callbacks \
      keras_cv/losses \
      keras_cv/layers/object_detection \
      keras_cv/layers/preprocessing \
      keras_cv/models/backbones \
      keras_cv/models/classification \
      keras_cv/models/object_detection/retinanet \
      keras_cv/models/object_detection/yolo_v8 \
      keras_cv/models/object_detection_3d \
      keras_cv/models/segmentation \
      keras_cv/models/stable_diffusion
else
   pytest --cache-clear --check_gpu --run_large --durations 0 \
      keras_cv/bounding_box \
      keras_cv/callbacks \
      keras_cv/losses \
      keras_cv/layers/object_detection \
      keras_cv/layers/preprocessing \
      keras_cv/models/backbones \
      keras_cv/models/classification \
      keras_cv/models/object_detection/retinanet \
      keras_cv/models/object_detection/yolo_v8 \
      keras_cv/models/object_detection_3d \
      keras_cv/models/segmentation \
      keras_cv/models/stable_diffusion
fi
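
# Example local invocation (a sketch; on CI, Kokoro exports these variables):
#   KOKORO_ROOT=/tmp/kokoro KERAS_BACKEND=tensorflow bash build.sh
# Set KERAS2=1 to exercise the legacy Keras 2 path, or RUN_XLARGE=1 to
# include the extra-large test suite.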
keras-cv/.kokoro/github/ubuntu/gpu/build.sh/0
{ "file_path": "keras-cv/.kokoro/github/ubuntu/gpu/build.sh", "repo_id": "keras-cv", "token_count": 1078 }
34
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import unittest

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras

from keras_cv import bounding_box
from keras_cv.layers import RandomFlip
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
    BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import (  # noqa: E501
    BOUNDING_BOXES,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import (  # noqa: E501
    IMAGES,
)

# In order to support both unbatched and batched inputs, the horizontal
# and vertical axes are reverse indexed
H_AXIS = -3
W_AXIS = -2

# Defining modes for random flipping
HORIZONTAL = "horizontal"
VERTICAL = "vertical"
HORIZONTAL_AND_VERTICAL = "horizontal_and_vertical"


class OldRandomFlip(BaseImageAugmentationLayer):
    """A preprocessing layer which randomly flips images.

    This layer will flip the images horizontally and/or vertically based on
    the `mode` attribute.

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Output shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format.

    Arguments:
        mode: String indicating which flip mode to use. Can be `"horizontal"`,
            `"vertical"`, or `"horizontal_and_vertical"`, defaults to
            `"horizontal"`. `"horizontal"` is a left-right flip and
            `"vertical"` is a top-bottom flip.
        seed: Integer. Used to create a random seed.
        bounding_box_format: The format of bounding boxes of input dataset.
            Refer to
            https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
            for more details on supported bounding box formats.
    """

    def __init__(
        self, mode=HORIZONTAL, seed=None, bounding_box_format=None, **kwargs
    ):
        super().__init__(seed=seed, **kwargs)
        self.mode = mode
        self.seed = seed
        if mode == HORIZONTAL:
            self.horizontal = True
            self.vertical = False
        elif mode == VERTICAL:
            self.horizontal = False
            self.vertical = True
        elif mode == HORIZONTAL_AND_VERTICAL:
            self.horizontal = True
            self.vertical = True
        else:
            raise ValueError(
                "RandomFlip layer {name} received an unknown mode="
                "{arg}".format(name=self.name, arg=mode)
            )
        self.auto_vectorize = True
        self.bounding_box_format = bounding_box_format

    def augment_label(self, label, transformation, **kwargs):
        return label

    def augment_image(self, image, transformation, **kwargs):
        return OldRandomFlip._flip_image(image, transformation)

    def get_random_transformation(self, **kwargs):
        flip_horizontal = False
        flip_vertical = False
        if self.horizontal:
            flip_horizontal = self._random_generator.uniform(shape=[]) > 0.5
        if self.vertical:
            flip_vertical = self._random_generator.uniform(shape=[]) > 0.5
        return {
            "flip_horizontal": tf.cast(flip_horizontal, dtype=tf.bool),
            "flip_vertical": tf.cast(flip_vertical, dtype=tf.bool),
        }

    def _flip_image(image, transformation):
        flipped_output = tf.cond(
            transformation["flip_horizontal"],
            lambda: tf.image.flip_left_right(image),
            lambda: image,
        )
        flipped_output = tf.cond(
            transformation["flip_vertical"],
            lambda: tf.image.flip_up_down(flipped_output),
            lambda: flipped_output,
        )
        flipped_output.set_shape(image.shape)
        return flipped_output

    def _flip_bounding_boxes_horizontal(bounding_boxes):
        x1, x2, x3, x4 = tf.split(
            bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1
        )
        output = tf.stack(
            [
                1 - x3,
                x2,
                1 - x1,
                x4,
            ],
            axis=-1,
        )
        bounding_boxes = bounding_boxes.copy()
        bounding_boxes["boxes"] = tf.squeeze(output, axis=1)
        return bounding_boxes

    def _flip_bounding_boxes_vertical(bounding_boxes):
        x1, x2, x3, x4 = tf.split(
            bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1
        )
        output = tf.stack(
            [
                x1,
                1 - x4,
                x3,
                1 - x2,
            ],
            axis=-1,
        )
        output = tf.squeeze(output, axis=1)
        bounding_boxes = bounding_boxes.copy()
        bounding_boxes["boxes"] = output
        return bounding_boxes

    def augment_bounding_boxes(
        self, bounding_boxes, transformation=None, image=None, **kwargs
    ):
        if self.bounding_box_format is None:
            raise ValueError(
                "`RandomFlip()` was called with bounding boxes,"
                "but no `bounding_box_format` was specified in the constructor."
                "Please specify a bounding box format in the constructor. i.e."
                "`RandomFlip(bounding_box_format='xyxy')`"
            )
        bounding_boxes = bounding_boxes.copy()
        bounding_boxes = bounding_box.convert_format(
            bounding_boxes,
            source=self.bounding_box_format,
            target="rel_xyxy",
            images=image,
        )
        bounding_boxes = tf.cond(
            transformation["flip_horizontal"],
            lambda: OldRandomFlip._flip_bounding_boxes_horizontal(
                bounding_boxes
            ),
            lambda: bounding_boxes,
        )
        bounding_boxes = tf.cond(
            transformation["flip_vertical"],
            lambda: OldRandomFlip._flip_bounding_boxes_vertical(bounding_boxes),
            lambda: bounding_boxes,
        )
        bounding_boxes = bounding_box.clip_to_image(
            bounding_boxes,
            bounding_box_format="rel_xyxy",
            images=image,
        )
        bounding_boxes = bounding_box.convert_format(
            bounding_boxes,
            source="rel_xyxy",
            target=self.bounding_box_format,
            dtype=self.compute_dtype,
            images=image,
        )
        return bounding_box.to_ragged(bounding_boxes)

    def augment_segmentation_mask(
        self, segmentation_mask, transformation=None, **kwargs
    ):
        return OldRandomFlip._flip_image(segmentation_mask, transformation)

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        config = {
            "mode": self.mode,
            "seed": self.seed,
            "bounding_box_format": self.bounding_box_format,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))


class RandomFlipTest(tf.test.TestCase):
    def test_consistency_with_old_impl(self):
        mode = HORIZONTAL_AND_VERTICAL
        image = tf.random.uniform(shape=(1, 64, 64, 3)) * 255.0

        layer = RandomFlip(
            mode=mode,
        )
        old_layer = OldRandomFlip(
            mode=mode,
        )

        with unittest.mock.patch.object(
            layer._random_generator,
            "uniform",
            return_value=tf.convert_to_tensor([[0.6]]),
        ):
            output = layer(image)

        with unittest.mock.patch.object(
            old_layer._random_generator,
            "uniform",
            return_value=tf.convert_to_tensor(0.6),
        ):
            old_output = old_layer(image)

        self.assertAllClose(old_output, output)


if __name__ == "__main__":
    # Run benchmark
    (x_train, _), _ = keras.datasets.cifar10.load_data()
    x_train = x_train.astype(np.float32)

    is_inputs_containing_bounding_boxes = False
    num_images = [100, 200, 500, 1000]
    results = {}
    aug_candidates = [RandomFlip, OldRandomFlip]
    aug_args = {
        "mode": HORIZONTAL_AND_VERTICAL,
        "bounding_box_format": "xyxy",
    }

    for aug in aug_candidates:
        # Eager Mode
        c = aug.__name__
        layer = aug(**aug_args)
        runtimes = []
        print(f"Timing {c}")

        for n_images in num_images:
            inputs = {IMAGES: x_train[:n_images]}
            if is_inputs_containing_bounding_boxes:
                inputs.update(
                    {
                        BOUNDING_BOXES: {
                            "classes": tf.zeros(shape=(n_images, 4)),
                            "boxes": tf.zeros(shape=(n_images, 4, 4)),
                        }
                    }
                )
            # warmup
            layer(inputs)

            t0 = time.time()
            r1 = layer(inputs)
            t1 = time.time()
            runtimes.append(t1 - t0)
            print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
        results[c] = runtimes

        # Graph Mode
        c = aug.__name__ + " Graph Mode"
        layer = aug(**aug_args)

        @tf.function()
        def apply_aug(inputs):
            return layer(inputs)

        runtimes = []
        print(f"Timing {c}")

        for n_images in num_images:
            inputs = {IMAGES: x_train[:n_images]}
            if is_inputs_containing_bounding_boxes:
                inputs.update(
                    {
                        BOUNDING_BOXES: {
                            "classes": tf.zeros(shape=(n_images, 4)),
                            "boxes": tf.zeros(shape=(n_images, 4, 4)),
                        }
                    }
                )
            # warmup
            apply_aug(inputs)

            t0 = time.time()
            r1 = apply_aug(inputs)
            t1 = time.time()
            runtimes.append(t1 - t0)
            print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
        results[c] = runtimes

        # XLA Mode
        # OldRandomFlip fails to run on XLA
        if aug is OldRandomFlip:
            continue
        c = aug.__name__ + " XLA Mode"
        layer = aug(**aug_args)

        @tf.function(jit_compile=True)
        def apply_aug(inputs):
            return layer(inputs)

        runtimes = []
        print(f"Timing {c}")

        for n_images in num_images:
            inputs = {IMAGES: x_train[:n_images]}
            if is_inputs_containing_bounding_boxes:
                inputs.update(
                    {
                        BOUNDING_BOXES: {
                            "classes": tf.zeros(shape=(n_images, 4)),
                            "boxes": tf.zeros(shape=(n_images, 4, 4)),
                        }
                    }
                )
            # warmup
            apply_aug(inputs)

            t0 = time.time()
            r1 = apply_aug(inputs)
            t1 = time.time()
            runtimes.append(t1 - t0)
            print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
        results[c] = runtimes

    plt.figure()
    for key in results:
        plt.plot(num_images, results[key], label=key)
        plt.xlabel("Number images")

    plt.ylabel("Runtime (seconds)")
    plt.legend()
    plt.savefig("comparison.png")

    # So we can actually see more relevant margins
    del results[aug_candidates[1].__name__]
    plt.figure()
    for key in results:
        plt.plot(num_images, results[key], label=key)
        plt.xlabel("Number images")

    plt.ylabel("Runtime (seconds)")
    plt.legend()
    plt.savefig("comparison_no_old_eager.png")

    # Run unit tests
    tf.test.main()
keras-cv/benchmarks/vectorized_random_flip.py/0
{ "file_path": "keras-cv/benchmarks/vectorized_random_flip.py", "repo_id": "keras-cv", "token_count": 6054 }
35
"""Setup TensorFlow as external dependency""" _TF_HEADER_DIR = "TF_HEADER_DIR" _TF_SHARED_LIBRARY_DIR = "TF_SHARED_LIBRARY_DIR" _TF_SHARED_LIBRARY_NAME = "TF_SHARED_LIBRARY_NAME" _TF_CXX11_ABI_FLAG = "TF_CXX11_ABI_FLAG" _TF_CPLUSPLUS_VER = "TF_CPLUSPLUS_VER" def _tpl(repository_ctx, tpl, substitutions = {}, out = None): if not out: out = tpl repository_ctx.template( out, Label("//build_deps/tf_dependency:%s.tpl" % tpl), substitutions, ) def _fail(msg): """Output failure message when auto configuration fails.""" red = "\033[0;31m" no_color = "\033[0m" fail("%sPython Configuration Error:%s %s\n" % (red, no_color, msg)) def _is_windows(repository_ctx): """Returns true if the host operating system is windows.""" os_name = repository_ctx.os.name.lower() if os_name.find("windows") != -1: return True return False def _execute( repository_ctx, cmdline, error_msg = None, error_details = None, empty_stdout_fine = False): """Executes an arbitrary shell command. Helper for executes an arbitrary shell command. Args: repository_ctx: the repository_ctx object. cmdline: list of strings, the command to execute. error_msg: string, a summary of the error if the command fails. error_details: string, details about the error or steps to fix it. empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise it's an error. Returns: The result of repository_ctx.execute(cmdline). """ result = repository_ctx.execute(cmdline) if result.stderr or not (empty_stdout_fine or result.stdout): _fail("\n".join([ error_msg.strip() if error_msg else "Repository command failed", result.stderr.strip(), error_details if error_details else "", ])) return result def _read_dir(repository_ctx, src_dir): """Returns a string with all files in a directory. Finds all files inside a directory, traversing subfolders and following symlinks. The returned string contains the full path of all files separated by line breaks. Args: repository_ctx: the repository_ctx object. src_dir: directory to find files from. Returns: A string of all files inside the given dir. """ if _is_windows(repository_ctx): src_dir = src_dir.replace("/", "\\") find_result = _execute( repository_ctx, ["cmd.exe", "/c", "dir", src_dir, "/b", "/s", "/a-d"], empty_stdout_fine = True, ) # src_files will be used in genrule.outs where the paths must # use forward slashes. result = find_result.stdout.replace("\\", "/") else: find_result = _execute( repository_ctx, ["find", src_dir, "-follow", "-type", "f"], empty_stdout_fine = True, ) result = find_result.stdout return result def _genrule(genrule_name, command, outs): """Returns a string with a genrule. Genrule executes the given command and produces the given outputs. Args: genrule_name: A unique name for genrule target. command: The command to run. outs: A list of files generated by this rule. Returns: A genrule target. """ return ( "genrule(\n" + ' name = "' + genrule_name + '",\n' + " outs = [\n" + outs + "\n ],\n" + ' cmd = """\n' + command + '\n """,\n' + ")\n" ) def _norm_path(path): """Returns a path with '/' and remove the trailing slash.""" path = path.replace("\\", "/") if path[-1] == "/": path = path[:-1] return path def _symlink_genrule_for_dir( repository_ctx, src_dir, dest_dir, genrule_name, src_files = [], dest_files = [], tf_pip_dir_rename_pair = []): """Returns a genrule to symlink(or copy if on Windows) a set of files. If src_dir is passed, files will be read from the given directory; otherwise we assume files are in src_files and dest_files. Args: repository_ctx: the repository_ctx object. 
src_dir: source directory. dest_dir: directory to create symlink in. genrule_name: genrule name. src_files: list of source files instead of src_dir. dest_files: list of corresponding destination files. tf_pip_dir_rename_pair: list of the pair of tf pip parent directory to replace. For example, in TF pip package, the source code is under "tensorflow_core", and we might want to replace it with "tensorflow" to match the header includes. Returns: genrule target that creates the symlinks. """ # Check that tf_pip_dir_rename_pair has the right length tf_pip_dir_rename_pair_len = len(tf_pip_dir_rename_pair) if tf_pip_dir_rename_pair_len != 0 and tf_pip_dir_rename_pair_len != 2: _fail("The size of argument tf_pip_dir_rename_pair should be either 0 or 2, but %d is given." % tf_pip_dir_rename_pair_len) if src_dir != None: src_dir = _norm_path(src_dir) dest_dir = _norm_path(dest_dir) files = "\n".join(sorted(_read_dir(repository_ctx, src_dir).splitlines())) # Create a list with the src_dir stripped to use for outputs. if tf_pip_dir_rename_pair_len: dest_files = files.replace(src_dir, "").replace(tf_pip_dir_rename_pair[0], tf_pip_dir_rename_pair[1]).splitlines() else: dest_files = files.replace(src_dir, "").splitlines() src_files = files.splitlines() command = [] outs = [] for i in range(len(dest_files)): if dest_files[i] != "": # If we have only one file to link we do not want to use the dest_dir, as # $(@D) will include the full path to the file. dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i] # Copy the headers to create a sandboxable setup. cmd = "cp -f" command.append(cmd + ' "%s" "%s"' % (src_files[i], dest)) outs.append(' "' + dest_dir + dest_files[i] + '",') genrule = _genrule( genrule_name, ";\n".join(command), "\n".join(outs), ) return genrule def _tf_pip_impl(repository_ctx): tf_header_dir = repository_ctx.os.environ[_TF_HEADER_DIR] tf_header_rule = _symlink_genrule_for_dir( repository_ctx, tf_header_dir, "include", "tf_header_include", tf_pip_dir_rename_pair = ["tensorflow_core", "tensorflow"], ) tf_shared_library_dir = repository_ctx.os.environ[_TF_SHARED_LIBRARY_DIR] tf_shared_library_name = repository_ctx.os.environ[_TF_SHARED_LIBRARY_NAME] tf_shared_library_path = "%s/%s" % (tf_shared_library_dir, tf_shared_library_name) tf_cx11_abi = "-D_GLIBCXX_USE_CXX11_ABI=%s" % (repository_ctx.os.environ[_TF_CXX11_ABI_FLAG]) tf_cplusplus_ver = "-std=%s" % repository_ctx.os.environ[_TF_CPLUSPLUS_VER] tf_shared_library_rule = _symlink_genrule_for_dir( repository_ctx, None, "", tf_shared_library_name, [tf_shared_library_path], [tf_shared_library_name], ) _tpl(repository_ctx, "BUILD", { "%{TF_HEADER_GENRULE}": tf_header_rule, "%{TF_SHARED_LIBRARY_GENRULE}": tf_shared_library_rule, "%{TF_SHARED_LIBRARY_NAME}": tf_shared_library_name, }) _tpl( repository_ctx, "build_defs.bzl", { "%{tf_cx11_abi}": tf_cx11_abi, "%{tf_cplusplus_ver}": tf_cplusplus_ver, }, ) tf_configure = repository_rule( environ = [ _TF_HEADER_DIR, _TF_SHARED_LIBRARY_DIR, _TF_SHARED_LIBRARY_NAME, _TF_CXX11_ABI_FLAG, _TF_CPLUSPLUS_VER, ], implementation = _tf_pip_impl, )
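
# Usage sketch (an assumption about the surrounding repo, not defined in
# this file): the rule is meant to be instantiated from the WORKSPACE file
# after the TF_* environment variables above have been exported, e.g.:
#
#   load("//build_deps/tf_dependency:tf_configure.bzl", "tf_configure")
#
#   tf_configure(name = "local_config_tf")
#
# Build targets can then depend on the headers and shared-library targets
# declared in the generated @local_config_tf BUILD file.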
keras-cv/build_deps/tf_dependency/tf_configure.bzl/0
{ "file_path": "keras-cv/build_deps/tf_dependency/tf_configure.bzl", "repo_id": "keras-cv", "token_count": 3640 }
36