Dataset columns:
  text               string (length 5 to 261k)
  id                 string (length 16 to 106)
  metadata           dict
  __index_level_0__  int64 (0 to 266)
class NumpyLayer: pass
keras/keras/backend/numpy/layer.py/0
{ "file_path": "keras/keras/backend/numpy/layer.py", "repo_id": "keras", "token_count": 11 }
137
import functools import itertools import operator import torch from keras.backend.torch.core import convert_to_tensor RESIZE_INTERPOLATIONS = {} # populated after torchvision import UNSUPPORTED_INTERPOLATIONS = ( "lanczos3", "lanczos5", ) def resize( image, size, interpolation="bilinear", antialias=False, data_format="channels_last", ): try: import torchvision from torchvision.transforms import InterpolationMode as im RESIZE_INTERPOLATIONS.update( { "bilinear": im.BILINEAR, "nearest": im.NEAREST_EXACT, "bicubic": im.BICUBIC, } ) except: raise ImportError( "The torchvision package is necessary to use `resize` with the " "torch backend. Please install torchvision." ) if interpolation in UNSUPPORTED_INTERPOLATIONS: raise ValueError( "Resizing with Lanczos interpolation is " "not supported by the PyTorch backend. " f"Received: interpolation={interpolation}." ) if interpolation not in RESIZE_INTERPOLATIONS: raise ValueError( "Invalid value for argument `interpolation`. Expected of one " f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}" ) if not len(size) == 2: raise ValueError( "Argument `size` must be a tuple of two elements " f"(height, width). Received: size={size}" ) size = tuple(size) image = convert_to_tensor(image) if data_format == "channels_last": if image.ndim == 4: image = image.permute((0, 3, 1, 2)) elif image.ndim == 3: image = image.permute((2, 0, 1)) else: raise ValueError( "Invalid input rank: expected rank 3 (single image) " "or rank 4 (batch of images). Received input with shape: " f"image.shape={image.shape}" ) resized = torchvision.transforms.functional.resize( img=image, size=size, interpolation=RESIZE_INTERPOLATIONS[interpolation], antialias=antialias, ) if data_format == "channels_last": if len(image.shape) == 4: resized = resized.permute((0, 2, 3, 1)) elif len(image.shape) == 3: resized = resized.permute((1, 2, 0)) return resized AFFINE_TRANSFORM_INTERPOLATIONS = { "nearest": 0, "bilinear": 1, } AFFINE_TRANSFORM_FILL_MODES = { "constant", "nearest", "wrap", "mirror", "reflect", } def affine_transform( image, transform, interpolation="bilinear", fill_mode="constant", fill_value=0, data_format="channels_last", ): if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys(): raise ValueError( "Invalid value for argument `interpolation`. Expected of one " f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: " f"interpolation={interpolation}" ) if fill_mode not in AFFINE_TRANSFORM_FILL_MODES: raise ValueError( "Invalid value for argument `fill_mode`. Expected of one " f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}" ) image = convert_to_tensor(image) transform = convert_to_tensor(transform) if image.ndim not in (3, 4): raise ValueError( "Invalid image rank: expected rank 3 (single image) " "or rank 4 (batch of images). Received input with shape: " f"image.shape={image.shape}" ) if transform.ndim not in (1, 2): raise ValueError( "Invalid transform rank: expected rank 1 (single transform) " "or rank 2 (batch of transforms). 
Received input with shape: " f"transform.shape={transform.shape}" ) # unbatched case need_squeeze = False if image.ndim == 3: image = image.unsqueeze(dim=0) need_squeeze = True if transform.ndim == 1: transform = transform.unsqueeze(dim=0) if data_format == "channels_first": image = image.permute((0, 2, 3, 1)) batch_size = image.shape[0] # get indices meshgrid = torch.meshgrid( *[ torch.arange(size, dtype=transform.dtype, device=transform.device) for size in image.shape[1:] ], indexing="ij", ) indices = torch.concatenate( [torch.unsqueeze(x, dim=-1) for x in meshgrid], dim=-1 ) indices = torch.tile(indices, (batch_size, 1, 1, 1, 1)) # swap the values a0 = transform[:, 0].clone() a2 = transform[:, 2].clone() b1 = transform[:, 4].clone() b2 = transform[:, 5].clone() transform[:, 0] = b1 transform[:, 2] = b2 transform[:, 4] = a0 transform[:, 5] = a2 # deal with transform transform = torch.nn.functional.pad( transform, pad=[0, 1, 0, 0], mode="constant", value=1 ) transform = torch.reshape(transform, (batch_size, 3, 3)) offset = transform[:, 0:2, 2].clone() offset = torch.nn.functional.pad(offset, pad=[0, 1, 0, 0]) transform[:, 0:2, 2] = 0 # transform the indices coordinates = torch.einsum("Bhwij, Bjk -> Bhwik", indices, transform) coordinates = torch.moveaxis(coordinates, source=-1, destination=1) coordinates += torch.reshape(a=offset, shape=(*offset.shape, 1, 1, 1)) # Note: torch.stack is faster than torch.vmap when the batch size is small. affined = torch.stack( [ map_coordinates( image[i], coordinates[i], order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation], fill_mode=fill_mode, fill_value=fill_value, ) for i in range(len(image)) ], ) if data_format == "channels_first": affined = affined.permute((0, 3, 1, 2)) if need_squeeze: affined = affined.squeeze(dim=0) return affined def _mirror_index_fixer(index, size): s = size - 1 # Half-wavelength of triangular wave # Scaled, integer-valued version of the triangular wave |x - round(x)| return torch.abs((index + s) % (2 * s) - s) def _reflect_index_fixer(index, size): return torch.floor_divide( _mirror_index_fixer(2 * index + 1, 2 * size + 1) - 1, 2 ) _INDEX_FIXERS = { # we need to take care of out-of-bound indices in torch "constant": lambda index, size: torch.clip(index, 0, size - 1), "nearest": lambda index, size: torch.clip(index, 0, size - 1), "wrap": lambda index, size: index % size, "mirror": _mirror_index_fixer, "reflect": _reflect_index_fixer, } def _is_integer(a): if not torch.is_floating_point(a) and not torch.is_complex(a): return True return False def _nearest_indices_and_weights(coordinate): coordinate = ( coordinate if _is_integer(coordinate) else torch.round(coordinate) ) index = coordinate.to(torch.int32) return [(index, 1)] def _linear_indices_and_weights(coordinate): lower = torch.floor(coordinate) upper_weight = coordinate - lower lower_weight = 1 - upper_weight index = lower.to(torch.int32) return [(index, lower_weight), (index + 1, upper_weight)] def map_coordinates( input, coordinates, order, fill_mode="constant", fill_value=0.0 ): input_arr = convert_to_tensor(input) coordinate_arrs = [convert_to_tensor(c) for c in coordinates] # skip tensor creation as possible if isinstance(fill_value, (int, float)) and _is_integer(input_arr): fill_value = int(fill_value) if len(coordinates) != len(input_arr.shape): raise ValueError( "coordinates must be a sequence of length input.shape, but " f"{len(coordinates)} != {len(input_arr.shape)}" ) index_fixer = _INDEX_FIXERS.get(fill_mode) if index_fixer is None: raise ValueError( "Invalid 
value for argument `fill_mode`. Expected one of " f"{set(_INDEX_FIXERS.keys())}. Received: fill_mode={fill_mode}" ) if order == 0: interp_fun = _nearest_indices_and_weights elif order == 1: interp_fun = _linear_indices_and_weights else: raise NotImplementedError("map_coordinates currently requires order<=1") if fill_mode == "constant": def is_valid(index, size): return (0 <= index) & (index < size) else: def is_valid(index, size): return True valid_1d_interpolations = [] for coordinate, size in zip(coordinate_arrs, input_arr.shape): interp_nodes = interp_fun(coordinate) valid_interp = [] for index, weight in interp_nodes: fixed_index = index_fixer(index, size) valid = is_valid(index, size) valid_interp.append((fixed_index, valid, weight)) valid_1d_interpolations.append(valid_interp) outputs = [] for items in itertools.product(*valid_1d_interpolations): indices, validities, weights = zip(*items) if all(valid is True for valid in validities): # fast path contribution = input_arr[indices] else: all_valid = functools.reduce(operator.and_, validities) contribution = torch.where( all_valid, input_arr[indices], fill_value ) outputs.append(functools.reduce(operator.mul, weights) * contribution) result = functools.reduce(operator.add, outputs) if _is_integer(input_arr): result = result if _is_integer(result) else torch.round(result) return result.to(input_arr.dtype)
keras/keras/backend/torch/image.py/0
{ "file_path": "keras/keras/backend/torch/image.py", "repo_id": "keras", "token_count": 4296 }
138
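A minimal usage sketch for the `resize` kernel in keras/keras/backend/torch/image.py above. This is illustrative only: it assumes Keras 3 with `KERAS_BACKEND="torch"` and torchvision installed, and calls the public `keras.ops.image.resize`, which dispatches to the backend kernel shown in the row.

```python
import numpy as np
from keras import ops

# Illustrative NHWC batch of images.
images = np.random.uniform(size=(2, 64, 64, 3)).astype("float32")
resized = ops.image.resize(
    images, size=(32, 32), interpolation="bilinear", data_format="channels_last"
)
print(resized.shape)  # expected: (2, 32, 32, 3)
```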
import torch

from keras import ops
from keras import optimizers
from keras.backend.torch.optimizers import torch_parallel_optimizer


class RMSprop(
    torch_parallel_optimizer.TorchParallelOptimizer, optimizers.RMSprop
):
    def _parallel_update_step(
        self,
        grads,
        variables,
        learning_rate,
    ):
        keras_variables = variables
        variables = [v.value for v in variables]

        dtype = variables[0].dtype
        lr = ops.cast(learning_rate, dtype)

        velocities = [
            self._velocities[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]
        rho = self.rho

        torch._foreach_mul_(velocities, rho)
        torch._foreach_add_(
            velocities, torch._foreach_mul(grads, grads), alpha=1 - rho
        )

        denominators = torch._foreach_add(velocities, self.epsilon)
        if self.centered:
            average_grads = [
                self._average_gradients[
                    self._get_variable_index(variable)
                ].value
                for variable in keras_variables
            ]
            torch._foreach_mul_(average_grads, rho)
            torch._foreach_add_(average_grads, grads, alpha=1 - rho)
            torch._foreach_add_(
                denominators,
                torch._foreach_mul(average_grads, average_grads),
                alpha=-1,
            )
        torch._foreach_sqrt_(denominators)
        increments = torch._foreach_div(
            torch._foreach_mul(grads, lr), denominators
        )

        if self.momentum > 0:
            momentum_list = [
                self._momentums[self._get_variable_index(variable)].value
                for variable in keras_variables
            ]
            torch._foreach_mul_(momentum_list, self.momentum)
            torch._foreach_add_(momentum_list, increments)
            torch._foreach_add_(variables, momentum_list, alpha=-1)
        else:
            torch._foreach_add_(variables, increments, alpha=-1)
keras/keras/backend/torch/optimizers/torch_rmsprop.py/0
{ "file_path": "keras/keras/backend/torch/optimizers/torch_rmsprop.py", "repo_id": "keras", "token_count": 1015 }
139
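The class in keras/keras/backend/torch/optimizers/torch_rmsprop.py above fuses the update across all variables with `torch._foreach_*` ops. For reference, a hedged single-tensor sketch of the same (non-centered, momentum-free) update; names and hyperparameter defaults are illustrative, not taken from the file.

```python
import torch

def rmsprop_step(variable, grad, velocity, lr=0.001, rho=0.9, eps=1e-7):
    # v <- rho * v + (1 - rho) * g^2
    velocity.mul_(rho).add_(grad * grad, alpha=1 - rho)
    # x <- x - lr * g / sqrt(v + eps)
    denom = (velocity + eps).sqrt()
    variable.sub_(lr * grad / denom)
    return variable, velocity

# Tiny usage check with illustrative tensors.
x, g, v = torch.ones(3), torch.full((3,), 0.5), torch.zeros(3)
rmsprop_step(x, g, v)
```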
from keras.api_export import keras_export from keras.callbacks.callback import Callback @keras_export("keras.callbacks.LambdaCallback") class LambdaCallback(Callback): """Callback for creating simple, custom callbacks on-the-fly. This callback is constructed with anonymous functions that will be called at the appropriate time (during `Model.{fit | evaluate | predict}`). Note that the callbacks expects positional arguments, as: - `on_epoch_begin` and `on_epoch_end` expect two positional arguments: `epoch`, `logs` - `on_train_begin` and `on_train_end` expect one positional argument: `logs` - `on_train_batch_begin` and `on_train_batch_end` expect two positional arguments: `batch`, `logs` - See `Callback` class definition for the full list of functions and their expected arguments. Args: on_epoch_begin: called at the beginning of every epoch. on_epoch_end: called at the end of every epoch. on_train_begin: called at the beginning of model training. on_train_end: called at the end of model training. on_train_batch_begin: called at the beginning of every train batch. on_train_batch_end: called at the end of every train batch. kwargs: Any function in `Callback` that you want to override by passing `function_name=function`. For example, `LambdaCallback(.., on_train_end=train_end_fn)`. The custom function needs to have same arguments as the ones defined in `Callback`. Example: ```python # Print the batch number at the beginning of every batch. batch_print_callback = LambdaCallback( on_train_batch_begin=lambda batch,logs: print(batch)) # Stream the epoch loss to a file in JSON format. The file content # is not well-formed JSON but rather has a JSON object per line. import json json_log = open('loss_log.json', mode='wt', buffering=1) json_logging_callback = LambdaCallback( on_epoch_end=lambda epoch, logs: json_log.write( json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'), on_train_end=lambda logs: json_log.close() ) # Terminate some processes after having finished model training. processes = ... cleanup_callback = LambdaCallback( on_train_end=lambda logs: [ p.terminate() for p in processes if p.is_alive()]) model.fit(..., callbacks=[batch_print_callback, json_logging_callback, cleanup_callback]) ``` """ def __init__( self, on_epoch_begin=None, on_epoch_end=None, on_train_begin=None, on_train_end=None, on_train_batch_begin=None, on_train_batch_end=None, **kwargs, ): super().__init__() self.__dict__.update(kwargs) if on_epoch_begin is not None: self.on_epoch_begin = on_epoch_begin if on_epoch_end is not None: self.on_epoch_end = on_epoch_end if on_train_begin is not None: self.on_train_begin = on_train_begin if on_train_end is not None: self.on_train_end = on_train_end if on_train_batch_begin is not None: self.on_train_batch_begin = on_train_batch_begin if on_train_batch_end is not None: self.on_train_batch_end = on_train_batch_end
keras/keras/callbacks/lambda_callback.py/0
{ "file_path": "keras/keras/callbacks/lambda_callback.py", "repo_id": "keras", "token_count": 1402 }
140
"""Test for distribution_lib.py.""" import os from unittest import mock import numpy as np import pytest import tensorflow as tf from keras import backend from keras import testing from keras.backend import distribution_lib as backend_dlib from keras.distribution import distribution_lib @pytest.mark.skipif( backend.backend() != "jax", reason="Only JAX has the backend to mock at the moment", ) @mock.patch.object( backend_dlib, "initialize", return_value=None, ) class MultiProcessInitializeTest(testing.TestCase): def tearDown(self): super().tearDown() os.environ.clear() def test_initialize_with_explicit_param(self, mock_backend_initialize): job_addresses = "10.0.0.1:1234,10.0.0.2:2345" num_processes = 2 current_process_id = 0 distribution_lib.initialize( job_addresses, num_processes, current_process_id ) mock_backend_initialize.assert_called_once_with( job_addresses, num_processes, current_process_id ) def test_initialize_with_env_vars(self, mock_backend_initialize): job_addresses = "10.0.0.1:1234,10.0.0.2:2345" num_processes = 2 current_process_id = 0 os.environ["KERAS_DISTRIBUTION_JOB_ADDRESSES"] = job_addresses os.environ["KERAS_DISTRIBUTION_NUM_PROCESSES"] = str(num_processes) os.environ["KERAS_DISTRIBUTION_PROCESS_ID"] = str(current_process_id) distribution_lib.initialize() mock_backend_initialize.assert_called_once_with( job_addresses, num_processes, current_process_id ) def test_init_with_nones(self, mock_backend_initialize): # This is also valid case for Cloud TPU on JAX distribution_lib.initialize() mock_backend_initialize.assert_called_once_with(None, None, None) class DeviceMeshTest(testing.TestCase): def test_mesh_creation(self): devices = [f"cpu:{i}" for i in range(8)] shape = (4, 2) axis_names = ["batch", "model"] mesh = distribution_lib.DeviceMesh(shape, axis_names, devices) self.assertEqual(mesh.shape, shape) self.assertEqual(mesh.axis_names, axis_names) self.assertEqual(mesh.devices.shape, shape) def test_input_validation(self): devices = [f"cpu:{i}" for i in range(4)] with self.assertRaisesRegex( ValueError, "Shape and axis_names cannot be empty" ): distribution_lib.DeviceMesh((4,), "", devices) with self.assertRaisesRegex( ValueError, "Shape and axis_names should have same size" ): distribution_lib.DeviceMesh((4, 2), ["batch"], devices) with self.assertRaisesRegex( ValueError, "Shape does not match the number of devices" ): distribution_lib.DeviceMesh((4, 2), ["batch", "model"], devices) class TensorLayoutTest(testing.TestCase): def setUp(self): self.mesh = distribution_lib.DeviceMesh( (4, 2), ["data", "model"], [f"cpu:{i}" for i in range(8)] ) def test_tensor_layout_creation(self): axes = ("data", None) layout = distribution_lib.TensorLayout(axes, self.mesh) self.assertEqual(layout.device_mesh, self.mesh) self.assertEqual(layout.axes, axes) def test_tensor_layout_validation(self): axes = ("data", "unknown", None) with self.assertRaisesRegex( ValueError, "Invalid axis names for Layout" ): distribution_lib.TensorLayout(axes, self.mesh) def test_lazy_device_mesh_injection(self): axes = ("data", None) layout = distribution_lib.TensorLayout(axes, None) self.assertIsNone(layout.device_mesh) self.assertEqual(layout.axes, axes) layout.device_mesh = self.mesh self.assertEqual(layout.device_mesh, self.mesh) self.assertEqual(layout.axes, axes) def test_lazy_device_mesh_validation(self): axes = ("data", "unknown", None) layout = distribution_lib.TensorLayout(axes, None) self.assertIsNone(layout.device_mesh) self.assertEqual(layout.axes, axes) with self.assertRaisesRegex( ValueError, 
"Invalid axis names for Layout" ): layout.device_mesh = self.mesh class DistributionTest(testing.TestCase): def setUp(self): super().setUp() devices = [f"cpu:{i}" for i in range(8)] shape = (4, 2) axis_names = ["batch", "model"] self.device_mesh = distribution_lib.DeviceMesh( shape, axis_names, devices ) def test_init_with_device_mesh(self): distribution = distribution_lib.Distribution(self.device_mesh) self.assertIs(distribution.device_mesh, self.device_mesh) def test_scope(self): distribution_1 = distribution_lib.Distribution(self.device_mesh) distribution_2 = distribution_lib.Distribution(self.device_mesh) self.assertIsNone(distribution_lib.distribution()) with distribution_1.scope(): self.assertIs(distribution_lib.distribution(), distribution_1) with distribution_2.scope(): self.assertIs(distribution_lib.distribution(), distribution_2) self.assertIs(distribution_lib.distribution(), distribution_1) self.assertIsNone(distribution_lib.distribution()) @pytest.mark.skipif( backend.backend() != "jax", reason="Only JAX has the proper backend distribution lib", ) class DataParallelDistributionTest(testing.TestCase): def setUp(self): super().setUp() self.devices = [f"cpu:{i}" for i in range(8)] shape = (8,) axis_names = ["data"] self.device_mesh = distribution_lib.DeviceMesh( shape, axis_names, self.devices ) def test_create_with_device_mesh(self): distribution = distribution_lib.DataParallel( device_mesh=self.device_mesh ) device_mesh = distribution.device_mesh self.assertEqual(len(device_mesh.devices), 8) self.assertEqual(device_mesh.axis_names, ["data"]) self.assertEqual(distribution._batch_dim_name, "data") self.assertFalse(distribution._is_multi_process) self.assertEqual(distribution._process_id, 0) self.assertEqual(distribution._num_process, 1) def test_create_with_devices(self): distribution = distribution_lib.DataParallel(devices=self.devices) device_mesh = distribution.device_mesh self.assertEqual(len(device_mesh.devices), 8) self.assertEqual(device_mesh.axis_names, ["batch"]) self.assertEqual(distribution._batch_dim_name, "batch") @mock.patch.object( distribution_lib, "list_devices", return_value=[f"cpu:{i}" for i in range(8)], ) def test_create_with_list_devices(self, mock_list_devices): distribution = distribution_lib.DataParallel() mock_list_devices.assert_called_once() device_mesh = distribution.device_mesh self.assertEqual(len(device_mesh.devices), 8) self.assertEqual(device_mesh.axis_names, ["batch"]) self.assertEqual(distribution._batch_dim_name, "batch") def test_get_data_layout(self): distribution = distribution_lib.DataParallel( device_mesh=self.device_mesh ) data = np.arange(16).reshape((4, 2, 2)) data_layout = distribution.get_data_layout(data.shape) self.assertIs(data_layout.device_mesh, self.device_mesh) self.assertEqual(data_layout.axes, ("data", None, None)) def test_get_variable_layout(self): distribution = distribution_lib.DataParallel( device_mesh=self.device_mesh ) variable = backend.Variable(initializer=[1, 2, 3]) variable_layout = distribution.get_variable_layout(variable) self.assertIs(variable_layout.device_mesh, self.device_mesh) self.assertEqual(variable_layout.axes, (None,)) def test_get_tensor_layout(self): distribution = distribution_lib.DataParallel( device_mesh=self.device_mesh ) path = "path/to/tensor" tensor_layout = distribution.get_tensor_layout(path) self.assertIsNone(tensor_layout) def test_distribute_dataset(self): # We can only verify the single worker/process case in OSS for now. 
dataset = tf.data.Dataset.range(8) distribution = distribution_lib.DataParallel( device_mesh=self.device_mesh ) distributed_dataset = distribution.distribute_dataset(dataset) self.assertIs(dataset, distributed_dataset) @pytest.mark.skipif( backend.backend() != "jax", reason="Only JAX has the proper backend distribution lib", ) class ModelParallelDistributionTest(testing.TestCase): def setUp(self): super().setUp() self.devices = [f"cpu:{i}" for i in range(8)] shape = (2, 4) axis_names = ["data", "model"] self.device_mesh = distribution_lib.DeviceMesh( shape, axis_names, self.devices ) def test_distribute_weights(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"]) layout_map[".*bias"] = distribution_lib.TensorLayout(["model"]) distribution = distribution_lib.ModelParallel( self.device_mesh, layout_map, batch_dim_name="data" ) kernel = backend.Variable(initializer=np.arange(8, 4), name="kernel") bias = backend.Variable(initializer=np.arange(4), name="bias") rng_seed = backend.Variable(initializer=[0, 1], name="seed") kernel_layout = distribution.get_variable_layout(kernel) self.assertIs(kernel_layout.device_mesh, self.device_mesh) self.assertEqual(kernel_layout.axes, (None, "model")) bias_layout = distribution.get_variable_layout(bias) self.assertIs(bias_layout.device_mesh, self.device_mesh) self.assertEqual(bias_layout.axes, ("model",)) rng_seed_layout = distribution.get_variable_layout(rng_seed) self.assertIs(rng_seed_layout.device_mesh, self.device_mesh) self.assertEqual(rng_seed_layout.axes, (None,)) def test_distribute_data(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) distribution = distribution_lib.ModelParallel( self.device_mesh, layout_map, batch_dim_name="data" ) data = np.arange(16).reshape((4, 2, 2)) data_layout = distribution.get_data_layout(data.shape) self.assertIs(data_layout.device_mesh, self.device_mesh) self.assertEqual(data_layout.axes, ("data", None, None)) def test_get_tensor_layout(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"]) layout_map[".*bias"] = distribution_lib.TensorLayout(["model"]) layout_map["/model/layer/tensor"] = ("data", None) distribution = distribution_lib.ModelParallel( self.device_mesh, layout_map, batch_dim_name="data" ) layout = distribution.get_tensor_layout("/model/layer/tensor") self.assertIs(layout.device_mesh, self.device_mesh) self.assertEqual(layout.axes, ("data", None)) layout = distribution.get_tensor_layout("/model/layer/other_tensor") self.assertIsNone(layout) def test_distribute_dataset(self): # We can only verify the single worker/process case in OSS for now. 
dataset = tf.data.Dataset.range(8) distribution = distribution = distribution_lib.ModelParallel( self.device_mesh, {}, batch_dim_name="data" ) distributed_dataset = distribution.distribute_dataset(dataset) self.assertIs(dataset, distributed_dataset) class LayoutMapTest(testing.TestCase): def setUp(self): super().setUp() self.devices = [f"cpu:{i}" for i in range(8)] shape = (4, 2) axis_names = ["data", "model"] self.device_mesh = distribution_lib.DeviceMesh( shape, axis_names, self.devices ) self.sharded_2d = distribution_lib.TensorLayout([None, "model"]) self.sharded_1d = distribution_lib.TensorLayout(["model"]) self.replicated_2d = distribution_lib.TensorLayout([None, None]) self.replicated_1d = distribution_lib.TensorLayout([None]) def test_add(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d # Test for adding list/tuple as shortcut for TensorLayout layout_map["conv/bias"] = ("model",) # Make there are two items in the map, and we access them via the # underlying container at layout_map._layout_map self.assertLen(layout_map, 3) kernel_layout = layout_map["dense/kernel"] self.assertEqual(kernel_layout.axes, (None, "model")) self.assertIs(kernel_layout.device_mesh, self.device_mesh) bias_layout = layout_map["dense/bias"] self.assertEqual(bias_layout.axes, ("model",)) self.assertIs(bias_layout.device_mesh, self.device_mesh) conv_bias_layout = layout_map["conv/bias"] self.assertEqual(conv_bias_layout.axes, ("model",)) self.assertIs(bias_layout.device_mesh, self.device_mesh) with self.assertRaisesRegex(ValueError, "dense/kernel already exist"): layout_map["dense/kernel"] = self.sharded_2d with self.assertRaisesRegex(ValueError, "should be a TensorLayout"): layout_map["conv.kernel"] = ["a", "b"] def test_get(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d layout_map["dense.*kernel"] = self.replicated_2d layout_map["dense.*bias"] = self.replicated_1d layout_map["bias"] = self.sharded_1d self.assertEqual(layout_map["dense/kernel"], self.sharded_2d) self.assertEqual(layout_map["dense/bias"], self.sharded_1d) self.assertEqual(layout_map["dense_2/kernel"], self.replicated_2d) # Map against the wildcard bias rule for dense. This will cause a # ValueError with self.assertRaisesRegex( ValueError, "Path 'dense_2/bias' matches multiple layout" ): layout_map["dense_2/bias"] self.assertIsNone(layout_map["conv2d/kernel"]) self.assertEqual(layout_map["conv2d/bias"], self.sharded_1d) def test_delete(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d self.assertEqual(layout_map.pop("dense/kernel"), self.sharded_2d) # Make sure to match against the exact string, not the regex with self.assertRaises(KeyError): layout_map.pop(".*bias") # Make sure del also works del layout_map["dense/bias"] self.assertLen(layout_map, 0) def test_len(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) self.assertLen(layout_map, 0) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d self.assertLen(layout_map, 2) def test_iter(self): layout_map = distribution_lib.LayoutMap(self.device_mesh) layout_map["dense/kernel"] = self.sharded_2d layout_map["dense/bias"] = self.sharded_1d # Make sure the items are ordered based on the insertion order. 
self.assertEqual( list(layout_map.keys()), ["dense/kernel", "dense/bias"] ) keys = [] values = [] for k, v in layout_map.items(): keys.append(k) values.append(v) self.assertEqual(keys, ["dense/kernel", "dense/bias"]) self.assertEqual(values, [self.sharded_2d, self.sharded_1d]) # @pytest.mark.skipif( # backend.backend() != "tensorflow", # reason="Backend specific test", # ) # class TensorflowDistributionLibTest(testing.TestCase): # def setUp(self): # super().setUp() # # Config virtual devices for testing. # cpus = tf.config.list_physical_devices("cpu") # context._reset_context() # tf.config.set_logical_device_configuration( # cpus[0], [tf.config.LogicalDeviceConfiguration()] * 8 # ) # dtensor.initialize_accelerator_system("cpu") # def tearDown(self) -> None: # super().tearDown() # dtensor.shutdown_accelerator_system() # def test_list_devices(self): # self.assertEqual(len(distribution_lib.list_devices()), 8) # self.assertEqual(len(distribution_lib.list_devices("cpu")), 8) # self.assertEqual(len(distribution_lib.list_devices("cpu")), 8) # def test_to_dtensor_mesh(self): # devices = [f"cpu:{i}" for i in range(8)] # shape = (4, 2) # axis_names = ["batch", "model"] # mesh = distribution_lib.DeviceMesh(shape, axis_names, devices) # dtensor_mesh = backend_dlib._to_dtensor_mesh(mesh) # self.assertIsInstance(dtensor_mesh, dtensor.Mesh) # self.assertEqual(dtensor_mesh.shape(), list(shape)) # self.assertEqual(dtensor_mesh.dim_names, axis_names) # def test_to_dtensor_layout(self): # axes = ["data", None] # mesh = distribution_lib.DeviceMesh( # (4, 2), ["data", "model"], [f"cpu:{i}" for i in range(8)] # ) # layout = distribution_lib.TensorLayout(axes, mesh) # dtensor_layout = backend_dlib._to_dtensor_layout(layout) # dtensor_mesh = backend_dlib._to_dtensor_mesh(mesh) # self.assertEqual( # dtensor_layout, # dtensor.Layout(["data", dtensor.UNSHARDED], dtensor_mesh), # ) # def test_validation_for_device_mesh(self): # axes = ["data", None] # layout = distribution_lib.TensorLayout(axes, device_mesh=None) # with self.assertRaisesRegex( # ValueError, "Cannot create sharding when device mesh is not set" # ): # backend_dlib._to_dtensor_layout(layout)
keras/keras/distribution/distribution_lib_test.py/0
{ "file_path": "keras/keras/distribution/distribution_lib_test.py", "repo_id": "keras", "token_count": 8069 }
141
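A short sketch of the public API that the tests in keras/keras/distribution/distribution_lib_test.py above exercise. This assumes the JAX backend and the `keras.distribution` namespace re-exporting these symbols; device names and counts are illustrative.

```python
from keras import distribution

devices = distribution.list_devices()  # e.g. ["cpu:0", ..., "cpu:7"]
mesh = distribution.DeviceMesh(
    shape=(len(devices),), axis_names=["data"], devices=devices
)
data_parallel = distribution.DataParallel(device_mesh=mesh)
distribution.set_distribution(data_parallel)
# Alternatively, scope it: `with data_parallel.scope(): ...`
```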
import pytest

from keras import activations
from keras import layers
from keras import testing


class ActivationTest(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_activation_basics(self):
        self.run_layer_test(
            layers.Activation,
            init_kwargs={
                "activation": "relu",
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.Activation,
            init_kwargs={
                "activation": activations.gelu,
            },
            input_shape=(2, 2),
            expected_output_shape=(2, 2),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
keras/keras/layers/activations/activation_test.py/0
{ "file_path": "keras/keras/layers/activations/activation_test.py", "repo_id": "keras", "token_count": 565 }
142
from keras import constraints from keras import initializers from keras import ops from keras import regularizers from keras.api_export import keras_export from keras.layers.activations.softmax import Softmax from keras.layers.core.einsum_dense import EinsumDense from keras.layers.layer import Layer from keras.layers.regularization.dropout import Dropout @keras_export("keras.layers.GroupQueryAttention") class GroupedQueryAttention(Layer): """Grouped Query Attention layer. This is an implementation of grouped-query attention introduced by [Ainslie et al., 2023](https://arxiv.org/abs/2305.13245). Here `num_key_value_heads` denotes number of groups, setting `num_key_value_heads` to 1 is equivalent to multi-query attention, and when `num_key_value_heads` is equal to `num_query_heads` it is equivalent to multi-head attention. This layer first projects `query`, `key`, and `value` tensors. Then, `key` and `value` are repeated to match the number of heads of `query`. Then, the `query` is scaled and dot-producted with `key` tensors. These are softmaxed to obtain attention probabilities. The value tensors are then interpolated by these probabilities and concatenated back to a single tensor. Args: head_dim: Size of each attention head. num_query_heads: Number of query attention heads. num_key_value_heads: Number of key and value attention heads. dropout: Dropout probability. use_bias: Boolean, whether the dense layers use bias vectors/matrices. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. Call arguments: query: Query tensor of shape `(batch_dim, target_seq_len, feature_dim)`, where `batch_dim` is batch size, `target_seq_len` is the length of target sequence, and `feature_dim` is dimension of feature. value: Value tensor of shape `(batch_dim, source_seq_len, feature_dim)`, where `batch_dim` is batch size, `source_seq_len` is the length of source sequence, and `feature_dim` is dimension of feature. key: Optional key tensor of shape `(batch_dim, source_seq_len, feature_dim)`. If not given, will use `value` for both `key` and `value`, which is most common case. attention_mask: A boolean mask of shape `(batch_dim, target_seq_len, source_seq_len)`, that prevents attention to certain positions. The boolean mask specifies which query elements can attend to which key elements, where 1 indicates attention and 0 indicates no attention. Broadcasting can happen for the missing batch dimensions and the head dimension. return_attention_scores: A boolean to indicate whether the output should be `(attention_output, attention_scores)` if `True`, or `attention_output` if `False`. Defaults to `False`. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (no dropout). Will go with either using the training mode of the parent layer/model or `False` (inference) if there is no parent layer. use_causal_mask: A boolean to indicate whether to apply a causal mask to prevent tokens from attending to future tokens (e.g., used in a decoder Transformer). 
Returns: attention_output: Result of the computation, of shape `(batch_dim, target_seq_len, feature_dim)`, where `target_seq_len` is for target sequence length and `feature_dim` is the query input last dim. attention_scores: (Optional) attention coefficients of shape `(batch_dim, num_query_heads, target_seq_len, source_seq_len)`. """ def __init__( self, head_dim, num_query_heads, num_key_value_heads, dropout=0.0, use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs, ): super().__init__(**kwargs) self.supports_masking = True self.head_dim = head_dim self.num_query_heads = num_query_heads self.num_key_value_heads = num_key_value_heads if num_query_heads % num_key_value_heads != 0: raise ValueError( "`num_query_heads` must be divisible" " by `num_key_value_heads`." ) self.num_repeats = num_query_heads // num_key_value_heads self.dropout = dropout self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) def build( self, query_shape, value_shape, key_shape=None, ): # Einsum variables: # b = batch size # q = query length # k = key/value length # m = model dim # u = num query heads # v = num key/value heads # h = head dim key_shape = value_shape if key_shape is None else key_shape self.feature_dim = query_shape[-1] self._query_dense = EinsumDense( "bqm,muh->bquh", output_shape=(None, self.num_query_heads, self.head_dim), bias_axes="uh" if self.use_bias else None, name="query", **self._get_common_kwargs_for_sublayer(), ) self._query_dense.build(query_shape) self._key_dense = EinsumDense( "bkm,mvh->bkvh", output_shape=(None, self.num_key_value_heads, self.head_dim), bias_axes="vh" if self.use_bias else None, name="key", **self._get_common_kwargs_for_sublayer(), ) self._key_dense.build(key_shape) self._value_dense = EinsumDense( "bkm,mvh->bkvh", output_shape=(None, self.num_key_value_heads, self.head_dim), bias_axes="vh" if self.use_bias else None, name="value", **self._get_common_kwargs_for_sublayer(), ) self._value_dense.build(value_shape) self._softmax = Softmax(axis=-1, dtype=self.dtype_policy) self._dropout_layer = Dropout( rate=self.dropout, dtype=self.dtype_policy ) self._dot_product_equation = "bquh,bkuh->buqk" self._combine_equation = "buqk,bkuh->bquh" self._output_dense = EinsumDense( "bquh,uhm->bqm", output_shape=(None, self.feature_dim), bias_axes="m" if self.use_bias else None, name="attention_output", **self._get_common_kwargs_for_sublayer(), ) self._output_dense.build( (None, None, self.num_query_heads, self.head_dim) ) self.built = True def _get_common_kwargs_for_sublayer(self): common_kwargs = dict( kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer, activity_regularizer=self.activity_regularizer, kernel_constraint=self.kernel_constraint, bias_constraint=self.bias_constraint, dtype=self.dtype_policy, ) # Create new clone of kernel/bias initializer, so that we don't reuse # the initializer instance, which could lead to same init value since # initializer is stateless. 
kernel_initializer = self.kernel_initializer.__class__.from_config( self.kernel_initializer.get_config() ) bias_initializer = self.bias_initializer.__class__.from_config( self.bias_initializer.get_config() ) common_kwargs["kernel_initializer"] = kernel_initializer common_kwargs["bias_initializer"] = bias_initializer return common_kwargs def call( self, query, value, key=None, query_mask=None, value_mask=None, key_mask=None, attention_mask=None, return_attention_scores=False, training=None, use_causal_mask=False, ): if key is None: key = value attention_mask = self._compute_attention_mask( query, value, query_mask=query_mask, value_mask=value_mask, key_mask=key_mask, attention_mask=attention_mask, use_causal_mask=use_causal_mask, ) query = self._query_dense(query) key = self._key_dense(key) value = self._value_dense(value) key = ops.repeat( key, self.num_repeats, axis=2 ) # (batch_dim, source_seq_len, query_heads, head_dim) value = ops.repeat( value, self.num_repeats, axis=2 ) # (batch_dim, source_seq_len, query_heads, head_dim) output, scores = self._compute_attention( query, key, value, attention_mask=attention_mask, training=training, ) output = self._output_dense( output ) # (batch_dim, target_seq_len, feature_dim) if return_attention_scores: return output, scores return output def _compute_attention_mask( self, query, value, query_mask=None, value_mask=None, key_mask=None, attention_mask=None, use_causal_mask=False, ): """Computes the attention mask, using the Keras masks of the inputs. * The `query`'s mask is reshaped from [B, T] to [B, T, 1]. * The `value`'s mask is reshaped from [B, S] to [B, 1, S]. * The `key`'s mask is reshaped from [B, S] to [B, 1, S]. The `key`'s mask is ignored if `key` is `None` or if `key is value`. * If `use_causal_mask=True`, then the causal mask is computed. Its shape is [1, T, S]. All defined masks are merged using a logical AND operation (`&`). In general, if the `query` and `value` are masked, then there is no need to define the `attention_mask`. Args: query: Projected query tensor of shape `(B, T, N, key_dim)`. key: Projected key tensor of shape `(B, T, N, key_dim)`. value: Projected value tensor of shape `(B, T, N, value_dim)`. attention_mask: a boolean mask of shape `(B, T, S)`, that prevents attention to certain positions. use_causal_mask: A boolean to indicate whether to apply a causal mask to prevent tokens from attending to future tokens (e.g., used in a decoder Transformer). Returns: attention_mask: a boolean mask of shape `(B, T, S)`, that prevents attention to certain positions, based on the Keras masks of the `query`, `key`, `value`, and `attention_mask` tensors, and the causal mask if `use_causal_mask=True`. 
""" auto_mask = None if query_mask is not None: query_mask = ops.cast(query_mask, "bool") # defensive casting # B = batch size, T = max query length auto_mask = ops.expand_dims(query_mask, -1) # shape is [B, T, 1] if value_mask is not None: value_mask = ops.cast(value_mask, "bool") # defensive casting # B = batch size, S == max value length mask = ops.expand_dims(value_mask, -2) # shape is [B, 1, S] auto_mask = mask if auto_mask is None else auto_mask & mask if key_mask is not None: key_mask = ops.cast(key_mask, "bool") # defensive casting # B == batch size, S == max key length == max value length mask = ops.expand_dims(key_mask, -2) # shape is [B, 1, S] auto_mask = mask if auto_mask is None else auto_mask & mask if use_causal_mask: # the shape of the causal mask is [1, T, S] mask = self._compute_causal_mask(query, value) auto_mask = mask if auto_mask is None else auto_mask & mask if auto_mask is not None: # merge attention_mask & automatic mask, to shape [B, T, S] attention_mask = ( auto_mask if attention_mask is None else ops.cast(attention_mask, bool) & auto_mask ) return attention_mask def _compute_causal_mask(self, query, value=None): """Computes a causal mask (e.g., for masked self-attention layers). For example, if query and value both contain sequences of length 4, this function returns a boolean tensor equal to: ``` [[[True, False, False, False], [True, True, False, False], [True, True, True, False], [True, True, True, True]]] ``` Args: query: query tensor of shape `(B, T, ...)`. value: value tensor of shape `(B, S, ...)` (optional, defaults to query). Returns: mask: a boolean tensor of shape `(1, T, S)` containing a lower triangular matrix of shape `(T, S)`. """ q_seq_length = ops.shape(query)[1] v_seq_length = q_seq_length if value is None else ops.shape(value)[1] ones_mask = ops.ones((1, q_seq_length, v_seq_length), dtype="int32") row_index = ops.cumsum(ones_mask, axis=-2) col_index = ops.cumsum(ones_mask, axis=-1) return ops.greater_equal(row_index, col_index) def _compute_attention( self, query, key, value, attention_mask=None, training=None ): query = ops.multiply( query, 1.0 / ops.sqrt(ops.cast(self.head_dim, query.dtype)), ) # Take the dot product between "query" and "key" to get the raw # attention scores. scores = ops.einsum( self._dot_product_equation, query, key ) # (batch_dim, query_heads, target_seq_len, source_seq_len) scores = self._masked_softmax(scores, attention_mask=attention_mask) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. scores_dropout = self._dropout_layer(scores, training=training) output = ops.einsum(self._combine_equation, scores_dropout, value) return output, scores def _masked_softmax(self, scores, attention_mask=None): # Normalize the attention scores to probabilities. 
# scores = [B, N, T, S] if attention_mask is not None: # The expand dim happens starting from the `num_heads` dimension, # (<batch_dims>, num_heads, <query_attention_dims, # key_attention_dims>) mask_expansion_axis = -1 * 2 - 1 for _ in range(len(scores.shape) - len(attention_mask.shape)): attention_mask = ops.expand_dims( attention_mask, axis=mask_expansion_axis ) return self._softmax(scores, mask=attention_mask) def compute_output_shape( self, query_shape, value_shape, key_shape=None, ): if key_shape is None: key_shape = value_shape if query_shape[-1] != value_shape[-1]: raise ValueError( "The last dimension of `query_shape` and `value_shape` " f"must be equal, but are {query_shape[-1]}, {value_shape[-1]}. " "Received: query_shape={query_shape}, value_shape={value_shape}" ) if value_shape[1:-1] != key_shape[1:-1]: raise ValueError( "All dimensions of `value` and `key`, except the last one, " f"must be equal. Received: value_shape={value_shape} and " f"key_shape={key_shape}" ) return query_shape def get_config(self): config = { "head_dim": self.head_dim, "num_query_heads": self.num_query_heads, "num_key_value_heads": self.num_key_value_heads, "use_bias": self.use_bias, "dropout": self.dropout, "kernel_initializer": initializers.serialize( self.kernel_initializer ), "bias_initializer": initializers.serialize(self.bias_initializer), "kernel_regularizer": regularizers.serialize( self.kernel_regularizer ), "bias_regularizer": regularizers.serialize(self.bias_regularizer), "activity_regularizer": regularizers.serialize( self.activity_regularizer ), "kernel_constraint": constraints.serialize(self.kernel_constraint), "bias_constraint": constraints.serialize(self.bias_constraint), } base_config = super().get_config() return {**base_config, **config}
keras/keras/layers/attention/grouped_query_attention.py/0
{ "file_path": "keras/keras/layers/attention/grouped_query_attention.py", "repo_id": "keras", "token_count": 8057 }
143
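A hedged usage sketch for the layer defined in keras/keras/layers/attention/grouped_query_attention.py above, using the exported name from its `keras_export` decorator. Shapes and head counts are illustrative.

```python
import numpy as np
from keras import layers

layer = layers.GroupQueryAttention(
    head_dim=16, num_query_heads=8, num_key_value_heads=2
)
query = np.random.uniform(size=(2, 10, 32)).astype("float32")  # (batch, target_len, feature_dim)
value = np.random.uniform(size=(2, 12, 32)).astype("float32")  # (batch, source_len, feature_dim)
out = layer(query, value)  # key defaults to value
print(out.shape)  # expected: (2, 10, 32), matching the query's feature_dim
```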
import numpy as np from absl.testing import parameterized from tensorflow import data as tf_data from keras import backend from keras import layers from keras import testing class RandomRotationTest(testing.TestCase, parameterized.TestCase): @parameterized.named_parameters( ("random_rotate_neg4", -0.4), ("random_rotate_neg2", -0.2), ("random_rotate_4", 0.4), ("random_rotate_2", 0.2), ("random_rotate_tuple", (-0.2, 0.4)), ) def test_random_rotation_shapes(self, factor): self.run_layer_test( layers.RandomRotation, init_kwargs={ "factor": factor, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 4), supports_masking=False, run_training_check=False, ) def test_random_rotation_correctness(self): if backend.config.image_data_format() == "channels_last": input_shape = (1, 5, 5, 1) else: input_shape = (1, 1, 5, 5) input_image = np.reshape(np.arange(0, 25), input_shape) layer = layers.RandomRotation(factor=(0.5, 0.5)) actual_output = layer(input_image) expected_output = np.asarray( [ [24, 23, 22, 21, 20], [19, 18, 17, 16, 15], [14, 13, 12, 11, 10], [9, 8, 7, 6, 5], [4, 3, 2, 1, 0], ] ).reshape(input_shape) self.assertAllClose( backend.convert_to_tensor(expected_output), actual_output, atol=1e-5 ) def test_training_false(self): input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)) layer = layers.RandomRotation(factor=(0.5, 0.5)) actual_output = layer(input_image, training=False) self.assertAllClose(actual_output, input_image) def test_tf_data_compatibility(self): if backend.config.image_data_format() == "channels_last": input_shape = (1, 5, 5, 1) else: input_shape = (1, 1, 5, 5) input_image = np.reshape(np.arange(0, 25), input_shape) layer = layers.RandomRotation(factor=(0.5, 0.5)) ds = tf_data.Dataset.from_tensor_slices(input_image).map(layer) expected_output = np.asarray( [ [24, 23, 22, 21, 20], [19, 18, 17, 16, 15], [14, 13, 12, 11, 10], [9, 8, 7, 6, 5], [4, 3, 2, 1, 0], ] ).reshape(input_shape[1:]) for output in ds.take(1): output = output.numpy() self.assertAllClose(expected_output, output)
keras/keras/layers/preprocessing/random_rotation_test.py/0
{ "file_path": "keras/keras/layers/preprocessing/random_rotation_test.py", "repo_id": "keras", "token_count": 1388 }
144
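A sketch of the behavior checked by keras/keras/layers/preprocessing/random_rotation_test.py above: `factor=(0.5, 0.5)` pins the rotation to half a full turn (180 degrees), so the 5x5 ramp image is exactly reversed. Channels-last data format is assumed.

```python
import numpy as np
from keras import layers, ops

images = np.arange(25, dtype="float32").reshape((1, 5, 5, 1))
rotated = layers.RandomRotation(factor=(0.5, 0.5))(images)
print(ops.convert_to_numpy(rotated).reshape(25)[:5])  # [24. 23. 22. 21. 20.]
```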
import numpy as np

from keras import backend
from keras import initializers
from keras import testing
from keras.layers.rnn.conv_lstm import ConvLSTM
from keras.layers.rnn.conv_lstm import ConvLSTMCell


class ConvLSTMCellTest(testing.TestCase):
    def test_correctness(self):
        x = np.arange(150).reshape((2, 5, 5, 3)).astype("float32") / 10
        s1 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 10
        s2 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 10
        if backend.config.image_data_format() == "channels_first":
            x = x.transpose((0, 3, 1, 2))
            s1 = s1.transpose((0, 3, 1, 2))
            s2 = s2.transpose((0, 3, 1, 2))
        layer = ConvLSTMCell(
            rank=2,
            filters=4,
            kernel_size=3,
            padding="same",
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
        )
        output = layer(x, [s1, s2])
        checksum_0 = np.sum(backend.convert_to_numpy(output[0]))
        self.assertAllClose(checksum_0, 188.89502)
        checksum_1 = np.sum(backend.convert_to_numpy(output[1][0]))
        self.assertAllClose(checksum_1, 188.89502)
        checksum_2 = np.sum(backend.convert_to_numpy(output[1][1]))
        self.assertAllClose(checksum_2, 2170.444)


class ConvLSTMTest(testing.TestCase):
    def test_correctness(self):
        x = np.arange(450).reshape((2, 3, 5, 5, 3)).astype("float32") / 100
        s1 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 100
        s2 = np.arange(200).reshape((2, 5, 5, 4)).astype("float32") / 100
        if backend.config.image_data_format() == "channels_first":
            x = x.transpose((0, 1, 4, 2, 3))
            s1 = s1.transpose((0, 3, 1, 2))
            s2 = s2.transpose((0, 3, 1, 2))
        layer = ConvLSTM(
            rank=2,
            filters=4,
            kernel_size=3,
            padding="same",
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
        )
        output = layer(x, initial_state=[s1, s2])
        output = backend.convert_to_numpy(output)
        self.assertAllClose(np.sum(output), 119.812454)
keras/keras/layers/rnn/conv_lstm_test.py/0
{ "file_path": "keras/keras/layers/rnn/conv_lstm_test.py", "repo_id": "keras", "token_count": 1125 }
145
from keras import backend
from keras.api_export import keras_export
from keras.optimizers import base_optimizer

if backend.backend() == "tensorflow":
    from keras.backend.tensorflow.optimizer import TFOptimizer

    BackendOptimizer = TFOptimizer
elif backend.backend() == "torch":
    from keras.backend.torch.optimizers import TorchOptimizer

    BackendOptimizer = TorchOptimizer
elif backend.backend() == "jax":
    from keras.backend.jax.optimizer import JaxOptimizer

    BackendOptimizer = JaxOptimizer
else:
    BackendOptimizer = base_optimizer.BaseOptimizer


@keras_export(["keras.Optimizer", "keras.optimizers.Optimizer"])
class Optimizer(BackendOptimizer):
    pass


base_optimizer_keyword_args = base_optimizer.base_optimizer_keyword_args
Optimizer.__doc__ = base_optimizer.BaseOptimizer.__doc__
keras/keras/optimizers/optimizer.py/0
{ "file_path": "keras/keras/optimizers/optimizer.py", "repo_id": "keras", "token_count": 295 }
146
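A small sketch of the dispatch pattern in keras/keras/optimizers/optimizer.py above: the exported `Optimizer` picks its base class from the active backend at import time, so any concrete optimizer carries the backend-specific class in its MRO. The printed names are illustrative and depend on the installed backend.

```python
import keras

print(keras.backend.backend())  # e.g. "tensorflow", "torch", or "jax"
opt = keras.optimizers.SGD(learning_rate=1e-3)
print([cls.__name__ for cls in type(opt).__mro__])
# e.g. includes "TFOptimizer" on the TensorFlow backend, per the branches above.
```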
import inspect

from keras.api_export import keras_export
from keras.regularizers.regularizers import L1
from keras.regularizers.regularizers import L1L2
from keras.regularizers.regularizers import L2
from keras.regularizers.regularizers import OrthogonalRegularizer
from keras.regularizers.regularizers import Regularizer
from keras.saving import serialization_lib
from keras.utils.naming import to_snake_case

ALL_OBJECTS = {
    Regularizer,
    L1,
    L2,
    L1L2,
    OrthogonalRegularizer,
}

ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
    {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)


@keras_export("keras.regularizers.serialize")
def serialize(initializer):
    return serialization_lib.serialize_keras_object(initializer)


@keras_export("keras.regularizers.deserialize")
def deserialize(config, custom_objects=None):
    """Return a Keras regularizer object via its config."""
    return serialization_lib.deserialize_keras_object(
        config,
        module_objects=ALL_OBJECTS_DICT,
        custom_objects=custom_objects,
    )


@keras_export("keras.regularizers.get")
def get(identifier):
    """Retrieve a Keras regularizer object via an identifier."""
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        obj = deserialize(identifier)
    elif isinstance(identifier, str):
        obj = ALL_OBJECTS_DICT.get(identifier, None)
    else:
        obj = identifier
    if callable(obj):
        if inspect.isclass(obj):
            obj = obj()
        return obj
    else:
        raise ValueError(
            f"Could not interpret regularizer identifier: {identifier}"
        )
keras/keras/regularizers/__init__.py/0
{ "file_path": "keras/keras/regularizers/__init__.py", "repo_id": "keras", "token_count": 665 }
147
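A short sketch of the identifier forms accepted by `regularizers.get` in keras/keras/regularizers/__init__.py above: a snake_case class name, a serialized config dict, or an existing instance passed through unchanged. Values are illustrative.

```python
from keras import regularizers

r1 = regularizers.get("l2")  # snake_case string -> L2()
r2 = regularizers.get({"class_name": "L1", "config": {"l1": 0.01}})  # config dict
r3 = regularizers.get(regularizers.L1L2(l1=1e-4, l2=1e-4))  # instance passthrough
print(type(r1).__name__, type(r2).__name__, type(r3).__name__)  # L2 L1 L1L2
```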
""" Separation of concerns: DataAdapter: - x, y - sample_weight - class_weight - shuffle - batch_size - steps, as it relates to batch_size for array data EpochIterator: - whether to yield numpy or tf data - steps - most argument validation Trainer: - steps_per_execution - validation_split - validation_data - callbacks - validation_freq - epochs - initial_epoch - any backend-specific concern such as distribution PyDataset: - num_workers - use_multiprocessing - max_queue_size EpochIterator steps: 1. Look at data type and select correct DataHandler 2. Instantiate DataHandler with correct arguments 3. Raise or warn on unused arguments 4. in __iter__, iterate, either for a fixed number of steps or until there is no data """ import warnings from keras.trainers import data_adapters class EpochIterator: def __init__( self, x, y=None, sample_weight=None, batch_size=None, steps_per_epoch=None, shuffle=False, class_weight=None, steps_per_execution=1, ): self.steps_per_epoch = steps_per_epoch self.steps_per_execution = steps_per_execution if steps_per_epoch: self._current_iterator = None self._insufficient_data = False self.data_adapter = data_adapters.get_data_adapter( x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps_per_epoch, shuffle=shuffle, class_weight=class_weight, ) self._num_batches = self.data_adapter.num_batches def _get_iterator(self): return self.data_adapter.get_numpy_iterator() def enumerate_epoch(self): buffer = [] if self.steps_per_epoch: if self._current_iterator is None: self._current_iterator = iter(self._get_iterator()) self._insufficient_data = False for step in range(self.steps_per_epoch): if self._insufficient_data: break try: data = next(self._current_iterator) buffer.append(data) if len(buffer) == self.steps_per_execution: yield step - len(buffer) + 1, buffer buffer = [] except (StopIteration,): warnings.warn( "Your input ran out of data; interrupting epoch. " "Make sure that your dataset or generator can generate " "at least `steps_per_epoch * epochs` batches. " "You may need to use the `.repeat()` " "function when building your dataset.", stacklevel=2, ) self._current_iterator = None self._insufficient_data = True if buffer: yield step - len(buffer) + 1, buffer else: for step, data in enumerate(self._get_iterator()): buffer.append(data) if len(buffer) == self.steps_per_execution: yield step - len(buffer) + 1, buffer buffer = [] if buffer: yield step - len(buffer) + 1, buffer if not self._num_batches: # Infer the number of batches returned by the data_adapter. # Assumed static. self._num_batches = step + 1 self.data_adapter.on_epoch_end() @property def num_batches(self): if self.steps_per_epoch: return self.steps_per_epoch # Either copied from the data_adapter, or # inferred at the end of an iteration. return self._num_batches
keras/keras/trainers/epoch_iterator.py/0
{ "file_path": "keras/keras/trainers/epoch_iterator.py", "repo_id": "keras", "token_count": 1926 }
148
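A hedged sketch of how `EpochIterator.enumerate_epoch` in keras/keras/trainers/epoch_iterator.py above groups batches into `steps_per_execution`-sized buffers; it assumes in-memory NumPy arrays and illustrative sizes (32 samples, batch size 8 gives 4 batches, grouped in pairs).

```python
import numpy as np
from keras.trainers.epoch_iterator import EpochIterator

x = np.random.uniform(size=(32, 4)).astype("float32")
y = np.random.uniform(size=(32, 1)).astype("float32")
iterator = EpochIterator(x=x, y=y, batch_size=8, steps_per_execution=2)
for begin_step, buffer in iterator.enumerate_epoch():
    print(begin_step, len(buffer))  # expected: "0 2" then "2 2"
```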
FROM mcr.microsoft.com/vscode/devcontainers/python:3.9

COPY setup.sh /setup.sh

# Install Bazel
RUN sudo apt install wget -y
RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64
RUN chmod a+x bazelisk-linux-amd64
RUN mv bazelisk-linux-amd64 /usr/bin/bazel
tf-keras/.devcontainer/Dockerfile/0
{ "file_path": "tf-keras/.devcontainer/Dockerfile", "repo_id": "tf-keras", "token_count": 124 }
149
## Security vulnerability reports

Since Keras 2 is the high-level API of TensorFlow 2, Keras 2 follows the same security practices as TensorFlow. For details on guidelines on vulnerabilities and how to report them, you can refer to [Using TensorFlow Securely](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md).
tf-keras/SECURITY.md/0
{ "file_path": "tf-keras/SECURITY.md", "repo_id": "tf-keras", "token_count": 85 }
150
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """MobileNet v1 models for TF-Keras. MobileNet is a general architecture and can be used for multiple use cases. Depending on the use case, it can use different input layer size and different width factors. This allows different width models to reduce the number of multiply-adds and thereby reduce inference cost on mobile devices. MobileNets support any input size greater than 32 x 32, with larger image sizes offering better performance. The number of parameters and number of multiply-adds can be modified by using the `alpha` parameter, which increases/decreases the number of filters in each layer. By altering the image size and `alpha` parameter, all 16 models from the paper can be built, with ImageNet weights provided. The paper demonstrates the performance of MobileNets using `alpha` values of 1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25. For each of these `alpha` values, weights for 4 different input image sizes are provided (224, 192, 160, 128). The following table describes the size and accuracy of the 100% MobileNet on size 224 x 224: ---------------------------------------------------------------------------- Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M) -------------------------|---------------|-------------------|-------------- | 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 | | 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 | | 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 | | 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 | The following table describes the performance of the 100 % MobileNet on various input sizes: ------------------------------------------------------------------------ Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M) ----------------------|---------------|-------------------|---------------- | 1.0 MobileNet-224 | 70.6 % | 569 | 4.2 | | 1.0 MobileNet-192 | 69.1 % | 418 | 4.2 | | 1.0 MobileNet-160 | 67.2 % | 290 | 4.2 | | 1.0 MobileNet-128 | 64.4 % | 186 | 4.2 | Reference: - [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications]( https://arxiv.org/abs/1704.04861) """ import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras.applications import imagenet_utils from tf_keras.engine import training from tf_keras.layers import VersionAwareLayers from tf_keras.utils import data_utils from tf_keras.utils import layer_utils # isort: off from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import keras_export BASE_WEIGHT_PATH = ( "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/" ) layers = None @keras_export( "keras.applications.mobilenet.MobileNet", "keras.applications.MobileNet" ) def MobileNet( input_shape=None, alpha=1.0, depth_multiplier=1, dropout=1e-3, include_top=True, weights="imagenet", input_tensor=None, pooling=None, classes=1000, 
classifier_activation="softmax", **kwargs, ): """Instantiates the MobileNet architecture. Reference: - [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications]( https://arxiv.org/abs/1704.04861) This function returns a TF-Keras image classification model, optionally loaded with weights pre-trained on ImageNet. For image classification use cases, see [this page for detailed examples]( https://keras.io/api/applications/#usage-examples-for-image-classification-models). For transfer learning use cases, make sure to read the [guide to transfer learning & fine-tuning]( https://keras.io/guides/transfer_learning/). Note: each TF-Keras Application expects a specific kind of input preprocessing. For MobileNet, call `tf.keras.applications.mobilenet.preprocess_input` on your inputs before passing them to the model. `mobilenet.preprocess_input` will scale input pixels between -1 and 1. Args: input_shape: Optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `channels_last` data format) or (3, 224, 224) (with `channels_first` data format). It should have exactly 3 inputs channels, and width and height should be no smaller than 32. E.g. `(200, 200, 3)` would be one valid value. Defaults to `None`. `input_shape` will be ignored if the `input_tensor` is provided. alpha: Controls the width of the network. This is known as the width multiplier in the MobileNet paper. - If `alpha` < 1.0, proportionally decreases the number of filters in each layer. - If `alpha` > 1.0, proportionally increases the number of filters in each layer. - If `alpha` = 1, default number of filters from the paper are used at each layer. Defaults to `1.0`. depth_multiplier: Depth multiplier for depthwise convolution. This is called the resolution multiplier in the MobileNet paper. Defaults to `1.0`. dropout: Dropout rate. Defaults to `0.001`. include_top: Boolean, whether to include the fully-connected layer at the top of the network. Defaults to `True`. weights: One of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. Defaults to `imagenet`. input_tensor: Optional TF-Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. `input_tensor` is useful for sharing inputs between multiple different networks. Defaults to `None`. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` (default) means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: Optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. Defaults to `1000`. classifier_activation: A `str` or callable. The activation function to use on the "top" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the "top" layer. When loading pretrained weights, `classifier_activation` can only be `None` or `"softmax"`. **kwargs: For backwards compatibility only. Returns: A `keras.Model` instance. 
""" global layers if "layers" in kwargs: layers = kwargs.pop("layers") else: layers = VersionAwareLayers() if kwargs: raise ValueError(f"Unknown argument(s): {(kwargs,)}") if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)): raise ValueError( "The `weights` argument should be either " "`None` (random initialization), `imagenet` " "(pre-training on ImageNet), " "or the path to the weights file to be loaded. " f"Received weights={weights}" ) if weights == "imagenet" and include_top and classes != 1000: raise ValueError( 'If using `weights` as `"imagenet"` with `include_top` ' "as true, `classes` should be 1000. " f"Received classes={classes}" ) # Determine proper input shape and default size. if input_shape is None: default_size = 224 else: if backend.image_data_format() == "channels_first": rows = input_shape[1] cols = input_shape[2] else: rows = input_shape[0] cols = input_shape[1] if rows == cols and rows in [128, 160, 192, 224]: default_size = rows else: default_size = 224 input_shape = imagenet_utils.obtain_input_shape( input_shape, default_size=default_size, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights, ) if backend.image_data_format() == "channels_last": row_axis, col_axis = (0, 1) else: row_axis, col_axis = (1, 2) rows = input_shape[row_axis] cols = input_shape[col_axis] if weights == "imagenet": if depth_multiplier != 1: raise ValueError( "If imagenet weights are being loaded, " "depth multiplier must be 1. " f"Received depth_multiplier={depth_multiplier}" ) if alpha not in [0.25, 0.50, 0.75, 1.0]: raise ValueError( "If imagenet weights are being loaded, " "alpha can be one of" "`0.25`, `0.50`, `0.75` or `1.0` only. " f"Received alpha={alpha}" ) if rows != cols or rows not in [128, 160, 192, 224]: rows = 224 logging.warning( "`input_shape` is undefined or non-square, " "or `rows` is not in [128, 160, 192, 224]. " "Weights for input shape (224, 224) will be " "loaded as the default." 
) if input_tensor is None: img_input = layers.Input(shape=input_shape) else: if not backend.is_keras_tensor(input_tensor): img_input = layers.Input(tensor=input_tensor, shape=input_shape) else: img_input = input_tensor x = _conv_block(img_input, 32, alpha, strides=(2, 2)) x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1) x = _depthwise_conv_block( x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2 ) x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3) x = _depthwise_conv_block( x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4 ) x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5) x = _depthwise_conv_block( x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6 ) x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7) x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8) x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9) x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10) x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11) x = _depthwise_conv_block( x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12 ) x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13) if include_top: x = layers.GlobalAveragePooling2D(keepdims=True)(x) x = layers.Dropout(dropout, name="dropout")(x) x = layers.Conv2D(classes, (1, 1), padding="same", name="conv_preds")(x) x = layers.Reshape((classes,), name="reshape_2")(x) imagenet_utils.validate_activation(classifier_activation, weights) x = layers.Activation( activation=classifier_activation, name="predictions" )(x) else: if pooling == "avg": x = layers.GlobalAveragePooling2D()(x) elif pooling == "max": x = layers.GlobalMaxPooling2D()(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_tensor is not None: inputs = layer_utils.get_source_inputs(input_tensor) else: inputs = img_input # Create model. model = training.Model(inputs, x, name=f"mobilenet_{alpha:0.2f}_{rows}") # Load weights. if weights == "imagenet": if alpha == 1.0: alpha_text = "1_0" elif alpha == 0.75: alpha_text = "7_5" elif alpha == 0.50: alpha_text = "5_0" else: alpha_text = "2_5" if include_top: model_name = "mobilenet_%s_%d_tf.h5" % (alpha_text, rows) weight_path = BASE_WEIGHT_PATH + model_name weights_path = data_utils.get_file( model_name, weight_path, cache_subdir="models" ) else: model_name = "mobilenet_%s_%d_tf_no_top.h5" % (alpha_text, rows) weight_path = BASE_WEIGHT_PATH + model_name weights_path = data_utils.get_file( model_name, weight_path, cache_subdir="models" ) model.load_weights(weights_path) elif weights is not None: model.load_weights(weights) return model def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)): """Adds an initial convolution layer (with batch normalization and relu6). Args: inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last` data format) or (3, rows, cols) (with `channels_first` data format). It should have exactly 3 inputs channels, and width and height should be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value. filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). alpha: controls the width of the network. - If `alpha` < 1.0, proportionally decreases the number of filters in each layer. - If `alpha` > 1.0, proportionally increases the number of filters in each layer. 
- If `alpha` = 1, default number of filters from the paper are used at each layer. kernel: An integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. # Input shape 4D tensor with shape: `(samples, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, rows, cols, channels)` if data_format='channels_last'. # Output shape 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to stride. Returns: Output tensor of block. """ channel_axis = 1 if backend.image_data_format() == "channels_first" else -1 filters = int(filters * alpha) x = layers.Conv2D( filters, kernel, padding="same", use_bias=False, strides=strides, name="conv1", )(inputs) x = layers.BatchNormalization(axis=channel_axis, name="conv1_bn")(x) return layers.ReLU(6.0, name="conv1_relu")(x) def _depthwise_conv_block( inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), block_id=1, ): """Adds a depthwise convolution block. A depthwise convolution block consists of a depthwise conv, batch normalization, relu6, pointwise convolution, batch normalization and relu6 activation. Args: inputs: Input tensor of shape `(rows, cols, channels)` (with `channels_last` data format) or (channels, rows, cols) (with `channels_first` data format). pointwise_conv_filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the pointwise convolution). alpha: controls the width of the network. - If `alpha` < 1.0, proportionally decreases the number of filters in each layer. - If `alpha` > 1.0, proportionally increases the number of filters in each layer. - If `alpha` = 1, default number of filters from the paper are used at each layer. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `filters_in * depth_multiplier`. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. block_id: Integer, a unique identification designating the block number. # Input shape 4D tensor with shape: `(batch, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, rows, cols, channels)` if data_format='channels_last'. # Output shape 4D tensor with shape: `(batch, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to stride. Returns: Output tensor of block. 
""" channel_axis = 1 if backend.image_data_format() == "channels_first" else -1 pointwise_conv_filters = int(pointwise_conv_filters * alpha) if strides == (1, 1): x = inputs else: x = layers.ZeroPadding2D( ((0, 1), (0, 1)), name="conv_pad_%d" % block_id )(inputs) x = layers.DepthwiseConv2D( (3, 3), padding="same" if strides == (1, 1) else "valid", depth_multiplier=depth_multiplier, strides=strides, use_bias=False, name="conv_dw_%d" % block_id, )(x) x = layers.BatchNormalization( axis=channel_axis, name="conv_dw_%d_bn" % block_id )(x) x = layers.ReLU(6.0, name="conv_dw_%d_relu" % block_id)(x) x = layers.Conv2D( pointwise_conv_filters, (1, 1), padding="same", use_bias=False, strides=(1, 1), name="conv_pw_%d" % block_id, )(x) x = layers.BatchNormalization( axis=channel_axis, name="conv_pw_%d_bn" % block_id )(x) return layers.ReLU(6.0, name="conv_pw_%d_relu" % block_id)(x) @keras_export("keras.applications.mobilenet.preprocess_input") def preprocess_input(x, data_format=None): return imagenet_utils.preprocess_input( x, data_format=data_format, mode="tf" ) @keras_export("keras.applications.mobilenet.decode_predictions") def decode_predictions(preds, top=5): return imagenet_utils.decode_predictions(preds, top=top) preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format( mode="", ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF, error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC, ) decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
tf-keras/tf_keras/applications/mobilenet.py/0
{ "file_path": "tf-keras/tf_keras/applications/mobilenet.py", "repo_id": "tf-keras", "token_count": 8065 }
151
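A minimal usage sketch for the MobileNet entry above, using only the public `tf.keras.applications` API. The random array merely stands in for a real 224x224 RGB image and is an illustrative assumption, not part of the source file.

```python
# Hedged usage sketch for the MobileNet application defined above.
# Assumes TensorFlow with TF-Keras is installed and ImageNet weights can be
# downloaded; the random array below stands in for a real 224x224 RGB image.
import numpy as np
import tensorflow as tf

model = tf.keras.applications.MobileNet(weights="imagenet")  # alpha=1.0, 224x224

# Fake batch of one image with pixel values in [0, 255].
image = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype("float32")
inputs = tf.keras.applications.mobilenet.preprocess_input(image)  # scales to [-1, 1]

preds = model.predict(inputs)
# Top-5 (class_id, class_name, score) tuples from the ImageNet label map.
print(tf.keras.applications.mobilenet.decode_predictions(preds, top=5)[0])
```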
# TF-Keras Benchmark

This package contains benchmarks on TF-Keras models and components.
tf-keras/tf_keras/benchmarks/README.md/0
{ "file_path": "tf-keras/tf_keras/benchmarks/README.md", "repo_id": "tf-keras", "token_count": 25 }
152
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks on MLP on Reuters dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.benchmarks import benchmark_util class MLPReutersBenchmark(tf.test.Benchmark): """Benchmarks for MLP using `tf.test.Benchmark`.""" def __init__(self): super().__init__() self.max_words = 1000 (self.x_train, self.y_train), _ = keras.datasets.reuters.load_data( num_words=self.max_words ) self.num_classes = np.max(self.y_train) + 1 tokenizer = keras.preprocessing.text.Tokenizer(num_words=self.max_words) self.x_train = tokenizer.sequences_to_matrix( self.x_train, mode="binary" ) self.y_train = keras.utils.to_categorical( self.y_train, self.num_classes ) self.epochs = 5 def _build_model(self): """Model from https://github.com/keras-team/tf-keras/blob/master/ examples/reuters_mlp.py. """ model = keras.Sequential() model.add(keras.layers.Dense(512, input_shape=(self.max_words,))) model.add(keras.layers.Activation("relu")) model.add(keras.layers.Dropout(0.5)) model.add(keras.layers.Dense(self.num_classes)) model.add(keras.layers.Activation("softmax")) return model # In each benchmark test, the required arguments for the # method `measure_performance` include: # x: Input data, it could be Numpy or loaded from tfds. # y: Target data. If `x` is a dataset or generator instance, # `y` should not be specified. # loss: Loss function for model. # optimizer: Optimizer for model. # Check more details in `measure_performance()` method of # benchmark_util. 
def benchmark_mlp_reuters_bs_128(self): """Measure performance with batch_size=128.""" batch_size = 128 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=self.epochs, optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"], ) metadata = benchmark_util.get_keras_examples_metadata("mlp", batch_size) extras.update(metadata) self.report_benchmark( wall_time=wall_time, metrics=metrics, extras=extras ) def benchmark_mlp_reuters_bs_256(self): """Measure performance with batch_size=256.""" batch_size = 256 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=self.epochs, optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"], ) metadata = benchmark_util.get_keras_examples_metadata("mlp", batch_size) extras.update(metadata) self.report_benchmark( wall_time=wall_time, metrics=metrics, extras=extras ) def benchmark_mlp_reuters_bs_512(self): """Measure performance with batch_size=512.""" batch_size = 512 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, epochs=self.epochs, optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"], ) metadata = benchmark_util.get_keras_examples_metadata("mlp", batch_size) extras.update(metadata) self.report_benchmark( wall_time=wall_time, metrics=metrics, extras=extras ) def benchmark_mlp_reuters_bs_512_gpu_2(self): """Measure performance with batch_size=512, gpu=2 and distribution_strategy='mirrored' """ batch_size = 512 metrics, wall_time, extras = benchmark_util.measure_performance( self._build_model, x=self.x_train, y=self.y_train, batch_size=batch_size, num_gpus=2, distribution_strategy="mirrored", epochs=self.epochs, optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"], ) metadata = benchmark_util.get_keras_examples_metadata("mlp", batch_size) extras.update(metadata) self.report_benchmark( wall_time=wall_time, metrics=metrics, extras=extras ) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/reuters_mlp_benchmark_test.py/0
{ "file_path": "tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/reuters_mlp_benchmark_test.py", "repo_id": "tf-keras", "token_count": 2409 }
153
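A hedged sketch of the same Reuters MLP trained directly through the public Keras API, without the internal `benchmark_util.measure_performance` harness; batch size and epoch count mirror the smallest benchmark case above.

```python
# Hedged sketch: the Reuters MLP from the benchmark above, trained with the
# public Keras API instead of the internal benchmark harness.
import numpy as np
import tensorflow as tf
from tensorflow import keras

max_words = 1000
(x_train, y_train), _ = keras.datasets.reuters.load_data(num_words=max_words)
num_classes = np.max(y_train) + 1

# Same preprocessing as the benchmark: binary bag-of-words plus one-hot labels.
tokenizer = keras.preprocessing.text.Tokenizer(num_words=max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode="binary")
y_train = keras.utils.to_categorical(y_train, num_classes)

model = keras.Sequential(
    [
        keras.layers.Dense(512, activation="relu", input_shape=(max_words,)),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(num_classes, activation="softmax"),
    ]
)
model.compile(
    optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
model.fit(x_train, y_train, batch_size=128, epochs=5)
```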
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Fashion-MNIST dataset.""" import gzip import os import numpy as np from tf_keras.utils.data_utils import get_file # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.datasets.fashion_mnist.load_data") def load_data(): """Loads the Fashion-MNIST dataset. This is a dataset of 60,000 28x28 grayscale images of 10 fashion categories, along with a test set of 10,000 images. This dataset can be used as a drop-in replacement for MNIST. The classes are: | Label | Description | |:-----:|-------------| | 0 | T-shirt/top | | 1 | Trouser | | 2 | Pullover | | 3 | Dress | | 4 | Coat | | 5 | Sandal | | 6 | Shirt | | 7 | Sneaker | | 8 | Bag | | 9 | Ankle boot | Returns: Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train**: uint8 NumPy array of grayscale image data with shapes `(60000, 28, 28)`, containing the training data. **y_train**: uint8 NumPy array of labels (integers in range 0-9) with shape `(60000,)` for the training data. **x_test**: uint8 NumPy array of grayscale image data with shapes (10000, 28, 28), containing the test data. **y_test**: uint8 NumPy array of labels (integers in range 0-9) with shape `(10000,)` for the test data. Example: ```python (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() assert x_train.shape == (60000, 28, 28) assert x_test.shape == (10000, 28, 28) assert y_train.shape == (60000,) assert y_test.shape == (10000,) ``` License: The copyright for Fashion-MNIST is held by Zalando SE. Fashion-MNIST is licensed under the [MIT license]( https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE). """ dirname = os.path.join("datasets", "fashion-mnist") base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/" files = [ "train-labels-idx1-ubyte.gz", "train-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz", "t10k-images-idx3-ubyte.gz", ] paths = [] for fname in files: paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname)) with gzip.open(paths[0], "rb") as lbpath: y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8) with gzip.open(paths[1], "rb") as imgpath: x_train = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape( len(y_train), 28, 28 ) with gzip.open(paths[2], "rb") as lbpath: y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8) with gzip.open(paths[3], "rb") as imgpath: x_test = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape( len(y_test), 28, 28 ) return (x_train, y_train), (x_test, y_test)
tf-keras/tf_keras/datasets/fashion_mnist.py/0
{ "file_path": "tf-keras/tf_keras/datasets/fashion_mnist.py", "repo_id": "tf-keras", "token_count": 1469 }
154
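A short illustration of the `load_data` entry point above; the rescaling to [0, 1] is a common preprocessing choice assumed here, not something `load_data` performs itself.

```python
# Hedged illustration of the public entry point above; shapes follow the
# docstring. The [0, 1] scaling is an assumption of this sketch.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
assert x_train.shape == (60000, 28, 28) and y_test.shape == (10000,)

x_train = x_train.astype("float32") / 255.0  # uint8 pixels -> floats in [0, 1]
x_test = x_test.astype("float32") / 255.0
print(y_train[:10])  # integer labels in 0-9 (0 = "T-shirt/top", 9 = "Ankle boot")
```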
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities related to distribute coordinator. The module is used only for utils to support legacy TF1 code path involving distribute coordinator, and is not expected to change in any way. This is subject to cleanup once TF1 is no longer supported. TODO(rchao): Remove this module once TF1 is not supported. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import json import os import threading import time import tensorflow.compat.v2 as tf # isort: off from tensorflow.core.protobuf import cluster_pb2 from tensorflow.python.platform import tf_logging as logging _worker_context = threading.local() _thread_local = threading.local() def get_current_worker_context(): """Returns the current task context.""" try: return _worker_context.current except AttributeError: return None class _TaskType: PS = "ps" WORKER = "worker" CHIEF = "chief" EVALUATOR = "evaluator" CLIENT = "client" def _get_num_workers(cluster_spec): """Gets number of workers including chief.""" if not cluster_spec: return 0 return len(cluster_spec.as_dict().get(_TaskType.WORKER, [])) + len( cluster_spec.as_dict().get(_TaskType.CHIEF, []) ) class _WorkerContext: """The worker context class. This context object provides configuration information for each task. One context manager with a worker context object will be created per invocation to the `worker_fn` where `get_current_worker_context` can be called to access the worker context object. """ def __init__( self, strategy, cluster_spec, task_type, task_id, session_config=None, rpc_layer="grpc", worker_barrier=None, ): """Initialize the worker context object. Args: strategy: a `DistributionStrategy` object. cluster_spec: a ClusterSpec object. It can be empty or None in the local training case. task_type: a string indicating the role of the corresponding task, such as "worker" or "ps". It can be None if it is local training or in-graph replicated training. task_id: an integer indicating id of the corresponding task. It can be None if it is local training or in-graph replicated training. session_config: an optional `tf.compat.v1.ConfigProto` object. rpc_layer: optional string specifying the RPC protocol for communication with worker masters. If None or empty, hosts in the `cluster_spec` will be used directly. worker_barrier: optional, the barrier object for worker synchronization. 
""" self._strategy = strategy self._cluster_spec = cluster_spec self._task_type = task_type self._task_id = task_id self._session_config = session_config self._worker_barrier = worker_barrier self._rpc_layer = rpc_layer self._master_target = self._get_master_target() self._num_workers = _get_num_workers(cluster_spec) self._is_chief_node = self._is_chief() def _debug_message(self): if self._cluster_spec: return "[cluster_spec: %r, task_type: %r, task_id: %r]" % ( self._cluster_spec, self.task_type, self.task_id, ) else: return "[local]" def __enter__(self): old_context = get_current_worker_context() if old_context: raise ValueError( "You cannot run distribute coordinator in a `worker_fn`.\t" + self._debug_message() ) _worker_context.current = self def __exit__( self, unused_exception_type, unused_exception_value, unused_traceback ): _worker_context.current = None def _get_master_target(self): """Return the master target for a task.""" # If cluster_spec is None or empty, we use local master. if not self._cluster_spec or self._task_type == _TaskType.EVALUATOR: return "" # If task_type is None, then it is in-graph replicated training. In this # case we use the chief or first worker's master target. if not self._task_type: if _TaskType.CHIEF in self._cluster_spec.jobs: task_type = _TaskType.CHIEF task_id = 0 else: assert _TaskType.WORKER in self._cluster_spec.jobs task_type = _TaskType.WORKER task_id = 0 else: task_type = self._task_type task_id = self._task_id prefix = "" if self._rpc_layer: prefix = self._rpc_layer + "://" return prefix + self._cluster_spec.job_tasks(task_type)[task_id or 0] def _is_chief(self): """Return whether the task is the chief worker.""" if not self._cluster_spec or self._task_type in [ _TaskType.CHIEF, _TaskType.EVALUATOR, None, ]: return True # If not local and chief not in the cluster_spec, use the first worker # as chief. if ( _TaskType.CHIEF not in self._cluster_spec.jobs and self._task_type == _TaskType.WORKER and self._task_id == 0 ): return True return False def wait_for_other_workers(self): """Waits for other workers to reach the same call to this method. Raises: ValueError: if `worker_barrier` is not passed to the __init__ method. """ if not self._worker_barrier: # TODO(yuefengz): we should throw an error in independent worker # mode. return self._worker_barrier.wait() def session_creator( self, scaffold=None, config=None, checkpoint_dir=None, checkpoint_filename_with_path=None, max_wait_secs=7200, ): """Returns a session creator. The returned session creator will be configured with the correct master target and session configs. It will also run either init ops or ready ops by querying the `strategy` object when `create_session` is called on it. Args: scaffold: A `Scaffold` used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. config: `ConfigProto` proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. checkpoint_filename_with_path: Full file name path to the checkpoint file. Only one of `checkpoint_dir` or `checkpoint_filename_with_path` can be specified. max_wait_secs: Maximum time to wait for the session to become available. Returns: a descendant of SessionCreator. 
""" if config: session_config = copy.deepcopy(config) session_config.MergeFrom(self._session_config) else: session_config = self._session_config if ( not self._strategy or self._strategy.extended.experimental_should_init ): logging.info( "Creating chief session creator with config: %r", config ) return tf.compat.v1.train.ChiefSessionCreator( scaffold, master=self.master_target, config=session_config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path, ) else: logging.info( "Creating worker session creator with config: %r", config ) return tf.compat.v1.train.WorkerSessionCreator( scaffold, master=self.master_target, config=session_config, max_wait_secs=max_wait_secs, ) @property def session_config(self): return copy.deepcopy(self._session_config) @property def has_barrier(self): """Whether the barrier is set or not.""" return self._worker_barrier is not None @property def distributed_mode(self): """Whether it is distributed training or not.""" return ( bool(self._cluster_spec) and self._task_type != _TaskType.EVALUATOR ) @property def cluster_spec(self): """Returns a copy of the cluster_spec object.""" return copy.deepcopy(self._cluster_spec) @property def task_type(self): """Returns the role of the corresponding task.""" return self._task_type @property def task_id(self): """Returns the id or index of the corresponding task.""" return self._task_id @property def master_target(self): """Returns the session master for the corresponding task to connect to.""" return self._master_target @property def is_chief(self): """Returns whether the task is a chief node.""" return self._is_chief_node @property def num_workers(self): """Returns number of workers in the cluster, including chief.""" return self._num_workers @property def experimental_should_init(self): """Whether to run init ops.""" return self._strategy.extended.experimental_should_init @property def should_checkpoint(self): """Whether to save checkpoint.""" return self._strategy.extended.should_checkpoint @property def should_save_summary(self): """Whether to save summaries.""" return self._strategy.extended.should_save_summary def _run_single_worker( worker_fn, strategy, cluster_spec, task_type, task_id, session_config, rpc_layer="", worker_barrier=None, coord=None, ): """Runs a single worker by calling `worker_fn` under context.""" session_config = copy.deepcopy(session_config) strategy = copy.deepcopy(strategy) # If there is an EVALUATOR task, we run single-machine eval on that task. if task_type == _TaskType.EVALUATOR: # It is possible to not have a strategy object for EVALUATOR task. if strategy: strategy.configure(session_config) else: assert strategy strategy.configure(session_config, cluster_spec, task_type, task_id) context = _WorkerContext( strategy, cluster_spec, task_type, task_id, session_config=session_config, rpc_layer=rpc_layer, worker_barrier=worker_barrier, ) with context: if coord: with coord.stop_on_exception(): return worker_fn(strategy) else: return worker_fn(strategy) def _split_cluster_for_evaluator(cluster_spec, task_type): """Split the cluster for evaluator since it needn't talk to other tasks.""" # Splitting the cluster is important to prevent the evaluator from talking # to other tasks in the cluster. Since we allow evaluator not to use # distribution strategies and as a result ops in the evaluator task may have # unspecified devices. Those ops may end up on other tasks if we don't split # the cluster. 
# Note: if you bypass distribute coordinator and bring the cluster yourself, # you can equivalently set device filters to split clusters. This is already # done by distribution strategy's `update_config_proto` method. new_cluster_spec = normalize_cluster_spec(cluster_spec).as_dict() if task_type == _TaskType.EVALUATOR: assert _TaskType.EVALUATOR in new_cluster_spec new_cluster_spec = { _TaskType.EVALUATOR: new_cluster_spec[_TaskType.EVALUATOR] } else: new_cluster_spec.pop(_TaskType.EVALUATOR, None) return normalize_cluster_spec(new_cluster_spec) def _run_std_server( cluster_spec=None, task_type=None, task_id=None, session_config=None, rpc_layer=None, environment=None, ): """Runs a standard server.""" # Check if the Server is already running. If so, assert that no # configuration options have changed, and return the existing Server. This # allows us to call `run_distribute_coordinator` multiple times. if getattr(_thread_local, "server", None) is not None: assert _thread_local.cluster_spec == cluster_spec assert _thread_local.task_type == task_type assert _thread_local.task_id == task_id assert _thread_local.session_config_str == repr(session_config) assert _thread_local.rpc_layer == rpc_layer assert _thread_local.environment == environment return _thread_local.server else: # This method is not thread-safe. _thread_local.server_started = True _thread_local.cluster_spec = cluster_spec _thread_local.task_type = task_type _thread_local.task_id = task_id _thread_local.session_config_str = repr(session_config) _thread_local.rpc_layer = rpc_layer _thread_local.environment = environment assert cluster_spec target = cluster_spec.task_address(task_type, task_id) if rpc_layer: target = rpc_layer + "://" + target class _FakeServer: """A fake server that runs a master session.""" def start(self): # A tensorflow server starts when a remote session is created. logging.info( "Creating a remote session to start a TensorFlow server, " "target = %r, session_config=%r", target, session_config, ) tf.compat.v1.Session(target=target, config=session_config) def join(self): while True: time.sleep(5) if environment == "google": server = _FakeServer() else: if session_config: logging.info( "Starting standard TensorFlow server, target = %r, " "session_config = %r", target, session_config, ) else: logging.info( "Starting standard TensorFlow server, target = %r", target ) cluster_spec = _split_cluster_for_evaluator(cluster_spec, task_type) server = tf.distribute.Server( cluster_spec, job_name=task_type, task_index=task_id, config=session_config, protocol=rpc_layer, ) server.start() _thread_local.server = server return server def _configure_session_config_for_std_servers( strategy, eval_strategy, session_config, cluster_spec, task_type, task_id ): """Call strategy's `configure` to mutate the session_config. The session_config is currently needed as default config for a TensorFlow server. In the future, we should be able to remove this method and only pass the session config to a client session. """ if task_type == _TaskType.EVALUATOR: if eval_strategy: eval_strategy.configure(session_config=session_config) else: # The strategy may be shared in standalone client mode. strategy = copy.deepcopy(strategy) strategy.configure( session_config=session_config, cluster_spec=cluster_spec, task_type=task_type, task_id=task_id, ) # Remove the device filters specific to the strategy, so that the # TensorFlow server brought up with one strategy can be used by other # strategies. The device filters can be set in the client side as well. 
del session_config.device_filters[:] # TODO(yuefengz): propagate cluster_spec in the STANDALONE_CLIENT mode. # TODO(yuefengz): we may need a smart way to figure out whether the current task # is the special task when we support cluster_spec propagation. def run_distribute_coordinator( worker_fn, strategy, eval_fn=None, eval_strategy=None, cluster_spec=None, task_type=None, task_id=None, session_config=None, rpc_layer="grpc", ): """Runs the coordinator for distributed TensorFlow. This function runs a split coordinator for distributed TensorFlow in its default mode, i.e the STANDALONE_CLIENT mode. Given a `cluster_spec` specifying server addresses and their roles in a cluster, this coordinator will figure out how to set them up, give the underlying function the right targets for master sessions via a scope object and coordinate their training. The cluster consisting of standard servers needs to be brought up either with the standard server binary or with a binary running distribute coordinator with `task_type` set to non-client type which will then turn into standard servers. In addition to be the distribute coordinator, this is also the source of configurations for each job in the distributed training. As there are multiple ways to configure a distributed TensorFlow cluster, its context object provides these configurations so that users or higher-level APIs don't have to figure out the configuration for each job by themselves. In the between-graph replicated training, this coordinator will create multiple threads and each calls the `worker_fn` which is supposed to create its own graph and connect to one worker master given by its context object. In the in-graph replicated training, it has only one thread calling this `worker_fn`. Another mode is the INDEPENDENT_WORKER mode where each server runs a distribute coordinator which will start a standard server and optionally runs `worker_fn` depending whether it is between-graph training or in-graph replicated training. The `strategy` object is expected to be a DistributionStrategy object which has implemented methods needed by distributed coordinator such as `configure(session_config, cluster_spec, task_type, task_id)` which configures the strategy object for a specific task and `experimental_should_init` property which instructs the distribute coordinator whether to run init ops for a task. The distribute coordinator will make a copy of the `strategy` object, call its `configure` method and pass it to `worker_fn` as an argument. The `worker_fn` defines the training logic and is called under its own worker context which can be accessed to via `get_current_worker_context`. A worker context provides access to configurations for each task, e.g. the task_type, task_id, master target and so on. Since `worker_fn` will be called in a thread and possibly multiple times, caller should be careful when it accesses global data. For example, it is unsafe to define flags in a `worker_fn` or to define different environment variables for different `worker_fn`s. The `worker_fn` for the between-graph replication is defined as if there is only one worker corresponding to the `worker_fn` and possibly ps jobs. For example, when training with parameter servers, it assigns variables to parameter servers and all other operations to that worker. In the in-graph replication case, the `worker_fn` has to define operations for all worker jobs. 
Using a distribution strategy can simplify the `worker_fn` by not having to worry about the replication and device assignment of variables and operations. This method is intended to be invoked by high-level APIs so that users don't have to explicitly call it to run this coordinator. For those who don't use high-level APIs, to change a program to use this coordinator, wrap everything in a the program after global data definitions such as commandline flag definition into the `worker_fn` and get task-specific configurations from the worker context. The `cluster_spec` can be either passed by the argument or parsed from the "TF_CONFIG" environment variable. Example of a TF_CONFIG: ``` cluster = {'chief': ['host0:2222'], 'ps': ['host1:2222', 'host2:2222'], 'worker': ['host3:2222', 'host4:2222', 'host5:2222']} os.environ['TF_CONFIG'] = json.dumps({'cluster': cluster}) ``` If `cluster_spec` is not given in any format, it becomes local training and this coordinator will connect to a local session. For evaluation, if "evaluator" exists in the cluster_spec, a separate thread will be created to call `eval_fn` with its `task_type` set to "evaluator". If `eval_fn` is not defined, fall back to `worker_fn`. This implies that evaluation will be done on a single machine if there is an "evaluator" task. If "evaluator" doesn't exist in the cluster_spec, it entirely depends on the `worker_fn` for how to do evaluation. Args: worker_fn: the function to be called. The function should accept a `strategy` object and will be given access to a context object via a context manager scope. strategy: a DistributionStrategy object specifying whether it should run between-graph replicated training or not, whether to run init ops, etc. This object will also be configured given `session_config`, `cluster_spec`, `task_type` and `task_id`. eval_fn: optional function for "evaluator" task. If `eval_fn` is not passed in but a "evaluator" task is found in the `cluster_spec`, the `worker_fn` will be used for this task. eval_strategy: optional DistributionStrategy object for "evaluator" task. cluster_spec: a dict, ClusterDef or ClusterSpec specifying servers and roles in a cluster. If not set or empty, fall back to local training. task_type: the current task type, optional if this is a client. task_id: the current task id, optional if this is a client. session_config: an optional `tf.compat.v1.ConfigProto` object which will be passed to `strategy`'s `configure` method and used to create a session. rpc_layer: optional string, the protocol for RPC, e.g. "grpc". Raises: ValueError: if `cluster_spec` is supplied but not a dict or a ClusterDef or a ClusterSpec. Returns: In the client job, return the value returned by `worker_fn` if it is in-graph replication or INDEPENDENT_WORKER mode; return None otherwise. """ tf_config = json.loads(os.environ.get("TF_CONFIG", "{}")) rpc_layer = tf_config.get("rpc_layer", rpc_layer) environment = tf_config.get("environment", None) if not cluster_spec: cluster_spec = tf_config.get("cluster", {}) task_env = tf_config.get("task", {}) if task_env: task_type = task_env.get("type", task_type) task_id = int(task_env.get("index", task_id)) if cluster_spec: # TODO(yuefengz): validate cluster_spec. 
cluster_spec = normalize_cluster_spec(cluster_spec) elif hasattr(strategy.extended, "_cluster_resolver"): cluster_resolver = strategy.extended._cluster_resolver task_type = cluster_resolver.task_type task_id = cluster_resolver.task_id rpc_layer = cluster_resolver.rpc_layer or rpc_layer environment = cluster_resolver.environment cluster_spec = cluster_resolver.cluster_spec() # Setting the session config is necessary for some strategies such as # CollectiveAllReduceStrategy. session_config = session_config or tf.compat.v1.ConfigProto( allow_soft_placement=True ) if cluster_spec: logging.info( "Running Distribute Coordinator with cluster_spec = %r, " "task_type = %r, task_id = %r, environment = %r, rpc_layer = %r", cluster_spec.as_dict(), task_type, task_id, environment, rpc_layer, ) if not cluster_spec: # `mode` is ignored in the local case. logging.info("Running local Distribute Coordinator.") _run_single_worker( worker_fn, strategy, None, None, None, session_config, rpc_layer ) if eval_fn: _run_single_worker( eval_fn, eval_strategy, None, None, None, session_config, rpc_layer, ) else: logging.warning( "Skipped evaluation since `eval_fn` is not passed in." ) else: if not eval_fn: logging.warning( "`eval_fn` is not passed in. The `worker_fn` will be " 'used if an "evaluator" task exists in the cluster.' ) eval_fn = eval_fn or worker_fn if not eval_strategy: logging.warning( "`eval_strategy` is not passed in. No distribution " "strategy will be used for evaluation." ) # Every one starts a standard server, get session config from # `configure` method. _configure_session_config_for_std_servers( strategy, eval_strategy, session_config, cluster_spec, task_type, task_id, ) if task_type != _TaskType.EVALUATOR and not getattr( strategy.extended, "_std_server_started", False ): # Right now, with eager mode, context is configured with a std # server at the very beginning while with graph mode the std server # is started when distribute coordinator is called. We should # consolidate these two paths. server = _run_std_server( cluster_spec=cluster_spec, task_type=task_type, task_id=task_id, session_config=session_config, rpc_layer=rpc_layer, environment=environment, ) if task_type in [_TaskType.CHIEF, _TaskType.WORKER]: if strategy.extended.experimental_between_graph: # All jobs run `worker_fn` if between-graph. return _run_single_worker( worker_fn, strategy, cluster_spec, task_type, task_id, session_config, rpc_layer, ) else: # Only one node runs `worker_fn` if in-graph. context = _WorkerContext( strategy, cluster_spec, task_type, task_id ) if context.is_chief: return _run_single_worker( worker_fn, strategy, cluster_spec, None, None, session_config, rpc_layer, ) else: server.join() elif task_type == _TaskType.EVALUATOR: return _run_single_worker( eval_fn, eval_strategy, cluster_spec, task_type, task_id, session_config, rpc_layer, ) else: if task_type != _TaskType.PS: raise ValueError(f"Unexpected task_type: {task_type!r}") server.join() def normalize_cluster_spec(cluster_spec): """Makes `cluster_spec` into a `ClusterSpec` object. Args: cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. Returns: a `ClusterSpec` object. Raises: ValueError: if `cluster_spec` is not a dict or a `ClusterSpec` or a `ClusterDef`. 
""" if isinstance(cluster_spec, (dict, cluster_pb2.ClusterDef)): return tf.train.ClusterSpec(cluster_spec) elif not isinstance(cluster_spec, tf.train.ClusterSpec): raise ValueError( "`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a " "`tf.train.ClusterDef` object" ) return cluster_spec
tf-keras/tf_keras/distribute/distribute_coordinator_utils.py/0
{ "file_path": "tf-keras/tf_keras/distribute/distribute_coordinator_utils.py", "repo_id": "tf-keras", "token_count": 11941 }
155
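A hedged sketch of the cluster layout the coordinator module above expects. The `TF_CONFIG` shape follows its docstring, `tf.train.ClusterSpec` is the public type that `normalize_cluster_spec` returns, and the host names are placeholders.

```python
# Hedged sketch of a cluster definition for the distribute coordinator above.
# Host addresses are placeholders, not real endpoints.
import json
import os

import tensorflow as tf

cluster = {
    "chief": ["host0:2222"],
    "ps": ["host1:2222", "host2:2222"],
    "worker": ["host3:2222", "host4:2222", "host5:2222"],
}
# Each task reads its role from TF_CONFIG when no cluster_spec is passed in.
os.environ["TF_CONFIG"] = json.dumps(
    {"cluster": cluster, "task": {"type": "worker", "index": 0}}
)

spec = tf.train.ClusterSpec(cluster)  # what normalize_cluster_spec(cluster) yields
print(spec.jobs)                       # job names, e.g. ['chief', 'ps', 'worker']
print(spec.task_address("worker", 0))  # 'host3:2222'
```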
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for saving and loading using keras save/load APIs with DS.""" import tensorflow.compat.v2 as tf from tf_keras.distribute import saved_model_test_base as test_base from tf_keras.saving.legacy import save from tf_keras.testing_infra import test_utils @test_utils.run_all_without_tensor_float_32( "Uses Dense layers, which call matmul" ) class KerasSaveLoadTest(test_base.TestSavedModelBase): def setUp(self): self._root_dir = "keras_save_load" super().setUp() def _save_model(self, model, saved_dir): model.save(saved_dir, save_format="tf") def _load_and_run_model( self, distribution, saved_dir, predict_dataset, output_name="output_1" ): restored_keras_model = save.load_model(saved_dir) return restored_keras_model.predict( predict_dataset, steps=test_base.PREDICT_STEPS ) @tf.__internal__.distribute.combinations.generate( test_base.simple_models_with_strategies() ) def test_save_no_strategy_restore_strategy( self, model_and_input, distribution ): self.run_test_save_no_strategy_restore_strategy( model_and_input, distribution ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( test_base.simple_models_with_strategies(), tf.__internal__.test.combinations.combine( save_in_scope=[True, False] ), ) ) def test_save_strategy_restore_no_strategy( self, model_and_input, distribution, save_in_scope ): self.run_test_save_strategy_restore_no_strategy( model_and_input, distribution, save_in_scope ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( test_base.simple_models_with_strategy_pairs(), tf.__internal__.test.combinations.combine( save_in_scope=[True, False] ), ) ) def test_save_strategy_restore_strategy( self, model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope, ): self.run_test_save_strategy_restore_strategy( model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope, ) if __name__ == "__main__": tf.compat.v1.enable_eager_execution() tf.test.main()
tf-keras/tf_keras/distribute/keras_save_load_test.py/0
{ "file_path": "tf-keras/tf_keras/distribute/keras_save_load_test.py", "repo_id": "tf-keras", "token_count": 1306 }
156
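A hedged sketch of the round trip `KerasSaveLoadTest` exercises: save a model built under a `tf.distribute` strategy in the TF SavedModel format, then restore it with the plain Keras loader. The path and layer sizes are illustrative assumptions.

```python
# Hedged sketch of the save-under-strategy / restore-without-strategy round
# trip tested above. Path and layer sizes are illustrative only.
import numpy as np
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = tf.keras.Sequential(
        [
            tf.keras.layers.Dense(4, activation="relu", input_shape=(3,)),
            tf.keras.layers.Dense(1),
        ]
    )
    model.compile(optimizer="adam", loss="mse")

model.save("/tmp/saved_keras_model", save_format="tf")  # TF SavedModel format

# Restoring needs no strategy scope; predictions run on the default devices.
restored = tf.keras.models.load_model("/tmp/saved_keras_model")
print(restored.predict(np.ones((2, 3), dtype="float32")))
```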
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for saving and loading using tf's saved_model APIs with DS.""" import os import tensorflow.compat.v2 as tf from tf_keras.distribute import model_combinations from tf_keras.distribute import saved_model_test_base as test_base from tf_keras.testing_infra import test_utils @test_utils.run_v2_only @test_utils.run_all_without_tensor_float_32( "Uses Dense layers, which call matmul" ) class SavedModelKerasModelTest(test_base.TestSavedModelBase): def setUp(self): self._root_dir = "saved_model_save_load" super().setUp() def _save_model(self, model, saved_dir): tf.saved_model.save(model, saved_dir) def _load_and_run_model( self, distribution, saved_dir, predict_dataset, output_name="output_1" ): return test_base.load_and_run_with_saved_model_api( distribution, saved_dir, predict_dataset, output_name ) @tf.__internal__.distribute.combinations.generate( test_base.simple_models_with_strategies() ) def test_save_no_strategy_restore_strategy( self, model_and_input, distribution ): self.run_test_save_no_strategy_restore_strategy( model_and_input, distribution ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( test_base.simple_models_with_strategies(), tf.__internal__.test.combinations.combine( save_in_scope=[True, False] ), ) ) def test_save_strategy_restore_no_strategy( self, model_and_input, distribution, save_in_scope ): self.run_test_save_strategy_restore_no_strategy( model_and_input, distribution, save_in_scope ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( test_base.simple_models_with_strategy_pairs(), tf.__internal__.test.combinations.combine( save_in_scope=[True, False] ), ) ) def test_save_strategy_restore_strategy( self, model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope, ): self.run_test_save_strategy_restore_strategy( model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope, ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( test_base.simple_models_with_strategies(), tf.__internal__.test.combinations.combine( save_in_scope=[True, False] ), ) ) def test_no_variable_device_placement( self, model_and_input, distribution, save_in_scope ): saved_dir = self.run_test_save_strategy( model_and_input, distribution, save_in_scope ) func = tf.saved_model.load(saved_dir) concrete_function = func.signatures[test_base._DEFAULT_FUNCTION_KEY] for f in concrete_function.graph.as_graph_def().library.function: for n in f.node_def: if n.op == "ReadVariableOp": self.assertEmpty(n.device) @test_utils.run_v2_only class SavedModelTFModuleTest(test_base.TestSavedModelBase): def setUp(self): self._root_dir = "saved_model_save_load" super().setUp() def _train_model(self, model, x_train, y_train, batch_size): pass def _predict_with_model(self, 
distribution, model, predict_dataset): if distribution: dist_predict_dataset = distribution.experimental_distribute_dataset( predict_dataset ) per_replica_predict_data = next(iter(dist_predict_dataset)) result = distribution.run(model, args=(per_replica_predict_data,)) # Convert the per_replica value to a list, then concatenate them reduced = distribution.experimental_local_results(result) concat = tf.concat(reduced, 0) return concat else: return model(next(iter(predict_dataset))) def _save_model(self, model, saved_dir): call = model.__call__.get_concrete_function(tf.TensorSpec(None)) tf.saved_model.save(model, saved_dir, signatures=call) def _load_and_run_model( self, distribution, saved_dir, predict_dataset, output_name="output_1" ): del output_name model = tf.saved_model.load(saved_dir) return self._predict_with_model(distribution, model, predict_dataset) @tf.__internal__.distribute.combinations.generate( test_base.tfmodule_models_with_strategies() ) def test_save_no_strategy_restore_strategy( self, model_and_input, distribution ): self.run_test_save_no_strategy_restore_strategy( model_and_input, distribution ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( test_base.tfmodule_models_with_strategies(), tf.__internal__.test.combinations.combine( save_in_scope=[True, False] ), ) ) def test_save_strategy_restore_no_strategy( self, model_and_input, distribution, save_in_scope ): self.run_test_save_strategy_restore_no_strategy( model_and_input, distribution, save_in_scope ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( test_base.tfmodule_models_with_strategy_pairs(), tf.__internal__.test.combinations.combine( save_in_scope=[True, False] ), ) ) def test_save_strategy_restore_strategy( self, model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope, ): self.run_test_save_strategy_restore_strategy( model_and_input, distribution_for_saving, distribution_for_restoring, save_in_scope, ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( model_and_input=[model_combinations.simple_tfmodule_model], distribution=test_base.strategies + [tf.__internal__.distribute.combinations.cloud_tpu_strategy], ) ) def test_save_load_io_device(self, model_and_input, distribution): saved_dir = os.path.join(self.get_temp_dir(), "io_device") with distribution.scope(): model = model_and_input.get_model() x_train, y_train, _ = model_and_input.get_data() batch_size = model_and_input.get_batch_size() self._train_model(model, x_train, y_train, batch_size) call = model.__call__.get_concrete_function(tf.TensorSpec(None)) save_options = tf.saved_model.SaveOptions( experimental_io_device="/job:localhost" ) tf.saved_model.save( model, saved_dir, signatures=call, options=save_options ) load_options = tf.saved_model.LoadOptions( experimental_io_device="/job:localhost" ) # Check that the model can be loaded and training continued without # error. with distribution.scope(): loaded_model = tf.saved_model.load(saved_dir, options=load_options) self._train_model(loaded_model, x_train, y_train, batch_size) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/distribute/saved_model_save_load_test.py/0
{ "file_path": "tf-keras/tf_keras/distribute/saved_model_save_load_test.py", "repo_id": "tf-keras", "token_count": 3674 }
157
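A hedged sketch of the `tf.Module` path covered by `SavedModelTFModuleTest`: export a concrete function with `tf.saved_model.save`, reload it, and run it per replica with `strategy.run`. The module class, path, and shapes are illustrative assumptions.

```python
# Hedged sketch of exporting and reloading a tf.Module, then running the
# restored callable per replica, mirroring the test above. Names are made up.
import tensorflow as tf


class Scale(tf.Module):
    def __init__(self):
        super().__init__()
        self.w = tf.Variable(2.0)

    @tf.function
    def __call__(self, x):
        return self.w * x


module = Scale()
call = module.__call__.get_concrete_function(tf.TensorSpec([None], tf.float32))
tf.saved_model.save(module, "/tmp/saved_tf_module", signatures=call)

loaded = tf.saved_model.load("/tmp/saved_tf_module")
strategy = tf.distribute.MirroredStrategy()
dataset = tf.data.Dataset.from_tensor_slices(tf.ones([4], tf.float32)).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(dataset)

# Run the restored module on one distributed batch and collect local results.
per_replica = strategy.run(loaded, args=(next(iter(dist_dataset)),))
print(strategy.experimental_local_results(per_replica))
```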
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Lazily initialized variables, useful for creating symbolic TF-Keras model.""" import threading # isort: off from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.framework import tensor from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.trackable import base as trackable from tensorflow.python.util import compat from tensorflow.python.util import tf_contextlib _DISABLE_LAZY_VARIABLE_INIT = threading.local() def _infer_shape_dtype_and_create_handle(initial_value, shape, dtype, name): """Infer shape and dtype from initial_value and create a variable handle.""" with ops.name_scope(name, "Variable", skip_on_eager=False) as name: handle_name = ops.name_from_scope_name(name) unique_id = "%s_%d" % (handle_name, ops.uid()) # Use attr_scope and device(None) to simulate the behavior of # colocate_with when the variable we want to colocate with doesn't # yet exist. device_context_manager = ops.NullContextmanager attr = attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue( s=[compat.as_bytes(f"loc:@{handle_name}")] ) ) with ops.get_default_graph()._attr_scope({"_class": attr}): with ops.name_scope("Initializer"), device_context_manager(None): if not callable(initial_value): if isinstance( initial_value, trackable.CheckpointInitialValue ): raise NotImplementedError( "CheckpointInitialValue is not supported to be the " "initial value of a lazy variable." ) initial_value = ops.convert_to_tensor( initial_value, name="initial_value", dtype=dtype ) assert not callable(initial_value) assert initial_value.shape.is_compatible_with(shape) dtype = dtype or initial_value.dtype.base_dtype shape = shape or initial_value.shape assert dtype assert shape handle = ( resource_variable_ops._variable_handle_from_shape_and_dtype( shape=shape, dtype=dtype, shared_name=None, # Never shared name=name, graph_mode=False, initial_value=None, ) ) # initial_value=initial_value if not callable(initial_value) else # None) return initial_value, shape, dtype, handle, handle_name, unique_id class LazyInitVariable(resource_variable_ops.BaseResourceVariable): """Lazily initialized variables. The major use case for this class is to serve as a memory efficient alternative for tf.Variable. The resource handle of this class is point to nothing, which mean it will raise error when its value is fetched in a eager context. Having said that, it will perform like a normal tf.Variable when using with graph tensor, like KerasTensor produced from tf.keras.Input. 
""" def __init__( self, initial_value=None, trainable=None, collections=None, validate_shape=True, caching_device=None, name=None, dtype=None, variable_def=None, import_scope=None, constraint=None, distribute_strategy=None, synchronization=None, aggregation=None, shape=None, **kwargs, ): assert context.executing_eagerly() # To simplify the logic assert variable_def is None # Not supported yet. assert caching_device is None # Not supported yet if initial_value is None: raise ValueError( "The `initial_value` arg to `tf.Variable` must " "be specified except when you are not providing a " "`variable_def`. You provided neither." ) if ( isinstance(initial_value, tensor.Tensor) and hasattr(initial_value, "graph") and initial_value.graph.building_function ): raise ValueError( f"Argument `initial_value` ({initial_value}) could not " "be lifted out of a `tf.function`. " f"(Tried to create variable with name='{name}'). " "To avoid this error, when constructing `tf.Variable`s " "inside of `tf.function` you can create the " "`initial_value` tensor in a " "`tf.init_scope` or pass a callable `initial_value` " "(e.g., `tf.Variable(lambda : " "tf.truncated_normal([10, 40]))`). " "Please file a feature request if this " "restriction inconveniences you." ) if constraint is not None and not callable(constraint): raise ValueError( "Argument `constraint` must be None or a callable. " f"a callable. Got a {type(constraint)}: {constraint}" ) self._name = name ( initial_value, shape, dtype, handle, handle_name, unique_id, ) = _infer_shape_dtype_and_create_handle( initial_value, shape, dtype, name ) super().__init__( distribute_strategy=distribute_strategy, initial_value=initial_value, shape=shape, dtype=dtype, name=name, unique_id=unique_id, handle_name=handle_name, constraint=constraint, handle=handle, graph_element=None, trainable=trainable, synchronization=synchronization, aggregation=aggregation, in_graph_mode=False, ) # TODO(scottzhu): This method and create_and_initialize might be removed if # we decide to just use the tf.Variable to replace this class. def initialize(self): with ops.name_scope(self._name, "Variable", skip_on_eager=False): with ops.colocate_with(self._handle), ops.name_scope("Initializer"): if callable(self._initial_value): initial_value = self._initial_value() else: initial_value = self._initial_value if not initial_value.shape.is_compatible_with(self._shape): raise ValueError( "In this `tf.Variable` creation, the initial value's " f"shape ({initial_value.shape}) is not compatible with " "the explicitly supplied `shape` " f"argument ({self._shape})." 
) assert self._dtype is initial_value.dtype.base_dtype gen_resource_variable_ops.assign_variable_op( self._handle, initial_value ) def create_and_initialize(self): if callable(self._initial_value): initial_value = self._initial_value() with ops.device(initial_value.device): ( initial_value, shape, dtype, handle, handle_name, unique_id, ) = _infer_shape_dtype_and_create_handle( initial_value, self._shape, self._dtype, self._name ) self.initialize() super().__init__( trainable=self._trainable, shape=shape, dtype=dtype, handle=handle, synchronization=self._synchronization, constraint=self._constraint, aggregation=self._aggregation, distribute_strategy=self._distribute_strategy, name=self._name, unique_id=unique_id, handle_name=handle_name, graph_element=None, initial_value=initial_value, initializer_op=None, is_initialized_op=None, cached_value=None, caching_device=None, ) def _lazy_init_variable_creator(next_creator, **kwargs): if getattr(_DISABLE_LAZY_VARIABLE_INIT, "disabled", False): return next_creator(**kwargs) else: return LazyInitVariable(**kwargs) @tf_contextlib.contextmanager def lazy_init_scope(): with variable_scope.variable_creator_scope(_lazy_init_variable_creator): yield @tf_contextlib.contextmanager def disable_init_variable_creator(): try: global _DISABLE_LAZY_VARIABLE_INIT existing_value = getattr(_DISABLE_LAZY_VARIABLE_INIT, "disabled", False) _DISABLE_LAZY_VARIABLE_INIT.disabled = True yield finally: _DISABLE_LAZY_VARIABLE_INIT.disabled = existing_value
tf-keras/tf_keras/dtensor/lazy_variable.py/0
{ "file_path": "tf-keras/tf_keras/dtensor/lazy_variable.py", "repo_id": "tf-keras", "token_count": 4565 }
158
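A minimal usage sketch for the lazy-variable record above (illustrative only, not part of the dataset record; the variable names and shapes are made up). Under `lazy_init_scope`, ordinary `tf.Variable(...)` calls go through the registered creator and yield `LazyInitVariable`s whose resource handles hold no memory until explicitly initialized:

import tensorflow as tf
from tf_keras.dtensor import lazy_variable

with lazy_variable.lazy_init_scope():
    # Intercepted by the lazy creator: no memory is allocated yet.
    v = tf.Variable(tf.zeros([4, 4]), name="lazy_weight")
    with lazy_variable.disable_init_variable_creator():
        # Opts out of the creator: an ordinary eager tf.Variable.
        w = tf.Variable(tf.ones([2]), name="eager_weight")

assert isinstance(v, lazy_variable.LazyInitVariable)
# Fetching v's value now would fail; assign the captured initial value first.
v.initialize()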
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the base ProcessingLayer and a subclass that uses Combiners.""" import abc import tensorflow.compat.v2 as tf from tf_keras.engine import data_adapter from tf_keras.engine.base_layer import Layer from tf_keras.utils import version_utils # isort: off from tensorflow.python.eager import context from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls @keras_export("keras.layers.experimental.preprocessing.PreprocessingLayer") class PreprocessingLayer(Layer, metaclass=abc.ABCMeta): """Base class for Preprocessing Layers. **Don't use this class directly: it's an abstract base class!** You may be looking for one of the many built-in [preprocessing layers](https://keras.io/guides/preprocessing_layers/) instead. Preprocessing layers are layers whose state gets computed before model training starts. They do not get updated during training. Most preprocessing layers implement an `adapt()` method for state computation. The `PreprocessingLayer` class is the base class you would subclass to implement your own preprocessing layers. """ _must_restore_from_config = True def __init__(self, **kwargs): super().__init__(**kwargs) self._is_compiled = False self._is_adapted = False # Sets `is_adapted=False` when `reset_state` is called. self._reset_state_impl = self.reset_state self.reset_state = self._reset_state_wrapper self._adapt_function = None @property def is_adapted(self): """Whether the layer has been fit to data already.""" return self._is_adapted @doc_controls.do_not_generate_docs def update_state(self, data): """Accumulates statistics for the preprocessing layer. Arguments: data: A mini-batch of inputs to the layer. """ raise NotImplementedError @doc_controls.do_not_generate_docs def reset_state(self): """Resets the statistics of the preprocessing layer.""" raise NotImplementedError @doc_controls.do_not_generate_docs def finalize_state(self): """Finalize the statistics for the preprocessing layer. This method is called at the end of `adapt` or after restoring a serialized preprocessing layer's state. This method handles any one-time operations that should occur on the layer's state before `Layer.__call__`. """ pass @doc_controls.do_not_generate_docs def make_adapt_function(self): """Creates a function to execute one step of `adapt`. This method can be overridden to support custom adapt logic. This method is called by `PreprocessingLayer.adapt`. Typically, this method directly controls `tf.function` settings, and delegates the actual state update logic to `PreprocessingLayer.update_state`. This function is cached the first time `PreprocessingLayer.adapt` is called. The cache is cleared whenever `PreprocessingLayer.compile` is called. Returns: Function. 
The function created by this method should accept a `tf.data.Iterator`, retrieve a batch, and update the state of the layer. """ if self._adapt_function is not None: return self._adapt_function def adapt_step(iterator): data = next(iterator) self._adapt_maybe_build(data) self.update_state(data) if self._steps_per_execution.numpy().item() == 1: adapt_fn = adapt_step else: def adapt_fn(iterator): for _ in tf.range(self._steps_per_execution): adapt_step(iterator) if not self._run_eagerly: adapt_fn = tf.function(adapt_fn) self._adapt_function = adapt_fn return self._adapt_function def compile(self, run_eagerly=None, steps_per_execution=None): """Configures the layer for `adapt`. Arguments: run_eagerly: Bool. If `True`, this `Model`'s logic will not be wrapped in a `tf.function`. Recommended to leave this as `None` unless your `Model` cannot be run inside a `tf.function`. Defaults to `False`. steps_per_execution: Int. The number of batches to run during each `tf.function` call. Running multiple batches inside a single `tf.function` call can greatly improve performance on TPUs or small models with a large Python overhead. Defaults to `1`. """ if steps_per_execution is None: steps_per_execution = 1 self._configure_steps_per_execution(steps_per_execution) if run_eagerly is None: run_eagerly = self.dynamic self._run_eagerly = run_eagerly self._is_compiled = True def adapt(self, data, batch_size=None, steps=None): """Fits the state of the preprocessing layer to the data being passed. After calling `adapt` on a layer, a preprocessing layer's state will not update during training. In order to make preprocessing layers efficient in any distribution context, they are kept constant with respect to any compiled `tf.Graph`s that call the layer. This does not affect the layer use when adapting each layer only once, but if you adapt a layer multiple times you will need to take care to re-compile any compiled functions as follows: * If you are adding a preprocessing layer to a `keras.Model`, you need to call `model.compile` after each subsequent call to `adapt`. * If you are calling a preprocessing layer inside `tf.data.Dataset.map`, you should call `map` again on the input `tf.data.Dataset` after each `adapt`. * If you are using a `tf.function` directly which calls a preprocessing layer, you need to call `tf.function` again on your callable after each subsequent call to `adapt`. `tf.keras.Model` example with multiple adapts: >>> layer = tf.keras.layers.Normalization( ... axis=None) >>> layer.adapt([0, 2]) >>> model = tf.keras.Sequential(layer) >>> model.predict([0, 1, 2]) array([-1., 0., 1.], dtype=float32) >>> layer.adapt([-1, 1]) >>> model.compile() # This is needed to re-compile model.predict! >>> model.predict([0, 1, 2]) array([0., 1., 2.], dtype=float32) `tf.data.Dataset` example with multiple adapts: >>> layer = tf.keras.layers.Normalization( ... axis=None) >>> layer.adapt([0, 2]) >>> input_ds = tf.data.Dataset.range(3) >>> normalized_ds = input_ds.map(layer) >>> list(normalized_ds.as_numpy_iterator()) [array([-1.], dtype=float32), array([0.], dtype=float32), array([1.], dtype=float32)] >>> layer.adapt([-1, 1]) >>> normalized_ds = input_ds.map(layer) # Re-map over the input dataset. >>> list(normalized_ds.as_numpy_iterator()) [array([0.], dtype=float32), array([1.], dtype=float32), array([2.], dtype=float32)] `adapt()` is meant only as a single machine utility to compute layer state. 
To analyze a dataset that cannot fit on a single machine, see [Tensorflow Transform]( https://www.tensorflow.org/tfx/transform/get_started) for a multi-machine, map-reduce solution. Arguments: data: The data to train on. It can be passed either as a tf.data Dataset, or as a numpy array. batch_size: Integer or `None`. Number of samples per state update. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` if your data is in the form of datasets, generators, or `keras.utils.Sequence` instances (since they generate batches). steps: Integer or `None`. Total number of steps (batches of samples) When training with input tensors such as TensorFlow data tensors, the default `None` is equal to the number of samples in your dataset divided by the batch size, or 1 if that cannot be determined. If x is a `tf.data` dataset, and 'steps' is None, the epoch will run until the input dataset is exhausted. When passing an infinitely repeating dataset, you must specify the `steps` argument. This argument is not supported with array inputs. """ _disallow_inside_tf_function("adapt") if not version_utils.should_use_v2(): raise RuntimeError("`adapt` is only supported in tensorflow v2.") if not self._is_compiled: self.compile() # Compile with defaults. if self.built: self.reset_state() data_handler = data_adapter.DataHandler( data, batch_size=batch_size, steps_per_epoch=steps, epochs=1, steps_per_execution=self._steps_per_execution, distribute=False, ) self._adapt_function = self.make_adapt_function() for _, iterator in data_handler.enumerate_epochs(): with data_handler.catch_stop_iteration(): for _ in data_handler.steps(): self._adapt_function(iterator) if data_handler.should_sync: context.async_wait() self.finalize_state() self._is_adapted = True def _reset_state_wrapper(self): """Calls `reset_state` and sets `adapted` to `False`.""" self._reset_state_impl() self._is_adapted = False @tf.__internal__.tracking.no_automatic_dependency_tracking def _configure_steps_per_execution(self, steps_per_execution): self._steps_per_execution = tf.Variable( steps_per_execution, dtype="int64", aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, ) # TODO(omalleyt): Unify this logic with `Layer._maybe_build`. def _adapt_maybe_build(self, data): if not self.built: try: # If this is a Numpy array or tensor, we can get shape from # .shape. If not, an attribute error will be thrown. data_shape = data.shape data_shape_nones = tuple([None] * len(data.shape)) except AttributeError: # The input has an unknown number of dimensions. data_shape = None data_shape_nones = None # TODO (b/159261555): move this to base layer build. batch_input_shape = getattr(self, "_batch_input_shape", None) if batch_input_shape is None: # Set the number of dimensions. self._batch_input_shape = data_shape_nones self.build(data_shape) self.built = True def _disallow_inside_tf_function(method_name): """Disallow calling a method inside a `tf.function`.""" if tf.inside_function(): error_msg = ( "Detected a call to `PreprocessingLayer.{method_name}` inside a " "`tf.function`. `PreprocessingLayer.{method_name} is a high-level " "endpoint that manages its own `tf.function`. Please move the call " "to `PreprocessingLayer.{method_name}` outside of all enclosing " "`tf.function`s. Note that you can call a `PreprocessingLayer` " "directly on `Tensor`s inside a `tf.function` like: `layer(x)`, " "or update its state like: `layer.update_state(x)`." ).format(method_name=method_name) raise RuntimeError(error_msg)
tf-keras/tf_keras/engine/base_preprocessing_layer.py/0
{ "file_path": "tf-keras/tf_keras/engine/base_preprocessing_layer.py", "repo_id": "tf-keras", "token_count": 5111 }
159
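The `PreprocessingLayer` record above documents the `adapt()` / `update_state()` / `reset_state()` contract; the following hedged sketch shows a minimal subclass exercising that contract (the `MaxScaling` name and its scaling logic are invented for illustration, not taken from the record):

import tensorflow as tf
from tf_keras.engine.base_preprocessing_layer import PreprocessingLayer

class MaxScaling(PreprocessingLayer):
    """Scales inputs by the maximum value seen during adapt()."""

    def build(self, input_shape):
        # Non-trainable state computed by adapt(), not by gradient descent.
        self.max_value = self.add_weight(
            name="max_value", shape=(), initializer="ones", trainable=False
        )

    def update_state(self, data):
        batch_max = tf.reduce_max(tf.cast(data, tf.float32))
        self.max_value.assign(tf.maximum(self.max_value, batch_max))

    def reset_state(self):
        self.max_value.assign(1.0)

    def call(self, inputs):
        return tf.cast(inputs, tf.float32) / self.max_value

layer = MaxScaling()
layer.adapt(tf.constant([[0.0, 2.0], [4.0, 8.0]]))  # state: max_value == 8.0
print(layer(tf.constant([[8.0]])))  # ~[[1.0]]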
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training routines.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras import metrics as metrics_module from tf_keras.optimizers.legacy import rmsprop from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils class TrainingTest(test_combinations.TestCase): @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_dynamic_model_has_trainable_weights(self): if not tf.executing_eagerly(): # Only test Eager modes, as Graph mode is not relevant for dynamic # models. return class DynamicModel(keras.Model): def __init__(self): super().__init__(dynamic=True) self.dense = keras.layers.Dense( 1, kernel_initializer="zeros", bias_initializer="ones" ) def call(self, inputs): return self.dense(inputs) model = DynamicModel() model.compile("rmsprop", "mae", run_eagerly=True) hist = model.fit(np.zeros((1, 1)), np.zeros((1, 1))) self.assertEqual(hist.history["loss"][-1], 1) self.assertEqual(len(model.trainable_weights), 2) loss = model.train_on_batch(np.zeros((1, 1)), np.zeros((1, 1))) # The loss must have been updated if the trainable weights are taken # into account during tracking. self.assertLess(loss, 1) @test_combinations.run_with_all_model_types(exclude_models="sequential") @test_combinations.run_all_keras_modes def test_model_methods_with_eager_tensors_multi_io(self): if not tf.executing_eagerly(): # Only test V2 Function and V2 Eager modes, as V1 Graph mode with # symbolic tensors has different requirements. return input_a = keras.layers.Input(shape=(3,), name="input_a") input_b = keras.layers.Input(shape=(3,), name="input_b") dense = keras.layers.Dense(4, name="dense") dropout = keras.layers.Dropout(0.5, name="dropout") model = test_utils.get_multi_io_model( [input_a, dense], [input_b, dense, dropout] ) optimizer = rmsprop.RMSprop(learning_rate=0.001) loss = "mse" loss_weights = [1.0, 0.5] metrics = ["mae", metrics_module.CategoricalAccuracy()] model.compile( optimizer, loss, metrics=metrics, loss_weights=loss_weights, run_eagerly=test_utils.should_run_eagerly(), sample_weight_mode=None, ) input_a = tf.zeros(shape=(10, 3)) input_b = tf.zeros(shape=(10, 3)) target_a = tf.zeros(shape=(10, 4)) target_b = tf.zeros(shape=(10, 4)) model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=5, verbose=0, ) # Test: no shuffle. model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=5, verbose=0, shuffle=False, ) # Test: validation data. 
model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=2, verbose=0, validation_data=([input_a, input_b], [target_a, target_b]), ) model.train_on_batch([input_a, input_b], [target_a, target_b]) model.predict([input_a, input_b], batch_size=5) model.evaluate( [input_a, input_b], [target_a, target_b], batch_size=2, verbose=0 ) model.test_on_batch([input_a, input_b], [target_a, target_b]) # Test: mix np and tensors. input_b = np.zeros(shape=(10, 3)).astype("float32") target_b = np.zeros(shape=(10, 4)).astype("float32") model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=5, verbose=0, ) model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=2, verbose=0, validation_data=([input_a, input_b], [target_a, target_b]), ) model.fit( [input_a, input_b], [target_a, target_b], epochs=1, batch_size=5, verbose=0, shuffle=False, ) model.train_on_batch([input_a, input_b], [target_a, target_b]) model.predict([input_a, input_b], batch_size=5) model.evaluate( [input_a, input_b], [target_a, target_b], batch_size=2, verbose=0 ) model.test_on_batch([input_a, input_b], [target_a, target_b]) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes def test_model_methods_with_eager_tensors_single_io(self): if not tf.executing_eagerly(): # Only test V2 Function and V2 Eager modes, as V1 Graph mode with # symbolic tensors has different requirements. return model = test_utils.get_small_mlp(10, 4, 3) optimizer = rmsprop.RMSprop(learning_rate=0.001) loss = "mse" metrics = ["mae", metrics_module.CategoricalAccuracy()] model.compile( optimizer, loss, metrics=metrics, run_eagerly=test_utils.should_run_eagerly(), ) inputs = tf.zeros(shape=(10, 3)) targets = tf.zeros(shape=(10, 4)) model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0) model.fit( inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False ) model.fit( inputs, targets, epochs=1, batch_size=4, verbose=0, validation_data=(inputs, targets), ) model.evaluate(inputs, targets, batch_size=2, verbose=0) model.predict(inputs, batch_size=2) model.train_on_batch(inputs, targets) model.test_on_batch(inputs, targets) @test_combinations.run_with_all_model_types def test_model_fit_and_validation_with_missing_arg_errors(self): model = test_utils.get_small_mlp(10, 4, 3) model.compile( optimizer=rmsprop.RMSprop(learning_rate=0.001), loss="mse", run_eagerly=True, ) x = tf.zeros(shape=(10, 3)) y = tf.zeros(shape=(10, 4)) dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat(10).batch(5) validation_dataset = ( tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(5) ) # Infinite dataset. model.fit(dataset, epochs=1, verbose=0) # Step argument is required for infinite datasets. with self.assertRaises(ValueError): model.fit( dataset, steps_per_epoch=2, epochs=1, verbose=0, validation_data=validation_dataset, ) with self.assertRaises(ValueError): model.fit( dataset, steps_per_epoch=2, epochs=1, verbose=0, validation_data=validation_dataset, ) # TODO(b/120931266): Enable test on subclassed models after bug causing an # extra dimension to be added to predict outputs is fixed. 
@test_combinations.run_with_all_model_types(exclude_models="subclass") def test_generator_methods(self): model = test_utils.get_small_mlp(10, 4, 3) optimizer = rmsprop.RMSprop(learning_rate=0.001) model.compile( optimizer, loss="mse", metrics=["mae", metrics_module.CategoricalAccuracy()], run_eagerly=True, ) x = np.random.random((10, 3)) y = np.random.random((10, 4)) def numpy_iterator(): while True: yield x, y model.fit_generator(numpy_iterator(), steps_per_epoch=3, epochs=1) model.evaluate_generator(numpy_iterator(), steps=3) def inference_numpy_iterator(): while True: yield x out = model.predict_generator(inference_numpy_iterator(), steps=3) self.assertEqual(out.shape, (30, 4)) class CorrectnessTest(test_combinations.TestCase): @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes @parameterized.named_parameters( [ ("", dict()), ("_clipvalue_inf", {"clipvalue": 999999}), ("_clipnorm_inf", {"clipnorm": 999999}), ] ) def test_loss_correctness(self, optimizer_kwargs): # Test that training loss is the same in eager and graph # (by comparing it to a reference value in a deterministic case) layers = [ keras.layers.Dense(3, activation="relu", kernel_initializer="ones"), keras.layers.Dense( 2, activation="softmax", kernel_initializer="ones" ), ] model = test_utils.get_model_from_layers(layers, input_shape=(4,)) model.compile( loss="sparse_categorical_crossentropy", optimizer=rmsprop.RMSprop(learning_rate=0.001, **optimizer_kwargs), run_eagerly=test_utils.should_run_eagerly(), ) x = np.ones((100, 4)) np.random.seed(123) y = np.random.randint(0, 1, size=(100, 1)) history = model.fit(x, y, epochs=1, batch_size=10) self.assertAlmostEqual(history.history["loss"][-1], 0.5836, 4) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes def test_loss_correctness_clipvalue_zero(self): # Test that training loss is the same in eager and graph # (by comparing it to a reference value in a deterministic case) # And confirm that setting clipvalue to zero stops all training layers = [ keras.layers.Dense(3, activation="relu", kernel_initializer="ones"), keras.layers.Dense( 2, activation="softmax", kernel_initializer="ones" ), ] model = test_utils.get_model_from_layers(layers, input_shape=(4,)) model.compile( loss="sparse_categorical_crossentropy", optimizer=rmsprop.RMSprop(learning_rate=0.001, clipvalue=0.0), run_eagerly=test_utils.should_run_eagerly(), ) x = np.ones((100, 4)) np.random.seed(123) y = np.random.randint(0, 1, size=(100, 1)) history = model.fit(x, y, epochs=3, batch_size=10) self.assertAlmostEqual(history.history["loss"][-3], 0.6931, 4) self.assertAlmostEqual(history.history["loss"][-2], 0.6931, 4) self.assertAlmostEqual(history.history["loss"][-1], 0.6931, 4) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes def test_loss_correctness_with_iterator(self): # Test that training loss is the same in eager and graph # (by comparing it to a reference value in a deterministic case) layers = [ keras.layers.Dense(3, activation="relu", kernel_initializer="ones"), keras.layers.Dense( 2, activation="softmax", kernel_initializer="ones" ), ] model = test_utils.get_model_from_layers(layers, input_shape=(4,)) model.compile( loss="sparse_categorical_crossentropy", optimizer=rmsprop.RMSprop(learning_rate=0.001), run_eagerly=test_utils.should_run_eagerly(), ) x = np.ones((100, 4), dtype=np.float32) np.random.seed(123) y = np.random.randint(0, 1, size=(100, 1)) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = 
dataset.repeat(100) dataset = dataset.batch(10) history = model.fit(dataset, epochs=1, steps_per_epoch=10) self.assertAlmostEqual(history.history["loss"][-1], 0.5836, 4) @parameterized.named_parameters( [ ("_None", None, 0.0, 4.0), ("_False", False, 4.0, 4.0), ("_True", True, 0.0, 0.0), ] ) def test_nested_model_learning_phase( self, training, expected_training_loss, expected_validation_loss ): """Tests learning phase is correctly set in an intermediate layer.""" def _make_unregularized_model(): inputs = keras.Input((4,)) # Zero out activations when `training=True`. x = keras.layers.Dropout(1.0 - 1.0 / (1 << 24))(inputs) x = keras.layers.Dense( 10, activation="relu", trainable=False, bias_initializer="zeros", kernel_initializer="ones", )( x ) # Just sum together all the activations. outputs = keras.layers.Dense(3)(x) return keras.Model(inputs, outputs) def _regularize_model(unregularized_model): # Regularize the most recent activations of a post-dropout layer. sample_activations = unregularized_model.get_layer( index=-2 ).get_output_at(-1) regularization_loss = keras.backend.mean(sample_activations) unregularized_model.add_loss(regularization_loss) unregularized_model.add_metric( regularization_loss, aggregation="mean", name="regularization_loss", ) inputs = keras.Input(unregularized_model.inputs[0].shape[1:]) logits = unregularized_model(inputs, training=training) outputs = keras.activations.softmax(logits) model = keras.Model(inputs, outputs) return model # Make and compile models. model = _regularize_model(_make_unregularized_model()) model.compile("sgd", "sparse_categorical_crossentropy") # Prepare fake data. x = np.ones((20, 4)).astype(np.float32) y = np.random.randint(0, 3, size=(20,)).astype(np.int64) dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2) results = model.evaluate(dataset) evaluation_results = dict(zip(model.metrics_names, results)) # Rate of dropout depends on the learning phase. self.assertEqual( evaluation_results["regularization_loss"], expected_validation_loss ) history = model.fit(dataset, epochs=2, validation_data=dataset).history self.assertAllEqual( history["regularization_loss"], [expected_training_loss] * 2 ) self.assertAllEqual( history["val_regularization_loss"], [expected_validation_loss] * 2 ) if __name__ == "__main__": tf.compat.v1.enable_eager_execution() tf.test.main()
tf-keras/tf_keras/engine/training_eager_test.py/0
{ "file_path": "tf-keras/tf_keras/engine/training_eager_test.py", "repo_id": "tf-keras", "token_count": 7636 }
160
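For reference, the eager-tensor tests in the record above revolve around one pattern; this is a hedged, self-contained sketch of it against the public functional API (the layer sizes and all-zero data are placeholders):

import tensorflow as tf
import tf_keras as keras

input_a = keras.layers.Input(shape=(3,), name="input_a")
input_b = keras.layers.Input(shape=(3,), name="input_b")
dense = keras.layers.Dense(4, name="dense")        # shared between both inputs
out_a = dense(input_a)
out_b = keras.layers.Dropout(0.5)(dense(input_b))
model = keras.Model([input_a, input_b], [out_a, out_b])

model.compile(optimizer="rmsprop", loss="mse", loss_weights=[1.0, 0.5],
              metrics=["mae"])

# Eager tensors can be passed to fit/evaluate/predict directly.
x = [tf.zeros((10, 3)), tf.zeros((10, 3))]
y = [tf.zeros((10, 4)), tf.zeros((10, 4))]
model.fit(x, y, epochs=1, batch_size=5, verbose=0)
model.evaluate(x, y, batch_size=2, verbose=0)
model.predict(x, batch_size=5)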
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for inference-only model/layer exporting utilities.""" import os import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.export import export_lib from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils def get_model(): layers = [ keras.layers.Dense(10, activation="relu"), keras.layers.BatchNormalization(), keras.layers.Dense(1, activation="sigmoid"), ] model = test_utils.get_model_from_layers(layers, input_shape=(10,)) return model @test_utils.run_v2_only class ExportArchiveTest(tf.test.TestCase, parameterized.TestCase): @test_combinations.run_with_all_model_types def test_standard_model_export(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") model = get_model() ref_input = tf.random.normal((3, 10)) ref_output = model(ref_input).numpy() export_lib.export_model(model, temp_filepath) revived_model = tf.saved_model.load(temp_filepath) self.assertAllClose( ref_output, revived_model.serve(ref_input).numpy(), atol=1e-6 ) @test_combinations.run_with_all_model_types def test_low_level_model_export(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") model = get_model() ref_input = tf.random.normal((3, 10)) ref_output = model(ref_input).numpy() # Test variable tracking export_archive = export_lib.ExportArchive() export_archive.track(model) self.assertLen(export_archive.variables, 8) self.assertLen(export_archive.trainable_variables, 6) self.assertLen(export_archive.non_trainable_variables, 2) @tf.function() def my_endpoint(x): return model(x) # Test registering an endpoint that is a tf.function (called) my_endpoint(ref_input) # Trace fn export_archive.add_endpoint( "call", my_endpoint, ) export_archive.write_out(temp_filepath) revived_model = tf.saved_model.load(temp_filepath) self.assertFalse(hasattr(revived_model, "_tracked")) self.assertAllClose( ref_output, revived_model.call(ref_input).numpy(), atol=1e-6 ) self.assertLen(revived_model.variables, 8) self.assertLen(revived_model.trainable_variables, 6) self.assertLen(revived_model.non_trainable_variables, 2) # Test registering an endpoint that is NOT a tf.function export_archive = export_lib.ExportArchive() export_archive.track(model) export_archive.add_endpoint( "call", model.call, input_signature=[ tf.TensorSpec( shape=(None, 10), dtype=tf.float32, ) ], ) export_archive.write_out(temp_filepath) revived_model = tf.saved_model.load(temp_filepath) self.assertAllClose( ref_output, revived_model.call(ref_input).numpy(), atol=1e-6 ) def test_layer_export(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_layer") layer = keras.layers.BatchNormalization() ref_input = tf.random.normal((3, 10)) ref_output = layer(ref_input).numpy() # Build layer (important) export_archive = 
export_lib.ExportArchive() export_archive.track(layer) export_archive.add_endpoint( "call", layer.call, input_signature=[ tf.TensorSpec( shape=(None, 10), dtype=tf.float32, ) ], ) export_archive.write_out(temp_filepath) revived_layer = tf.saved_model.load(temp_filepath) self.assertAllClose( ref_output, revived_layer.call(ref_input).numpy(), atol=1e-6 ) def test_multi_input_output_functional_model(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") x1 = keras.Input((2,)) x2 = keras.Input((2,)) y1 = keras.layers.Dense(3)(x1) y2 = keras.layers.Dense(3)(x2) model = keras.Model([x1, x2], [y1, y2]) ref_inputs = [tf.random.normal((3, 2)), tf.random.normal((3, 2))] ref_outputs = model(ref_inputs) export_archive = export_lib.ExportArchive() export_archive.track(model) export_archive.add_endpoint( "serve", model.call, input_signature=[ [ tf.TensorSpec( shape=(None, 2), dtype=tf.float32, ), tf.TensorSpec( shape=(None, 2), dtype=tf.float32, ), ] ], ) export_archive.write_out(temp_filepath) revived_model = tf.saved_model.load(temp_filepath) self.assertAllClose( ref_outputs[0].numpy(), revived_model.serve(ref_inputs)[0].numpy(), atol=1e-6, ) self.assertAllClose( ref_outputs[1].numpy(), revived_model.serve(ref_inputs)[1].numpy(), atol=1e-6, ) # Now test dict inputs model = keras.Model({"x1": x1, "x2": x2}, [y1, y2]) ref_inputs = { "x1": tf.random.normal((3, 2)), "x2": tf.random.normal((3, 2)), } ref_outputs = model(ref_inputs) export_archive = export_lib.ExportArchive() export_archive.track(model) export_archive.add_endpoint( "serve", model.call, input_signature=[ { "x1": tf.TensorSpec( shape=(None, 2), dtype=tf.float32, ), "x2": tf.TensorSpec( shape=(None, 2), dtype=tf.float32, ), } ], ) export_archive.write_out(temp_filepath) revived_model = tf.saved_model.load(temp_filepath) self.assertAllClose( ref_outputs[0].numpy(), revived_model.serve(ref_inputs)[0].numpy(), atol=1e-6, ) self.assertAllClose( ref_outputs[1].numpy(), revived_model.serve(ref_inputs)[1].numpy(), atol=1e-6, ) def test_model_with_keras_lookup_table(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") text_vectorization = keras.layers.TextVectorization() text_vectorization.adapt(["one two", "three four", "five six"]) model = keras.Sequential( [ text_vectorization, keras.layers.Embedding(10, 32), keras.layers.Dense(1), ] ) ref_input = tf.convert_to_tensor(["one two three four"]) ref_output = model(ref_input).numpy() export_lib.export_model(model, temp_filepath) revived_model = tf.saved_model.load(temp_filepath) self.assertAllClose( ref_output, revived_model.serve(ref_input).numpy(), atol=1e-6 ) def test_model_with_tf_lookup_table(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") class MyVocabTable(keras.layers.Layer): def __init__(self, vocab): super().__init__() self.keys = [""] + vocab self.values = range(len(self.keys)) self.init = tf.lookup.KeyValueTensorInitializer( self.keys, self.values, key_dtype=tf.string, value_dtype=tf.int64, ) num_oov_buckets = 1 self.table = tf.lookup.StaticVocabularyTable( self.init, num_oov_buckets ) def call(self, x): result = self.table.lookup(x) if isinstance(result, tf.RaggedTensor): result = result.to_tensor() return result vocab_table = MyVocabTable(["a", "b", "c"]) vocab_table(tf.constant([""] + list("abcdefg"))) model = keras.Sequential([vocab_table]) ref_input = tf.constant([""] + list("abcdefg")) ref_output = model(ref_input) export_lib.export_model(model, temp_filepath) revived_model = tf.saved_model.load(temp_filepath) 
self.assertAllClose( ref_output, revived_model.serve(ref_input).numpy(), atol=1e-6 ) def test_track_multiple_layers(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") layer_1 = keras.layers.Dense(2) ref_input_1 = tf.random.normal((3, 4)) ref_output_1 = layer_1(ref_input_1).numpy() layer_2 = keras.layers.Dense(3) ref_input_2 = tf.random.normal((3, 5)) ref_output_2 = layer_2(ref_input_2).numpy() export_archive = export_lib.ExportArchive() export_archive.add_endpoint( "call_1", layer_1.call, input_signature=[ tf.TensorSpec( shape=(None, 4), dtype=tf.float32, ), ], ) export_archive.add_endpoint( "call_2", layer_2.call, input_signature=[ tf.TensorSpec( shape=(None, 5), dtype=tf.float32, ), ], ) export_archive.write_out(temp_filepath) revived_layer = tf.saved_model.load(temp_filepath) self.assertAllClose( ref_output_1, revived_layer.call_1(ref_input_1).numpy(), atol=1e-6, ) self.assertAllClose( ref_output_2, revived_layer.call_2(ref_input_2).numpy(), atol=1e-6, ) def test_non_standard_layer_signature(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_layer") layer = keras.layers.MultiHeadAttention(2, 2) x1 = tf.random.normal((3, 2, 2)) x2 = tf.random.normal((3, 2, 2)) ref_output = layer(x1, x2).numpy() # Build layer (important) export_archive = export_lib.ExportArchive() export_archive.track(layer) export_archive.add_endpoint( "call", layer.call, input_signature=[ tf.TensorSpec( shape=(None, 2, 2), dtype=tf.float32, ), tf.TensorSpec( shape=(None, 2, 2), dtype=tf.float32, ), ], ) export_archive.write_out(temp_filepath) revived_layer = tf.saved_model.load(temp_filepath) self.assertAllClose( ref_output, revived_layer.call(query=x1, value=x2).numpy(), atol=1e-6, ) def test_variable_collection(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") model = keras.Sequential( [ keras.Input((10,)), keras.layers.Dense(2), keras.layers.Dense(2), ] ) # Test variable tracking export_archive = export_lib.ExportArchive() export_archive.track(model) export_archive.add_endpoint( "call", model.call, input_signature=[ tf.TensorSpec( shape=(None, 10), dtype=tf.float32, ) ], ) export_archive.add_variable_collection( "my_vars", model.layers[1].weights ) self.assertLen(export_archive.my_vars, 2) export_archive.write_out(temp_filepath) revived_model = tf.saved_model.load(temp_filepath) self.assertLen(revived_model.my_vars, 2) def test_export_model_errors(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") # Model has not been built model = keras.Sequential([keras.layers.Dense(2)]) with self.assertRaisesRegex(ValueError, "It must be built"): export_lib.export_model(model, temp_filepath) # Subclassed model has not been called class MyModel(keras.Model): def __init__(self, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(2) def build(self, input_shape): self.dense.build(input_shape) self.built = True def call(self, x): return self.dense(x) model = MyModel() model.build((2, 3)) with self.assertRaisesRegex(ValueError, "It must be called"): export_lib.export_model(model, temp_filepath) def test_export_archive_errors(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") model = keras.Sequential([keras.layers.Dense(2)]) model(tf.random.normal((2, 3))) # Endpoint name reuse export_archive = export_lib.ExportArchive() export_archive.track(model) export_archive.add_endpoint( "call", model.call, input_signature=[ tf.TensorSpec( shape=(None, 3), dtype=tf.float32, ) ], ) with self.assertRaisesRegex(ValueError, 
"already taken"): export_archive.add_endpoint( "call", model.call, input_signature=[ tf.TensorSpec( shape=(None, 3), dtype=tf.float32, ) ], ) # Write out with no endpoints export_archive = export_lib.ExportArchive() export_archive.track(model) with self.assertRaisesRegex(ValueError, "No endpoints have been set"): export_archive.write_out(temp_filepath) # Invalid object type with self.assertRaisesRegex(ValueError, "Invalid resource type"): export_archive = export_lib.ExportArchive() export_archive.track("model") # Set endpoint with no input signature export_archive = export_lib.ExportArchive() export_archive.track(model) with self.assertRaisesRegex( ValueError, "you must provide an `input_signature`" ): export_archive.add_endpoint( "call", model.call, ) # Set endpoint that has never been called export_archive = export_lib.ExportArchive() export_archive.track(model) @tf.function() def my_endpoint(x): return model(x) export_archive = export_lib.ExportArchive() export_archive.track(model) with self.assertRaisesRegex( ValueError, "you must either provide a function" ): export_archive.add_endpoint( "call", my_endpoint, ) def test_export_no_assets(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") # Case where there are legitimately no assets. model = keras.Sequential([keras.layers.Flatten()]) model(tf.random.normal((2, 3))) export_archive = export_lib.ExportArchive() export_archive.add_endpoint( "call", model.call, input_signature=[ tf.TensorSpec( shape=(None, 3), dtype=tf.float32, ) ], ) export_archive.write_out(temp_filepath) @test_combinations.run_with_all_model_types def test_model_export_method(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") model = get_model() ref_input = tf.random.normal((3, 10)) ref_output = model(ref_input).numpy() model.export(temp_filepath) revived_model = tf.saved_model.load(temp_filepath) self.assertAllClose( ref_output, revived_model.serve(ref_input).numpy(), atol=1e-6 ) @test_utils.run_v2_only class TestReloadedLayer(tf.test.TestCase, parameterized.TestCase): @test_combinations.run_with_all_model_types def test_reloading_export_archive(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") model = get_model() ref_input = tf.random.normal((3, 10)) ref_output = model(ref_input).numpy() export_lib.export_model(model, temp_filepath) reloaded_layer = export_lib.ReloadedLayer(temp_filepath) self.assertAllClose( reloaded_layer(ref_input).numpy(), ref_output, atol=1e-7 ) self.assertLen(reloaded_layer.weights, len(model.weights)) self.assertLen( reloaded_layer.trainable_weights, len(model.trainable_weights) ) self.assertLen( reloaded_layer.non_trainable_weights, len(model.non_trainable_weights), ) # Test fine-tuning new_model = keras.Sequential([reloaded_layer]) new_model.compile(optimizer="rmsprop", loss="mse") x = tf.random.normal((32, 10)) y = tf.random.normal((32, 1)) new_model.train_on_batch(x, y) new_output = reloaded_layer(ref_input).numpy() self.assertNotAllClose(new_output, ref_output, atol=1e-5) # Test that trainable can be set to False reloaded_layer.trainable = False new_model.compile(optimizer="rmsprop", loss="mse") x = tf.random.normal((32, 10)) y = tf.random.normal((32, 1)) new_model.train_on_batch(x, y) # The output must not have changed self.assertAllClose( reloaded_layer(ref_input).numpy(), new_output, atol=1e-7 ) @test_combinations.run_with_all_model_types def test_reloading_default_saved_model(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") model = 
get_model() ref_input = tf.random.normal((3, 10)) ref_output = model(ref_input).numpy() tf.saved_model.save(model, temp_filepath) reloaded_layer = export_lib.ReloadedLayer( temp_filepath, call_endpoint="serving_default" ) # The output is a dict, due to the nature of SavedModel saving. new_output = reloaded_layer(ref_input) self.assertAllClose( new_output[list(new_output.keys())[0]].numpy(), ref_output, atol=1e-7, ) self.assertLen(reloaded_layer.weights, len(model.weights)) self.assertLen( reloaded_layer.trainable_weights, len(model.trainable_weights) ) self.assertLen( reloaded_layer.non_trainable_weights, len(model.non_trainable_weights), ) def test_call_training(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") keras.utils.set_random_seed(1337) model = keras.Sequential( [ keras.Input((10,)), keras.layers.Dense(10), keras.layers.Dropout(0.99999), ] ) export_archive = export_lib.ExportArchive() export_archive.track(model) export_archive.add_endpoint( name="call_inference", fn=lambda x: model(x, training=False), input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)], ) export_archive.add_endpoint( name="call_training", fn=lambda x: model(x, training=True), input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)], ) export_archive.write_out(temp_filepath) reloaded_layer = export_lib.ReloadedLayer( temp_filepath, call_endpoint="call_inference", call_training_endpoint="call_training", ) inference_output = reloaded_layer( tf.random.normal((1, 10)), training=False ) training_output = reloaded_layer( tf.random.normal((1, 10)), training=True ) self.assertAllClose(np.mean(training_output), 0.0, atol=1e-7) self.assertNotAllClose(np.mean(inference_output), 0.0, atol=1e-7) @test_combinations.run_with_all_model_types def test_serialization(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") model = get_model() ref_input = tf.random.normal((3, 10)) ref_output = model(ref_input).numpy() export_lib.export_model(model, temp_filepath) reloaded_layer = export_lib.ReloadedLayer(temp_filepath) # Test reinstantiation from config config = reloaded_layer.get_config() rereloaded_layer = export_lib.ReloadedLayer.from_config(config) self.assertAllClose( rereloaded_layer(ref_input).numpy(), ref_output, atol=1e-7 ) # Test whole model saving with reloaded layer inside model = keras.Sequential([reloaded_layer]) temp_model_filepath = os.path.join(self.get_temp_dir(), "m.keras") model.save(temp_model_filepath, save_format="keras_v3") reloaded_model = keras.models.load_model( temp_model_filepath, custom_objects={"ReloadedLayer": export_lib.ReloadedLayer}, ) self.assertAllClose( reloaded_model(ref_input).numpy(), ref_output, atol=1e-7 ) def test_errors(self): # Test missing call endpoint temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") model = keras.Sequential([keras.Input((2,)), keras.layers.Dense(3)]) export_lib.export_model(model, temp_filepath) with self.assertRaisesRegex(ValueError, "The endpoint 'wrong'"): export_lib.ReloadedLayer(temp_filepath, call_endpoint="wrong") # Test missing call training endpoint with self.assertRaisesRegex(ValueError, "The endpoint 'wrong'"): export_lib.ReloadedLayer( temp_filepath, call_endpoint="serve", call_training_endpoint="wrong", ) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/export/export_lib_test.py/0
{ "file_path": "tf-keras/tf_keras/export/export_lib_test.py", "repo_id": "tf-keras", "token_count": 11862 }
161
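A condensed sketch of the export/reload workflow that the tests in the record above exercise (the toy model and the /tmp path are placeholders, not taken from the record):

import tensorflow as tf
import tf_keras as keras
from tf_keras.export import export_lib

model = keras.Sequential([keras.Input((10,)), keras.layers.Dense(1)])
filepath = "/tmp/exported_model"  # any writable location

# Export: track the model's variables/assets and register a serving endpoint.
export_archive = export_lib.ExportArchive()
export_archive.track(model)
export_archive.add_endpoint(
    "serve",
    model.call,
    input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
)
export_archive.write_out(filepath)

# Reload for inference as a plain SavedModel...
revived = tf.saved_model.load(filepath)
outputs = revived.serve(tf.random.normal((3, 10)))

# ...or as a TF-Keras layer that can be fine-tuned inside a new model.
reloaded_layer = export_lib.ReloadedLayer(filepath, call_endpoint="serve")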
# Description: # Contains TF-Keras integration tests that verify with other TF high level APIs. # Placeholder: load unaliased py_library load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test") load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test") # buildifier: disable=same-origin-load load("@org_keras//tf_keras:tf_keras.bzl", "distribute_py_test") load("@org_keras//tf_keras:tf_keras.bzl", "tpu_py_test") package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], default_visibility = [ "//tf_keras:friends", "//third_party/tensorflow/tools/pip_package:__pkg__", ], licenses = ["notice"], ) tf_py_test( name = "forwardprop_test", srcs = ["forwardprop_test.py"], python_version = "PY3", deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) tf_py_test( name = "function_test", srcs = ["function_test.py"], python_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) tf_py_test( name = "gradients_test", srcs = ["gradients_test.py"], python_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) cuda_py_test( name = "saved_model_test", srcs = ["saved_model_test.py"], python_version = "PY3", tags = [ "no_oss", # TODO(keras-team): Fails in OSS due to reference to `tf.keras` ], deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) tf_py_test( name = "legacy_rnn_test", # Remove this target in when TF 1 is deprecated. srcs = ["legacy_rnn_test.py"], python_version = "PY3", tags = [ "no_oss", # TODO(keras-team): Fails in OSS cpu tests due to lazy loading of `keras` module. ], deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) tf_py_test( name = "module_test", srcs = ["module_test.py"], deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) tf_py_test( name = "vectorized_map_test", srcs = ["vectorized_map_test.py"], python_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) cuda_py_test( name = "gradient_checkpoint_test", srcs = ["gradient_checkpoint_test.py"], python_version = "PY3", tags = ["no_oss"], # TODO(b/249526796) deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) cuda_py_test( name = "central_storage_strategy_test", srcs = ["central_storage_strategy_test.py"], python_version = "PY3", tags = [ "multi_and_single_gpu", "no_oss", # TODO(keras-team): Fails in OSS due to reference to `tf.keras`. "no_windows_gpu", # TODO(b/130551176) ], deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/utils:kpl_test_utils", ], ) tpu_py_test( name = "tpu_strategy_test", srcs = ["tpu_strategy_test.py"], disable_experimental = True, disable_mlir_bridge = False, python_version = "PY3", tags = ["no_oss"], deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_tensorflow_installed", ], ) tf_py_test( name = "multi_worker_tutorial_test", srcs = ["multi_worker_tutorial_test.py"], python_version = "PY3", shard_count = 6, tags = [ "no_oss", # TODO(keras-team): Fails in OSS cpu tests due to reference to `tf.keras`. 
"no_windows", # TODO(b/183102726) "noasan", # TODO(b/156029134) "nomac", # TODO(b/182567880) "nomsan", # TODO(b/156029134) "notsan", # TODO(b/156029134) ], deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_portpicker_installed", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) distribute_py_test( name = "ctl_tutorial_test", srcs = ["ctl_tutorial_test.py"], main = "ctl_tutorial_test.py", shard_count = 5, tags = [ "multi_and_single_gpu", "no_oss", # TODO(keras-team): Fails in OSS due to references to `tf.keras` "nomultivm", # TODO(b/170502145) ], deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/distribute:strategy_combinations", ], ) distribute_py_test( name = "parameter_server_keras_preprocessing_test", srcs = ["parameter_server_keras_preprocessing_test.py"], python_version = "PY3", shard_count = 6, # TODO(b/184290570): Investigate why only 1 shard times out. tags = [ "multi_and_single_gpu", "no_oss", # TODO(b/194935930): Flaky test "nomultivm", # TODO(b/170502145) "notap", # b/216629693 ], deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_numpy_installed", "//:expect_portpicker_installed", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/testing_infra:test_utils", ], ) distribute_py_test( name = "distributed_training_test", srcs = ["distributed_training_test.py"], python_version = "PY3", shard_count = 50, tags = [ "multi_gpu", "no_oss", # TODO(b/183640564): Re-enable "no_rocm", "noasan", # TODO(b/184542721) "nomsan", # TODO(b/184542721) "nomultivm", # TODO(b/170502145) "notsan", # TODO(b/184542721) ], deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) distribute_py_test( name = "mwms_multi_process_runner_test", srcs = ["mwms_multi_process_runner_test.py"], python_version = "PY3", tags = [ "multi_gpu", "no_rocm", "noasan", # TODO(b/184542721) "nomsan", # TODO(b/184542721) "nomultivm", # TODO(b/170502145) "notpu", "notsan", # TODO(b/184542721) ], deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) py_library( name = "preprocessing_test_utils", srcs = ["preprocessing_test_utils.py"], srcs_version = "PY3", deps = ["//:expect_tensorflow_installed"], ) distribute_py_test( name = "preprocessing_applied_in_dataset_creator_test", srcs = ["preprocessing_applied_in_dataset_creator_test.py"], python_version = "PY3", shard_count = 50, tags = [ "multi_gpu", "no_oss", # TODO(b/183640564): Re-enable "no_rocm", "noasan", # TODO(b/184542721) "nomsan", # TODO(b/184542721) "nomultivm", # TODO(b/170502145) "notsan", # TODO(b/184542721) ], deps = [ ":preprocessing_test_utils", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) distribute_py_test( name = "preprocessing_applied_in_dataset_test", srcs = ["preprocessing_applied_in_dataset_test.py"], python_version = "PY3", shard_count = 50, tags = [ "multi_gpu", "no_oss", # TODO(b/183640564): Re-enable "no_rocm", "noasan", # TODO(b/184542721) "nomsan", # TODO(b/184542721) "nomultivm", # TODO(b/170502145) "notsan", # TODO(b/184542721) ], deps = [ ":preprocessing_test_utils", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) distribute_py_test( name = "preprocessing_applied_in_model_test", srcs = ["preprocessing_applied_in_model_test.py"], python_version = "PY3", shard_count = 50, tags = [ "multi_gpu", "no_oss", # TODO(b/183640564): Re-enable "no_rocm", "noasan", # 
TODO(b/184542721) "nomsan", # TODO(b/184542721) "nomultivm", # TODO(b/170502145) "notsan", # TODO(b/184542721) ], deps = [ ":preprocessing_test_utils", "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) distribute_py_test( name = "parameter_server_custom_training_loop_test", srcs = ["parameter_server_custom_training_loop_test.py"], python_version = "PY3", tags = [ "multi_gpu", "no_oss", # TODO(b/183640564): Re-enable "no_rocm", "noasan", # TODO(b/184542721) "nomsan", # TODO(b/184542721) "nomultivm", # TODO(b/170502145) "notsan", # TODO(b/184542721) ], deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", ], ) tf_py_test( name = "custom_object_saving_test", srcs = ["custom_object_saving_test.py"], python_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "parameter_server_training_metric_test", srcs = ["parameter_server_training_metric_test.py"], python_version = "PY3", tags = [ "nomac", # TODO(mihaimaruseac): b/127695564 "notsan", # TODO(b/156029134) ], deps = [ "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/api:tf_keras_api", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "fit_test", size = "medium", srcs = ["fit_test.py"], python_version = "PY3", shard_count = 28, tags = [ "no_oss", # TODO(keras-team): Fails in OSS due to reference to `tf.keras` ], deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/integration_test/models", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "saving_v3_test", size = "medium", srcs = ["saving_v3_test.py"], python_version = "PY3", shard_count = 12, tags = [ "no_oss", # TODO(keras-team): Fails in OSS. ], deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/integration_test/models", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "py_metric_test", size = "medium", srcs = ["py_metric_test.py"], python_version = "PY3", shard_count = 2, deps = [ "//:expect_tensorflow_installed", "//tf_keras/api:tf_keras_api", "//tf_keras/metrics", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "extension_type_test", size = "medium", srcs = ["extension_type_test.py"], python_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/api:tf_keras_api", "//tf_keras/engine", "//tf_keras/engine:input_layer", "//tf_keras/saving", ], )
tf-keras/tf_keras/integration_test/BUILD/0
{ "file_path": "tf-keras/tf_keras/integration_test/BUILD", "repo_id": "tf-keras", "token_count": 5787 }
162
"""Bert model. Adapted from https://keras.io/examples/nlp/masked_language_modeling/ """ import numpy as np import tensorflow as tf from tensorflow import keras from tf_keras.integration_test.models.input_spec import InputSpec SEQUENCE_LENGTH = 16 VOCAB_SIZE = 1000 EMBED_DIM = 64 NUM_HEAD = 2 FF_DIM = 32 NUM_LAYERS = 2 def get_data_spec(batch_size): return ( InputSpec((batch_size,), dtype="string"), InputSpec((batch_size, SEQUENCE_LENGTH, VOCAB_SIZE)), ) def get_input_preprocessor(): input_vectorizer = keras.layers.TextVectorization( max_tokens=VOCAB_SIZE, output_mode="int", output_sequence_length=SEQUENCE_LENGTH, ) text_ds = tf.data.Dataset.from_tensor_slices( [ "Lorem ipsum dolor sit amet", "consectetur adipiscing elit", "sed do eiusmod tempor incididunt ut", "labore et dolore magna aliqua.", "Ut enim ad minim veniam", "quis nostrud exercitation ullamco", "laboris nisi ut aliquip ex ea commodo consequat.", ] ) input_vectorizer.adapt(text_ds) return input_vectorizer def bert_module(query, key, value, i): attention_output = keras.layers.MultiHeadAttention( num_heads=NUM_HEAD, key_dim=EMBED_DIM // NUM_HEAD, )(query, key, value) attention_output = keras.layers.Dropout(0.1)(attention_output) attention_output = keras.layers.LayerNormalization(epsilon=1e-6)( query + attention_output ) ffn = keras.Sequential( [ keras.layers.Dense(FF_DIM, activation="relu"), keras.layers.Dense(EMBED_DIM), ], ) ffn_output = ffn(attention_output) ffn_output = keras.layers.Dropout(0.1)(ffn_output) sequence_output = keras.layers.LayerNormalization(epsilon=1e-6)( attention_output + ffn_output ) return sequence_output def get_pos_encoding_matrix(max_len, d_emb): pos_enc = np.array( [ [pos / np.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)] if pos != 0 else np.zeros(d_emb) for pos in range(max_len) ] ) pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2]) pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2]) return pos_enc loss_fn = keras.losses.CategoricalCrossentropy() loss_tracker = keras.metrics.Mean(name="loss") class MaskedLanguageModel(keras.Model): def train_step(self, inputs): if len(inputs) == 3: features, labels, sample_weight = inputs else: features, labels = inputs sample_weight = None with tf.GradientTape() as tape: predictions = self(features, training=True) loss = loss_fn(labels, predictions, sample_weight=sample_weight) trainable_vars = self.trainable_variables gradients = tape.gradient(loss, trainable_vars) self.optimizer.apply_gradients(zip(gradients, trainable_vars)) loss_tracker.update_state(loss, sample_weight=sample_weight) return {"loss": loss_tracker.result()} @property def metrics(self): return [loss_tracker] def get_model( build=False, compile=False, jit_compile=False, include_preprocessing=True ): if include_preprocessing: inputs = keras.layers.Input((), dtype="string") x = get_input_preprocessor()(inputs) else: inputs = keras.layers.Input((SEQUENCE_LENGTH,), dtype=tf.int64) x = inputs word_embeddings = keras.layers.Embedding(VOCAB_SIZE, EMBED_DIM)(x) position_embeddings = keras.layers.Embedding( input_dim=SEQUENCE_LENGTH, output_dim=EMBED_DIM, weights=[get_pos_encoding_matrix(SEQUENCE_LENGTH, EMBED_DIM)], trainable=False, )(tf.range(start=0, limit=SEQUENCE_LENGTH, delta=1)) embeddings = word_embeddings + position_embeddings encoder_output = embeddings for i in range(NUM_LAYERS): encoder_output = bert_module( encoder_output, encoder_output, encoder_output, i ) mlm_output = keras.layers.Dense( VOCAB_SIZE, name="mlm_cls", activation="softmax" )(encoder_output) model = MaskedLanguageModel(inputs, 
mlm_output) if compile: optimizer = keras.optimizers.Adam() model.compile(optimizer=optimizer, jit_compile=jit_compile) return model def get_custom_objects(): return { "MaskedLanguageModel": MaskedLanguageModel, }
tf-keras/tf_keras/integration_test/models/bert.py/0
{ "file_path": "tf-keras/tf_keras/integration_test/models/bert.py", "repo_id": "tf-keras", "token_count": 2060 }
163
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for multi-worker training tutorial.""" import contextlib import os import re import unittest import uuid import zipfile import numpy as np import tensorflow.compat.v2 as tf from absl import logging from absl.testing import parameterized PER_WORKER_BATCH_SIZE = 64 NUM_WORKERS = 2 NUM_EPOCHS = 2 NUM_STEPS_PER_EPOCH = 50 def _is_chief(task_type, task_id): # Note: there are two possible `TF_CONFIG` configuration. # 1) In addition to `worker` tasks, a `chief` task type is use; # in this case, this function should be modified to # `return task_type == 'chief'`. # 2) Only `worker` task type is used; in this case, worker 0 is # regarded as the chief. The implementation demonstrated here # is for this case. return task_type == "worker" and task_id == 0 def _get_temp_dir(dirpath, task_id): base_dirpath = "workertemp_" + str(task_id) temp_dir = os.path.join(dirpath, base_dirpath) tf.io.gfile.makedirs(temp_dir) return temp_dir def write_filepath(filepath, task_type, task_id): dirpath = os.path.dirname(filepath) base = os.path.basename(filepath) if not _is_chief(task_type, task_id): dirpath = _get_temp_dir(dirpath, task_id) return os.path.join(dirpath, base) class MultiWorkerTutorialTest(parameterized.TestCase, tf.test.TestCase): """Test of multi-worker training flow in tutorials on tensorflow.org. Please see below test method docs for what actual tutorial is being covered. """ # TODO(rchao): Add a test to demonstrate gather with MWMS. @contextlib.contextmanager def skip_fetch_failure_exception(self): try: yield except zipfile.BadZipfile: # There can be a race when multiple processes are downloading the # data. Skip the test if that results in loading errors. self.skipTest( "Data loading error: Bad magic number for file header." ) except Exception as e: if "URL fetch failure" in str(e): self.skipTest( "URL fetch error not considered failure of the test." ) else: raise def mnist_dataset(self): path_to_use = f"mnist_{str(uuid.uuid4())}.npz" with self.skip_fetch_failure_exception(): (x_train, y_train), _ = tf.keras.datasets.mnist.load_data( path=path_to_use ) # The `x` arrays are in uint8 and have values in the range [0, 255]. 
# We need to convert them to float32 with values in the range [0, 1] x_train = x_train / np.float32(255) y_train = y_train.astype(np.int64) train_dataset = tf.data.Dataset.from_tensor_slices( (x_train, y_train) ).shuffle(60000) return train_dataset def dataset_fn(self, global_batch_size, input_context): batch_size = input_context.get_per_replica_batch_size(global_batch_size) dataset = self.mnist_dataset() dataset = dataset.shard( input_context.num_input_pipelines, input_context.input_pipeline_id ) dataset = dataset.batch(batch_size) return dataset def build_cnn_model(self): return tf.keras.Sequential( [ tf.keras.layers.Input(shape=(28, 28)), tf.keras.layers.Reshape(target_shape=(28, 28, 1)), tf.keras.layers.Conv2D(32, 3, activation="relu"), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation="relu"), tf.keras.layers.Dense(10), ] ) def build_and_compile_cnn_model(self): model = self.build_cnn_model() model.compile( loss=tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True ), optimizer=tf.keras.optimizers.SGD(learning_rate=0.001), metrics=["accuracy"], ) return model @tf.__internal__.test.combinations.generate( tf.__internal__.test.combinations.combine( mode=["eager"], tf_api_version=2 ) ) def testSingleWorkerModelFit(self): single_worker_dataset = self.mnist_dataset().batch( PER_WORKER_BATCH_SIZE ) single_worker_model = self.build_and_compile_cnn_model() single_worker_model.fit(single_worker_dataset, epochs=NUM_EPOCHS) @tf.__internal__.test.combinations.generate( tf.__internal__.test.combinations.combine( mode=["eager"], tf_api_version=2 ) ) def testMwmsWithModelFit(self, mode): """Test multi-worker training flow demoed in go/multi-worker-with-keras. This test should be kept in sync with the code samples in go/multi-worker-with-keras. Args: mode: Runtime mode. """ def fn(model_path, checkpoint_dir): global_batch_size = PER_WORKER_BATCH_SIZE * NUM_WORKERS strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() with strategy.scope(): multi_worker_model = self.build_and_compile_cnn_model() callbacks = [ tf.keras.callbacks.ModelCheckpoint( filepath=os.path.join(self.get_temp_dir(), "checkpoint") ) ] multi_worker_dataset = strategy.distribute_datasets_from_function( lambda input_context: self.dataset_fn( global_batch_size, input_context ) ) multi_worker_model.fit( multi_worker_dataset, epochs=NUM_EPOCHS, steps_per_epoch=50, callbacks=callbacks, ) task_type, task_id = ( strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id, ) write_model_path = write_filepath(model_path, task_type, task_id) multi_worker_model.save(write_model_path) if not _is_chief(task_type, task_id): tf.io.gfile.rmtree(os.path.dirname(write_model_path)) # Make sure chief finishes saving before non-chief's assertions. 
tf.__internal__.distribute.multi_process_runner.get_barrier().wait() if not tf.io.gfile.exists(model_path): raise RuntimeError() if tf.io.gfile.exists(write_model_path) != _is_chief( task_type, task_id ): raise RuntimeError() with strategy.scope(): loaded_model = tf.keras.models.load_model(model_path) loaded_model.fit(multi_worker_dataset, epochs=1, steps_per_epoch=1) checkpoint = tf.train.Checkpoint(model=multi_worker_model) write_checkpoint_dir = write_filepath( checkpoint_dir, task_type, task_id ) checkpoint_manager = tf.train.CheckpointManager( checkpoint, directory=write_checkpoint_dir, max_to_keep=1 ) checkpoint_manager.save() if not _is_chief(task_type, task_id): tf.io.gfile.rmtree(write_checkpoint_dir) # Make sure chief finishes saving before non-chief's assertions. tf.__internal__.distribute.multi_process_runner.get_barrier().wait() if not tf.io.gfile.exists(checkpoint_dir): raise RuntimeError() if tf.io.gfile.exists(write_checkpoint_dir) != _is_chief( task_type, task_id ): raise RuntimeError() latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) checkpoint.restore(latest_checkpoint) multi_worker_model.fit( multi_worker_dataset, epochs=1, steps_per_epoch=1 ) logging.info("testMwmsWithModelFit successfully ends") model_path = os.path.join(self.get_temp_dir(), "model.tf") checkpoint_dir = os.path.join(self.get_temp_dir(), "ckpt") try: mpr_result = tf.__internal__.distribute.multi_process_runner.run( fn, tf.__internal__.distribute.multi_process_runner.create_cluster_spec( # noqa: E501 num_workers=NUM_WORKERS ), args=(model_path, checkpoint_dir), return_output=True, ) except tf.errors.UnavailableError: self.skipTest("Skipping rare disconnection among the workers.") self.assertTrue( any( [ "testMwmsWithModelFit successfully ends" in msg for msg in mpr_result.stdout ] ) ) def extract_accuracy(worker_id, input_string): match = re.match( r"\[worker\-{}\].*accuracy: (\d+\.\d+).*".format(worker_id), input_string, ) return None if match is None else float(match.group(1)) for worker_id in range(NUM_WORKERS): accu_result = tf.nest.map_structure( lambda x: extract_accuracy(worker_id, x), mpr_result.stdout, ) self.assertTrue( any(accu_result), "Every worker is supposed to have accuracy result.", ) @tf.__internal__.test.combinations.generate( tf.__internal__.test.combinations.combine( mode=["eager"], tf_api_version=2 ) ) def testMwmsWithCtl(self, mode): """Test multi-worker CTL training flow demo'ed in a to-be-added tutorial.""" def proc_func(checkpoint_dir): global_batch_size = PER_WORKER_BATCH_SIZE * NUM_WORKERS strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() try: with strategy.scope(): multi_worker_model = self.build_cnn_model() multi_worker_dataset = ( strategy.distribute_datasets_from_function( lambda input_context: self.dataset_fn( global_batch_size, input_context, ) ) ) optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001) train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name="train_accuracy" ) @tf.function def train_step(iterator): """Training step function.""" def step_fn(inputs): """Per-Replica step function.""" x, y = inputs with tf.GradientTape() as tape: predictions = multi_worker_model(x, training=True) per_batch_loss = ( tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE, )(y, predictions) ) loss = tf.nn.compute_average_loss( per_batch_loss, global_batch_size=global_batch_size, ) grads = tape.gradient( loss, multi_worker_model.trainable_variables ) optimizer.apply_gradients( 
zip(grads, multi_worker_model.trainable_variables) ) train_accuracy.update_state(y, predictions) return loss per_replica_losses = strategy.run( step_fn, args=(next(iterator),) ) return strategy.reduce( tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None, ) epoch = tf.Variable( initial_value=tf.constant(0, dtype=tf.dtypes.int64), name="epoch", ) step_in_epoch = tf.Variable( initial_value=tf.constant(0, dtype=tf.dtypes.int64), name="step_in_epoch", ) task_type, task_id = ( strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id, ) checkpoint = tf.train.Checkpoint( model=multi_worker_model, epoch=epoch, step_in_epoch=step_in_epoch, ) write_checkpoint_dir = write_filepath( checkpoint_dir, task_type, task_id ) checkpoint_manager = tf.train.CheckpointManager( checkpoint, directory=write_checkpoint_dir, max_to_keep=1 ) latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) if latest_checkpoint: checkpoint.restore(latest_checkpoint) while epoch.numpy() < NUM_EPOCHS: iterator = iter(multi_worker_dataset) total_loss = 0.0 num_batches = 0 while step_in_epoch.numpy() < NUM_STEPS_PER_EPOCH: total_loss += train_step(iterator) num_batches += 1 step_in_epoch.assign_add(1) train_loss = total_loss / num_batches logging.info( "Epoch: %d, accuracy: %f, train_loss: %f.", epoch.numpy(), train_accuracy.result(), train_loss, ) train_accuracy.reset_state() checkpoint_manager.save() if not _is_chief(task_type, task_id): tf.io.gfile.rmtree(write_checkpoint_dir) epoch.assign_add(1) step_in_epoch.assign(0) except tf.errors.UnavailableError as e: logging.info("UnavailableError occurred: %r", e) raise unittest.SkipTest("Skipping test due to UnavailableError") logging.info("testMwmsWithCtl successfully ends") checkpoint_dir = os.path.join(self.get_temp_dir(), "ckpt") mpr_result = tf.__internal__.distribute.multi_process_runner.run( proc_func, tf.__internal__.distribute.multi_process_runner.create_cluster_spec( num_workers=NUM_WORKERS ), return_output=True, args=(checkpoint_dir,), ) self.assertTrue( any( [ "testMwmsWithCtl successfully ends" in msg for msg in mpr_result.stdout ] ) ) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
tf-keras/tf_keras/integration_test/multi_worker_tutorial_test.py/0
{ "file_path": "tf-keras/tf_keras/integration_test/multi_worker_tutorial_test.py", "repo_id": "tf-keras", "token_count": 8415 }
164
build_file: "tf-keras/tf_keras/kokoro/github/ubuntu/cpu/build.sh" action { define_artifacts { regex: "**/sponge_log.log" regex: "**/sponge_log.xml" } }
tf-keras/tf_keras/kokoro/github/ubuntu/cpu/continuous.cfg/0
{ "file_path": "tf-keras/tf_keras/kokoro/github/ubuntu/cpu/continuous.cfg", "repo_id": "tf-keras", "token_count": 76 }
165
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ReLU layer.""" import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils @test_combinations.run_all_keras_modes class ReLUTest(test_combinations.TestCase): def test_relu(self): test_utils.layer_test( keras.layers.ReLU, kwargs={"max_value": 10}, input_shape=(2, 3, 4), supports_masking=True, ) x = keras.backend.ones((3, 4)) if not tf.executing_eagerly(): # Test that we use `leaky_relu` when appropriate in graph mode. self.assertIn( "LeakyRelu", keras.layers.ReLU(negative_slope=0.2)(x).name ) # Test that we use `relu` when appropriate in graph mode. self.assertIn("Relu", keras.layers.ReLU()(x).name) # Test that we use `relu6` when appropriate in graph mode. self.assertIn("Relu6", keras.layers.ReLU(max_value=6)(x).name) def test_relu_with_invalid_max_value(self): with self.assertRaisesRegex( ValueError, "max_value of a ReLU layer cannot be a negative " "value. Received: -10", ): test_utils.layer_test( keras.layers.ReLU, kwargs={"max_value": -10}, input_shape=(2, 3, 4), supports_masking=True, ) def test_relu_with_invalid_negative_slope(self): with self.assertRaisesRegex( ValueError, "negative_slope of a ReLU layer cannot be a negative " "value. Received: None", ): test_utils.layer_test( keras.layers.ReLU, kwargs={"negative_slope": None}, input_shape=(2, 3, 4), supports_masking=True, ) with self.assertRaisesRegex( ValueError, "negative_slope of a ReLU layer cannot be a negative " "value. Received: -10", ): test_utils.layer_test( keras.layers.ReLU, kwargs={"negative_slope": -10}, input_shape=(2, 3, 4), supports_masking=True, ) def test_relu_with_invalid_threshold(self): with self.assertRaisesRegex( ValueError, "threshold of a ReLU layer cannot be a negative " "value. Received: None", ): test_utils.layer_test( keras.layers.ReLU, kwargs={"threshold": None}, input_shape=(2, 3, 4), supports_masking=True, ) with self.assertRaisesRegex( ValueError, "threshold of a ReLU layer cannot be a negative " "value. Received: -10", ): test_utils.layer_test( keras.layers.ReLU, kwargs={"threshold": -10}, input_shape=(2, 3, 4), supports_masking=True, ) @test_combinations.run_with_all_model_types def test_relu_layer_as_activation(self): layer = keras.layers.Dense(1, activation=keras.layers.ReLU()) model = test_utils.get_model_from_layers([layer], input_shape=(10,)) model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly()) model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/activation/relu_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/activation/relu_test.py", "repo_id": "tf-keras", "token_count": 2023 }
166
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras convolution layers.""" # Convolution layer aliases. # Convolution layers. from tf_keras.layers.convolutional.conv1d import Conv1D from tf_keras.layers.convolutional.conv1d import Convolution1D from tf_keras.layers.convolutional.conv1d_transpose import Conv1DTranspose from tf_keras.layers.convolutional.conv1d_transpose import ( Convolution1DTranspose, ) from tf_keras.layers.convolutional.conv2d import Conv2D from tf_keras.layers.convolutional.conv2d import Convolution2D from tf_keras.layers.convolutional.conv2d_transpose import Conv2DTranspose from tf_keras.layers.convolutional.conv2d_transpose import ( Convolution2DTranspose, ) from tf_keras.layers.convolutional.conv3d import Conv3D from tf_keras.layers.convolutional.conv3d import Convolution3D from tf_keras.layers.convolutional.conv3d_transpose import Conv3DTranspose from tf_keras.layers.convolutional.conv3d_transpose import ( Convolution3DTranspose, ) from tf_keras.layers.convolutional.depthwise_conv1d import DepthwiseConv1D from tf_keras.layers.convolutional.depthwise_conv2d import DepthwiseConv2D from tf_keras.layers.convolutional.separable_conv1d import SeparableConv1D from tf_keras.layers.convolutional.separable_conv1d import ( SeparableConvolution1D, ) from tf_keras.layers.convolutional.separable_conv2d import SeparableConv2D from tf_keras.layers.convolutional.separable_conv2d import ( SeparableConvolution2D, ) # Pooling layers imported for backwards namespace compatibility. from tf_keras.layers.pooling.average_pooling1d import AveragePooling1D from tf_keras.layers.pooling.average_pooling2d import AveragePooling2D from tf_keras.layers.pooling.average_pooling3d import AveragePooling3D from tf_keras.layers.pooling.max_pooling1d import MaxPooling1D from tf_keras.layers.pooling.max_pooling2d import MaxPooling2D from tf_keras.layers.pooling.max_pooling3d import MaxPooling3D # Reshaping layers imported for backwards namespace compatibility from tf_keras.layers.reshaping.cropping1d import Cropping1D from tf_keras.layers.reshaping.cropping2d import Cropping2D from tf_keras.layers.reshaping.cropping3d import Cropping3D from tf_keras.layers.reshaping.up_sampling1d import UpSampling1D from tf_keras.layers.reshaping.up_sampling2d import UpSampling2D from tf_keras.layers.reshaping.up_sampling3d import UpSampling3D from tf_keras.layers.reshaping.zero_padding1d import ZeroPadding1D from tf_keras.layers.reshaping.zero_padding2d import ZeroPadding2D from tf_keras.layers.reshaping.zero_padding3d import ZeroPadding3D
tf-keras/tf_keras/layers/convolutional/__init__.py/0
{ "file_path": "tf-keras/tf_keras/layers/convolutional/__init__.py", "repo_id": "tf-keras", "token_count": 1068 }
167
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras depthwise separable 2D convolution.""" import tensorflow.compat.v2 as tf from tf_keras import activations from tf_keras import constraints from tf_keras import initializers from tf_keras import regularizers from tf_keras.layers.convolutional.base_separable_conv import SeparableConv from tf_keras.utils import conv_utils # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export( "keras.layers.SeparableConv2D", "keras.layers.SeparableConvolution2D" ) class SeparableConv2D(SeparableConv): """Depthwise separable 2D convolution. Separable convolutions consist of first performing a depthwise spatial convolution (which acts on each input channel separately) followed by a pointwise convolution which mixes the resulting output channels. The `depth_multiplier` argument controls how many output channels are generated per input channel in the depthwise step. Intuitively, separable convolutions can be understood as a way to factorize a convolution kernel into two smaller kernels, or as an extreme version of an Inception block. Args: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Current implementation only supports equal length strides in the row and column dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding with zeros evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. When unspecified, uses `image_data_format` value found in your Keras config file at `~/.keras/keras.json` (if exists) else 'channels_last'. Defaults to 'channels_last'. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `filters_in * depth_multiplier`. activation: Activation function to use. 
If you don't specify anything, no activation is applied (see `keras.activations`). use_bias: Boolean, whether the layer uses a bias vector. depthwise_initializer: An initializer for the depthwise convolution kernel (see `keras.initializers`). If None, then the default initializer ('glorot_uniform') will be used. pointwise_initializer: An initializer for the pointwise convolution kernel (see `keras.initializers`). If None, then the default initializer ('glorot_uniform') will be used. bias_initializer: An initializer for the bias vector. If None, the default initializer ('zeros') will be used (see `keras.initializers`). depthwise_regularizer: Regularizer function applied to the depthwise kernel matrix (see `keras.regularizers`). pointwise_regularizer: Regularizer function applied to the pointwise kernel matrix (see `keras.regularizers`). bias_regularizer: Regularizer function applied to the bias vector (see `keras.regularizers`). activity_regularizer: Regularizer function applied to the output of the layer (its "activation") (see `keras.regularizers`). depthwise_constraint: Constraint function applied to the depthwise kernel matrix (see `keras.constraints`). pointwise_constraint: Constraint function applied to the pointwise kernel matrix (see `keras.constraints`). bias_constraint: Constraint function applied to the bias vector (see `keras.constraints`). Input shape: 4D tensor with shape: `(batch_size, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(batch_size, rows, cols, channels)` if data_format='channels_last'. Output shape: 4D tensor with shape: `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. Returns: A tensor of rank 4 representing `activation(separableconv2d(inputs, kernel) + bias)`. Raises: ValueError: if `padding` is "causal". """ def __init__( self, filters, kernel_size, strides=(1, 1), padding="valid", data_format=None, dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer="glorot_uniform", pointwise_initializer="glorot_uniform", bias_initializer="zeros", depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, **kwargs ): super().__init__( rank=2, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activations.get(activation), use_bias=use_bias, depthwise_initializer=initializers.get(depthwise_initializer), pointwise_initializer=initializers.get(pointwise_initializer), bias_initializer=initializers.get(bias_initializer), depthwise_regularizer=regularizers.get(depthwise_regularizer), pointwise_regularizer=regularizers.get(pointwise_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), depthwise_constraint=constraints.get(depthwise_constraint), pointwise_constraint=constraints.get(pointwise_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs ) def call(self, inputs): # Apply the actual ops. 
if self.data_format == "channels_last": strides = (1,) + self.strides + (1,) else: strides = (1, 1) + self.strides outputs = tf.nn.separable_conv2d( inputs, self.depthwise_kernel, self.pointwise_kernel, strides=strides, padding=self.padding.upper(), dilations=self.dilation_rate, data_format=conv_utils.convert_data_format( self.data_format, ndim=4 ), ) if self.use_bias: outputs = tf.nn.bias_add( outputs, self.bias, data_format=conv_utils.convert_data_format( self.data_format, ndim=4 ), ) if self.activation is not None: return self.activation(outputs) return outputs # Alias SeparableConvolution2D = SeparableConv2D
tf-keras/tf_keras/layers/convolutional/separable_conv2d.py/0
{ "file_path": "tf-keras/tf_keras/layers/convolutional/separable_conv2d.py", "repo_id": "tf-keras", "token_count": 3450 }
168
# Description: # Contains the TF-Keras merging layers. # Placeholder: load unaliased py_library load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test") package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], default_visibility = [ "//tf_keras:friends", "//third_party/py/tensorflow_gnn:__subpackages__", "//third_party/tensorflow/python/distribute:__pkg__", "//third_party/tensorflow/python/feature_column:__pkg__", "//third_party/tensorflow/python/trackable:__pkg__", "//third_party/tensorflow/tools/pip_package:__pkg__", "//third_party/tensorflow_models/official/projects/residual_mobilenet/modeling/backbones:__pkg__", ], licenses = ["notice"], ) py_library( name = "merging", srcs = ["__init__.py"], srcs_version = "PY3", deps = [ ":add", ":average", ":concatenate", ":dot", ":maximum", ":minimum", ":multiply", ":subtract", ], ) py_library( name = "base_merge", srcs = ["base_merge.py"], srcs_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras/engine:base_layer", "//tf_keras/utils:tf_utils", ], ) py_library( name = "add", srcs = ["add.py"], srcs_version = "PY3", deps = [ ":base_merge", ], ) py_library( name = "subtract", srcs = ["subtract.py"], srcs_version = "PY3", deps = [ ":base_merge", "//tf_keras/utils:tf_utils", ], ) py_library( name = "multiply", srcs = ["multiply.py"], srcs_version = "PY3", deps = [ ":base_merge", ], ) py_library( name = "average", srcs = ["average.py"], srcs_version = "PY3", deps = [ ":base_merge", ], ) py_library( name = "maximum", srcs = ["maximum.py"], srcs_version = "PY3", deps = [ ":base_merge", "//:expect_tensorflow_installed", ], ) py_library( name = "minimum", srcs = ["minimum.py"], srcs_version = "PY3", deps = [ ":base_merge", "//:expect_tensorflow_installed", ], ) py_library( name = "concatenate", srcs = ["concatenate.py"], srcs_version = "PY3", deps = [ ":base_merge", "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras/utils:tf_utils", ], ) py_library( name = "dot", srcs = ["dot.py"], srcs_version = "PY3", deps = [ ":base_merge", "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras/engine:base_layer_utils", "//tf_keras/utils:tf_utils", ], ) tf_py_test( name = "merging_test", size = "medium", srcs = ["merging_test.py"], python_version = "PY3", shard_count = 4, deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/testing_infra:test_combinations", ], )
tf-keras/tf_keras/layers/merging/BUILD/0
{ "file_path": "tf-keras/tf_keras/layers/merging/BUILD", "repo_id": "tf-keras", "token_count": 1595 }
169
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for normalization layers under DTensor context.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized from tf_keras.dtensor import test_util from tf_keras.dtensor import utils from tf_keras.layers.normalization import batch_normalization from tf_keras.testing_infra import test_utils # isort: off # Import the MirroredStrategy that is backed by DTensor # It is not a public API yet, so we do a private symbol import for now. from tensorflow.python.distribute.experimental import ( mirrored_strategy as dtensor_mirrored_strategy, ) @test_utils.run_v2_only class BatchNormalizationDTensorTest(test_util.DTensorBaseTest): def setUp(self): super().setUp() global_ids = test_util.create_device_ids_array((2,)) local_device_ids = np.ravel(global_ids).tolist() mesh_dict = { "CPU": tf.experimental.dtensor.Mesh( ["batch"], global_ids, local_device_ids, test_util.create_device_list((2,), "CPU"), ) } self.mesh = self.configTestMesh(mesh_dict) def test_strategy_backed_by_dtensor(self): strategy = dtensor_mirrored_strategy.MirroredStrategy(mesh=self.mesh) with strategy.scope(): self.assertTrue(utils.running_with_dtensor_strategy()) self.assertFalse(utils.running_with_dtensor_strategy()) normal_mirrored_strategy = tf.distribute.MirroredStrategy( ["CPU:0", "CPU:1"] ) self.assertFalse(utils.running_with_dtensor_strategy()) with normal_mirrored_strategy.scope(): self.assertFalse(utils.running_with_dtensor_strategy()) @parameterized.product( training=[True, False], synchronized=[True, False], renorm=[True, False], use_mask=[True, False], ) def test_batch_normalization_with_dtensor_strategy( self, training, synchronized, renorm, use_mask ): num_replica = 2 local_batch_size = 4 global_batch_size = num_replica * local_batch_size feature_shape = [3, 5] global_inputs = tf.random.uniform( shape=[global_batch_size, *feature_shape], dtype=tf.float32 ) replica_inputs = tf.reshape( global_inputs, [num_replica, local_batch_size, *feature_shape] ) if use_mask: mask = tf.concat( [ tf.ones(shape=[global_batch_size, 2]), tf.zeros(shape=[global_batch_size, 1]), ], axis=-1, ) mask = tf.cast(mask, tf.bool) mask = tf.reshape(mask, [num_replica, local_batch_size, 3]) def value_fn(value_context): return { "inputs": replica_inputs[ value_context.replica_id_in_sync_group ], "mask": mask[value_context.replica_id_in_sync_group], } else: def value_fn(value_context): return replica_inputs[value_context.replica_id_in_sync_group] normal_strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"]) dtensor_strategy = dtensor_mirrored_strategy.MirroredStrategy( mesh=self.mesh ) init_kwargs = {"synchronized": synchronized, "renorm": renorm} bn_layer_0 = batch_normalization.BatchNormalization(**init_kwargs) bn_layer_1 = batch_normalization.BatchNormalization(**init_kwargs) run_kwargs = {"training": training} 
normal_strategy_result = self._run_bn_training_with_strategy( normal_strategy, value_fn, bn_layer_0, run_kwargs ) if training and not synchronized and renorm: # This is an unsupported case at the moment. with self.assertRaisesRegexp(NotImplementedError, "not supported"): self._run_bn_training_with_strategy( dtensor_strategy, value_fn, bn_layer_1, run_kwargs ) return else: dtensor_strategy_result = self._run_bn_training_with_strategy( dtensor_strategy, value_fn, bn_layer_1, run_kwargs ) self.assertAllClose( normal_strategy_result.values, dtensor_strategy_result.values ) self.assertAllClose(bn_layer_0.moving_mean, bn_layer_1.moving_mean) self.assertAllClose( bn_layer_0.moving_variance, bn_layer_1.moving_variance ) def _run_bn_training_with_strategy( self, strategy, value_fn, bn_layer, run_kwargs ): @tf.function def run_fn(inputs): if isinstance(inputs, dict): return bn_layer(**inputs, **run_kwargs) return bn_layer(inputs, **run_kwargs) distributed_inputs = ( strategy.experimental_distribute_values_from_function(value_fn) ) return strategy.run(run_fn, args=(distributed_inputs,)) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/normalization/batch_normalization_dtensor_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/normalization/batch_normalization_dtensor_test.py", "repo_id": "tf-keras", "token_count": 2603 }
170
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for average pooling layers.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class AveragePoolingTest(tf.test.TestCase, parameterized.TestCase): def test_average_pooling_1d(self): for padding in ["valid", "same"]: for stride in [1, 2]: test_utils.layer_test( keras.layers.AveragePooling1D, kwargs={"strides": stride, "padding": padding}, input_shape=(3, 5, 4), ) test_utils.layer_test( keras.layers.AveragePooling1D, kwargs={"data_format": "channels_first"}, input_shape=(3, 2, 6), ) def test_average_pooling_2d(self): test_utils.layer_test( keras.layers.AveragePooling2D, kwargs={"strides": (2, 2), "padding": "same", "pool_size": (2, 2)}, input_shape=(3, 5, 6, 4), ) test_utils.layer_test( keras.layers.AveragePooling2D, kwargs={"strides": (2, 2), "padding": "valid", "pool_size": (3, 3)}, input_shape=(3, 5, 6, 4), ) # This part of the test can only run on GPU but doesn't appear # to be properly assigned to a GPU when running in eager mode. if not tf.executing_eagerly(): # Only runs on GPU with CUDA, channels_first is not supported on # CPU. # TODO(b/62340061): Support channels_first on CPU. if tf.test.is_gpu_available(cuda_only=True): test_utils.layer_test( keras.layers.AveragePooling2D, kwargs={ "strides": (1, 1), "padding": "valid", "pool_size": (2, 2), "data_format": "channels_first", }, input_shape=(3, 4, 5, 6), ) def test_average_pooling_3d(self): pool_size = (3, 3, 3) test_utils.layer_test( keras.layers.AveragePooling3D, kwargs={"strides": 2, "padding": "valid", "pool_size": pool_size}, input_shape=(3, 11, 12, 10, 4), ) test_utils.layer_test( keras.layers.AveragePooling3D, kwargs={ "strides": 3, "padding": "valid", "data_format": "channels_first", "pool_size": pool_size, }, input_shape=(3, 4, 11, 12, 10), ) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/pooling/average_pooling_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/pooling/average_pooling_test.py", "repo_id": "tf-keras", "token_count": 1641 }
171
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Max pooling 2D layer.""" import tensorflow.compat.v2 as tf from tf_keras.layers.pooling.base_pooling2d import Pooling2D # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.layers.MaxPooling2D", "keras.layers.MaxPool2D") class MaxPooling2D(Pooling2D): """Max pooling operation for 2D spatial data. Downsamples the input along its spatial dimensions (height and width) by taking the maximum value over an input window (of size defined by `pool_size`) for each channel of the input. The window is shifted by `strides` along each dimension. The resulting output, when using the `"valid"` padding option, has a spatial shape (number of rows or columns) of: `output_shape = math.floor((input_shape - pool_size) / strides) + 1` (when `input_shape >= pool_size`) The resulting output shape when using the `"same"` padding option is: `output_shape = math.floor((input_shape - 1) / strides) + 1` For example, for `strides=(1, 1)` and `padding="valid"`: >>> x = tf.constant([[1., 2., 3.], ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = tf.reshape(x, [1, 3, 3, 1]) >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(1, 1), padding='valid') >>> max_pool_2d(x) <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy= array([[[[5.], [6.]], [[8.], [9.]]]], dtype=float32)> For example, for `strides=(2, 2)` and `padding="valid"`: >>> x = tf.constant([[1., 2., 3., 4.], ... [5., 6., 7., 8.], ... [9., 10., 11., 12.]]) >>> x = tf.reshape(x, [1, 3, 4, 1]) >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(2, 2), padding='valid') >>> max_pool_2d(x) <tf.Tensor: shape=(1, 1, 2, 1), dtype=float32, numpy= array([[[[6.], [8.]]]], dtype=float32)> Usage Example: >>> input_image = tf.constant([[[[1.], [1.], [2.], [4.]], ... [[2.], [2.], [3.], [2.]], ... [[4.], [1.], [1.], [1.]], ... [[2.], [2.], [1.], [4.]]]]) >>> output = tf.constant([[[[1], [0]], ... [[0], [1]]]]) >>> model = tf.keras.models.Sequential() >>> model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... input_shape=(4, 4, 1))) >>> model.compile('adam', 'mean_squared_error') >>> model.predict(input_image, steps=1) array([[[[2.], [4.]], [[4.], [4.]]]], dtype=float32) For example, for stride=(1, 1) and padding="same": >>> x = tf.constant([[1., 2., 3.], ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = tf.reshape(x, [1, 3, 3, 1]) >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(1, 1), padding='same') >>> max_pool_2d(x) <tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy= array([[[[5.], [6.], [6.]], [[8.], [9.], [9.]], [[8.], [9.], [9.]]]], dtype=float32)> Args: pool_size: integer or tuple of 2 integers, window size over which to take the maximum. `(2, 2)` will take the max value over a 2x2 pooling window. 
If only one integer is specified, the same window length will be used for both dimensions. strides: Integer, tuple of 2 integers, or None. Strides values. Specifies how far the pooling window moves for each pooling step. If None, it will default to `pool_size`. padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. When unspecified, uses `image_data_format` value found in your TF-Keras config file at `~/.keras/keras.json` (if exists) else 'channels_last'. Defaults to 'channels_last'. Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`. Returns: A tensor of rank 4 representing the maximum pooled values. See above for output shape. """ def __init__( self, pool_size=(2, 2), strides=None, padding="valid", data_format=None, **kwargs ): super().__init__( tf.compat.v1.nn.max_pool, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, **kwargs ) # Alias MaxPool2D = MaxPooling2D
tf-keras/tf_keras/layers/pooling/max_pooling2d.py/0
{ "file_path": "tf-keras/tf_keras/layers/pooling/max_pooling2d.py", "repo_id": "tf-keras", "token_count": 2844 }
172
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Distribution tests for keras.layers.preprocessing.discretization.""" import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.distribute import strategy_combinations from tf_keras.layers.preprocessing import discretization from tf_keras.layers.preprocessing import preprocessing_test_utils from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils @test_utils.run_v2_only @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine( strategy=strategy_combinations.all_strategies + strategy_combinations.multi_worker_mirrored_strategies + strategy_combinations.parameter_server_strategies_single_worker + strategy_combinations.parameter_server_strategies_multi_worker, mode=["eager"], ) ) class DiscretizationDistributionTest( test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest ): def test_strategy(self, strategy): input_array = np.array([[-1.5, 1.0, 3.4, 0.5], [0.0, 3.0, 1.3, 0.0]]) expected_output = [[0, 2, 3, 1], [1, 3, 2, 1]] expected_output_shape = [None, 4] tf.config.set_soft_device_placement(True) with strategy.scope(): input_data = keras.Input(shape=(4,)) layer = discretization.Discretization( bin_boundaries=[0.0, 1.0, 2.0] ) bucket_data = layer(input_data) self.assertAllEqual( expected_output_shape, bucket_data.shape.as_list() ) model = keras.Model(inputs=input_data, outputs=bucket_data) output_dataset = model.predict(input_array) self.assertAllEqual(expected_output, output_dataset) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
tf-keras/tf_keras/layers/preprocessing/discretization_distribution_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/preprocessing/discretization_distribution_test.py", "repo_id": "tf-keras", "token_count": 939 }
173
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Distribution tests for keras.layers.preprocessing.normalization.""" import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras.distribute import strategy_combinations from tf_keras.layers.preprocessing import normalization from tf_keras.layers.preprocessing import preprocessing_test_utils from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils def _get_layer_computation_test_cases(): test_cases = ( { "adapt_data": np.array( [[1.0], [2.0], [3.0], [4.0], [5.0]], dtype=np.float32 ), "axis": -1, "test_data": np.array([[1.0], [2.0], [3.0]], np.float32), "expected": np.array([[-1.414214], [-0.707107], [0]], np.float32), "testcase_name": "2d_single_element", }, { "adapt_data": np.array( [[1.0], [2.0], [3.0], [4.0], [5.0]], dtype=np.float32 ), "axis": None, "test_data": np.array([[1.0], [2.0], [3.0]], np.float32), "expected": np.array([[-1.414214], [-0.707107], [0]], np.float32), "testcase_name": "2d_single_element_none_axis", }, { "adapt_data": np.array( [[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32 ), "axis": None, "test_data": np.array([[1.0], [2.0], [3.0]], np.float32), "expected": np.array([[-1.414214], [-0.707107], [0]], np.float32), "testcase_name": "2d_single_element_none_axis_flat_data", }, { "adapt_data": np.array( [ [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], [[3.0, 4.0, 5.0], [4.0, 5.0, 6.0]], ], np.float32, ), "axis": 1, "test_data": np.array( [ [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], [[3.0, 4.0, 5.0], [4.0, 5.0, 6.0]], ], np.float32, ), "expected": np.array( [ [[-1.549193, -0.774597, 0.0], [-1.549193, -0.774597, 0.0]], [[0.0, 0.774597, 1.549193], [0.0, 0.774597, 1.549193]], ], np.float32, ), "testcase_name": "3d_internal_axis", }, { "adapt_data": np.array( [ [[1.0, 0.0, 3.0], [2.0, 3.0, 4.0]], [[3.0, -1.0, 5.0], [4.0, 5.0, 8.0]], ], np.float32, ), "axis": (1, 2), "test_data": np.array( [ [[3.0, 1.0, -1.0], [2.0, 5.0, 4.0]], [[3.0, 0.0, 5.0], [2.0, 5.0, 8.0]], ], np.float32, ), "expected": np.array( [ [[1.0, 3.0, -5.0], [-1.0, 1.0, -1.0]], [[1.0, 1.0, 1.0], [-1.0, 1.0, 1.0]], ], np.float32, ), "testcase_name": "3d_multiple_axis", }, ) crossed_test_cases = [] # Cross above test cases with use_dataset in (True, False) for use_dataset in (True, False): for case in test_cases: case = case.copy() if use_dataset: case["testcase_name"] = case["testcase_name"] + "_with_dataset" case["use_dataset"] = use_dataset crossed_test_cases.append(case) return crossed_test_cases @test_utils.run_v2_only @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.times( tf.__internal__.test.combinations.combine( strategy=strategy_combinations.all_strategies + strategy_combinations.multi_worker_mirrored_strategies + strategy_combinations.parameter_server_strategies_single_worker + strategy_combinations.parameter_server_strategies_multi_worker, 
mode=["eager"], ), _get_layer_computation_test_cases(), ) ) class NormalizationTest( test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest ): def test_layer_computation( self, strategy, adapt_data, axis, test_data, use_dataset, expected ): input_shape = tuple([None for _ in range(test_data.ndim - 1)]) if use_dataset: # TF-Keras APIs expect batched datasets adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch(2) test_data = tf.data.Dataset.from_tensor_slices(test_data).batch(2) with strategy.scope(): input_data = keras.Input(shape=input_shape) layer = normalization.Normalization(axis=axis) layer.adapt(adapt_data) output = layer(input_data) model = keras.Model(input_data, output) output_data = model.predict(test_data) self.assertAllClose(expected, output_data) if __name__ == "__main__": tf.__internal__.distribute.multi_process_runner.test_main()
tf-keras/tf_keras/layers/preprocessing/normalization_distribution_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/preprocessing/normalization_distribution_test.py", "repo_id": "tf-keras", "token_count": 3115 }
174
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras cropping layer for 2D input.""" import tensorflow.compat.v2 as tf from tf_keras.engine.base_layer import Layer from tf_keras.engine.input_spec import InputSpec from tf_keras.utils import conv_utils # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.layers.Cropping2D") class Cropping2D(Layer): """Cropping layer for 2D input (e.g. picture). It crops along spatial dimensions, i.e. height and width. Examples: >>> input_shape = (2, 28, 28, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> y = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x) >>> print(y.shape) (2, 24, 20, 3) Args: cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric cropping is applied to height and width. - If tuple of 2 ints: interpreted as two different symmetric cropping values for height and width: `(symmetric_height_crop, symmetric_width_crop)`. - If tuple of 2 tuples of 2 ints: interpreted as `((top_crop, bottom_crop), (left_crop, right_crop))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. When unspecified, uses `image_data_format` value found in your TF-Keras config file at `~/.keras/keras.json` (if exists) else 'channels_last'. Defaults to 'channels_last'. Input shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, cropped_rows, cropped_cols, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, cropped_rows, cropped_cols)` """ def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs): super().__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(cropping, int): self.cropping = ((cropping, cropping), (cropping, cropping)) elif hasattr(cropping, "__len__"): if len(cropping) != 2: raise ValueError( "`cropping` should have two elements. " f"Received: {cropping}." ) height_cropping = conv_utils.normalize_tuple( cropping[0], 2, "1st entry of cropping", allow_zero=True ) width_cropping = conv_utils.normalize_tuple( cropping[1], 2, "2nd entry of cropping", allow_zero=True ) self.cropping = (height_cropping, width_cropping) else: raise ValueError( "`cropping` should be either an int, " "a tuple of 2 ints " "(symmetric_height_crop, symmetric_width_crop), " "or a tuple of 2 tuples of 2 ints " "((top_crop, bottom_crop), (left_crop, right_crop)). " f"Received: {cropping}." 
) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): input_shape = tf.TensorShape(input_shape).as_list() if self.data_format == "channels_first": return tf.TensorShape( [ input_shape[0], input_shape[1], input_shape[2] - self.cropping[0][0] - self.cropping[0][1] if input_shape[2] else None, input_shape[3] - self.cropping[1][0] - self.cropping[1][1] if input_shape[3] else None, ] ) else: return tf.TensorShape( [ input_shape[0], input_shape[1] - self.cropping[0][0] - self.cropping[0][1] if input_shape[1] else None, input_shape[2] - self.cropping[1][0] - self.cropping[1][1] if input_shape[2] else None, input_shape[3], ] ) def call(self, inputs): if self.data_format == "channels_first": if ( inputs.shape[2] is not None and sum(self.cropping[0]) >= inputs.shape[2] ) or ( inputs.shape[3] is not None and sum(self.cropping[1]) >= inputs.shape[3] ): raise ValueError( "Argument `cropping` must be " "greater than the input shape. Received: inputs.shape=" f"{inputs.shape}, and cropping={self.cropping}" ) if self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[ :, :, self.cropping[0][0] :, self.cropping[1][0] : ] elif self.cropping[0][1] == 0: return inputs[ :, :, self.cropping[0][0] :, self.cropping[1][0] : -self.cropping[1][1], ] elif self.cropping[1][1] == 0: return inputs[ :, :, self.cropping[0][0] : -self.cropping[0][1], self.cropping[1][0] :, ] return inputs[ :, :, self.cropping[0][0] : -self.cropping[0][1], self.cropping[1][0] : -self.cropping[1][1], ] else: if ( inputs.shape[1] is not None and sum(self.cropping[0]) >= inputs.shape[1] ) or ( inputs.shape[2] is not None and sum(self.cropping[1]) >= inputs.shape[2] ): raise ValueError( "Argument `cropping` must be " "greater than the input shape. Received: inputs.shape=" f"{inputs.shape}, and cropping={self.cropping}" ) if self.cropping[0][1] == self.cropping[1][1] == 0: return inputs[ :, self.cropping[0][0] :, self.cropping[1][0] :, : ] elif self.cropping[0][1] == 0: return inputs[ :, self.cropping[0][0] :, self.cropping[1][0] : -self.cropping[1][1], :, ] elif self.cropping[1][1] == 0: return inputs[ :, self.cropping[0][0] : -self.cropping[0][1], self.cropping[1][0] :, :, ] return inputs[ :, self.cropping[0][0] : -self.cropping[0][1], self.cropping[1][0] : -self.cropping[1][1], :, ] def get_config(self): config = {"cropping": self.cropping, "data_format": self.data_format} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
tf-keras/tf_keras/layers/reshaping/cropping2d.py/0
{ "file_path": "tf-keras/tf_keras/layers/reshaping/cropping2d.py", "repo_id": "tf-keras", "token_count": 4414 }
175
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras zero-padding layer for 2D input.""" import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras.engine.base_layer import Layer from tf_keras.engine.input_spec import InputSpec from tf_keras.utils import conv_utils # isort: off from tensorflow.python.util.tf_export import keras_export @keras_export("keras.layers.ZeroPadding2D") class ZeroPadding2D(Layer): """Zero-padding layer for 2D input (e.g. picture). This layer can add rows and columns of zeros at the top, bottom, left and right side of an image tensor. Examples: >>> input_shape = (1, 1, 2, 2) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[[0 1] [2 3]]]] >>> y = tf.keras.layers.ZeroPadding2D(padding=1)(x) >>> print(y) tf.Tensor( [[[[0 0] [0 0] [0 0] [0 0]] [[0 0] [0 1] [2 3] [0 0]] [[0 0] [0 0] [0 0] [0 0]]]], shape=(1, 3, 4, 2), dtype=int64) Args: padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric padding is applied to height and width. - If tuple of 2 ints: interpreted as two different symmetric padding values for height and width: `(symmetric_height_pad, symmetric_width_pad)`. - If tuple of 2 tuples of 2 ints: interpreted as `((top_pad, bottom_pad), (left_pad, right_pad))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. When unspecified, uses `image_data_format` value found in your TF-Keras config file at `~/.keras/keras.json` (if exists) else 'channels_last'. Defaults to 'channels_last'. Input shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, padded_rows, padded_cols, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, padded_rows, padded_cols)` """ def __init__(self, padding=(1, 1), data_format=None, **kwargs): super().__init__(**kwargs) self.data_format = conv_utils.normalize_data_format(data_format) if isinstance(padding, int): self.padding = ((padding, padding), (padding, padding)) elif hasattr(padding, "__len__"): if len(padding) != 2: raise ValueError( f"`padding` should have two elements. Received: {padding}." 
) height_padding = conv_utils.normalize_tuple( padding[0], 2, "1st entry of padding", allow_zero=True ) width_padding = conv_utils.normalize_tuple( padding[1], 2, "2nd entry of padding", allow_zero=True ) self.padding = (height_padding, width_padding) else: raise ValueError( "`padding` should be either an int, " "a tuple of 2 ints " "(symmetric_height_pad, symmetric_width_pad), " "or a tuple of 2 tuples of 2 ints " "((top_pad, bottom_pad), (left_pad, right_pad)). " f"Received: {padding}." ) self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): input_shape = tf.TensorShape(input_shape).as_list() if self.data_format == "channels_first": if input_shape[2] is not None: rows = input_shape[2] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[3] is not None: cols = input_shape[3] + self.padding[1][0] + self.padding[1][1] else: cols = None return tf.TensorShape([input_shape[0], input_shape[1], rows, cols]) elif self.data_format == "channels_last": if input_shape[1] is not None: rows = input_shape[1] + self.padding[0][0] + self.padding[0][1] else: rows = None if input_shape[2] is not None: cols = input_shape[2] + self.padding[1][0] + self.padding[1][1] else: cols = None return tf.TensorShape([input_shape[0], rows, cols, input_shape[3]]) def call(self, inputs): return backend.spatial_2d_padding( inputs, padding=self.padding, data_format=self.data_format ) def get_config(self): config = {"padding": self.padding, "data_format": self.data_format} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
tf-keras/tf_keras/layers/reshaping/zero_padding2d.py/0
{ "file_path": "tf-keras/tf_keras/layers/reshaping/zero_padding2d.py", "repo_id": "tf-keras", "token_count": 2662 }
176
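`ZeroPadding2D` above is the inverse bookkeeping of the cropping layer: each spatial dimension grows by the sum of its two padding values, and `backend.spatial_2d_padding` ultimately delegates to a pad op. A rough NumPy equivalent of the channels_last case (a sketch with made-up values, not the backend implementation):

```python
import numpy as np

padding = ((1, 1), (2, 3))   # ((top, bottom), (left, right))
x = np.ones((1, 4, 5, 2))    # (batch, rows, cols, channels)

# Pad only the two spatial axes; batch and channel axes are left alone.
pad_width = ((0, 0), padding[0], padding[1], (0, 0))
y = np.pad(x, pad_width, mode="constant", constant_values=0)

# rows: 4 + 1 + 1 = 6, cols: 5 + 2 + 3 = 10
assert y.shape == (1, 6, 10, 2)
```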
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for RNN cell wrappers.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized from tf_keras import layers from tf_keras.layers.rnn import cell_wrappers from tf_keras.layers.rnn import legacy_cells from tf_keras.legacy_tf_layers import base as legacy_base_layer from tf_keras.testing_infra import test_combinations from tf_keras.utils import generic_utils @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class RNNCellWrapperTest(tf.test.TestCase, parameterized.TestCase): def testResidualWrapper(self): wrapper_type = cell_wrappers.ResidualWrapper x = tf.convert_to_tensor(np.array([[1.0, 1.0, 1.0]]), dtype="float32") m = tf.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]), dtype="float32") base_cell = legacy_cells.GRUCell( 3, kernel_initializer=tf.compat.v1.constant_initializer(0.5), bias_initializer=tf.compat.v1.constant_initializer(0.5), ) g, m_new = base_cell(x, m) wrapper_object = wrapper_type(base_cell) self.assertDictEqual( {"cell": base_cell}, wrapper_object._trackable_children() ) wrapper_object.get_config() # Should not throw an error g_res, m_new_res = wrapper_object(x, m) self.evaluate([tf.compat.v1.global_variables_initializer()]) res = self.evaluate([g, g_res, m_new, m_new_res]) # Residual connections self.assertAllClose(res[1], res[0] + [1.0, 1.0, 1.0]) # States are left untouched self.assertAllClose(res[2], res[3]) def testResidualWrapperWithSlice(self): wrapper_type = cell_wrappers.ResidualWrapper x = tf.convert_to_tensor( np.array([[1.0, 1.0, 1.0, 1.0, 1.0]]), dtype="float32" ) m = tf.convert_to_tensor(np.array([[0.1, 0.1, 0.1]]), dtype="float32") base_cell = legacy_cells.GRUCell( 3, kernel_initializer=tf.compat.v1.constant_initializer(0.5), bias_initializer=tf.compat.v1.constant_initializer(0.5), ) g, m_new = base_cell(x, m) def residual_with_slice_fn(inp, out): inp_sliced = tf.slice(inp, [0, 0], [-1, 3]) return inp_sliced + out g_res, m_new_res = wrapper_type(base_cell, residual_with_slice_fn)(x, m) self.evaluate([tf.compat.v1.global_variables_initializer()]) res_g, res_g_res, res_m_new, res_m_new_res = self.evaluate( [g, g_res, m_new, m_new_res] ) # Residual connections self.assertAllClose(res_g_res, res_g + [1.0, 1.0, 1.0]) # States are left untouched self.assertAllClose(res_m_new, res_m_new_res) def testDeviceWrapper(self): wrapper_type = cell_wrappers.DeviceWrapper x = tf.zeros([1, 3]) m = tf.zeros([1, 3]) cell = legacy_cells.GRUCell(3) wrapped_cell = wrapper_type(cell, "/cpu:0") self.assertDictEqual({"cell": cell}, wrapped_cell._trackable_children()) wrapped_cell.get_config() # Should not throw an error outputs, _ = wrapped_cell(x, m) self.assertIn("cpu:0", outputs.device.lower()) @parameterized.parameters( [cell_wrappers.DropoutWrapper, cell_wrappers.ResidualWrapper] ) def testWrapperKerasStyle(self, wrapper): """Tests if 
wrapper cell is instantiated in keras style scope.""" wrapped_cell = wrapper(legacy_cells.BasicRNNCell(1)) self.assertIsNone(getattr(wrapped_cell, "_keras_style", None)) @parameterized.parameters( [cell_wrappers.DropoutWrapper, cell_wrappers.ResidualWrapper] ) def testWrapperWeights(self, wrapper): """Tests that wrapper weights contain wrapped cells weights.""" base_cell = layers.SimpleRNNCell(1, name="basic_rnn_cell") rnn_cell = wrapper(base_cell) rnn_layer = layers.RNN(rnn_cell) inputs = tf.convert_to_tensor([[[1]]], dtype=tf.float32) rnn_layer(inputs) wrapper_name = generic_utils.to_snake_case(wrapper.__name__) expected_weights = [ "rnn/" + wrapper_name + "/" + var for var in ("kernel:0", "recurrent_kernel:0", "bias:0") ] self.assertLen(rnn_cell.weights, 3) self.assertCountEqual( [v.name for v in rnn_cell.weights], expected_weights ) self.assertCountEqual( [v.name for v in rnn_cell.trainable_variables], expected_weights ) self.assertCountEqual( [v.name for v in rnn_cell.non_trainable_variables], [] ) self.assertCountEqual( [v.name for v in rnn_cell.cell.weights], expected_weights ) @parameterized.parameters( [cell_wrappers.DropoutWrapper, cell_wrappers.ResidualWrapper] ) def testWrapperV2Caller(self, wrapper): """Tests that wrapper V2 is using the LayerRNNCell's caller.""" with legacy_base_layer.keras_style_scope(): base_cell = legacy_cells.MultiRNNCell( [legacy_cells.BasicRNNCell(1) for _ in range(2)] ) rnn_cell = wrapper(base_cell) inputs = tf.convert_to_tensor([[1]], dtype=tf.float32) state = tf.convert_to_tensor([[1]], dtype=tf.float32) _ = rnn_cell(inputs, [state, state]) weights = base_cell._cells[0].weights self.assertLen(weights, expected_len=2) self.assertTrue(all("_wrapper" in v.name for v in weights)) @parameterized.parameters( [cell_wrappers.DropoutWrapper, cell_wrappers.ResidualWrapper] ) def testWrapperV2Build(self, wrapper): cell = legacy_cells.LSTMCell(10) wrapper = wrapper(cell) wrapper.build((1,)) self.assertTrue(cell.built) def testDeviceWrapperSerialization(self): wrapper_cls = cell_wrappers.DeviceWrapper cell = layers.LSTMCell(10) wrapper = wrapper_cls(cell, "/cpu:0") config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertDictEqual(config, reconstructed_wrapper.get_config()) self.assertIsInstance(reconstructed_wrapper, wrapper_cls) def testResidualWrapperSerialization(self): wrapper_cls = cell_wrappers.ResidualWrapper cell = layers.LSTMCell(10) wrapper = wrapper_cls(cell) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertDictEqual(config, reconstructed_wrapper.get_config()) self.assertIsInstance(reconstructed_wrapper, wrapper_cls) wrapper = wrapper_cls(cell, residual_fn=lambda i, o: i + i + o) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) # Assert the reconstructed function will perform the math correctly. self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 4) def residual_fn(inputs, outputs): return inputs * 3 + outputs wrapper = wrapper_cls(cell, residual_fn=residual_fn) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) # Assert the reconstructed function will perform the math correctly. 
self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 5) def testDropoutWrapperSerialization(self): wrapper_cls = cell_wrappers.DropoutWrapper cell = layers.GRUCell(10) wrapper = wrapper_cls(cell) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertDictEqual(config, reconstructed_wrapper.get_config()) self.assertIsInstance(reconstructed_wrapper, wrapper_cls) wrapper = wrapper_cls(cell, dropout_state_filter_visitor=lambda s: True) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertTrue(reconstructed_wrapper._dropout_state_filter(None)) def dropout_state_filter_visitor(unused_state): return False wrapper = wrapper_cls( cell, dropout_state_filter_visitor=dropout_state_filter_visitor ) config = wrapper.get_config() reconstructed_wrapper = wrapper_cls.from_config(config) self.assertFalse(reconstructed_wrapper._dropout_state_filter(None)) def testDropoutWrapperWithKerasLSTMCell(self): wrapper_cls = cell_wrappers.DropoutWrapper cell = layers.LSTMCell(10) with self.assertRaisesRegex(ValueError, "does not work with "): wrapper_cls(cell) cell = layers.LSTMCellV2(10) with self.assertRaisesRegex(ValueError, "does not work with "): wrapper_cls(cell) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/rnn/cell_wrappers_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/rnn/cell_wrappers_test.py", "repo_id": "tf-keras", "token_count": 4097 }
177
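The residual-wrapper tests above pin down the wrapper's contract: the wrapped output equals the base cell's output plus (a slice of) the input, while the returned state passes through unchanged. A stripped-down sketch of that contract with a toy cell (plain NumPy stand-ins, not the TF-Keras wrapper classes):

```python
import numpy as np

def toy_cell(x, state):
    # Stand-in for a recurrent cell: returns (output, new_state).
    out = np.tanh(x @ np.full((3, 3), 0.5) + state)
    return out, out

def residual_wrap(cell, residual_fn=lambda inp, out: inp + out):
    def wrapped(x, state):
        out, new_state = cell(x, state)
        return residual_fn(x, out), new_state  # residual on the output only
    return wrapped

x = np.array([[1.0, 1.0, 1.0]])
m = np.array([[0.1, 0.1, 0.1]])

g, m_new = toy_cell(x, m)
g_res, m_new_res = residual_wrap(toy_cell)(x, m)

assert np.allclose(g_res, g + x)      # residual connection on the output
assert np.allclose(m_new, m_new_res)  # state is left untouched
```

The serialization tests in the same file exercise the other half of the contract: `get_config()` / `from_config()` must round-trip the wrapper, including any user-supplied `residual_fn` or dropout-state filter.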
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for RNN cell wrappers v1 implementation.""" import tensorflow.compat.v2 as tf from absl.testing import parameterized from tf_keras.layers.rnn import legacy_cell_wrappers from tf_keras.layers.rnn import legacy_cells from tf_keras.testing_infra import test_combinations @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class RNNCellWrapperV1Test(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters( [ legacy_cell_wrappers.DropoutWrapper, legacy_cell_wrappers.ResidualWrapper, ] ) def testWrapperKerasStyle(self, wrapper): """Tests if wrapper cell is instantiated in keras style scope.""" wrapped_cell = wrapper(legacy_cells.BasicRNNCell(1)) self.assertFalse(wrapped_cell._keras_style) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/layers/rnn/legacy_cell_wrappers_test.py/0
{ "file_path": "tf-keras/tf_keras/layers/rnn/legacy_cell_wrappers_test.py", "repo_id": "tf-keras", "token_count": 500 }
178
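This short v1 test is the counterpart of `testWrapperKerasStyle` in the v2 suite above: the v1 test asserts that `_keras_style` is present and falsy, while the v2 test only asserts that looking the attribute up falls back to `None`. A tiny plain-Python illustration of why both assertions can hold at once (stand-in classes, not the real wrappers):

```python
class LegacyStyleWrapper:   # stand-in for legacy_cell_wrappers.*
    _keras_style = False

class KerasStyleWrapper:    # stand-in for cell_wrappers.*
    pass

# v1 assertion: the flag exists and is falsy.
assert LegacyStyleWrapper()._keras_style is False
# v2 assertion: the attribute is absent, so getattr falls back to None.
assert getattr(KerasStyleWrapper(), "_keras_style", None) is None
```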
# Description: # Contains the legacy TF layers (internal TensorFlow version). # Placeholder: load unaliased py_library # buildifier: disable=same-origin-load load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test") load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test") package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], default_visibility = [ "//learning/brain/contrib:__subpackages__", "//tf_keras:friends", "//third_party/tensorflow:__subpackages__", ], licenses = ["notice"], ) py_library( name = "layers", srcs = [ "__init__.py", ], deps = [ ":convolutional", ":core", ":layers_base", ":normalization", ":pooling", ], ) py_library( name = "layers_base", srcs = [ "__init__.py", "base.py", "migration_utils.py", "variable_scope_shim.py", ], srcs_version = "PY3", visibility = [ "//learning/brain/contrib:__subpackages__", "//tf_keras:friends", "//third_party/tensorflow:__subpackages__", "//third_party/tf_seq2seq:__pkg__", ], deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras/engine:base_layer", "//tf_keras/mixed_precision:policy", ], ) py_library( name = "convolutional", srcs = ["convolutional.py"], srcs_version = "PY3", deps = [ ":layers_base", "//:expect_tensorflow_installed", "//tf_keras/layers", ], ) py_library( name = "core", srcs = ["core.py"], srcs_version = "PY3", deps = [ ":layers_base", "//:expect_tensorflow_installed", "//tf_keras/layers", ], ) py_library( name = "normalization", srcs = ["normalization.py"], srcs_version = "PY3", deps = [ ":layers_base", "//:expect_tensorflow_installed", "//tf_keras/layers/normalization", ], ) py_library( name = "pooling", srcs = ["pooling.py"], srcs_version = "PY3", deps = [ ":layers_base", "//:expect_tensorflow_installed", "//tf_keras/layers", ], ) tf_py_test( name = "base_test", size = "small", srcs = ["base_test.py"], main = "base_test.py", python_version = "PY3", deps = [ ":core", ":layers_base", "//:expect_tensorflow_installed", "//tf_keras/engine:base_layer", "//tf_keras/engine:input_spec", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "core_test", size = "small", srcs = ["core_test.py"], main = "core_test.py", python_version = "PY3", deps = [ ":core", "//:expect_tensorflow_installed", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "convolutional_test", size = "small", srcs = ["convolutional_test.py"], main = "convolutional_test.py", python_version = "PY3", deps = [ ":convolutional", "//:expect_tensorflow_installed", ], ) tf_py_test( name = "pooling_test", size = "small", srcs = ["pooling_test.py"], main = "pooling_test.py", python_version = "PY3", tags = ["no_rocm"], deps = [ ":pooling", "//:expect_tensorflow_installed", ], ) cuda_py_test( name = "normalization_test", size = "medium", srcs = ["normalization_test.py"], main = "normalization_test.py", python_version = "PY3", shard_count = 10, deps = [ ":convolutional", ":layers_base", ":normalization", "//:expect_tensorflow_installed", ], ) tf_py_test( name = "variable_scope_shim_test", size = "small", srcs = ["variable_scope_shim_test.py"], main = "variable_scope_shim_test.py", python_version = "PY3", tags = ["no_windows"], deps = [ ":core", ":layers_base", "//:expect_tensorflow_installed", "//tf_keras/engine:base_layer", "//tf_keras/engine:input_spec", "//tf_keras/testing_infra:test_combinations", ], ) tf_py_test( name = "migration_utils_test", size = "small", srcs = ["migration_utils_test.py"], main = "migration_utils_test.py", python_version = "PY3", deps = [ ":layers", 
"//:expect_tensorflow_installed", "//tf_keras/engine:base_layer", "//tf_keras/engine:input_spec", "//tf_keras/testing_infra:test_combinations", ], )
tf-keras/tf_keras/legacy_tf_layers/BUILD/0
{ "file_path": "tf-keras/tf_keras/legacy_tf_layers/BUILD", "repo_id": "tf-keras", "token_count": 2266 }
179
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Built-in loss functions.""" import abc import functools import warnings import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras.saving import saving_lib from tf_keras.saving.legacy import serialization as legacy_serialization from tf_keras.saving.serialization_lib import deserialize_keras_object from tf_keras.saving.serialization_lib import serialize_keras_object from tf_keras.utils import losses_utils from tf_keras.utils import tf_utils # isort: off from tensorflow.python.ops.ragged import ragged_map_ops from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.util import dispatch from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls @keras_export("keras.losses.Loss") class Loss: """Loss base class. To be implemented by subclasses: * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`. Example subclass implementation: ```python class MeanSquaredError(Loss): def call(self, y_true, y_pred): return tf.reduce_mean(tf.math.square(y_pred - y_true), axis=-1) ``` When using a Loss under a `tf.distribute.Strategy`, except passing it to `Model.compile()` for use by `Model.fit()`, please use reduction types 'SUM' or 'NONE', and reduce losses explicitly. Using 'AUTO' or 'SUM_OVER_BATCH_SIZE' will raise an error when calling the Loss object from a custom training loop or from user-defined code in `Layer.call()`. Please see this custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training) for more details on this. """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None): """Initializes `Loss` class. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. """ losses_utils.ReductionV2.validate(reduction) self.reduction = reduction self.name = name # SUM_OVER_BATCH is only allowed in losses managed by `fit` or # CannedEstimators. self._allow_sum_over_batch_size = False self._set_name_scope() def _set_name_scope(self): """Creates a valid `name_scope` name.""" if self.name is None: self._name_scope = self.__class__.__name__.strip("_") elif self.name == "<lambda>": self._name_scope = "lambda" else: # E.g. '_my_loss' => 'my_loss' self._name_scope = self.name.strip("_") def __call__(self, y_true, y_pred, sample_weight=None): """Invokes the `Loss` instance. 
Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except sparse loss functions such as sparse categorical crossentropy where shape = `[batch_size, d0, .. dN-1]` y_pred: The predicted values. shape = `[batch_size, d0, .. dN]` sample_weight: Optional `sample_weight` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `sample_weight` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `sample_weight` vector. If the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted to this shape), then each loss element of `y_pred` is scaled by the corresponding value of `sample_weight`. (Note on`dN-1`: all loss functions reduce by 1 dimension, usually axis=-1.) Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1` because all loss functions reduce by 1 dimension, usually axis=-1.) Raises: ValueError: If the shape of `sample_weight` is invalid. """ # If we are wrapping a lambda function strip '<>' from the name as it is # not accepted in scope name. graph_ctx = tf_utils.graph_context_for_symbolic_tensors( y_true, y_pred, sample_weight ) with backend.name_scope(self._name_scope), graph_ctx: if tf.executing_eagerly(): call_fn = self.call else: call_fn = tf.__internal__.autograph.tf_convert( self.call, tf.__internal__.autograph.control_status_ctx() ) losses = call_fn(y_true, y_pred) in_mask = losses_utils.get_mask(y_pred) out_mask = losses_utils.get_mask(losses) if in_mask is not None and out_mask is not None: mask = in_mask & out_mask elif in_mask is not None: mask = in_mask elif out_mask is not None: mask = out_mask else: mask = None reduction = self._get_reduction() sample_weight = losses_utils.apply_valid_mask( losses, sample_weight, mask, reduction ) return losses_utils.compute_weighted_loss( losses, sample_weight, reduction=reduction ) @classmethod def from_config(cls, config): """Instantiates a `Loss` from its config (output of `get_config()`). Args: config: Output of `get_config()`. Returns: A `Loss` instance. """ return cls(**config) def get_config(self): """Returns the config dictionary for a `Loss` instance.""" return {"reduction": self.reduction, "name": self.name} @abc.abstractmethod @doc_controls.for_subclass_implementers def call(self, y_true, y_pred): """Invokes the `Loss` instance. Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except sparse loss functions such as sparse categorical crossentropy where shape = `[batch_size, d0, .. dN-1]` y_pred: The predicted values. shape = `[batch_size, d0, .. dN]` Returns: Loss values with the shape `[batch_size, d0, .. dN-1]`. 
""" raise NotImplementedError("Must be implemented in subclasses.") def _get_reduction(self): """Handles `AUTO` reduction cases and returns the reduction value.""" if ( not self._allow_sum_over_batch_size and tf.distribute.has_strategy() and ( self.reduction == losses_utils.ReductionV2.AUTO or self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE ) ): raise ValueError( "Please use `tf.keras.losses.Reduction.SUM` or " "`tf.keras.losses.Reduction.NONE` for loss reduction when " "losses are used with `tf.distribute.Strategy`, " "except for specifying losses in `Model.compile()` " "for use by the built-in training looop `Model.fit()`.\n" "Please see https://www.tensorflow.org/tutorials" "/distribute/custom_training for more details." ) if self.reduction == losses_utils.ReductionV2.AUTO: return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE return self.reduction @keras_export("keras.__internal__.losses.LossFunctionWrapper", v1=[]) class LossFunctionWrapper(Loss): """Wraps a loss function in the `Loss` class.""" def __init__( self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs ): """Initializes `LossFunctionWrapper` class. Args: fn: The loss function to wrap, with signature `fn(y_true, y_pred, **kwargs)`. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. **kwargs: The keyword arguments that are passed on to `fn`. """ super().__init__(reduction=reduction, name=name) self.fn = fn self._fn_kwargs = kwargs def call(self, y_true, y_pred): """Invokes the `LossFunctionWrapper` instance. Args: y_true: Ground truth values. y_pred: The predicted values. Returns: Loss values per sample. """ if tf.is_tensor(y_pred) and tf.is_tensor(y_true): y_pred, y_true = losses_utils.squeeze_or_expand_dimensions( y_pred, y_true ) ag_fn = tf.__internal__.autograph.tf_convert( self.fn, tf.__internal__.autograph.control_status_ctx() ) return ag_fn(y_true, y_pred, **self._fn_kwargs) def get_config(self): config = {} for k, v in self._fn_kwargs.items(): config[k] = ( backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v ) if saving_lib.saving_v3_enabled(): from tf_keras.utils import get_registered_name config["fn"] = get_registered_name(self.fn) base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): """Instantiates a `Loss` from its config (output of `get_config()`). Args: config: Output of `get_config()`. Returns: A `keras.losses.Loss` instance. """ if saving_lib.saving_v3_enabled(): fn_name = config.pop("fn", None) if fn_name and cls is LossFunctionWrapper: config["fn"] = get(fn_name) return cls(**config) @keras_export("keras.losses.MeanSquaredError") class MeanSquaredError(LossFunctionWrapper): """Computes the mean of squares of errors between labels and predictions. `loss = mean(square(y_true - y_pred))` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [1., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. 
>>> mse = tf.keras.losses.MeanSquaredError() >>> mse(y_true, y_pred).numpy() 0.5 >>> # Calling with 'sample_weight'. >>> mse(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy() 0.25 >>> # Using 'sum' reduction type. >>> mse = tf.keras.losses.MeanSquaredError( ... reduction=tf.keras.losses.Reduction.SUM) >>> mse(y_true, y_pred).numpy() 1.0 >>> # Using 'none' reduction type. >>> mse = tf.keras.losses.MeanSquaredError( ... reduction=tf.keras.losses.Reduction.NONE) >>> mse(y_true, y_pred).numpy() array([0.5, 0.5], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError()) ``` """ def __init__( self, reduction=losses_utils.ReductionV2.AUTO, name="mean_squared_error" ): """Initializes `MeanSquaredError` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'mean_squared_error'. """ super().__init__(mean_squared_error, name=name, reduction=reduction) @keras_export("keras.losses.MeanAbsoluteError") class MeanAbsoluteError(LossFunctionWrapper): """Computes the mean of absolute difference between labels and predictions. `loss = mean(abs(y_true - y_pred))` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [1., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> mae = tf.keras.losses.MeanAbsoluteError() >>> mae(y_true, y_pred).numpy() 0.5 >>> # Calling with 'sample_weight'. >>> mae(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy() 0.25 >>> # Using 'sum' reduction type. >>> mae = tf.keras.losses.MeanAbsoluteError( ... reduction=tf.keras.losses.Reduction.SUM) >>> mae(y_true, y_pred).numpy() 1.0 >>> # Using 'none' reduction type. >>> mae = tf.keras.losses.MeanAbsoluteError( ... reduction=tf.keras.losses.Reduction.NONE) >>> mae(y_true, y_pred).numpy() array([0.5, 0.5], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsoluteError()) ``` """ def __init__( self, reduction=losses_utils.ReductionV2.AUTO, name="mean_absolute_error", ): """Initializes `MeanAbsoluteError` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'mean_absolute_error'. """ super().__init__(mean_absolute_error, name=name, reduction=reduction) @keras_export("keras.losses.MeanAbsolutePercentageError") class MeanAbsolutePercentageError(LossFunctionWrapper): """Computes the mean absolute percentage error between `y_true` & `y_pred`. 
Formula: `loss = 100 * abs((y_true - y_pred) / y_true)` Note that to avoid dividing by zero, a small epsilon value is added to the denominator. Standalone usage: >>> y_true = [[2., 1.], [2., 3.]] >>> y_pred = [[1., 1.], [1., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> mape = tf.keras.losses.MeanAbsolutePercentageError() >>> mape(y_true, y_pred).numpy() 50. >>> # Calling with 'sample_weight'. >>> mape(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy() 20. >>> # Using 'sum' reduction type. >>> mape = tf.keras.losses.MeanAbsolutePercentageError( ... reduction=tf.keras.losses.Reduction.SUM) >>> mape(y_true, y_pred).numpy() 100. >>> # Using 'none' reduction type. >>> mape = tf.keras.losses.MeanAbsolutePercentageError( ... reduction=tf.keras.losses.Reduction.NONE) >>> mape(y_true, y_pred).numpy() array([25., 75.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsolutePercentageError()) ``` """ def __init__( self, reduction=losses_utils.ReductionV2.AUTO, name="mean_absolute_percentage_error", ): """Initializes `MeanAbsolutePercentageError` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'mean_absolute_percentage_error'. """ super().__init__( mean_absolute_percentage_error, name=name, reduction=reduction ) @keras_export("keras.losses.MeanSquaredLogarithmicError") class MeanSquaredLogarithmicError(LossFunctionWrapper): """Computes the mean squared logarithmic error between `y_true` & `y_pred`. `loss = square(log(y_true + 1.) - log(y_pred + 1.))` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [1., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> msle = tf.keras.losses.MeanSquaredLogarithmicError() >>> msle(y_true, y_pred).numpy() 0.240 >>> # Calling with 'sample_weight'. >>> msle(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy() 0.120 >>> # Using 'sum' reduction type. >>> msle = tf.keras.losses.MeanSquaredLogarithmicError( ... reduction=tf.keras.losses.Reduction.SUM) >>> msle(y_true, y_pred).numpy() 0.480 >>> # Using 'none' reduction type. >>> msle = tf.keras.losses.MeanSquaredLogarithmicError( ... reduction=tf.keras.losses.Reduction.NONE) >>> msle(y_true, y_pred).numpy() array([0.240, 0.240], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError()) ``` """ def __init__( self, reduction=losses_utils.ReductionV2.AUTO, name="mean_squared_logarithmic_error", ): """Initializes `MeanSquaredLogarithmicError` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. 
Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'mean_squared_logarithmic_error'. """ super().__init__( mean_squared_logarithmic_error, name=name, reduction=reduction ) @keras_export("keras.losses.BinaryCrossentropy") class BinaryCrossentropy(LossFunctionWrapper): """Computes the cross-entropy loss between true labels and predicted labels. Use this cross-entropy loss for binary (0 or 1) classification applications. The loss function requires the following inputs: - `y_true` (true label): This is either 0 or 1. - `y_pred` (predicted value): This is the model's prediction, i.e, a single floating-point value which either represents a [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf] when `from_logits=True`) or a probability (i.e, value in [0., 1.] when `from_logits=False`). **Recommended Usage:** (set `from_logits=True`) With `tf.keras` API: ```python model.compile( loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), .... ) ``` As a standalone function: >>> # Example 1: (batch_size = 1, number of samples = 4) >>> y_true = [0, 1, 0, 0] >>> y_pred = [-18.6, 0.51, 2.94, -12.8] >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) >>> bce(y_true, y_pred).numpy() 0.865 >>> # Example 2: (batch_size = 2, number of samples = 4) >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]] >>> # Using default 'auto'/'sum_over_batch_size' reduction type. >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) >>> bce(y_true, y_pred).numpy() 0.865 >>> # Using 'sample_weight' attribute >>> bce(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.243 >>> # Using 'sum' reduction` type. >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True, ... reduction=tf.keras.losses.Reduction.SUM) >>> bce(y_true, y_pred).numpy() 1.730 >>> # Using 'none' reduction type. >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True, ... reduction=tf.keras.losses.Reduction.NONE) >>> bce(y_true, y_pred).numpy() array([0.235, 1.496], dtype=float32) **Default Usage:** (set `from_logits=False`) >>> # Make the following updates to the above "Recommended Usage" section >>> # 1. Set `from_logits=False` >>> tf.keras.losses.BinaryCrossentropy() # OR ...('from_logits=False') >>> # 2. Update `y_pred` to use probabilities instead of logits >>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]] """ def __init__( self, from_logits=False, label_smoothing=0.0, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name="binary_crossentropy", ): """Initializes `BinaryCrossentropy` instance. Args: from_logits: Whether to interpret `y_pred` as a tensor of [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we assume that `y_pred` contains probabilities (i.e., values in [0, 1]). label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0, we compute the loss between the predicted labels and a smoothed version of the true labels, where the smoothing squeezes the labels towards 0.5. Larger values of `label_smoothing` correspond to heavier smoothing. axis: The axis along which to compute crossentropy (the features axis). Defaults to -1. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. 
When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Name for the op. Defaults to 'binary_crossentropy'. """ super().__init__( binary_crossentropy, name=name, reduction=reduction, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis, ) self.from_logits = from_logits @keras_export("keras.losses.BinaryFocalCrossentropy") class BinaryFocalCrossentropy(LossFunctionWrapper): """Computes focal cross-entropy loss between true labels and predictions. Binary cross-entropy loss is often used for binary (0 or 1) classification tasks. The loss function requires the following inputs: - `y_true` (true label): This is either 0 or 1. - `y_pred` (predicted value): This is the model's prediction, i.e, a single floating-point value which either represents a [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf] when `from_logits=True`) or a probability (i.e, value in [0., 1.] when `from_logits=False`). According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it helps to apply a "focal factor" to down-weight easy examples and focus more on hard examples. By default, the focal tensor is computed as follows: `focal_factor = (1 - output) ** gamma` for class 1 `focal_factor = output ** gamma` for class 0 where `gamma` is a focusing parameter. When `gamma=0`, this function is equivalent to the binary crossentropy loss. With the `compile()` API: ```python model.compile( loss=tf.keras.losses.BinaryFocalCrossentropy(gamma=2.0, from_logits=True), .... ) ``` As a standalone function: >>> # Example 1: (batch_size = 1, number of samples = 4) >>> y_true = [0, 1, 0, 0] >>> y_pred = [-18.6, 0.51, 2.94, -12.8] >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=2, ... from_logits=True) >>> loss(y_true, y_pred).numpy() 0.691 >>> # Apply class weight >>> loss = tf.keras.losses.BinaryFocalCrossentropy( ... apply_class_balancing=True, gamma=2, from_logits=True) >>> loss(y_true, y_pred).numpy() 0.51 >>> # Example 2: (batch_size = 2, number of samples = 4) >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]] >>> # Using default 'auto'/'sum_over_batch_size' reduction type. >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=3, ... from_logits=True) >>> loss(y_true, y_pred).numpy() 0.647 >>> # Apply class weight >>> loss = tf.keras.losses.BinaryFocalCrossentropy( ... apply_class_balancing=True, gamma=3, from_logits=True) >>> loss(y_true, y_pred).numpy() 0.482 >>> # Using 'sample_weight' attribute with focal effect >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=3, ... from_logits=True) >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.133 >>> # Apply class weight >>> loss = tf.keras.losses.BinaryFocalCrossentropy( ... apply_class_balancing=True, gamma=3, from_logits=True) >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.097 >>> # Using 'sum' reduction` type. >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=4, ... from_logits=True, ... reduction=tf.keras.losses.Reduction.SUM) >>> loss(y_true, y_pred).numpy() 1.222 >>> # Apply class weight >>> loss = tf.keras.losses.BinaryFocalCrossentropy( ... apply_class_balancing=True, gamma=4, from_logits=True, ... reduction=tf.keras.losses.Reduction.SUM) >>> loss(y_true, y_pred).numpy() 0.914 >>> # Using 'none' reduction type. 
>>> loss = tf.keras.losses.BinaryFocalCrossentropy( ... gamma=5, from_logits=True, ... reduction=tf.keras.losses.Reduction.NONE) >>> loss(y_true, y_pred).numpy() array([0.0017 1.1561], dtype=float32) >>> # Apply class weight >>> loss = tf.keras.losses.BinaryFocalCrossentropy( ... apply_class_balancing=True, gamma=5, from_logits=True, ... reduction=tf.keras.losses.Reduction.NONE) >>> loss(y_true, y_pred).numpy() array([0.0004 0.8670], dtype=float32) Args: apply_class_balancing: A bool, whether to apply weight balancing on the binary classes 0 and 1. alpha: A weight balancing factor for class 1, default is `0.25` as mentioned in reference [Lin et al., 2018]( https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is `1.0 - alpha`. gamma: A focusing parameter used to compute the focal factor, default is `2.0` as mentioned in the reference [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf). from_logits: Whether to interpret `y_pred` as a tensor of [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we assume that `y_pred` are probabilities (i.e., values in `[0, 1]`). label_smoothing: Float in `[0, 1]`. When `0`, no smoothing occurs. When > `0`, we compute the loss between the predicted labels and a smoothed version of the true labels, where the smoothing squeezes the labels towards `0.5`. Larger values of `label_smoothing` correspond to heavier smoothing. axis: The axis along which to compute crossentropy (the features axis). Defaults to `-1`. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Name for the op. Defaults to 'binary_focal_crossentropy'. """ def __init__( self, apply_class_balancing=False, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name="binary_focal_crossentropy", ): """Initializes `BinaryFocalCrossentropy` instance.""" super().__init__( binary_focal_crossentropy, apply_class_balancing=apply_class_balancing, alpha=alpha, gamma=gamma, name=name, reduction=reduction, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis, ) self.from_logits = from_logits self.apply_class_balancing = apply_class_balancing self.alpha = alpha self.gamma = gamma def get_config(self): config = { "apply_class_balancing": self.apply_class_balancing, "alpha": self.alpha, "gamma": self.gamma, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export("keras.losses.CategoricalCrossentropy") class CategoricalCrossentropy(LossFunctionWrapper): """Computes the crossentropy loss between the labels and predictions. Use this crossentropy loss function when there are two or more label classes. We expect labels to be provided in a `one_hot` representation. If you want to provide labels as integers, please use `SparseCategoricalCrossentropy` loss. There should be `# classes` floating point values per feature. In the snippet below, there is `# classes` floating pointing values per example. The shape of both `y_pred` and `y_true` are `[batch_size, num_classes]`. 
Standalone usage: >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> cce = tf.keras.losses.CategoricalCrossentropy() >>> cce(y_true, y_pred).numpy() 1.177 >>> # Calling with 'sample_weight'. >>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy() 0.814 >>> # Using 'sum' reduction type. >>> cce = tf.keras.losses.CategoricalCrossentropy( ... reduction=tf.keras.losses.Reduction.SUM) >>> cce(y_true, y_pred).numpy() 2.354 >>> # Using 'none' reduction type. >>> cce = tf.keras.losses.CategoricalCrossentropy( ... reduction=tf.keras.losses.Reduction.NONE) >>> cce(y_true, y_pred).numpy() array([0.0513, 2.303], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalCrossentropy()) ``` """ def __init__( self, from_logits=False, label_smoothing=0.0, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name="categorical_crossentropy", ): """Initializes `CategoricalCrossentropy` instance. Args: from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. When > 0, label values are smoothed, meaning the confidence on label values are relaxed. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: The axis along which to compute crossentropy (the features axis). Defaults to -1. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'categorical_crossentropy'. """ super().__init__( categorical_crossentropy, name=name, reduction=reduction, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis, ) @keras_export("keras.losses.CategoricalFocalCrossentropy") class CategoricalFocalCrossentropy(LossFunctionWrapper): """Computes the alpha balanced focal crossentropy loss. Use this crossentropy loss function when there are two or more label classes and if you want to handle class imbalance without using `class_weights`. We expect labels to be provided in a `one_hot` representation. According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it helps to apply a focal factor to down-weight easy examples and focus more on hard examples. The general formula for the focal loss (FL) is as follows: `FL(p_t) = (1 − p_t)^gamma * log(p_t)` where `p_t` is defined as follows: `p_t = output if y_true == 1, else 1 - output` `(1 − p_t)^gamma` is the `modulating_factor`, where `gamma` is a focusing parameter. When `gamma` = 0, there is no focal effect on the cross entropy. `gamma` reduces the importance given to simple examples in a smooth manner. The authors use alpha-balanced variant of focal loss (FL) in the paper: `FL(p_t) = −alpha * (1 − p_t)^gamma * log(p_t)` where `alpha` is the weight factor for the classes. If `alpha` = 1, the loss won't be able to handle class imbalance properly as all classes will have the same weight. 
This can be a constant or a list of constants. If alpha is a list, it must have the same length as the number of classes. The formula above can be generalized to: `FL(p_t) = alpha * (1 − p_t)^gamma * CrossEntropy(y_true, y_pred)` where minus comes from `CrossEntropy(y_true, y_pred)` (CE). Extending this to multi-class case is straightforward: `FL(p_t) = alpha * (1 − p_t)^gamma * CategoricalCE(y_true, y_pred)` In the snippet below, there is `# classes` floating pointing values per example. The shape of both `y_pred` and `y_true` are `[batch_size, num_classes]`. Standalone usage: >>> y_true = [[0., 1., 0.], [0., 0., 1.]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> cce = tf.keras.losses.CategoricalFocalCrossentropy() >>> cce(y_true, y_pred).numpy() 0.23315276 >>> # Calling with 'sample_weight'. >>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy() 0.1632 >>> # Using 'sum' reduction type. >>> cce = tf.keras.losses.CategoricalFocalCrossentropy( ... reduction=tf.keras.losses.Reduction.SUM) >>> cce(y_true, y_pred).numpy() 0.46631 >>> # Using 'none' reduction type. >>> cce = tf.keras.losses.CategoricalFocalCrossentropy( ... reduction=tf.keras.losses.Reduction.NONE) >>> cce(y_true, y_pred).numpy() array([3.2058331e-05, 4.6627346e-01], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalFocalCrossentropy()) ``` Args: alpha: A weight balancing factor for all classes, default is `0.25` as mentioned in the reference. It can be a list of floats or a scalar. In the multi-class case, alpha may be set by inverse class frequency by using `compute_class_weight` from `sklearn.utils`. gamma: A focusing parameter, default is `2.0` as mentioned in the reference. It helps to gradually reduce the importance given to simple (easy) examples in a smooth manner. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. label_smoothing: Float in [0, 1]. When > 0, label values are smoothed, meaning the confidence on label values are relaxed. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: The axis along which to compute crossentropy (the features axis). Defaults to -1. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'categorical_focal_crossentropy'. 
""" def __init__( self, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name="categorical_focal_crossentropy", ): """Initializes `CategoricalFocalCrossentropy` instance.""" super().__init__( categorical_focal_crossentropy, alpha=alpha, gamma=gamma, name=name, reduction=reduction, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis, ) self.from_logits = from_logits self.alpha = alpha self.gamma = gamma def get_config(self): config = { "alpha": self.alpha, "gamma": self.gamma, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export("keras.losses.SparseCategoricalCrossentropy") class SparseCategoricalCrossentropy(LossFunctionWrapper): """Computes the crossentropy loss between the labels and predictions. Use this crossentropy loss function when there are two or more label classes. We expect labels to be provided as integers. If you want to provide labels using `one-hot` representation, please use `CategoricalCrossentropy` loss. There should be `# classes` floating point values per feature for `y_pred` and a single floating point value per feature for `y_true`. In the snippet below, there is a single floating point value per example for `y_true` and `# classes` floating pointing values per example for `y_pred`. The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is `[batch_size, num_classes]`. Standalone usage: >>> y_true = [1, 2] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> scce = tf.keras.losses.SparseCategoricalCrossentropy() >>> scce(y_true, y_pred).numpy() 1.177 >>> # Calling with 'sample_weight'. >>> scce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy() 0.814 >>> # Using 'sum' reduction type. >>> scce = tf.keras.losses.SparseCategoricalCrossentropy( ... reduction=tf.keras.losses.Reduction.SUM) >>> scce(y_true, y_pred).numpy() 2.354 >>> # Using 'none' reduction type. >>> scce = tf.keras.losses.SparseCategoricalCrossentropy( ... reduction=tf.keras.losses.Reduction.NONE) >>> scce(y_true, y_pred).numpy() array([0.0513, 2.303], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy()) ``` """ def __init__( self, from_logits=False, ignore_class=None, reduction=losses_utils.ReductionV2.AUTO, name="sparse_categorical_crossentropy", ): """Initializes `SparseCategoricalCrossentropy` instance. Args: from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. ignore_class: Optional integer. The ID of a class to be ignored during loss computation. This is useful, for example, in segmentation problems featuring a "void" class (commonly -1 or 255) in segmentation maps. By default (`ignore_class=None`), all classes are considered. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction ption will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'sparse_categorical_crossentropy'. 
""" super().__init__( sparse_categorical_crossentropy, name=name, reduction=reduction, from_logits=from_logits, ignore_class=ignore_class, ) @keras_export("keras.losses.CosineSimilarity") class CosineSimilarity(LossFunctionWrapper): """Computes the cosine similarity between labels and predictions. Note that it is a number between -1 and 1. When it is a negative number between -1 and 0, 0 indicates orthogonality and values closer to -1 indicate greater similarity. The values closer to 1 indicate greater dissimilarity. This makes it usable as a loss function in a setting where you try to maximize the proximity between predictions and targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity will be 0 regardless of the proximity between predictions and targets. `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))` Standalone usage: >>> y_true = [[0., 1.], [1., 1.]] >>> y_pred = [[1., 0.], [1., 1.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1) >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]] >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]] >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]] >>> # loss = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1)) >>> # = -((0. + 0.) + (0.5 + 0.5)) / 2 >>> cosine_loss(y_true, y_pred).numpy() -0.5 >>> # Calling with 'sample_weight'. >>> cosine_loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() -0.0999 >>> # Using 'sum' reduction type. >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1, ... reduction=tf.keras.losses.Reduction.SUM) >>> cosine_loss(y_true, y_pred).numpy() -0.999 >>> # Using 'none' reduction type. >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1, ... reduction=tf.keras.losses.Reduction.NONE) >>> cosine_loss(y_true, y_pred).numpy() array([-0., -0.999], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.CosineSimilarity(axis=1)) ``` Args: axis: The axis along which the cosine similarity is computed (the features axis). Defaults to -1. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'cosine_similarity'. """ def __init__( self, axis=-1, reduction=losses_utils.ReductionV2.AUTO, name="cosine_similarity", ): super().__init__( cosine_similarity, reduction=reduction, name=name, axis=axis ) @keras_export("keras.losses.Hinge") class Hinge(LossFunctionWrapper): """Computes the hinge loss between `y_true` & `y_pred`. `loss = maximum(1 - y_true * y_pred, 0)` `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.Hinge() >>> h(y_true, y_pred).numpy() 1.3 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.55 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.Hinge( ... 
reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 2.6 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.Hinge( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.1, 1.5], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Hinge()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name="hinge"): """Initializes `Hinge` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction ption will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'hinge'. """ super().__init__(hinge, name=name, reduction=reduction) @keras_export("keras.losses.SquaredHinge") class SquaredHinge(LossFunctionWrapper): """Computes the squared hinge loss between `y_true` & `y_pred`. `loss = square(maximum(1 - y_true * y_pred, 0))` `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.SquaredHinge() >>> h(y_true, y_pred).numpy() 1.86 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.73 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.SquaredHinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 3.72 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.SquaredHinge( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.46, 2.26], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.SquaredHinge()) ``` """ def __init__( self, reduction=losses_utils.ReductionV2.AUTO, name="squared_hinge" ): """Initializes `SquaredHinge` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction ption will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'squared_hinge'. """ super().__init__(squared_hinge, name=name, reduction=reduction) @keras_export("keras.losses.CategoricalHinge") class CategoricalHinge(LossFunctionWrapper): """Computes the categorical hinge loss between `y_true` & `y_pred`. `loss = maximum(neg - pos + 1, 0)` where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)` Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.CategoricalHinge() >>> h(y_true, y_pred).numpy() 1.4 >>> # Calling with 'sample_weight'. 
>>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.6 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.CategoricalHinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 2.8 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.CategoricalHinge( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.2, 1.6], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalHinge()) ``` """ def __init__( self, reduction=losses_utils.ReductionV2.AUTO, name="categorical_hinge" ): """Initializes `CategoricalHinge` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'categorical_hinge'. """ super().__init__(categorical_hinge, name=name, reduction=reduction) @keras_export("keras.losses.Poisson") class Poisson(LossFunctionWrapper): """Computes the Poisson loss between `y_true` & `y_pred`. `loss = y_pred - y_true * log(y_pred)` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> p = tf.keras.losses.Poisson() >>> p(y_true, y_pred).numpy() 0.5 >>> # Calling with 'sample_weight'. >>> p(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.4 >>> # Using 'sum' reduction type. >>> p = tf.keras.losses.Poisson( ... reduction=tf.keras.losses.Reduction.SUM) >>> p(y_true, y_pred).numpy() 0.999 >>> # Using 'none' reduction type. >>> p = tf.keras.losses.Poisson( ... reduction=tf.keras.losses.Reduction.NONE) >>> p(y_true, y_pred).numpy() array([0.999, 0.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Poisson()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name="poisson"): """Initializes `Poisson` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'poisson'. """ super().__init__(poisson, name=name, reduction=reduction) @keras_export("keras.losses.LogCosh") class LogCosh(LossFunctionWrapper): """Computes the logarithm of the hyperbolic cosine of the prediction error. `logcosh = log((exp(x) + exp(-x))/2)`, where x is the error `y_pred - y_true`. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> l = tf.keras.losses.LogCosh() >>> l(y_true, y_pred).numpy() 0.108 >>> # Calling with 'sample_weight'. 
>>> l(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.087 >>> # Using 'sum' reduction type. >>> l = tf.keras.losses.LogCosh( ... reduction=tf.keras.losses.Reduction.SUM) >>> l(y_true, y_pred).numpy() 0.217 >>> # Using 'none' reduction type. >>> l = tf.keras.losses.LogCosh( ... reduction=tf.keras.losses.Reduction.NONE) >>> l(y_true, y_pred).numpy() array([0.217, 0.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.LogCosh()) ``` """ def __init__( self, reduction=losses_utils.ReductionV2.AUTO, name="log_cosh" ): """Initializes `LogCosh` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'log_cosh'. """ super().__init__(log_cosh, name=name, reduction=reduction) @keras_export("keras.losses.KLDivergence") class KLDivergence(LossFunctionWrapper): """Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`. `loss = y_true * log(y_true / y_pred)` See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> kl = tf.keras.losses.KLDivergence() >>> kl(y_true, y_pred).numpy() 0.458 >>> # Calling with 'sample_weight'. >>> kl(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.366 >>> # Using 'sum' reduction type. >>> kl = tf.keras.losses.KLDivergence( ... reduction=tf.keras.losses.Reduction.SUM) >>> kl(y_true, y_pred).numpy() 0.916 >>> # Using 'none' reduction type. >>> kl = tf.keras.losses.KLDivergence( ... reduction=tf.keras.losses.Reduction.NONE) >>> kl(y_true, y_pred).numpy() array([0.916, -3.08e-06], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.KLDivergence()) ``` """ def __init__( self, reduction=losses_utils.ReductionV2.AUTO, name="kl_divergence" ): """Initializes `KLDivergence` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'kl_divergence'. """ super().__init__(kl_divergence, name=name, reduction=reduction) @keras_export("keras.losses.Huber") class Huber(LossFunctionWrapper): """Computes the Huber loss between `y_true` & `y_pred`. For each value x in `error = y_true - y_pred`: ``` loss = 0.5 * x^2 if |x| <= d loss = 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. 
>>> h = tf.keras.losses.Huber() >>> h(y_true, y_pred).numpy() 0.155 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.09 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.Huber( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 0.31 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.Huber( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([0.18, 0.13], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Huber()) ``` """ def __init__( self, delta=1.0, reduction=losses_utils.ReductionV2.AUTO, name="huber_loss", ): """Initializes `Huber` instance. Args: delta: A float, the point where the Huber loss function changes from a quadratic to linear. reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a `tf.distribute.Strategy`, except via `Model.compile()` and `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'huber_loss'. """ super().__init__(huber, name=name, reduction=reduction, delta=delta) @keras_export( "keras.metrics.mean_squared_error", "keras.metrics.mse", "keras.metrics.MSE", "keras.losses.mean_squared_error", "keras.losses.mse", "keras.losses.MSE", ) @tf.__internal__.dispatch.add_dispatch_support def mean_squared_error(y_true, y_pred): """Computes the mean squared error between labels and predictions. After computing the squared distance between the inputs, the mean value over the last dimension is returned. `loss = mean(square(y_true - y_pred), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1) def _ragged_tensor_apply_loss(loss_fn, y_true, y_pred, y_pred_extra_dim=False): """Apply a loss function on a per batch basis. Args: loss_fn: The loss function y_true: truth values (RaggedTensor) y_pred: predicted values (RaggedTensor) y_pred_extra_dim: whether y_pred has an additional dimension compared to y_true Returns: Loss-function result. A dense tensor if the output has a single dimension (per-batch loss value); a ragged tensor otherwise. """ def rt_is_equiv_dense(rt): """Returns true if this RaggedTensor has the same row_lengths across all ragged dimensions and thus can be converted to a dense tensor without loss of information. Args: rt: RaggedTensor. 
""" return tf.reduce_all( [ tf.equal( tf.math.reduce_variance( tf.cast(row_lens, backend.floatx()) ), tf.constant([0.0]), ) for row_lens in rt.nested_row_lengths() ] ) def _convert_to_dense(inputs): return tuple( rt.to_tensor() if isinstance(rt, tf.RaggedTensor) else rt for rt in inputs ) def _call_loss(inputs, ragged_output): """Adapt the result to ragged or dense tensor according to the expected output type. This is done so that all the return values of the map operation have the same type. """ r = loss_fn(*inputs) if ragged_output and not isinstance(r, tf.RaggedTensor): r = tf.RaggedTensor.from_tensor(r) elif not ragged_output and isinstance(r, tf.RaggedTensor): r = r.to_tensor() return r def _wrapper(inputs, ragged_output): _, y_pred = inputs if isinstance(y_pred, tf.RaggedTensor): return tf.cond( rt_is_equiv_dense(y_pred), lambda: _call_loss(_convert_to_dense(inputs), ragged_output), lambda: _call_loss(inputs, ragged_output), ) return loss_fn(*inputs) if not isinstance(y_true, tf.RaggedTensor): return loss_fn(y_true, y_pred.to_tensor()) lshape = y_pred.shape.as_list()[1:-1] if len(lshape) > 0: spec = tf.RaggedTensorSpec(shape=lshape, dtype=y_pred.dtype) else: spec = tf.TensorSpec(shape=[], dtype=y_pred.dtype) nested_splits_list = [rt.nested_row_splits for rt in (y_true, y_pred)] if y_pred_extra_dim: # The last dimension of a categorical prediction may be ragged or not. rdims = [len(slist) for slist in nested_splits_list] if rdims[0] == rdims[1] - 1: nested_splits_list[1] = nested_splits_list[1][:-1] map_fn = functools.partial(_wrapper, ragged_output=len(lshape) > 1) assertion_list = ragged_util.assert_splits_match(nested_splits_list) with tf.control_dependencies(assertion_list): return ragged_map_ops.map_fn(map_fn, elems=(y_true, y_pred), dtype=spec) @dispatch.dispatch_for_types(mean_squared_error, tf.RaggedTensor) def _ragged_tensor_mse(y_true, y_pred): """Implements support for handling RaggedTensors. Args: y_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`. y_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared error values. shape = `[batch_size, d0, .. dN-1]`. When the number of dimensions of the batch feature vector [d0, .. dN] is greater than one the return value is a RaggedTensor. Otherwise, a Dense tensor with dimensions [batch_size] is returned. """ return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred) @keras_export( "keras.metrics.mean_absolute_error", "keras.metrics.mae", "keras.metrics.MAE", "keras.losses.mean_absolute_error", "keras.losses.mae", "keras.losses.MAE", ) @tf.__internal__.dispatch.add_dispatch_support def mean_absolute_error(y_true, y_pred): """Computes the mean absolute error between labels and predictions. `loss = mean(abs(y_true - y_pred), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`. 
""" y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) return backend.mean(tf.abs(y_pred - y_true), axis=-1) @dispatch.dispatch_for_types(mean_absolute_error, tf.RaggedTensor) def _ragged_tensor_mae(y_true, y_pred): """RaggedTensor adapter for mean_absolute_error.""" return _ragged_tensor_apply_loss(mean_absolute_error, y_true, y_pred) @keras_export( "keras.metrics.mean_absolute_percentage_error", "keras.metrics.mape", "keras.metrics.MAPE", "keras.losses.mean_absolute_percentage_error", "keras.losses.mape", "keras.losses.MAPE", ) @tf.__internal__.dispatch.add_dispatch_support def mean_absolute_percentage_error(y_true, y_pred): """Computes the mean absolute percentage error between `y_true` & `y_pred`. `loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)` Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) diff = tf.abs( (y_true - y_pred) / backend.maximum(tf.abs(y_true), backend.epsilon()) ) return 100.0 * backend.mean(diff, axis=-1) @dispatch.dispatch_for_types(mean_absolute_percentage_error, tf.RaggedTensor) def _ragged_tensor_mape(y_true, y_pred): """Support RaggedTensors.""" return _ragged_tensor_apply_loss( mean_absolute_percentage_error, y_true, y_pred ) @keras_export( "keras.metrics.mean_squared_logarithmic_error", "keras.metrics.msle", "keras.metrics.MSLE", "keras.losses.mean_squared_logarithmic_error", "keras.losses.msle", "keras.losses.MSLE", ) @tf.__internal__.dispatch.add_dispatch_support def mean_squared_logarithmic_error(y_true, y_pred): """Computes the mean squared logarithmic error between `y_true` & `y_pred`. `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = np.maximum(y_true, 1e-7) >>> y_pred = np.maximum(y_pred, 1e-7) >>> assert np.allclose( ... loss.numpy(), ... np.mean( ... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`. 
""" y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) first_log = tf.math.log(backend.maximum(y_pred, backend.epsilon()) + 1.0) second_log = tf.math.log(backend.maximum(y_true, backend.epsilon()) + 1.0) return backend.mean( tf.math.squared_difference(first_log, second_log), axis=-1 ) @dispatch.dispatch_for_types(mean_squared_logarithmic_error, tf.RaggedTensor) def _ragged_tensor_msle(y_true, y_pred): """Implements support for handling RaggedTensors.""" return _ragged_tensor_apply_loss( mean_squared_logarithmic_error, y_true, y_pred ) def _maybe_convert_labels(y_true): """Converts binary labels into -1/1.""" are_zeros = tf.equal(y_true, 0) are_ones = tf.equal(y_true, 1) is_binary = tf.reduce_all(tf.logical_or(are_zeros, are_ones)) def _convert_binary_labels(): # Convert the binary labels to -1 or 1. return 2.0 * y_true - 1.0 updated_y_true = tf.__internal__.smart_cond.smart_cond( is_binary, _convert_binary_labels, lambda: y_true ) return updated_y_true @keras_export("keras.metrics.squared_hinge", "keras.losses.squared_hinge") @tf.__internal__.dispatch.add_dispatch_support def squared_hinge(y_true, y_pred): """Computes the squared hinge loss between `y_true` & `y_pred`. `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)` Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1)) Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) y_true = _maybe_convert_labels(y_true) return backend.mean( tf.square(tf.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1 ) @keras_export("keras.metrics.hinge", "keras.losses.hinge") @tf.__internal__.dispatch.add_dispatch_support def hinge(y_true, y_pred): """Computes the hinge loss between `y_true` & `y_pred`. `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)` Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1)) Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Hinge loss values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) y_true = _maybe_convert_labels(y_true) return backend.mean(tf.maximum(1.0 - y_true * y_pred, 0.0), axis=-1) @keras_export("keras.losses.categorical_hinge") @tf.__internal__.dispatch.add_dispatch_support def categorical_hinge(y_true, y_pred): """Computes the categorical hinge loss between `y_true` & `y_pred`. 
`loss = maximum(neg - pos + 1, 0)` where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)` Standalone usage: >>> y_true = np.random.randint(0, 3, size=(2,)) >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> pos = np.sum(y_true * y_pred, axis=-1) >>> neg = np.amax((1. - y_true) * y_pred, axis=-1) >>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.)) Args: y_true: The ground truth values. `y_true` values are expected to be either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor). y_pred: The predicted values. Returns: Categorical hinge loss values. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) pos = tf.reduce_sum(y_true * y_pred, axis=-1) neg = tf.reduce_max((1.0 - y_true) * y_pred, axis=-1) zero = tf.cast(0.0, y_pred.dtype) return tf.maximum(neg - pos + 1.0, zero) @keras_export("keras.losses.huber", v1=[]) @tf.__internal__.dispatch.add_dispatch_support def huber(y_true, y_pred, delta=1.0): """Computes Huber loss value. For each value x in `error = y_true - y_pred`: ``` loss = 0.5 * x^2 if |x| <= d loss = d * |x| - 0.5 * d^2 if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. delta: A float, the point where the Huber loss function changes from a quadratic to linear. Returns: Tensor with one scalar loss entry per sample. """ y_pred = tf.cast(y_pred, dtype=backend.floatx()) y_true = tf.cast(y_true, dtype=backend.floatx()) delta = tf.cast(delta, dtype=backend.floatx()) error = tf.subtract(y_pred, y_true) abs_error = tf.abs(error) half = tf.convert_to_tensor(0.5, dtype=abs_error.dtype) return backend.mean( tf.where( abs_error <= delta, half * tf.square(error), delta * abs_error - half * tf.square(delta), ), axis=-1, ) @keras_export( "keras.losses.log_cosh", "keras.losses.logcosh", "keras.metrics.log_cosh", "keras.metrics.logcosh", ) @tf.__internal__.dispatch.add_dispatch_support def log_cosh(y_true, y_pred): """Logarithm of the hyperbolic cosine of the prediction error. `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly like the mean squared error, but will not be so strongly affected by the occasional wildly incorrect prediction. Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.logcosh(y_true, y_pred) >>> assert loss.shape == (2,) >>> x = y_pred - y_true >>> assert np.allclose( ... loss.numpy(), ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - tf.math.log(2.), ... axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Logcosh error values. shape = `[batch_size, d0, .. dN-1]`. 
""" y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) def _logcosh(x): return ( x + tf.math.softplus(-2.0 * x) - tf.cast(tf.math.log(2.0), x.dtype) ) return backend.mean(_logcosh(y_pred - y_true), axis=-1) @keras_export( "keras.metrics.categorical_crossentropy", "keras.losses.categorical_crossentropy", ) @tf.__internal__.dispatch.add_dispatch_support def categorical_crossentropy( y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1 ): """Computes the categorical crossentropy loss. Standalone usage: >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.0513, 2.303], dtype=float32) Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: Defaults to -1. The dimension along which the entropy is computed. Returns: Categorical crossentropy loss value. """ if isinstance(axis, bool): raise ValueError( "`axis` must be of type `int`. " f"Received: axis={axis} of type {type(axis)}" ) y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) label_smoothing = tf.convert_to_tensor(label_smoothing, dtype=y_pred.dtype) if y_pred.shape[-1] == 1: warnings.warn( "In loss categorical_crossentropy, expected " "y_pred.shape to be (batch_size, num_classes) " f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. " "Consider using 'binary_crossentropy' if you only have 2 classes.", SyntaxWarning, stacklevel=2, ) def _smooth_labels(): num_classes = tf.cast(tf.shape(y_true)[axis], y_pred.dtype) return y_true * (1.0 - label_smoothing) + ( label_smoothing / num_classes ) y_true = tf.__internal__.smart_cond.smart_cond( label_smoothing, _smooth_labels, lambda: y_true ) return backend.categorical_crossentropy( y_true, y_pred, from_logits=from_logits, axis=axis ) @dispatch.dispatch_for_types(categorical_crossentropy, tf.RaggedTensor) def _ragged_tensor_categorical_crossentropy( y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1 ): """Implements support for handling RaggedTensors. Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: The axis along which to compute crossentropy (the features axis). Defaults to -1. Returns: Categorical crossentropy loss value. Expected shape: (batch, sequence_len, n_classes) with sequence_len being variable per batch. Return shape: (batch, sequence_len). When used by CategoricalCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the number of elements independent of the batch. E.g. if the RaggedTensor has 2 batches with [2, 1] values respectively the resulting loss is the sum of the individual loss values divided by 3. 
""" fn = functools.partial( categorical_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis, ) return _ragged_tensor_apply_loss(fn, y_true, y_pred) @keras_export( "keras.metrics.categorical_focal_crossentropy", "keras.losses.categorical_focal_crossentropy", ) @tf.__internal__.dispatch.add_dispatch_support def categorical_focal_crossentropy( y_true, y_pred, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, ): """Computes the categorical focal crossentropy loss. Standalone usage: >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.9, 0.05], [0.1, 0.85, 0.05]] >>> loss = tf.keras.losses.categorical_focal_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([2.63401289e-04, 6.75912094e-01], dtype=float32) Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. alpha: A weight balancing factor for all classes, default is `0.25` as mentioned in the reference. It can be a list of floats or a scalar. In the multi-class case, alpha may be set by inverse class frequency by using `compute_class_weight` from `sklearn.utils`. gamma: A focusing parameter, default is `2.0` as mentioned in the reference. It helps to gradually reduce the importance given to simple examples in a smooth manner. When `gamma` = 0, there is no focal effect on the categorical crossentropy. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: Defaults to -1. The dimension along which the entropy is computed. Returns: Categorical focal crossentropy loss value. """ if isinstance(axis, bool): raise ValueError( "`axis` must be of type `int`. " f"Received: axis={axis} of type {type(axis)}" ) y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) label_smoothing = tf.convert_to_tensor(label_smoothing, dtype=y_pred.dtype) if y_pred.shape[-1] == 1: warnings.warn( "In loss categorical_focal_crossentropy, expected " "y_pred.shape to be (batch_size, num_classes) " f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. " "Consider using 'binary_crossentropy' if you only have 2 classes.", SyntaxWarning, stacklevel=2, ) def _smooth_labels(): num_classes = tf.cast(tf.shape(y_true)[-1], y_pred.dtype) return y_true * (1.0 - label_smoothing) + ( label_smoothing / num_classes ) y_true = tf.__internal__.smart_cond.smart_cond( label_smoothing, _smooth_labels, lambda: y_true ) return backend.categorical_focal_crossentropy( target=y_true, output=y_pred, alpha=alpha, gamma=gamma, from_logits=from_logits, axis=axis, ) @dispatch.dispatch_for_types(categorical_focal_crossentropy, tf.RaggedTensor) def _ragged_tensor_categorical_focal_crossentropy( y_true, y_pred, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, ): """Implements support for handling RaggedTensors. Expected shape: (batch, sequence_len, n_classes) with sequence_len being variable per batch. Return shape: (batch, sequence_len). When used by CategoricalFocalCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the number of elements independent of the batch. E.g. if the RaggedTensor has 2 batches with [2, 1] values respectively the resulting loss is the sum of the individual loss values divided by 3. 
Args: alpha: A weight balancing factor for all classes, default is `0.25` as mentioned in the reference. It can be a list of floats or a scalar. In the multi-class case, alpha may be set by inverse class frequency by using `compute_class_weight` from `sklearn.utils`. gamma: A focusing parameter, default is `2.0` as mentioned in the reference. It helps to gradually reduce the importance given to simple examples in a smooth manner. When `gamma` = 0, there is no focal effect on the categorical crossentropy. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: Defaults to -1. The dimension along which the entropy is computed. Returns: Categorical focal crossentropy loss value. """ fn = functools.partial( categorical_focal_crossentropy, alpha=alpha, gamma=gamma, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis, ) return _ragged_tensor_apply_loss(fn, y_true, y_pred) @keras_export( "keras.metrics.sparse_categorical_crossentropy", "keras.losses.sparse_categorical_crossentropy", ) @tf.__internal__.dispatch.add_dispatch_support def sparse_categorical_crossentropy( y_true, y_pred, from_logits=False, axis=-1, ignore_class=None ): """Computes the sparse categorical crossentropy loss. Standalone usage: >>> y_true = [1, 2] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.0513, 2.303], dtype=float32) >>> y_true = [[[ 0, 2], ... [-1, -1]], ... [[ 0, 2], ... [-1, -1]]] >>> y_pred = [[[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], ... [[0.2, 0.5, 0.3], [0.0, 1.0, 0.0]]], ... [[[1.0, 0.0, 0.0], [0.0, 0.5, 0.5]], ... [[0.2, 0.5, 0.3], [0.0, 1.0, 0.0]]]] >>> loss = tf.keras.losses.sparse_categorical_crossentropy( ... y_true, y_pred, ignore_class=-1) >>> loss.numpy() array([[[2.3841855e-07, 2.3841855e-07], [0.0000000e+00, 0.0000000e+00]], [[2.3841855e-07, 6.9314730e-01], [0.0000000e+00, 0.0000000e+00]]], dtype=float32) Args: y_true: Ground truth values. y_pred: The predicted values. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. axis: Defaults to -1. The dimension along which the entropy is computed. ignore_class: Optional integer. The ID of a class to be ignored during loss computation. This is useful, for example, in segmentation problems featuring a "void" class (commonly -1 or 255) in segmentation maps. By default (`ignore_class=None`), all classes are considered. Returns: Sparse categorical crossentropy loss value. """ return backend.sparse_categorical_crossentropy( y_true, y_pred, from_logits=from_logits, ignore_class=ignore_class, axis=axis, ) @dispatch.dispatch_for_types(sparse_categorical_crossentropy, tf.RaggedTensor) def _ragged_tensor_sparse_categorical_crossentropy( y_true, y_pred, from_logits=False, axis=-1, ignore_class=None ): """Implements support for handling RaggedTensors. Expected y_pred shape: (batch, sequence_len, n_classes) with sequence_len being variable per batch. Return shape: (batch, sequence_len). When used by SparseCategoricalCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the number of elements independent of the batch. E.g. 
if the RaggedTensor has 2 batches with [2, 1] values respectively, the resulting loss is the sum of the individual loss values divided by 3. """ fn = functools.partial( sparse_categorical_crossentropy, from_logits=from_logits, ignore_class=ignore_class, axis=axis, ) return _ragged_tensor_apply_loss(fn, y_true, y_pred, y_pred_extra_dim=True) @keras_export( "keras.metrics.binary_crossentropy", "keras.losses.binary_crossentropy" ) @tf.__internal__.dispatch.add_dispatch_support def binary_crossentropy( y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1 ): """Computes the binary crossentropy loss. Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.916 , 0.714], dtype=float32) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels by squeezing them towards 0.5 That is, using `1. - 0.5 * label_smoothing` for the target class and `0.5 * label_smoothing` for the non-target class. axis: The axis along which the mean is computed. Defaults to -1. Returns: Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) label_smoothing = tf.convert_to_tensor(label_smoothing, dtype=y_pred.dtype) def _smooth_labels(): return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing y_true = tf.__internal__.smart_cond.smart_cond( label_smoothing, _smooth_labels, lambda: y_true ) return backend.mean( backend.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=axis, ) @dispatch.dispatch_for_types(binary_crossentropy, tf.RaggedTensor) def _ragged_tensor_binary_crossentropy( y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1 ): """Implements support for handling RaggedTensors. Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: Axis along which to compute crossentropy. Returns: Binary crossentropy loss value. Expected shape: (batch, sequence_len) with sequence_len being variable per batch. Return shape: (batch,); returns the per batch mean of the loss values. When used by BinaryCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over the number of batches. """ fn = functools.partial( binary_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis, ) return _ragged_tensor_apply_loss(fn, y_true, y_pred) @keras_export( "keras.metrics.binary_focal_crossentropy", "keras.losses.binary_focal_crossentropy", ) @tf.__internal__.dispatch.add_dispatch_support def binary_focal_crossentropy( y_true, y_pred, apply_class_balancing=False, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, ): """Computes the binary focal crossentropy loss. 
According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it helps to apply a focal factor to down-weight easy examples and focus more on hard examples. By default, the focal tensor is computed as follows: `focal_factor = (1 - output)**gamma` for class 1 `focal_factor = output**gamma` for class 0 where `gamma` is a focusing parameter. When `gamma` = 0, there is no focal effect on the binary crossentropy loss. If `apply_class_balancing == True`, this function also takes into account a weight balancing factor for the binary classes 0 and 1 as follows: `weight = alpha` for class 1 (`target == 1`) `weight = 1 - alpha` for class 0 where `alpha` is a float in the range of `[0, 1]`. Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> loss = tf.keras.losses.binary_focal_crossentropy(y_true, y_pred, ... gamma=2) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.330, 0.206], dtype=float32) Args: y_true: Ground truth values, of shape `(batch_size, d0, .. dN)`. y_pred: The predicted values, of shape `(batch_size, d0, .. dN)`. apply_class_balancing: A bool, whether to apply weight balancing on the binary classes 0 and 1. alpha: A weight balancing factor for class 1, default is `0.25` as mentioned in the reference. The weight for class 0 is `1.0 - alpha`. gamma: A focusing parameter, default is `2.0` as mentioned in the reference. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in `[0, 1]`. If higher than 0 then smooth the labels by squeezing them towards `0.5`, i.e., using `1. - 0.5 * label_smoothing` for the target class and `0.5 * label_smoothing` for the non-target class. axis: The axis along which the mean is computed. Defaults to `-1`. Returns: Binary focal crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) label_smoothing = tf.convert_to_tensor(label_smoothing, dtype=y_pred.dtype) def _smooth_labels(): return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing y_true = tf.__internal__.smart_cond.smart_cond( label_smoothing, _smooth_labels, lambda: y_true ) return backend.mean( backend.binary_focal_crossentropy( target=y_true, output=y_pred, apply_class_balancing=apply_class_balancing, alpha=alpha, gamma=gamma, from_logits=from_logits, ), axis=axis, ) @dispatch.dispatch_for_types(binary_focal_crossentropy, tf.RaggedTensor) def _ragged_tensor_binary_focal_crossentropy( y_true, y_pred, apply_class_balancing=False, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, ): """Implements support for handling RaggedTensors. Expected shape: `(batch, sequence_len)` with sequence_len being variable per batch. Return shape: `(batch,)`; returns the per batch mean of the loss values. When used by BinaryFocalCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over the number of batches. Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. apply_class_balancing: A bool, whether to apply weight balancing on the binary classes 0 and 1. alpha: A weight balancing factor for class 1, default is `0.25` as mentioned in the reference [Lin et al., 2018]( https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is `1.0 - alpha`. gamma: A focusing parameter, default is `2.0` as mentioned in the reference. 
from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels. For example, if `0.1`, use `0.1 / num_classes` for non-target labels and `0.9 + 0.1 / num_classes` for target labels. axis: Axis along which to compute crossentropy. Returns: Binary focal crossentropy loss value. """ fn = functools.partial( binary_focal_crossentropy, apply_class_balancing=apply_class_balancing, alpha=alpha, gamma=gamma, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis, ) return _ragged_tensor_apply_loss(fn, y_true, y_pred) @keras_export( "keras.metrics.kl_divergence", "keras.metrics.kullback_leibler_divergence", "keras.metrics.kld", "keras.metrics.KLD", "keras.losses.kl_divergence", "keras.losses.kullback_leibler_divergence", "keras.losses.kld", "keras.losses.KLD", ) @tf.__internal__.dispatch.add_dispatch_support def kl_divergence(y_true, y_pred): """Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`. `loss = y_true * log(y_true / y_pred)` See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1) >>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1) >>> assert np.array_equal( ... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1)) Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. Returns: A `Tensor` with loss. Raises: TypeError: If `y_true` cannot be cast to the `y_pred.dtype`. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) y_true = backend.clip(y_true, backend.epsilon(), 1) y_pred = backend.clip(y_pred, backend.epsilon(), 1) return tf.reduce_sum(y_true * tf.math.log(y_true / y_pred), axis=-1) @keras_export("keras.metrics.poisson", "keras.losses.poisson") @tf.__internal__.dispatch.add_dispatch_support def poisson(y_true, y_pred): """Computes the Poisson loss between y_true and y_pred. The Poisson loss is the mean of the elements of the `Tensor` `y_pred - y_true * log(y_pred)`. Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.poisson(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_pred = y_pred + 1e-7 >>> assert np.allclose( ... loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Poisson loss value. shape = `[batch_size, d0, .. dN-1]`. Raises: InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) return backend.mean( y_pred - y_true * tf.math.log(y_pred + backend.epsilon()), axis=-1 ) @keras_export( "keras.losses.cosine_similarity", v1=[ "keras.metrics.cosine_proximity", "keras.metrics.cosine", "keras.losses.cosine_proximity", "keras.losses.cosine", "keras.losses.cosine_similarity", ], ) @tf.__internal__.dispatch.add_dispatch_support def cosine_similarity(y_true, y_pred, axis=-1): """Computes the cosine similarity between labels and predictions. Note that it is a number between -1 and 1. 
When it is a negative number between -1 and 0, 0 indicates orthogonality and values closer to -1 indicate greater similarity. The values closer to 1 indicate greater dissimilarity. This makes it usable as a loss function in a setting where you try to maximize the proximity between predictions and targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity will be 0 regardless of the proximity between predictions and targets. `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))` Standalone usage: >>> y_true = [[0., 1.], [1., 1.], [1., 1.]] >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]] >>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1) >>> loss.numpy() array([-0., -0.999, 0.999], dtype=float32) Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. axis: Axis along which to determine similarity. Returns: Cosine similarity tensor. """ y_true = tf.linalg.l2_normalize(y_true, axis=axis) y_pred = tf.linalg.l2_normalize(y_pred, axis=axis) return -tf.reduce_sum(y_true * y_pred, axis=axis) # Aliases. bce = BCE = binary_crossentropy mse = MSE = mean_squared_error mae = MAE = mean_absolute_error mape = MAPE = mean_absolute_percentage_error msle = MSLE = mean_squared_logarithmic_error kld = KLD = kullback_leibler_divergence = kl_divergence logcosh = log_cosh huber_loss = huber def is_categorical_crossentropy(loss): result = ( isinstance(loss, CategoricalCrossentropy) or ( isinstance(loss, LossFunctionWrapper) and loss.fn == categorical_crossentropy ) or ( hasattr(loss, "__name__") and loss.__name__ == "categorical_crossentropy" ) or (loss == "categorical_crossentropy") ) return result @keras_export("keras.losses.serialize") def serialize(loss, use_legacy_format=False): """Serializes loss function or `Loss` instance. Args: loss: A TF-Keras `Loss` instance or a loss function. use_legacy_format: Boolean, whether to use the legacy serialization format. Defaults to `False`. Returns: Loss configuration dictionary. """ if loss is None: return None if not isinstance(loss, Loss): warnings.warn( "The `keras.losses.serialize()` API should only be used for " "objects of type `keras.losses.Loss`. Found an instance of type " f"{type(loss)}, which may lead to improper serialization." ) if use_legacy_format: return legacy_serialization.serialize_keras_object(loss) return serialize_keras_object(loss) @keras_export("keras.losses.deserialize") def deserialize(name, custom_objects=None, use_legacy_format=False): """Deserializes a serialized loss class/function instance. Args: name: Loss configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. use_legacy_format: Boolean, whether to use the legacy serialization format. Defaults to `False`. Returns: A TF-Keras `Loss` instance or a loss function. """ if use_legacy_format: return legacy_serialization.deserialize_keras_object( name, module_objects=globals(), custom_objects=custom_objects, printable_module_name="loss function", ) return deserialize_keras_object( name, module_objects=globals(), custom_objects=custom_objects, printable_module_name="loss function", ) @keras_export("keras.losses.get") def get(identifier): """Retrieves a TF-Keras loss as a `function`/`Loss` class instance. The `identifier` may be the string name of a loss function or `Loss` class. 
>>> loss = tf.keras.losses.get("categorical_crossentropy") >>> type(loss) <class 'function'> >>> loss = tf.keras.losses.get("CategoricalCrossentropy") >>> type(loss) <class '...keras.losses.CategoricalCrossentropy'> You can also specify `config` of the loss to this function by passing dict containing `class_name` and `config` as an identifier. Also note that the `class_name` must map to a `Loss` class >>> identifier = {"class_name": "CategoricalCrossentropy", ... "config": {"from_logits": True}} >>> loss = tf.keras.losses.get(identifier) >>> type(loss) <class '...keras.losses.CategoricalCrossentropy'> Args: identifier: A loss identifier. One of None or string name of a loss function/class or loss configuration dictionary or a loss function or a loss class instance. Returns: A TF-Keras loss as a `function`/ `Loss` class instance. Raises: ValueError: If `identifier` cannot be interpreted. """ if identifier is None: return None if isinstance(identifier, str): identifier = str(identifier) use_legacy_format = "module" not in identifier return deserialize(identifier, use_legacy_format=use_legacy_format) if isinstance(identifier, dict): return deserialize(identifier) if callable(identifier): return identifier raise ValueError( f"Could not interpret loss function identifier: {identifier}" ) LABEL_DTYPES_FOR_LOSSES = { tf.compat.v1.losses.sparse_softmax_cross_entropy: "int32", sparse_categorical_crossentropy: "int32", }
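# --- Editor's illustrative sketch (not part of the original TF-Keras module).
# It shows, using only the public `tf.keras.losses` API documented above, how
# `get()` resolves the different identifier forms (string name, config dict,
# callable) and how `serialize()`/`deserialize()` round-trip a `Loss` instance.
# Guarded by `__main__` so it has no effect on normal imports; printed values
# are indicative only.
if __name__ == "__main__":
    import tensorflow as tf

    # A plain string resolves to the loss *function*.
    fn = tf.keras.losses.get("mean_squared_error")
    print(fn.__name__)  # -> mean_squared_error

    # A dict with `class_name`/`config` resolves to a configured Loss instance.
    identifier = {
        "class_name": "CategoricalCrossentropy",
        "config": {"from_logits": True},
    }
    loss_obj = tf.keras.losses.get(identifier)
    print(type(loss_obj).__name__)  # -> CategoricalCrossentropy

    # A callable (loss function or Loss instance) is passed through unchanged.
    assert tf.keras.losses.get(tf.keras.losses.huber) is tf.keras.losses.huber

    # serialize()/deserialize() round-trip a Loss instance via its config.
    config = tf.keras.losses.serialize(tf.keras.losses.Huber(delta=2.0))
    restored = tf.keras.losses.deserialize(config)
    print(type(restored).__name__)  # -> Huber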
tf-keras/tf_keras/losses.py/0
{ "file_path": "tf-keras/tf_keras/losses.py", "repo_id": "tf-keras", "token_count": 48521 }
180
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests metrics correctness using TF-Keras model.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized from tf_keras import layers from tf_keras import losses from tf_keras import metrics from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils from tf_keras.utils import losses_utils def get_multi_io_model(): inp_1 = layers.Input(shape=(1,), name="input_1") inp_2 = layers.Input(shape=(1,), name="input_2") x = layers.Dense(3, kernel_initializer="ones", trainable=False) out_1 = layers.Dense( 1, kernel_initializer="ones", name="output_1", trainable=False ) out_2 = layers.Dense( 1, kernel_initializer="ones", name="output_2", trainable=False ) branch_a = [inp_1, x, out_1] branch_b = [inp_2, x, out_2] return test_utils.get_multi_io_model(branch_a, branch_b) def custom_generator_multi_io(sample_weights=None): batch_size = 2 num_samples = 5 inputs = np.asarray([[1.0], [2.0], [3.0], [4.0], [5.0]]) targets_1 = np.asarray([[2.0], [4.0], [6.0], [8.0], [10.0]]) targets_2 = np.asarray([[1.0], [2.0], [3.0], [4.0], [5.0]]) start = 0 while True: if start > num_samples: start = 0 end = start + batch_size x = [inputs[start:end], inputs[start:end]] y = [targets_1[start:end], targets_2[start:end]] if sample_weights: sw = tf.nest.map_structure(lambda w: w[start:end], sample_weights) else: sw = None start = end yield x, y, sw @test_combinations.run_with_all_model_types(exclude_models=["sequential"]) @test_combinations.run_all_keras_modes(always_skip_v1=True) class TestMetricsCorrectnessMultiIO(test_combinations.TestCase): def _get_compiled_multi_io_model(self): model = get_multi_io_model() model.compile( optimizer="rmsprop", loss="mse", metrics=[metrics.MeanSquaredError(name="mean_squared_error")], weighted_metrics=[ metrics.MeanSquaredError(name="mean_squared_error_2") ], run_eagerly=test_utils.should_run_eagerly(), ) return model def setUp(self): super(TestMetricsCorrectnessMultiIO, self).setUp() self.x = np.asarray([[1.0], [2.0], [3.0], [4.0], [5.0]]) self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0], [10.0]]) self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0], [5.0]]) self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0, 6.0]) self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5, 3.0]) # y_true_1 = [[2.], [4.], [6.], [8.], [10.]] # y_pred_1 = [[3.], [6.], [9.], [12.], [15.]] # y_true_2 = [[1.], [2.], [3.], [4.], [5.]] # y_pred_2 = [[3.], [6.], [9.], [12.], [15.]] # Weighted metric `output_1`: # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) + # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5) + # ((15 - 10)^2 * 6) # = 280 # Count = (2 + 3) + (4 + 5) + 6 = 20 # Result = 14 # Weighted metric `output_2`: # Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) + # ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5) + # (15 - 5)^2 * 3.0 # = 440 # Count = (3.5 + 2.5) + (1.5 + 0.5) + 3.0 = 11.0 # Result = 40 
# Loss `output_1` with weights: # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) + # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5) + # ((15 - 10)^2 * 6) # = 280 # Count = 2 + 2 + 1 # Result = 56 # Loss `output_1` without weights/Metric `output_1`: # Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + \ # (12 - 8)^2) + (15 - 10)^2 # = 55 # Count = 2 + 2 + 1 # Result = 11 # Loss `output_2` with weights: # Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) + # ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5) + # (15 - 5)^2 * 3.0 # = 440 # Count = 2 + 2 + 1 # Result = 88 # Loss `output_2` without weights/Metric `output_2`: # Total = ((3 - 1)^2 + (6 - 2)^2) + ((9 - 3)^2 + \ # (12 - 4)^2) + (15 - 5)^2 # = 220 # Count = 2 + 2 + 1 # Result = 44 # Total loss with weights = 56 + 88 = 144 # Total loss without weights = 11 + 44 = 55 self.wmse = "mean_squared_error_2" self.expected_fit_result_with_weights = { "output_1_mean_squared_error": [11, 11], "output_2_mean_squared_error": [44, 44], "output_1_" + self.wmse: [14, 14], "output_2_" + self.wmse: [40, 40], "loss": [144, 144], "output_1_loss": [56, 56], "output_2_loss": [88, 88], } self.expected_fit_result_with_weights_output_2 = { "output_1_mean_squared_error": [11, 11], "output_2_mean_squared_error": [44, 44], "output_1_" + self.wmse: [11, 11], "output_2_" + self.wmse: [40, 40], "loss": [99, 99], "output_1_loss": [11, 11], "output_2_loss": [88, 88], } self.expected_fit_result = { "output_1_mean_squared_error": [11, 11], "output_2_mean_squared_error": [44, 44], "output_1_" + self.wmse: [11, 11], "output_2_" + self.wmse: [44, 44], "loss": [55, 55], "output_1_loss": [11, 11], "output_2_loss": [44, 44], } # In the order: 'loss', 'output_1_loss', 'output_2_loss', # 'output_1_mean_squared_error', 'output_1_mean_squared_error_2', # 'output_2_mean_squared_error', 'output_2_mean_squared_error_2' self.expected_batch_result_with_weights = [144, 56, 88, 11, 14, 44, 40] self.expected_batch_result_with_weights_output_2 = [ 99, 11, 88, 11, 11, 44, 40, ] self.expected_batch_result = [55, 11, 44, 11, 11, 44, 44] def test_fit(self): model = self._get_compiled_multi_io_model() history = model.fit( [self.x, self.x], [self.y1, self.y2], batch_size=2, epochs=2, shuffle=False, ) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_with_sample_weight(self): model = self._get_compiled_multi_io_model() history = model.fit( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, batch_size=2, epochs=2, shuffle=False, ) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) # Set weights for one output (use batch size). 
history = model.fit( [self.x, self.x], [self.y1, self.y2], sample_weight={"output_2": self.sample_weight_2}, batch_size=2, epochs=2, shuffle=False, ) for ( key, value, ) in self.expected_fit_result_with_weights_output_2.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_eval(self): model = self._get_compiled_multi_io_model() eval_result = model.evaluate( [self.x, self.x], [self.y1, self.y2], batch_size=2 ) self.assertAllClose(eval_result, self.expected_batch_result, 1e-3) def test_eval_with_sample_weight(self): model = self._get_compiled_multi_io_model() eval_result = model.evaluate( [self.x, self.x], [self.y1, self.y2], batch_size=2, sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights, 1e-3 ) # Set weights for one output. model = self._get_compiled_multi_io_model() eval_result = model.evaluate( [self.x, self.x], [self.y1, self.y2], batch_size=2, sample_weight={ "output_2": self.sample_weight_2, }, ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights_output_2, 1e-3 ) # Verify that metric value is same with arbitrary weights and batch # size. x = np.random.random((50, 1)) y = np.random.random((50, 1)) w = np.random.random((50,)) mse1 = model.evaluate( [x, x], [y, y], sample_weight=[w, w], batch_size=5 )[3] mse2 = model.evaluate( [x, x], [y, y], sample_weight=[w, w], batch_size=10 )[3] self.assertAllClose(mse1, mse2, 1e-3) def test_train_on_batch(self): model = self._get_compiled_multi_io_model() result = model.train_on_batch([self.x, self.x], [self.y1, self.y2]) self.assertAllClose(result, self.expected_batch_result, 1e-3) def test_train_on_batch_with_sample_weight(self): model = self._get_compiled_multi_io_model() result = model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) self.assertAllClose( result, self.expected_batch_result_with_weights, 1e-3 ) # Set weights for one output. result = model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_2": self.sample_weight_2, }, ) self.assertAllClose( result, self.expected_batch_result_with_weights_output_2, 1e-3 ) def test_test_on_batch(self): model = self._get_compiled_multi_io_model() result = model.test_on_batch([self.x, self.x], [self.y1, self.y2]) self.assertAllClose(result, self.expected_batch_result, 1e-3) def test_test_on_batch_with_sample_weight(self): model = self._get_compiled_multi_io_model() result = model.test_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) self.assertAllClose( result, self.expected_batch_result_with_weights, 1e-3 ) # Set weights for one output. 
result = model.test_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_2": self.sample_weight_2, }, ) self.assertAllClose( result, self.expected_batch_result_with_weights_output_2, 1e-3 ) def test_fit_generator(self): model = self._get_compiled_multi_io_model() history = model.fit_generator( custom_generator_multi_io(), steps_per_epoch=3, epochs=2 ) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_generator_with_sample_weight(self): model = self._get_compiled_multi_io_model() history = model.fit_generator( custom_generator_multi_io( sample_weights=[self.sample_weight_1, self.sample_weight_2] ), steps_per_epoch=3, epochs=2, ) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) # Set weights for one output. history = model.fit_generator( custom_generator_multi_io( sample_weights={"output_2": self.sample_weight_2} ), steps_per_epoch=3, epochs=2, ) for ( key, value, ) in self.expected_fit_result_with_weights_output_2.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_eval_generator(self): model = self._get_compiled_multi_io_model() eval_result = model.evaluate_generator( custom_generator_multi_io(), steps=3 ) self.assertAllClose(eval_result, self.expected_batch_result, 1e-3) def test_eval_generator_with_sample_weight(self): model = self._get_compiled_multi_io_model() eval_result = model.evaluate_generator( custom_generator_multi_io( sample_weights=[self.sample_weight_1, self.sample_weight_2] ), steps=3, ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights, 1e-3 ) # Set weights for one output. eval_result = model.evaluate_generator( custom_generator_multi_io( sample_weights={"output_2": self.sample_weight_2} ), steps=3, ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights_output_2, 1e-3 ) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes(always_skip_v1=True) class TestMetricsCorrectnessSingleIO(test_combinations.TestCase): def _get_model(self): x = layers.Dense(3, kernel_initializer="ones", trainable=False) out = layers.Dense( 1, kernel_initializer="ones", name="output", trainable=False ) model = test_utils.get_model_from_layers([x, out], input_shape=(1,)) model.compile( optimizer="rmsprop", loss="mse", metrics=[metrics.MeanSquaredError(name="mean_squared_error")], weighted_metrics=[ metrics.MeanSquaredError(name="mean_squared_error_2") ], run_eagerly=test_utils.should_run_eagerly(), ) return model def _custom_generator(self, sample_weight=None): batch_size = 2 num_samples = 4 x = np.asarray([[1.0], [2.0], [3.0], [4.0]]) y = np.asarray([[2.0], [4.0], [6.0], [8.0]]) w = sample_weight i = 0 while True: batch_index = i * batch_size % num_samples i += 1 start = batch_index end = start + batch_size yield x[start:end], y[start:end], None if w is None else w[ start:end ] def setUp(self): super(TestMetricsCorrectnessSingleIO, self).setUp() self.x = np.asarray([[1.0], [2.0], [3.0], [4.0]]) self.y = np.asarray([[2.0], [4.0], [6.0], [8.0]]) self.sample_weight = np.asarray([2.0, 3.0, 4.0, 5.0]) self.class_weight = {i: 1 for i in range(10)} self.class_weight.update({2: 2, 4: 3, 6: 4, 8: 5}) # y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]] # Metric: # Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30, # Count = 2 + 2 # Result = 7.5 # Weighted metric: # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) + # ((9 - 6)^2 * 4 + (12 
- 8)^2 * 5) # = 130 # Count = (2 + 3) + (4 + 5) # Result = 9.2857141 # Total loss with weights: # Total = ((3 - 2)^2 * 2 + (6 - 4)^2 * 3) + # ((9 - 6)^2 * 4 + (12 - 8)^2 * 5) # = 130, # Count = 2 + 2 # Result = 32.5 # Total loss without weights: # Total = ((3 - 2)^2 + (6 - 4)^2) + # ((9 - 6)^2 + (12 - 8)^2) # = 30, # Count = 2 + 2 # Result = 7.5 wmse = "mean_squared_error_2" self.expected_fit_result_with_weights = { "mean_squared_error": [7.5, 7.5], wmse: [9.286, 9.286], "loss": [32.5, 32.5], } self.expected_fit_result = { "mean_squared_error": [7.5, 7.5], wmse: [7.5, 7.5], "loss": [7.5, 7.5], } # In the order: 'loss', 'mean_squared_error', 'mean_squared_error_2' self.expected_batch_result_with_weights = [32.5, 7.5, 9.286] self.expected_batch_result = [7.5, 7.5, 7.5] def test_fit(self): model = self._get_model() history = model.fit( self.x, self.y, batch_size=2, epochs=2, shuffle=False ) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_with_sample_weight(self): model = self._get_model() history = model.fit( self.x, self.y, sample_weight=self.sample_weight, batch_size=2, epochs=2, shuffle=False, ) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_with_class_weight(self): model = self._get_model() history = model.fit( self.x, self.y, class_weight=self.class_weight, batch_size=2, epochs=2, shuffle=False, ) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_eval(self): model = self._get_model() eval_result = model.evaluate(self.x, self.y, batch_size=2) self.assertAllClose(eval_result, self.expected_batch_result, 1e-3) def test_eval_with_sample_weight(self): model = self._get_model() eval_result = model.evaluate( self.x, self.y, batch_size=2, sample_weight=self.sample_weight ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights, 1e-3 ) # Verify that metric value is same with arbitrary weights and batch # size. 
x = np.random.random((50, 1)) y = np.random.random((50, 1)) w = np.random.random((50,)) mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1] mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1] self.assertAllClose(mse1, mse2, 1e-3) def test_train_on_batch(self): model = self._get_model() result = model.train_on_batch(self.x, self.y) self.assertAllClose(result, self.expected_batch_result, 1e-3) def test_train_on_batch_with_sample_weight(self): model = self._get_model() result = model.train_on_batch( self.x, self.y, sample_weight=self.sample_weight ) self.assertAllClose( result, self.expected_batch_result_with_weights, 1e-3 ) def test_train_on_batch_with_class_weight(self): model = self._get_model() result = model.train_on_batch( self.x, self.y, class_weight=self.class_weight ) self.assertAllClose( result, self.expected_batch_result_with_weights, 1e-3 ) def test_test_on_batch(self): model = self._get_model() result = model.test_on_batch(self.x, self.y) self.assertAllClose(result, self.expected_batch_result, 1e-3) def test_test_on_batch_with_sample_weight(self): model = self._get_model() result = model.test_on_batch( self.x, self.y, sample_weight=self.sample_weight ) self.assertAllClose( result, self.expected_batch_result_with_weights, 1e-3 ) def test_fit_generator(self): model = self._get_model() history = model.fit_generator( self._custom_generator(), steps_per_epoch=2, epochs=2 ) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_generator_with_sample_weight(self): model = self._get_model() history = model.fit_generator( self._custom_generator(sample_weight=self.sample_weight), steps_per_epoch=2, epochs=2, ) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_fit_generator_with_class_weight(self): model = self._get_model() history = model.fit_generator( self._custom_generator(), steps_per_epoch=2, epochs=2, class_weight=self.class_weight, ) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) def test_eval_generator(self): model = self._get_model() eval_result = model.evaluate_generator( self._custom_generator(), steps=2 ) self.assertAllClose(eval_result, self.expected_batch_result, 1e-3) def test_eval_generator_with_sample_weight(self): model = self._get_model() eval_result = model.evaluate_generator( self._custom_generator(sample_weight=self.sample_weight), steps=2 ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights, 1e-3 ) @test_combinations.run_with_all_model_types(exclude_models=["sequential"]) @test_combinations.run_all_keras_modes(always_skip_v1=True) @parameterized.parameters( [ losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE, losses_utils.ReductionV2.AUTO, losses_utils.ReductionV2.SUM, ] ) class TestOutputLossMetrics(test_combinations.TestCase): def _get_compiled_multi_io_model(self, loss): model = get_multi_io_model() model.compile( optimizer="rmsprop", loss=loss, run_eagerly=test_utils.should_run_eagerly(), ) return model def setUp(self): super(TestOutputLossMetrics, self).setUp() self.x = np.asarray([[1.0], [2.0], [3.0], [4.0], [5.0]]) self.y1 = np.asarray([[2.0], [4.0], [6.0], [8.0], [10.0]]) self.y2 = np.asarray([[1.0], [2.0], [3.0], [4.0], [5.0]]) self.sample_weight_1 = np.asarray([2.0, 3.0, 4.0, 5.0, 6.0]) self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5, 3.0]) # y_true_1 = [[2.], [4.], [6.], [8.], [10.]] # y_pred_1 = 
[[3.], [6.], [9.], [12.], [15.]] # y_true_2 = [[1.], [2.], [3.], [4.], [5.]] # y_pred_2 = [[3.], [6.], [9.], [12.], [15.]] # Loss `output_1`: # Per-sample weighted losses # Batch 1 = [(3 - 2)^2 * 2, (6 - 4)^2 * 3)] = [2, 12] # Batch 2 = [((9 - 6)^2 * 4, (12 - 8)^2 * 5)] = [36, 80] # Batch 3 = [(15 - 10)^2 * 6] = [150] # Result (reduction=SUM) = ((2 + 12)*2 + (36 + 80)*2 + 150) / 5 = 82 # Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 280 / 5 = 56 # Loss `output_2`: # Per-sample weighted losses # Batch 1 = [(3 - 1)^2 * 3.5, (6 - 2)^2 * 2.5)] = [14, 40] # Batch 2 = [(9 - 3)^2 * 1.5, (12 - 4)^2 * 0.5)] = [54, 32] # Batch 3 = [(15 - 5)^2 * 3] = [300] # Result (reduction=SUM) = ((14 + 40)*2 + (54 + 32)*2 + 300) / 5 = 116 # Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 440 / 5 = 88 # When reduction is 'NONE' loss value that is passed to the optimizer # will be vector loss but what is reported is a scalar, which is an # average of all the values in all the batch vectors. # Total loss = Output_loss_1 + Output_loss_2 sum_over_batch_size_fit_result = { "loss": [144, 144], "output_1_loss": [56, 56], "output_2_loss": [88, 88], } self.expected_fit_result = { losses_utils.ReductionV2.NONE: sum_over_batch_size_fit_result, losses_utils.ReductionV2.SUM: { "loss": [198, 198], "output_1_loss": [82, 82], "output_2_loss": [116, 116], }, losses_utils.ReductionV2.AUTO: sum_over_batch_size_fit_result, losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE: sum_over_batch_size_fit_result, # noqa: E501 } # In the order: 'loss', 'output_1_loss', 'output_2_loss', self.expected_batch_result = { losses_utils.ReductionV2.NONE: [144, 56, 88], losses_utils.ReductionV2.SUM: [198, 82, 116], losses_utils.ReductionV2.AUTO: [144, 56, 88], losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE: [144, 56, 88], } # 2 + 12 + 36 + 80 + 150 = 280 # 14 + 40 + 54 + 32 + 300 = 440 self.expected_single_batch_result = [720, 280, 440] def test_fit(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction) ) history = model.fit( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, batch_size=2, epochs=2, shuffle=False, ) for key, value in self.expected_fit_result[reduction].items(): self.assertAllClose(history.history[key], value) def test_eval(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction) ) eval_result = model.evaluate( [self.x, self.x], [self.y1, self.y2], batch_size=2, sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) self.assertAllClose(eval_result, self.expected_batch_result[reduction]) def test_train_on_batch(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction) ) result = model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) expected_values = self.expected_batch_result[reduction] if reduction == losses_utils.ReductionV2.SUM: expected_values = self.expected_single_batch_result self.assertAllClose(result, expected_values) def test_test_on_batch(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction) ) result = model.test_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) expected_values = self.expected_batch_result[reduction] if 
reduction == losses_utils.ReductionV2.SUM: expected_values = self.expected_single_batch_result self.assertAllClose(result, expected_values) def test_fit_generator(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction) ) history = model.fit_generator( custom_generator_multi_io( sample_weights=[self.sample_weight_1, self.sample_weight_2] ), steps_per_epoch=3, epochs=2, ) for key, value in self.expected_fit_result[reduction].items(): self.assertAllClose(history.history[key], value) def test_eval_generator(self, reduction): model = self._get_compiled_multi_io_model( loss=losses.MeanSquaredError(reduction=reduction) ) eval_result = model.evaluate_generator( custom_generator_multi_io( sample_weights=[self.sample_weight_1, self.sample_weight_2] ), steps=3, ) self.assertAllClose(eval_result, self.expected_batch_result[reduction]) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/metrics/metrics_correctness_test.py/0
{ "file_path": "tf-keras/tf_keras/metrics/metrics_correctness_test.py", "repo_id": "tf-keras", "token_count": 15308 }
181
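The expected values hard-coded in the multi-output metrics test above all follow from the weighted mean-squared-error bookkeeping spelled out in its setUp comments. The short standalone sketch below is not part of either dumped file; it is a minimal NumPy re-derivation, with helper names chosen here purely for illustration, of the headline epoch-level numbers (56/88 for the weighted per-output losses, 14/40 for the weighted metrics, 144 for the total loss). The real Keras computation aggregates per batch, which yields the same totals for these inputs.

import numpy as np

# Values copied from the test comments: with all-ones Dense kernels, both
# outputs predict y_pred = [3, 6, 9, 12, 15] for x = [1, 2, 3, 4, 5].
y_pred = np.array([3.0, 6.0, 9.0, 12.0, 15.0])
y_true_1 = np.array([2.0, 4.0, 6.0, 8.0, 10.0])
y_true_2 = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
w_1 = np.array([2.0, 3.0, 4.0, 5.0, 6.0])   # sample_weight_1
w_2 = np.array([3.5, 2.5, 1.5, 0.5, 3.0])   # sample_weight_2

def weighted_loss(y_true, y_pred, w):
    # Loss-style reduction: weighted squared errors summed, then divided
    # by the number of samples.
    return np.sum(w * (y_pred - y_true) ** 2) / y_true.size

def weighted_metric(y_true, y_pred, w):
    # Weighted-metric-style reduction: weighted squared errors summed,
    # then divided by the sum of the weights.
    return np.sum(w * (y_pred - y_true) ** 2) / np.sum(w)

print(weighted_loss(y_true_1, y_pred, w_1))    # 56.0 -> output_1_loss
print(weighted_loss(y_true_2, y_pred, w_2))    # 88.0 -> output_2_loss
print(weighted_metric(y_true_1, y_pred, w_1))  # 14.0 -> output_1_mean_squared_error_2
print(weighted_metric(y_true_2, y_pred, w_2))  # 40.0 -> output_2_mean_squared_error_2
print(weighted_loss(y_true_1, y_pred, w_1)
      + weighted_loss(y_true_2, y_pred, w_2))  # 144.0 -> total weighted loss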
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the loss scaling optimizer class.""" import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras import optimizers from tf_keras.dtensor import utils as dtensor_utils from tf_keras.optimizers import optimizer from tf_keras.optimizers import utils as optimizer_utils from tf_keras.optimizers.legacy import optimizer_v2 from tf_keras.saving import serialization_lib # isort: off from tensorflow.python.platform import tf_logging from tensorflow.python.util.tf_export import keras_export class _UnwrapPreventer: """Wrapper that DistributionStrategy will not unwrap. Typically, DistributionStrategy will unwrap values when going from a cross- replica context to a replica context via `call_for_each_replica`. This class is a wrapper that DistributionStrategy will not unwrap, so it can be used to prevent it from unwrapping a value. TODO(reedwm): Find/implement a better way of preventing values from being unwrapped by DistributionStrategy """ __slots__ = ["value"] def __init__(self, value): self.value = value def _is_all_finite(grads): """Returns a scalar boolean tensor indicating if all gradients are finite.""" def raw_values(g): return g.values if isinstance(g, tf.IndexedSlices) else g is_finite_per_grad = [ tf.reduce_all(tf.math.is_finite(raw_values(g))) for g in grads if g is not None ] return tf.reduce_all(is_finite_per_grad) def _op_in_graph_mode(tensor): """Returns the tensor's op in graph mode, or the tensor in eager mode. This is useful because sometimes an op is needed in graph mode instead of a tensor. In eager mode, there are no ops. Args: tensor: A tensor. Returns: The tensor's op in graph mode. The tensor in eager mode. """ if tf.executing_eagerly(): return tensor return tensor.op def _assign_if_finite(var, value): """Assigns a value to a variable if the value is finite.""" return tf.cond( tf.math.is_finite(value), lambda: _op_in_graph_mode(var.assign(value)), tf.no_op, ) def _maybe_warn_about_scaling( loss_has_been_scaled, gradients_have_been_unscaled ): """Warn if the loss or gradients hasn't been scaled or unscaled.""" if loss_has_been_scaled and gradients_have_been_unscaled: return example_code = """ with tf.GradientTape() as tape: loss = loss_fn() scaled_loss = opt.get_scaled_loss(loss) scaled_grads = tape.gradient(scaled_loss, vars) grads = opt.get_unscaled_gradients(scaled_grads) opt.apply_gradients([(grads, var)])""" if not loss_has_been_scaled and not gradients_have_been_unscaled: tf_logging.warning( "You forgot to call LossScaleOptimizer.get_scaled_loss() and " "LossScaleOptimizer.get_unscaled_gradients() before calling " "LossScaleOptimizer.apply_gradients(). This will likely result in " "worse model quality, so please call them in the correct places! 
" f"For example:{example_code}\nFor more information, see " "https://www.tensorflow.org/api_docs/python/tf/tf_keras/mixed_precision/LossScaleOptimizer" # noqa: E501 ) elif not loss_has_been_scaled: tf_logging.warning( "You forgot to call LossScaleOptimizer.get_scaled_loss() before " "calling LossScaleOptimizer.apply_gradients() (you did call " "get_unscaled_gradients() however). This will likely result in " "worse model quality, so please call get_scaled_loss() in the " f"correct place! For example:{example_code}\nFor more information, " "see " "https://www.tensorflow.org/api_docs/python/tf/tf_keras/mixed_precision/LossScaleOptimizer" # noqa: E501 ) elif not gradients_have_been_unscaled: tf_logging.warning( "You forgot to call LossScaleOptimizer.get_unscaled_gradients() " "before calling LossScaleOptimizer.apply_gradients() (you did call " "get_scaled_loss() however). This will likely result in worse " "model quality, so please call get_unscaled_gradients() in the " f"correct place! For example:{example_code}\nFor more information, " "see " "https://www.tensorflow.org/api_docs/python/tf/tf_keras/mixed_precision/LossScaleOptimizer" # noqa: E501 ) class _DynamicLossScaleState(tf.__internal__.tracking.Trackable): """The state of a dynamic loss scale.""" def __init__(self, initial_loss_scale, growth_steps, multiplier): """Creates the dynamic loss scale.""" super().__init__() self._initial_loss_scale = float(initial_loss_scale) self._growth_steps = int(growth_steps) self._multiplier = float(multiplier) self._weights = {} self._current_loss_scale = self._add_weight( name="current_loss_scale", dtype=tf.float32, initial_value=self._initial_loss_scale, ) # The number of consecutive steps with finite gradients since the last # nonfinite gradient or change in loss scale. The name is 'good_steps' # for backwards compatibility with older checkpoints. self._counter = self._add_weight( name="good_steps", dtype=tf.int64, initial_value=0 ) def _add_weight(self, name, initial_value, dtype=None): """Adds a weight to this loss scale. Args: name: Variable name. initial_value: The variable's initial value. dtype: The type of the variable. Returns: A variable. Raises: RuntimeError: If a weight with `name` has already been added. """ variable = tf.Variable( initial_value=initial_value, name=name, dtype=dtype, trainable=False, synchronization=tf.VariableSynchronization.AUTO, # Set aggregation to NONE, as loss scaling variables should never be # aggregated. aggregation=tf.VariableAggregation.NONE, ) if tf.executing_eagerly(): graph_key = None else: graph = tf.compat.v1.get_default_graph() graph_key = graph._graph_key key = (name, graph_key) self._weights[key] = variable self._handle_deferred_dependencies(name=name, trackable=variable) backend.track_variable(variable) return variable def _trackable_children(self, save_type="checkpoint", **kwargs): """From Trackable. Gather graph-specific weights to save.""" if tf.executing_eagerly(): graph_key = None else: graph = tf.compat.v1.get_default_graph() graph_key = graph._graph_key weights = {} for (name, g), v in sorted( self._weights.items(), key=lambda i: i[0][0] ): if g == graph_key: weights[name] = v weights.update(super()._trackable_children(save_type, **kwargs)) return weights def _lookup_dependency(self, name, cached_dependencies=None): """From Trackable. 
Find a weight in the current graph.""" if cached_dependencies is not None: unconditional = cached_dependencies.get(name) else: unconditional = super()._lookup_dependency(name) if unconditional is not None: return unconditional if tf.executing_eagerly(): graph_key = None else: graph = tf.compat.v1.get_default_graph() graph_key = graph._graph_key return self._weights.get((name, graph_key), None) @property def initial_loss_scale(self): return self._initial_loss_scale @property def growth_steps(self): return self._growth_steps @property def multiplier(self): return self._multiplier @property def current_loss_scale(self): """Returns the current loss scale as a float32 `tf.Variable`.""" return self._current_loss_scale @property def counter(self): """Returns the counter as a float32 `tf.Variable`.""" return self._counter def __call__(self): """Returns the current loss scale as a scalar `float32` tensor.""" return tf.convert_to_tensor(self._current_loss_scale) def update(self, grads): """Updates the value of the loss scale. Args: grads: A nested structure of unscaled gradients, each which is an all-reduced gradient of the loss with respect to a weight. Returns: update_op: In eager mode, None. In graph mode, an op to update the loss scale. should_apply_gradients: Either a bool or a scalar boolean tensor. If False, the caller should skip applying `grads` to the variables this step. """ grads = tf.nest.flatten(grads) if ( tf.distribute.has_strategy() and tf.distribute.in_cross_replica_context() ): distribution = tf.distribute.get_strategy() is_finite_per_replica = distribution.extended.call_for_each_replica( _is_all_finite, args=(grads,) ) # Each replica computed the same `is_finite` value, since `grads` is # all-reduced across replicas. Arbitrarily take `is_finite` from the # first replica. is_finite = distribution.experimental_local_results( is_finite_per_replica )[0] else: is_finite = _is_all_finite(grads) def update_if_finite_grads(): """Update assuming the gradients are finite.""" def incr_loss_scale(): new_loss_scale = self.current_loss_scale * self.multiplier return tf.group( _assign_if_finite(self.current_loss_scale, new_loss_scale), self.counter.assign(0), ) return tf.cond( self.counter + 1 >= self.growth_steps, incr_loss_scale, lambda: _op_in_graph_mode(self.counter.assign_add(1)), ) def update_if_not_finite_grads(): """Update assuming the gradients are nonfinite.""" new_loss_scale = tf.maximum( self.current_loss_scale / self.multiplier, 1 ) return tf.group( self.counter.assign(0), self.current_loss_scale.assign(new_loss_scale), ) update_op = tf.cond( is_finite, update_if_finite_grads, update_if_not_finite_grads ) should_apply_gradients = is_finite return update_op, should_apply_gradients # See LossScaleOptimizer docstring for why this is so big _DEFAULT_INITIAL_SCALE = 2**15 _DEFAULT_GROWTH_STEPS = 2000 # TODO(b/215389169): Delete this class after `OptimizerV2` is deprecated. class LossScaleOptimizerMetaclass(type): """Metaclass that delegates LossScaleOptimizer instance creation. This metaclass causes a LossScaleOptimizer or LossScaleOptimizerV3 to be created when a BaseLossScaleOptimizer is constructed. As a result, when a user creates a loss scale optimizer with `tf.keras.mixed_precision.LossScaleOptimizer(opt)`, either a LossScaleOptimizer or LossScaleOptimizerV3 will be created, depending on the type of `opt`. 
""" def __call__(cls, inner_optimizer, *args, **kwargs): if cls is not BaseLossScaleOptimizer: return super(LossScaleOptimizerMetaclass, cls).__call__( inner_optimizer, *args, **kwargs ) if isinstance(inner_optimizer, optimizer_v2.OptimizerV2): return LossScaleOptimizer(inner_optimizer, *args, **kwargs) elif isinstance(inner_optimizer, optimizer.Optimizer): return LossScaleOptimizerV3(inner_optimizer, *args, **kwargs) # Raise TypeError because inner_optimizer is not an optimizer msg = ( '"inner_optimizer" must be an instance of ' "`tf.keras.optimizers.Optimizer` or " "`tf.keras.optimizers.experimental.Optimizer`, but got: " f"{inner_optimizer}." ) raise TypeError(msg) # TODO(b/215389169): Delete this class after `OptimizerV2` is deprecated. @keras_export("keras.mixed_precision.LossScaleOptimizer") class BaseLossScaleOptimizer(metaclass=LossScaleOptimizerMetaclass): """An optimizer that applies loss scaling to prevent numeric underflow. Loss scaling is a technique to prevent numeric underflow in intermediate gradients when float16 is used. To prevent underflow, the loss is multiplied (or "scaled") by a certain factor called the "loss scale", which causes intermediate gradients to be scaled by the loss scale as well. The final gradients are divided (or "unscaled") by the loss scale to bring them back to their original value. `LossScaleOptimizer` wraps another optimizer and applies loss scaling to it. By default, the loss scale is dynamically updated over time so you do not have to choose the loss scale. The `minimize` method automatically scales the loss, unscales the gradients, and updates the loss scale so all you have to do is wrap your optimizer with a `LossScaleOptimizer` if you use `minimize`. For example: >>> opt = tf.keras.optimizers.experimental.SGD(0.25) >>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt) >>> var = tf.Variable(1.) >>> loss_fn = lambda: var ** 2 >>> # 'minimize' applies loss scaling and updates the loss sale. >>> opt.minimize(loss_fn, var_list=[var]) >>> var.numpy() 0.5 If a `tf.GradientTape` is used to compute gradients instead of `minimize`, you must scale the loss and gradients manually. This can be done with the `LossScaleOptimizer.get_scaled_loss` and `LossScaleOptimizer.get_unscaled_gradients` methods. For example: >>> with tf.GradientTape() as tape: ... loss = loss_fn() ... scaled_loss = opt.get_scaled_loss(loss) >>> scaled_grad = tape.gradient(scaled_loss, var) >>> (grad,) = opt.get_unscaled_gradients([scaled_grad]) >>> opt.apply_gradients([(grad, var)]) # Loss scale is updated here >>> var.numpy() 0.25 Warning: If you forget to call `get_scaled_loss` or `get_unscaled_gradients` (or both) when using a `tf.GradientTape`, the model will likely converge to a worse quality. Please make sure you call each function exactly once. When mixed precision with float16 is used, there is typically no risk of underflow affecting model quality if loss scaling is properly used. See [the mixed precision guide]( https://www.tensorflow.org/guide/keras/mixed_precision) for more information on how to use mixed precision. Args: inner_optimizer: The `tf.keras.optimizers.Optimizer` or `tf.keras.optimizers.experimental.Optimizer` instance to wrap. dynamic: Bool indicating whether dynamic loss scaling is used. If `True`, the loss scale will be dynamically updated over time using an algorithm that keeps the loss scale at approximately its optimal value. If False, a single fixed loss scale is used and `initial_scale` must be specified, which is used as the loss scale. 
Recommended to keep as True, as choosing a fixed loss scale can be tricky. Currently, there is a small performance overhead to dynamic loss scaling compared to fixed loss scaling. Defaults to `True`. initial_scale: The initial loss scale. If `dynamic` is True, this defaults to `2 ** 15`. If `dynamic` is False, this must be specified and acts as the sole loss scale, as the loss scale does not change over time. When dynamic loss scaling is used, it is better for this to be a very high number, because a loss scale that is too high gets lowered far more quickly than a loss scale that is too low gets raised. dynamic_growth_steps: With dynamic loss scaling, every `dynamic_growth_steps` steps with finite gradients, the loss scale is doubled. If a nonfinite gradient is encountered, the count is reset back to zero, gradients are skipped that step, and the loss scale is halved. The count can be queried with `LossScaleOptimizer.dynamic_counter`. This argument can only be specified if `dynamic` is True. Defaults to `2000`. `LossScaleOptimizer` will occasionally skip applying gradients to the variables, in which case the trainable variables will not change that step. This is done because the dynamic loss scale will sometimes be raised too high, causing overflow in the gradients. Typically, the first 2 to 15 steps of the model are skipped as the initial loss scale is very high, but afterwards steps will only be skipped on average 0.05% of the time (the fraction of steps skipped is `1 / dynamic_growth_steps`). `LossScaleOptimizer` delegates all public `Optimizer` methods to the inner optimizer. Additionally, in methods `minimize` and `get_gradients`, it scales the loss and unscales the gradients. In methods `minimize` and `apply_gradients`, it additionally updates the loss scale and skips applying gradients if any gradient has a nonfinite value. ### Hyperparameters If wrapping a `tf.keras.optimizers.Optimizer`, hyperparameters can be accessed and set on the LossScaleOptimizer, which will be delegated to the wrapped optimizer. >>> opt = tf.keras.optimizers.legacy.Adam(beta_1=0.8, epsilon=1e-5) >>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt) >>> opt.beta_1 # Equivalent to `opt.inner_optimizer.beta_1` 0.8 >>> opt.beta_1 = 0.7 # Equivalent to `opt.inner_optimizer.beta_1 = 0.7` >>> opt.beta_1 0.7 >>> opt.inner_optimizer.beta_1 0.7 However, accessing or setting non-hyperparameters is not delegated to the LossScaleOptimizer. In an Adam optimizer, `beta_1` is a hyperparameter but `epsilon` is not, as the Adam optimizer only calls `Optimizer._set_hyper` on `beta_1`. >>> opt.inner_optimizer.epsilon 1e-5 >>> opt.epsilon Traceback (most recent call last): ... AttributeError: 'LossScaleOptimizer' object has no attribute 'epsilon' >>> opt.epsilon = 1e-4 # This does NOT set epsilon on `opt.inner_optimizer` >>> opt.inner_optimizer.epsilon 1e-5 In the above example, despite epsilon being set on the LossScaleOptimizer, the old epsilon value will still be used when training as epsilon was not set on the inner optimizer. """ @property def dynamic(self): """Bool indicating whether dynamic loss scaling is used.""" raise NotImplementedError @property def loss_scale(self): """The current loss scale as a float32 scalar tensor.""" raise NotImplementedError @property def dynamic_counter(self): """The number of steps since the loss scale was last increased or decreased. This is None if `LossScaleOptimizer.dynamic` is False. The counter is incremented every step. 
Once it reaches `LossScaleOptimizer.dynamic_growth_steps`, the loss scale will be doubled and the counter will be reset back to zero. If nonfinite gradients are encountered, the loss scale will be halved and the counter will be reset back to zero. """ raise NotImplementedError @property def initial_scale(self): """The initial loss scale. If `LossScaleOptimizer.dynamic` is False, this is the same number as `LossScaleOptimizer.loss_scale`, as the loss scale never changes. """ raise NotImplementedError @property def dynamic_growth_steps(self): """The number of steps it takes to increase the loss scale. This is None if `LossScaleOptimizer.dynamic` is False. Every `dynamic_growth_steps` consecutive steps with finite gradients, the loss scale is increased. """ raise NotImplementedError @property def inner_optimizer(self): """The optimizer that this LossScaleOptimizer is wrapping.""" raise NotImplementedError def get_scaled_loss(self, loss): """Scales the loss by the loss scale. This method is only needed if you compute gradients manually, e.g. with `tf.GradientTape`. In that case, call this method to scale the loss before passing the loss to `tf.GradientTape`. If you use `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss scaling is automatically applied and this method is unneeded. If this method is called, `get_unscaled_gradients` should also be called. See the `tf.keras.mixed_precision.LossScaleOptimizer` doc for an example. Args: loss: The loss, which will be multiplied by the loss scale. Can either be a tensor or a callable returning a tensor. Returns: `loss` multiplied by `LossScaleOptimizer.loss_scale`. """ # Calls to this function would be delegated to `get_scaled_loss` # of either `LossScaleOptimizer` or `LossScaleOptimizerV3`, depending on # the type of `inner_optimizer`. raise NotImplementedError def get_unscaled_gradients(self, grads): """Unscales the gradients by the loss scale. This method is only needed if you compute gradients manually, e.g. with `tf.GradientTape`. In that case, call this method to unscale the gradients after computing them with `tf.GradientTape`. If you use `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss scaling is automatically applied and this method is unneeded. If this method is called, `get_scaled_loss` should also be called. See the `tf.keras.mixed_precision.LossScaleOptimizer` doc for an example. Args: grads: A list of tensors, each which will be divided by the loss scale. Can have None values, which are ignored. Returns: A new list the same size as `grads`, where every non-None value in `grads` is divided by `LossScaleOptimizer.loss_scale`. """ # Calls to this function would be delegated to `get_unscaled_gradients` # of either `LossScaleOptimizer` or `LossScaleOptimizerV3`, depending on # the type of `inner_optimizer`. raise NotImplementedError class LossScaleOptimizer( tf.__internal__.tracking.DelegatingTrackableMixin, optimizer_v2.OptimizerV2, BaseLossScaleOptimizer, ): """An optimizer that applies loss scaling to prevent numeric underflow.""" _HAS_AGGREGATE_GRAD = True def __init__( self, inner_optimizer, dynamic=True, initial_scale=None, dynamic_growth_steps=None, ): if not isinstance(inner_optimizer, optimizer_v2.OptimizerV2): if isinstance(inner_optimizer, optimizer.Optimizer): # Give better error message if the new experimental optimizer is # passed. 
raise TypeError( "You passed an instance of the new experimental " "optimizer, `optimizer.Optimizer`, " "to LossScaleOptimizer, but " "only the classic optimizers subclassing from " "`tf.keras.optimizers.Optimizer` can be passed. Please " "use `loss_scale_optimizer.LossScaleOptimizerV3` " "instead of " "`tf.keras.mixed_precision.LossScaleOptimizer`, " "as the former supports wrapping " "instances of the new experimental optimizer. " f"Got optimizer: {inner_optimizer}" ) msg = ( '"inner_optimizer" must be an instance of ' "`tf.keras.optimizers.Optimizer`, but got: %s. " % inner_optimizer ) raise TypeError(msg) if not isinstance(dynamic, bool): # Catch errors if a user incorrectly passes a string or float to the # second argument argument, as this was commonly done for the # now-removed LossScaleOptimizerV1. raise TypeError( '"dynamic" argument to LossScaleOptimizer.__init__ must ' "be a bool, but got: %r" % (dynamic,) ) if isinstance(inner_optimizer, LossScaleOptimizer): raise TypeError( "LossScaleOptimizer cannot wrap another " "LossScaleOptimizer, but got: %s" % (inner_optimizer,) ) _raise_if_strategy_unsupported() if getattr( inner_optimizer, "_is_wrapped_by_loss_scale_optimizer", False ): # TODO(reedwm): Maybe support this. The difficulty is that LSO has # the same checkpoint format as the inner optimizer, so multiple # LSOs wrapping the same optimizer causes the checkpointing logic to # become confused. raise ValueError( '"inner_optimizer" is already wrapped by a ' "LossScaleOptimizer. An optimizer can only be wrapped " "by a single LossScaleOptimizer" ) self._optimizer = inner_optimizer self._optimizer._is_wrapped_by_loss_scale_optimizer = True # We don't call super().__init__, since we do not want to call # OptimizerV2's constructor. tf.__internal__.tracking.DelegatingTrackableMixin.__init__( self, self._optimizer ) if dynamic: if initial_scale is None: initial_scale = _DEFAULT_INITIAL_SCALE if dynamic_growth_steps is None: dynamic_growth_steps = _DEFAULT_GROWTH_STEPS self._loss_scale = _DynamicLossScaleState( initial_scale, dynamic_growth_steps, multiplier=2 ) self._track_trackable(self._loss_scale, "loss_scale") else: if initial_scale is None: raise ValueError( '"initial_scale" must be specified if "dynamic" is False' ) self._loss_scale = float(initial_scale) if dynamic_growth_steps is not None: raise ValueError( '"dynamic_growth_steps" must be None if "dynamic" ' "is False, but got: %s" % (dynamic_growth_steps,) ) # Used to track whether get_scaled_loss() and get_unscaled_gradients() # have been called self._loss_has_been_scaled = False self._gradients_have_been_unscaled = False # To support restoring TensorFlow 2.2 checkpoints. 
self._track_trackable( FakeOptimizerForRestoration(self._optimizer), "base_optimizer" ) @property def dynamic(self): return isinstance(self._loss_scale, _DynamicLossScaleState) @property def loss_scale(self): if isinstance(self._loss_scale, _DynamicLossScaleState): return tf.convert_to_tensor(self._loss_scale.current_loss_scale) else: return tf.convert_to_tensor(self._loss_scale) @property def dynamic_counter(self): if isinstance(self._loss_scale, _DynamicLossScaleState): return self._loss_scale.counter else: return None @property def initial_scale(self): if isinstance(self._loss_scale, _DynamicLossScaleState): return self._loss_scale.initial_loss_scale else: return self._loss_scale @property def dynamic_growth_steps(self): if isinstance(self._loss_scale, _DynamicLossScaleState): return self._loss_scale.growth_steps else: return None @property def inner_optimizer(self): return self._optimizer def get_scaled_loss(self, loss): self._loss_has_been_scaled = True if callable(loss): def new_loss(): loss_val = loss() return loss_val * tf.cast(self.loss_scale, loss_val.dtype) return new_loss else: return loss * tf.cast(self.loss_scale, loss.dtype) def get_unscaled_gradients(self, grads): self._gradients_have_been_unscaled = True loss_scale_reciprocal = 1.0 / self.loss_scale return [ _multiply_gradient(g, loss_scale_reciprocal) if g is not None else None for g in grads ] def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None): tape = tf.GradientTape() if tape is None else tape with tape: loss = self.get_scaled_loss(loss) grads_and_vars = self._optimizer._compute_gradients( loss, var_list, grad_loss, tape=tape ) grads = [g for g, _ in grads_and_vars] weights = [v for _, v in grads_and_vars] unscaled_grads = self.get_unscaled_gradients(grads) return list(zip(unscaled_grads, weights)) def get_gradients(self, loss, params): loss = self.get_scaled_loss(loss) grads = self._optimizer.get_gradients(loss, params) return self.get_unscaled_gradients(grads) def _create_all_weights(self, var_list): self._optimizer._create_all_weights(var_list) def apply_gradients( self, grads_and_vars, name=None, experimental_aggregate_gradients=True ): if tf.distribute.in_cross_replica_context(): raise ValueError( "apply_gradients() must be called in a replica context." ) # We check for the strategy here despite already checking in the # constructor as frequently the optimizer is created outside the # strategy's scope. _raise_if_strategy_unsupported() _maybe_warn_about_scaling( self._loss_has_been_scaled, self._gradients_have_been_unscaled ) grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars) if experimental_aggregate_gradients: # We must aggregate the gradients here instead of in # self.optimizer.apply_gradients, so that any NaN or Inf gradients # are propagated to each replica. If any replica has a NaN or Inf # gradient, they must all have a NaN or Inf gradient so that they # all skip the step. grads_and_vars = self._optimizer._transform_unaggregated_gradients( grads_and_vars ) grads_and_vars = self._optimizer._aggregate_gradients( grads_and_vars ) grads_and_vars = tuple(grads_and_vars) grads = [g for g, _ in grads_and_vars] # We do not want DistributionStrategy to unwrap any MirroredVariables in # grads_and_vars, because even in a replica context, the wrapped # optimizer expects mirrored variables. So we wrap the variables with an # _UnwrapPreventer, preventing DistributionStrategy from unwrapping the # MirroredVariables. 
wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars]) def do_not_apply_fn(): # Normally self._optimizer.iterations is incremented in # self._optimizer.apply_gradients(). Since that is not called in # this branch, we increment it here instead. return self._optimizer.iterations.assign_add(1, read_value=False) def _if_should_apply_grads(grads): if isinstance(self._loss_scale, _DynamicLossScaleState): return self._loss_scale.update(grads) else: return (tf.no_op(), True) if tf.__internal__.distribute.strategy_supports_no_merge_call(): loss_scale_update_op, should_apply_grads = _if_should_apply_grads( grads ) def apply_fn(): return self._apply_gradients(grads, wrapped_vars, name) maybe_apply_op = tf.__internal__.smart_cond.smart_cond( should_apply_grads, apply_fn, do_not_apply_fn ) return tf.group(maybe_apply_op, loss_scale_update_op) else: def _apply_gradients_cross_replica( distribution, grads, wrapped_vars, name ): ( loss_scale_update_op, should_apply_grads, ) = _if_should_apply_grads(grads) def apply_fn(): return distribution.extended.call_for_each_replica( self._apply_gradients, args=(grads, wrapped_vars, name) ) # Note: We must call this cond() in a cross-replica context. # DistributionStrategy does not support having a cond in a # replica context with a branch that calls `merge_call`, and # self._optimizer.apply_gradients calls `merge_call`. maybe_apply_op = tf.__internal__.smart_cond.smart_cond( should_apply_grads, apply_fn, do_not_apply_fn ) return tf.group(maybe_apply_op, loss_scale_update_op) return tf.distribute.get_replica_context().merge_call( _apply_gradients_cross_replica, args=(grads, wrapped_vars, name) ) def _apply_gradients(self, grads, wrapped_vars, name): # Pass experimental_aggregate_gradients=False since LossScaleOptimizer # already aggregated the gradients. # TODO(reedwm): This will raise a fairly cryptic error message if # self._optimizer.apply_gradients does not take # experimental_aggregate_gradients. return self._optimizer.apply_gradients( list(zip(grads, wrapped_vars.value)), name=name, experimental_aggregate_gradients=False, ) def get_config(self): serialized_optimizer = optimizers.serialize(self._optimizer) return { "inner_optimizer": serialized_optimizer, "dynamic": self.dynamic, "initial_scale": self.initial_scale, "dynamic_growth_steps": self.dynamic_growth_steps, } @classmethod def from_config(cls, config, custom_objects=None): config = config.copy() # Make a copy, since we mutate config if "loss_scale" in config: # If loss_scale is in config, we assume we are deserializing a # LossScaleOptimizer from TF 2.3 or below. We convert the config so # it can be deserialized in the current LossScaleOptimizer. loss_scale = serialization_lib.deserialize_keras_object( config.pop("loss_scale"), module_objects={ "FixedLossScale": tf.compat.v1.mixed_precision.FixedLossScale, # noqa: E501 "DynamicLossScale": tf.compat.v1.mixed_precision.DynamicLossScale, # noqa: E501 }, printable_module_name="loss scale", ) if isinstance( loss_scale, tf.compat.v1.mixed_precision.FixedLossScale ): config["dynamic"] = False config["initial_scale"] = loss_scale._loss_scale_value elif isinstance( loss_scale, tf.compat.v1.mixed_precision.DynamicLossScale ): config["dynamic"] = True config["initial_scale"] = loss_scale.initial_loss_scale config["dynamic_growth_steps"] = loss_scale.increment_period if loss_scale.multiplier != 2: raise ValueError( "Cannot deserialize LossScaleOptimizer with a " "DynamicLossScale whose multiplier is not 2. 
Got " "DynamicLossScale: %s" % (loss_scale,) ) else: raise ValueError( "Serialized LossScaleOptimizers with a LossScale that is " "neither a FixedLossScale nor a DynamicLossScale can no " "longer be deserialized" ) config["inner_optimizer"] = config.pop("optimizer") if isinstance(config["inner_optimizer"], optimizer_v2.OptimizerV2): inner_optimizer = config["inner_optimizer"] else: inner_optimizer = optimizers.deserialize( config["inner_optimizer"], custom_objects=custom_objects, use_legacy_optimizer=True, ) del config["inner_optimizer"] return cls(inner_optimizer, **config) # Delegations: We delegate most OptimizerV2 methods to the wrapped optimizer # below. @property def iterations(self): return self._optimizer.iterations @iterations.setter def iterations(self, variable): self._optimizer.iterations = variable def get_slot_names(self): return self._optimizer.get_slot_names() def variables(self): return self._optimizer.variables() @property def weights(self): return self._optimizer.weights def get_weights(self): return self._optimizer.get_weights() def set_weights(self, weights): return self._optimizer.set_weights(weights) @property def clipnorm(self): return self._optimizer.clipnorm @clipnorm.setter def clipnorm(self, val): self._optimizer.clipnorm = val @property def global_clipnorm(self): return self._optimizer.global_clipnorm @global_clipnorm.setter def global_clipnorm(self, val): self._optimizer.global_clipnorm = val @property def clipvalue(self): return self._optimizer.clipvalue @clipvalue.setter def clipvalue(self, val): self._optimizer.clipvalue = val def _aggregate_gradients(self, grads_and_vars): return self._optimizer._aggregate_gradients(grads_and_vars) def _restore_slot_variable(self, slot_name, variable, slot_variable): return self._optimizer._restore_slot_variable( slot_name, variable, slot_variable, ) def _create_or_restore_slot_variable( self, slot_variable_position, slot_name, variable ): return self._optimizer._create_or_restore_slot_variable( slot_variable_position, slot_name, variable ) def get_slot(self, var, slot_name): return self._optimizer.get_slot(var, slot_name) def add_slot(self, var, slot_name, initializer="zeros"): return self._optimizer.add_slot(var, slot_name, initializer) def __getattribute__(self, name): try: return object.__getattribute__(self, name) except AttributeError as e: if name == "_optimizer" or name == "_hyper": # Avoid infinite recursion raise e # Delegate hyperparameter accesses to inner optimizer. if name == "lr": name = "learning_rate" if name in self._optimizer._hyper: return self._optimizer._get_hyper(name) raise e def __dir__(self): result = set(super().__dir__()) if "_optimizer" in result: result |= self._optimizer._hyper.keys() if "learning_rate" in self._optimizer._hyper.keys(): result.add("lr") return list(result) def __setattr__(self, name, value): if name == "lr": name = "learning_rate" # Delegate setting hyperparameter to inner optimizer if the attribute # does not exist on the LossScaleOptimizer try: # We cannot check for the 'iterations' attribute as it cannot be set # after it is accessed. if name != "iterations": object.__getattribute__(self, name) has_attribute = True except AttributeError: has_attribute = False if ( name != "_optimizer" and name in self._optimizer._hyper and not has_attribute ): self._optimizer._set_hyper(name, value) else: super().__setattr__(name, value) # Explicitly delegate learning_rate. 
Normally hyperparameters are delegated # in __getattribute__, but if a hyperparameter is not in # self._optimizer._hyper (e.g. because self._optimizer itself wraps another # optimizer), then it won't be delegated. Since learning_rate is a very # commonly accessed hyperparameter, we delegate it here. @property def learning_rate(self): return self._optimizer.learning_rate @learning_rate.setter def learning_rate(self, value): self._optimizer.learning_rate = value @property def lr(self): return self._optimizer.learning_rate @lr.setter def lr(self, value): self._optimizer.lr = value # We do not override some OptimizerV2 methods. For each, we describe why we # do not delegate them to self._optimizer: # * get_updates: get_updates() calls get_gradients(). Since we override # get_gradients(), we cannot delegate get_updates() to self._optimizer, # otherwise the overridden get_gradients() method would not be called. # Luckily, get_updates() does not access any OptimizerV2 fields, so # inheriting the OptimizerV2 version works fine. # * minimize: We don't delegate for a similar reason as get_updates(): it calls # both self._compute_gradients() and self.apply_gradients(), and both need # to have the LossScaleOptimizer version called. # TODO(reedwm): Maybe throw an error if mixed precision is used without this # optimizer being used. class LossScaleOptimizerV3( tf.__internal__.tracking.DelegatingTrackableMixin, optimizer.Optimizer, BaseLossScaleOptimizer, ): """An optimizer that applies loss scaling to prevent numeric underflow. This is a copy of the `mixed_precision.LossScaleOptimizer` class defined above, except it subclasses and wraps the new experimental Optimizer class instead of the `tf.keras.optimizers.Optimizer` class. Some of the methods this class defines and calls are different compared to LossScaleOptimizer due to the differences between the two Optimizer base classes. Additionally, this class does not support the legacy graph mode, but LossScaleOptimizer does. Since the new experimental Optimizer does not have a hyperparameter concept, LossScaleOptimizerV3 does not delegate arbitrary hyperparameter accesses to the inner optimizer, unlike LossScaleOptimizer. LossScaleOptimizerV3 does delegate the "learning_rate" attribute, however. """ @tf.__internal__.tracking.no_automatic_dependency_tracking def __init__( self, inner_optimizer, dynamic=True, initial_scale=None, dynamic_growth_steps=None, ): if not isinstance(inner_optimizer, optimizer.Optimizer): if isinstance(inner_optimizer, optimizer_v2.OptimizerV2): # Give better error message if the OptimizerV2 class is passed # instead of the new experimental optimizer. raise TypeError( "You passed a `tf.keras.optimizers.Optimizer` instance to " "LossScaleOptimizerV3, but only the new experimental " "optimizer defined in " "tf_keras/optimizer_experimental/optimizer.py can be " "passed. Please use " "`tf.keras.mixed_precision.LossScaleOptimizer` " "instead of LossScaleOptimizerV3, as the former supports " "`tf.keras.optimizers.Optimizer`s. Got optimizer: " f"{inner_optimizer}" ) raise TypeError( '"inner_optimizer" must be an instance of ' f"Optimizer, but got: {inner_optimizer}." ) if not isinstance(dynamic, bool): # Catch errors if a user incorrectly passes a string or float to the # second argument, as this was commonly done for the # now-removed LossScaleOptimizerV1. 
raise TypeError( '"dynamic" argument to LossScaleOptimizer.__init__ must ' f"be a bool, but got: {repr(dynamic)}" ) if isinstance(inner_optimizer, LossScaleOptimizerV3): raise TypeError( "LossScaleOptimizer cannot wrap another " f"LossScaleOptimizer, but got: {inner_optimizer}" ) _raise_if_strategy_unsupported() if getattr( inner_optimizer, "_is_wrapped_by_loss_scale_optimizer", False ): # TODO(reedwm): Maybe support this. The difficulty is that LSO has # the same checkpoint format as the inner optimizer, so multiple # LSOs wrapping the same optimizer causes the checkpointing logic to # become confused. raise ValueError( '"inner_optimizer" is already wrapped by a ' "LossScaleOptimizer. An optimizer can only be wrapped " "by a single LossScaleOptimizer" ) self._optimizer = inner_optimizer self._optimizer._is_wrapped_by_loss_scale_optimizer = True # We don't call super().__init__, since we do not want to call # Optimizer's constructor. tf.__internal__.tracking.DelegatingTrackableMixin.__init__( self, self._optimizer ) if dynamic: if initial_scale is None: initial_scale = _DEFAULT_INITIAL_SCALE if dynamic_growth_steps is None: dynamic_growth_steps = _DEFAULT_GROWTH_STEPS self._loss_scale = _DynamicLossScaleState( initial_scale, dynamic_growth_steps, multiplier=2 ) self._track_trackable(self._loss_scale, "loss_scale") else: if initial_scale is None: raise ValueError( '"initial_scale" must be specified if "dynamic" is False' ) self._loss_scale = float(initial_scale) if dynamic_growth_steps is not None: raise ValueError( '"dynamic_growth_steps" must be None if "dynamic" ' f"is False, but got: {dynamic_growth_steps}" ) # Used to track whether get_scaled_loss() and get_unscaled_gradients() # have been called self._loss_has_been_scaled = False self._gradients_have_been_unscaled = False @property def dynamic(self): return isinstance(self._loss_scale, _DynamicLossScaleState) @property def loss_scale(self): if isinstance(self._loss_scale, _DynamicLossScaleState): return tf.convert_to_tensor(self._loss_scale.current_loss_scale) else: return tf.convert_to_tensor(self._loss_scale) @property def dynamic_counter(self): if isinstance(self._loss_scale, _DynamicLossScaleState): return self._loss_scale.counter else: return None @property def initial_scale(self): if isinstance(self._loss_scale, _DynamicLossScaleState): return self._loss_scale.initial_loss_scale else: return self._loss_scale @property def dynamic_growth_steps(self): if isinstance(self._loss_scale, _DynamicLossScaleState): return self._loss_scale.growth_steps else: return None @property def inner_optimizer(self): return self._optimizer def get_scaled_loss(self, loss): self._loss_has_been_scaled = True if callable(loss): def new_loss(): loss_val = loss() return loss_val * tf.cast(self.loss_scale, loss_val.dtype) return new_loss else: return loss * tf.cast(self.loss_scale, loss.dtype) def get_unscaled_gradients(self, grads): self._gradients_have_been_unscaled = True loss_scale_reciprocal = 1.0 / self.loss_scale return [ _multiply_gradient(g, loss_scale_reciprocal) if g is not None else None for g in grads ] def compute_gradients(self, loss, var_list, tape=None): tape = tf.GradientTape() if tape is None else tape with tape: loss = self.get_scaled_loss(loss) grads_and_vars = self._optimizer.compute_gradients( loss, var_list, tape=tape ) grads = [g for g, _ in grads_and_vars] weights = [v for _, v in grads_and_vars] unscaled_grads = self.get_unscaled_gradients(grads) return list(zip(unscaled_grads, weights)) def apply_gradients( self, grads_and_vars, 
skip_gradients_aggregation=False, **kwargs ): grads_and_vars = list(grads_and_vars) grads, trainable_variables = zip(*grads_and_vars) with tf.init_scope(): # Lift variable creation to init scope to avoid environment # issues. self.build(trainable_variables) if tf.distribute.in_cross_replica_context(): raise ValueError( "apply_gradients() must be called in a replica context." ) # We check for the strategy here despite already checking in the # constructor as frequently the optimizer is created outside the # strategy's scope. _raise_if_strategy_unsupported() _maybe_warn_about_scaling( self._loss_has_been_scaled, self._gradients_have_been_unscaled ) grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars) # `experimental_aggregate_gradients` is an arg in `apply_gradients` of # v2 optimizer -- the reverse of `skip_gradients_aggregation`. # We read it from kwargs for backward compatibility. experimental_aggregate_gradients = kwargs.pop( "experimental_aggregate_gradients", True ) run_with_dtensor = ( # `_run_with_dtensor` is for dtensor based strategy scope, and # `_mesh` is when user explicitly specify the mesh setting for # optimizer. self._optimizer._run_with_dtensor or self._optimizer._mesh ) if ( not skip_gradients_aggregation and experimental_aggregate_gradients and not run_with_dtensor ): # We must aggregate the gradients here instead of in # self.optimizer.apply_gradients, so that any NaN or Inf gradients # are propagated to each replica. If any replica has a NaN or Inf # gradient, they must all have a NaN or Inf gradient so that they # all skip the step. grads_and_vars = self._optimizer.aggregate_gradients(grads_and_vars) grads_and_vars = tuple(grads_and_vars) grads = [g for g, _ in grads_and_vars] # We do not want DistributionStrategy to unwrap any MirroredVariables in # grads_and_vars, because even in a replica context, the wrapped # optimizer expects mirrored variables. So we wrap the variables with an # _UnwrapPreventer, preventing DistributionStrategy from unwrapping the # MirroredVariables. wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars]) def do_not_apply_fn(): # Normally self._optimizer.iterations is incremented in # self._optimizer.apply_gradients(). Since that is not called in # this branch, we increment it here instead. self._optimizer.iterations.assign_add(1, read_value=False) def _if_should_apply_grads(grads): if isinstance(self._loss_scale, _DynamicLossScaleState): _, should_apply_grad = self._loss_scale.update(grads) return should_apply_grad else: return True if tf.__internal__.distribute.strategy_supports_no_merge_call(): should_apply_grads = _if_should_apply_grads(grads) def apply_fn(): return self._apply_gradients(grads, wrapped_vars) tf.__internal__.smart_cond.smart_cond( should_apply_grads, apply_fn, do_not_apply_fn ) else: def _apply_gradients_cross_replica( distribution, grads, wrapped_vars ): should_apply_grads = _if_should_apply_grads(grads) def apply_fn(): distribution.extended.call_for_each_replica( self._apply_gradients, args=(grads, wrapped_vars) ) # Note: We must call this cond() in a cross-replica context. # DistributionStrategy does not support having a cond in a # replica context with a branch that calls `merge_call`, and # self._optimizer.apply_gradients calls `merge_call`. 
tf.__internal__.smart_cond.smart_cond( should_apply_grads, apply_fn, do_not_apply_fn ) tf.distribute.get_replica_context().merge_call( _apply_gradients_cross_replica, args=(grads, wrapped_vars) ) def _apply_gradients(self, grads, wrapped_vars): # Pass skip_gradients_aggregation=True since LossScaleOptimizer # already aggregated the gradients. self._optimizer.apply_gradients( list(zip(grads, wrapped_vars.value)), skip_gradients_aggregation=True, ) def get_config(self): serialized_optimizer = optimizers.serialize(self._optimizer) return { "inner_optimizer": serialized_optimizer, "dynamic": self.dynamic, "initial_scale": self.initial_scale, "dynamic_growth_steps": self.dynamic_growth_steps, } @classmethod def from_config(cls, config, custom_objects=None): config = config.copy() # Make a copy, since we mutate config if isinstance(config["inner_optimizer"], optimizer.Optimizer): inner_optimizer = config["inner_optimizer"] else: inner_optimizer = optimizers.deserialize( config["inner_optimizer"], custom_objects=custom_objects, use_legacy_optimizer=False, ) del config["inner_optimizer"] return cls(inner_optimizer, **config) @property def iterations(self): return self._optimizer.iterations @iterations.setter def iterations(self, variable): self._optimizer.iterations = variable @property def variables(self): return self._optimizer.variables def build(self, var_list): return self._optimizer.build(var_list) @property def learning_rate(self): return self._optimizer.learning_rate @learning_rate.setter def learning_rate(self, learning_rate): self._optimizer.learning_rate = learning_rate @property def use_ema(self): return self._optimizer.use_ema @use_ema.setter def use_ema(self, use_ema): self._optimizer.use_ema = use_ema @property def ema_momentum(self): return self._optimizer.ema_momentum @ema_momentum.setter def ema_momentum(self, ema_momentum): self._optimizer.ema_momentum = ema_momentum def finalize_variable_values(self, var_list): self._optimizer.finalize_variable_values(var_list) class FakeOptimizerForRestoration(tf.__internal__.tracking.Trackable): """A fake optimizer used to support restoring TensorFlow 2.2 checkpoints. The checkpoint format for LossScaleOptimizers changed after TF 2.2. This class exists to support restoring TF 2.2 checkpoints in newer version of TensorFlow. In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling the following in LossScaleOptimizer.__init__ ``` self._track_trackable(self._optimizer, 'base_optimizer') ``` This means a dependency from the LossScaleOptimizer to the wrapped optimizer would be stored in the checkpoint. However now, the checkpoint format with a LossScaleOptimizer is the same as the format without a LossScaleOptimizer, except the loss scale is also stored. This means there is no dependency from the LossScaleOptimizer to the wrapped optimizer. Instead, the LossScaleOptimizer acts as if it is the wrapped optimizer, from a checkpoint's perspective, by overriding all Trackable methods and delegating them to the wrapped optimizer. To allow restoring TF 2.2. checkpoints, LossScaleOptimizer adds a dependency on this class instead of the inner optimizer. When restored, this class will instead restore the slot variables of the inner optimizer. Since this class has no variables, it does not affect the checkpoint when saved. 
""" def __init__(self, optimizer): self._optimizer = optimizer def get_slot_names(self): return self._optimizer.get_slot_names() def _create_or_restore_slot_variable( self, slot_variable_position, slot_name, variable ): return self._optimizer._create_or_restore_slot_variable( slot_variable_position, slot_name, variable ) def _create_loss_scale_optimizer_from_v1_loss_scale(optimizer, loss_scale): """Creates an LSO from a tf.compat.v1.mixed_precision.LossScale. This is only used to pass to `tf.__internal__.mixed_precision.register_loss_scale_wrapper` below, which is called so that `tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite` can wrap a TF-Keras optimizer with a LossScaleOptimizer. Args: optimizer: An OptimizerV2 instance. loss_scale: A `tf.compat.v1.mixed_precision.LossScale` instance Returns: A LossScaleOptimizer that wraps `optimizer` and uses the same loss scaling algorithm as `loss_scale`. """ if isinstance(loss_scale, (int, float)): return LossScaleOptimizer( optimizer, dynamic=False, initial_scale=loss_scale ) elif isinstance(loss_scale, tf.compat.v1.mixed_precision.FixedLossScale): ls_val = loss_scale._loss_scale_value return LossScaleOptimizer( optimizer, dynamic=False, initial_scale=ls_val ) elif loss_scale == "dynamic": return LossScaleOptimizer(optimizer) elif isinstance(loss_scale, tf.compat.v1.mixed_precision.DynamicLossScale): if loss_scale.multiplier != 2: raise ValueError( 'When passing a DynamicLossScale to "loss_scale", ' "DynamicLossScale.multiplier must be 2. Got: " f"{loss_scale}" ) return LossScaleOptimizer( optimizer, initial_scale=loss_scale.initial_loss_scale, dynamic_growth_steps=loss_scale.increment_period, ) elif isinstance(loss_scale, tf.compat.v1.mixed_precision.LossScale): raise TypeError( "Passing a LossScale that is not a FixedLossScale or a " f"DynamicLossScale is not supported. Got: {loss_scale}" ) else: raise ValueError( "Invalid value passed to loss_scale. loss_scale " 'must be the string "dynamic" (recommended), an int, ' "a float, a FixedLossScale, or a DynamicLossScale. Got " f"value: {loss_scale}" ) tf.__internal__.mixed_precision.register_loss_scale_wrapper( optimizer_v2.OptimizerV2, _create_loss_scale_optimizer_from_v1_loss_scale, LossScaleOptimizer, ) def _multiply_gradient(gradient, scale): """Multiply a (possibly sparse) gradient by the given scale factor.""" scale = tf.cast(scale, gradient.dtype) if isinstance(gradient, tf.IndexedSlices): return tf.IndexedSlices( gradient.values * scale, gradient.indices, dense_shape=gradient.dense_shape, ) else: return gradient * scale def strategy_supports_loss_scaling(): """Returns True if the current Strategy supports loss scaling.""" if not tf.distribute.has_strategy(): return True strategy = tf.distribute.get_strategy() # Strategies are supported if either there is only one replica or if # variables are replicated per device. Otherwise, the current model.fit() # implementation and most custom training loops incorrectly unscale the # gradients. Currently, gradients are unscaled once per compute replica, but # they should be unscaled once per variable replica. When there is one # variable replica for each compute replica, this works fine, but otherwise # issues will occur. # TODO(reedwm): Support all strategies. 
return ( isinstance( strategy, ( tf.distribute.MultiWorkerMirroredStrategy, tf.compat.v1.distribute.experimental.MultiWorkerMirroredStrategy, # noqa: E501 tf.distribute.OneDeviceStrategy, tf.compat.v1.distribute.OneDeviceStrategy, tf.distribute.MirroredStrategy, tf.compat.v1.distribute.MirroredStrategy, ), ) or dtensor_utils.running_with_dtensor_strategy() ) def _raise_if_strategy_unsupported(): """Raise an exception if the current strategy doesn't support loss scaling.""" if not strategy_supports_loss_scaling(): strategy = tf.distribute.get_strategy() if isinstance( strategy, ( tf.distribute.experimental.TPUStrategy, tf.compat.v1.distribute.experimental.TPUStrategy, tf.distribute.TPUStrategy, ), ): raise ValueError( "Loss scaling is not supported with TPUStrategy. Loss scaling " "is unnecessary with TPUs, since they support bfloat16 instead " "of float16 and bfloat16 does not require loss scaling. You " "should remove the use of the LossScaleOptimizer when TPUs are " "used." ) else: raise ValueError( "Loss scaling is not supported with the " "tf.distribute.Strategy: " f"{strategy.__class__.__name__}. Try using a different " "Strategy, e.g. a MirroredStrategy" )
tf-keras/tf_keras/mixed_precision/loss_scale_optimizer.py/0
{ "file_path": "tf-keras/tf_keras/mixed_precision/loss_scale_optimizer.py", "repo_id": "tf-keras", "token_count": 27294 }
182
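The loss-scale optimizer dumped above is normally driven through `get_scaled_loss()` and `get_unscaled_gradients()` inside a custom training loop. A minimal sketch of that flow, assuming the public `tf.keras.mixed_precision.LossScaleOptimizer` entry point and an illustrative toy variable and loss (not taken from the file itself):

```python
import tensorflow as tf

# Wrap an inner optimizer; dynamic loss scaling is the default.
opt = tf.keras.mixed_precision.LossScaleOptimizer(
    tf.keras.optimizers.SGD(learning_rate=0.1)
)
var = tf.Variable(2.0)

with tf.GradientTape() as tape:
    loss = var ** 2
    # Scale the loss so small float16 gradients do not underflow.
    scaled_loss = opt.get_scaled_loss(loss)
scaled_grads = tape.gradient(scaled_loss, [var])

# Undo the scaling before the update; with dynamic scaling, apply_gradients
# skips the step and lowers the loss scale if any gradient is non-finite.
grads = opt.get_unscaled_gradients(scaled_grads)
opt.apply_gradients(zip(grads, [var]))
```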
# TF-Keras models # Placeholder: load unaliased py_library load("@org_keras//tf_keras:tf_keras.bzl", "distribute_py_test") load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test") package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], default_visibility = [ "//tf_keras:friends", ], licenses = ["notice"], ) py_library( name = "sharpness_aware_minimization", srcs = ["sharpness_aware_minimization.py"], srcs_version = "PY3", deps = [ ":cloning", "//:expect_tensorflow_installed", "//tf_keras/engine:data_adapter", ], ) py_library( name = "models", srcs = [ "__init__.py", ], srcs_version = "PY3", deps = [ ":cloning", ":sharpness_aware_minimization", ], ) py_library( name = "cloning", srcs = [ "cloning.py", ], srcs_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras/engine", "//tf_keras/engine:base_layer", "//tf_keras/metrics", "//tf_keras/optimizers", "//tf_keras/saving", "//tf_keras/utils:generic_utils", "//tf_keras/utils:version_utils", ], ) tf_py_test( name = "cloning_test", size = "medium", srcs = ["cloning_test.py"], main = "cloning_test.py", python_version = "PY3", shard_count = 8, tags = [ "notsan", # b/67509773 ], deps = [ "//:expect_absl_installed", # absl/testing:parameterized "//:expect_numpy_installed", "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/testing_infra:test_combinations", ], ) distribute_py_test( name = "sharpness_aware_minimization_test", size = "medium", srcs = ["sharpness_aware_minimization_test.py"], shard_count = 8, tags = [ "multi_gpu", "nomultivm", "requires-net:ipv4", ], deps = [ ":sharpness_aware_minimization", "//:expect_absl_installed", # absl/testing:parameterized "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/optimizers", "//tf_keras/testing_infra:test_combinations", ], )
tf-keras/tf_keras/models/BUILD/0
{ "file_path": "tf-keras/tf_keras/models/BUILD", "repo_id": "tf-keras", "token_count": 1128 }
183
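The `cloning` library declared in the BUILD file above backs model cloning; a hedged sketch of its typical use, assuming the public `tf.keras.models.clone_model` alias and an illustrative toy model (not taken from the BUILD file itself):

```python
import tensorflow as tf

# A small functional model to clone.
inputs = tf.keras.Input(shape=(8,))
outputs = tf.keras.layers.Dense(4)(inputs)
model = tf.keras.Model(inputs, outputs)

# clone_model rebuilds the same architecture with freshly initialized weights;
# copy the original weights over explicitly if an exact duplicate is wanted.
clone = tf.keras.models.clone_model(model)
clone.set_weights(model.get_weights())
```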
# Description: # Contains the TF-Keras OptimizerV2 API (internal TensorFlow version). # Placeholder: load unaliased py_library load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test") package( # copybara:uncomment default_applicable_licenses = ["//tf_keras:license"], # TODO(scottzhu): Remove non-keras deps from TF. default_visibility = [ "//tf_keras:friends", "//third_party/tensorflow/cc/saved_model:__pkg__", # For unit tests. "//third_party/tensorflow/python/trackable:__pkg__", ], licenses = ["notice"], ) py_library( name = "optimizers", srcs = [ "adadelta.py", "adagrad.py", "adam.py", "adamax.py", "ftrl.py", "gradient_descent.py", "nadam.py", "optimizer_v2.py", "rmsprop.py", ], srcs_version = "PY3", deps = [ "//:expect_tensorflow_installed", "//tf_keras:backend", "//tf_keras:backend_config", "//tf_keras/engine:base_layer_utils", "//tf_keras/initializers", "//tf_keras/optimizers:utils", "//tf_keras/optimizers/schedules:learning_rate_schedule", "//tf_keras/utils:layer_utils", "//tf_keras/utils:tf_utils", ], ) cuda_py_test( name = "adagrad_test", size = "medium", srcs = ["adagrad_test.py"], shard_count = 4, deps = [ ":optimizers", "//:expect_tensorflow_installed", "//tf_keras/testing_infra:test_combinations", ], ) cuda_py_test( name = "adam_test", size = "medium", srcs = ["adam_test.py"], shard_count = 4, tags = [ "no_rocm", "no_windows", # TODO(b/171384138) ], deps = [ ":optimizers", "//:expect_tensorflow_installed", "//tf_keras/testing_infra:test_combinations", ], ) cuda_py_test( name = "adamax_test", size = "medium", srcs = ["adamax_test.py"], shard_count = 4, # TODO(b/168527439): invalid resource variable reference on GPU for TFRT. tags = ["no_rocm"], deps = [ ":optimizers", "//:expect_tensorflow_installed", "//tf_keras/testing_infra:test_combinations", ], ) cuda_py_test( name = "adadelta_test", size = "medium", srcs = ["adadelta_test.py"], shard_count = 4, # TODO(b/168527439): invalid resource variable reference on GPU for TFRT. deps = [ ":optimizers", "//:expect_tensorflow_installed", "//tf_keras/testing_infra:test_combinations", ], ) cuda_py_test( name = "ftrl_test", size = "medium", srcs = ["ftrl_test.py"], shard_count = 4, deps = [ ":optimizers", "//:expect_tensorflow_installed", ], ) cuda_py_test( name = "gradient_descent_test", size = "medium", srcs = ["gradient_descent_test.py"], shard_count = 4, deps = [ ":optimizers", "//:expect_tensorflow_installed", "//tf_keras/testing_infra:test_combinations", ], ) cuda_py_test( name = "nadam_test", size = "medium", srcs = ["nadam_test.py"], shard_count = 4, deps = [ ":optimizers", "//:expect_tensorflow_installed", ], ) cuda_py_test( name = "optimizer_v2_test", size = "medium", srcs = ["optimizer_v2_test.py"], shard_count = 8, tags = [ "no_windows", ], deps = [ ":optimizers", "//:expect_absl_installed", # absl/testing:parameterized "//:expect_tensorflow_installed", "//tf_keras", "//tf_keras/testing_infra:test_combinations", ], ) cuda_py_test( name = "rmsprop_test", size = "medium", srcs = ["rmsprop_test.py"], shard_count = 2, # TODO(b/168527439): invalid resource variable reference on GPU for TFRT. deps = [ ":optimizers", "//:expect_absl_installed", # absl/testing:parameterized "//:expect_tensorflow_installed", "//tf_keras/testing_infra:test_combinations", ], )
tf-keras/tf_keras/optimizers/legacy/BUILD/0
{ "file_path": "tf-keras/tf_keras/optimizers/legacy/BUILD", "repo_id": "tf-keras", "token_count": 1984 }
184
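The `optimizers` target above bundles the legacy V2 optimizers whose shared base class, `optimizer_v2.py`, follows. A hedged sketch of the `minimize`/`apply_gradients` flow that base class implements, assuming the public `tf.keras.optimizers.legacy.SGD` alias and illustrative toy values:

```python
import tensorflow as tf

var = tf.Variable(2.0)
opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.1)

# Callable loss: minimize() evaluates it under a GradientTape and applies
# the resulting gradients in one call.
opt.minimize(lambda: var ** 2, var_list=[var])

# Equivalent explicit form: compute gradients, then apply them.
with tf.GradientTape() as tape:
    loss = var ** 2
grads = tape.gradient(loss, [var])
opt.apply_gradients(zip(grads, [var]))
```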
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Version 2 of class Optimizer.""" import abc import contextlib import functools import warnings from copy import deepcopy import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras import initializers from tf_keras.engine import base_layer_utils from tf_keras.optimizers import utils as optimizer_utils from tf_keras.optimizers.schedules import learning_rate_schedule from tf_keras.utils import generic_utils from tf_keras.utils import layer_utils from tf_keras.utils import tf_inspect from tf_keras.utils import tf_utils # isort: off from tensorflow.python.util.tf_export import keras_export _DEFAULT_VALID_DTYPES = frozenset( [ tf.float16, tf.bfloat16, tf.float32, tf.float64, tf.complex64, tf.complex128, ] ) def _deduplicate_indexed_slices(values, indices): """Sums `values` associated with any non-unique `indices`. Args: values: A `Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`, indexing into the first dimension of `values` (as in an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a de-duplicated version of `indices` and `summed_values` contains the sum of `values` slices associated with each unique index. """ unique_indices, new_index_positions = tf.unique(indices) summed_values = tf.math.unsorted_segment_sum( values, new_index_positions, tf.shape(unique_indices)[0] ) return (summed_values, unique_indices) class NullContextmanager: def __init__(self, *args, **kwargs): pass def __enter__(self): pass def __exit__(self, type_arg, value_arg, traceback_arg): return False # False values do not suppress exceptions def name_scope_only_in_function_or_graph(name): """Internal-only entry point for `name_scope*`. Enters a compat.v1.name_scope only when in a function or graph, not when running fully eagerly. Args: name: The name argument that is passed to the op function. Returns: `name_scope*` context manager. """ if not tf.executing_eagerly(): return tf.name_scope(name) else: return NullContextmanager() @keras_export( "keras.optimizers.legacy.Optimizer", v1=["keras.optimizers.Optimizer", "keras.optimizers.legacy.Optimizer"], ) class OptimizerV2(tf.__internal__.tracking.Trackable): """Base class for legacy TF-Keras optimizers. You should not use this class directly, but instead instantiate one of its subclasses such as `tf.keras.optimizers.legacy.SGD`, `tf.keras.optimizers.legacy.Adam`, etc. This is the default TF-Keras optimizer base class until v2.10 (included). In v2.11 and later, `tf.keras.optimizers.Optimizer` points to a new base class implementation. The legacy class won't be deleted in the future and will continue to be available at `tf.keras.optimizers.legacy.Optimizer`. ### Usage ```python # Create an optimizer with the desired parameters. 
opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. var1 = tf.Variable(2.0) var2 = tf.Variable(5.0) loss = lambda: 3 * var1 * var1 + 2 * var2 * var2 # In graph mode, returns op that minimizes the loss by updating the listed # variables. opt_op = opt.minimize(loss, var_list=[var1, var2]) opt_op.run() # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) ``` ### Usage in custom training loops In TF-Keras models, sometimes variables are created when the model is first called, instead of construction time. Examples include 1) sequential models without input shape pre-defined, or 2) subclassed models. Pass var_list as callable in these cases. Example: ```python opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.1) model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(num_hidden, activation='relu')) model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid')) loss_fn = lambda: tf.keras.losses.mse(model(input), output) var_list_fn = lambda: model.trainable_weights for input, output in data: opt.minimize(loss_fn, var_list_fn) ``` ### Processing gradients before applying them Calling `minimize()` takes care of both computing the gradients and applying them to the variables. If you want to process the gradients before applying them you can instead use the optimizer in three steps: 1. Compute the gradients with `tf.GradientTape`. 2. Process the gradients as you wish. 3. Apply the processed gradients with `apply_gradients()`. Example: ```python # Create an optimizer. opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.1) # Compute the gradients for a list of variables. with tf.GradientTape() as tape: loss = <call_loss_function> vars = <list_of_variables> grads = tape.gradient(loss, vars) # Process the gradients, for example cap them, etc. # capped_grads = [MyCapper(g) for g in grads] processed_grads = [process_gradient(g) for g in grads] # Ask the optimizer to apply the processed gradients. opt.apply_gradients(zip(processed_grads, var_list)) ``` ### Use with `tf.distribute.Strategy` This optimizer class is `tf.distribute.Strategy` aware, which means it automatically sums gradients across all replicas. To average gradients, you divide your loss by the global batch size, which is done automatically if you use `tf.keras` built-in training or evaluation loops. See the `reduction` argument of your loss which should be set to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or `tf.keras.losses.Reduction.SUM` for not. To aggregate gradients yourself, call `apply_gradients` with `experimental_aggregate_gradients` set to False. This is useful if you need to process aggregated gradients. If you are not using these and you want to average gradients, you should use `tf.math.reduce_sum` to add up your per-example losses and then divide by the global batch size. Note that when using `tf.distribute.Strategy`, the first component of a tensor's shape is the *replica-local* batch size, which is off by a factor equal to the number of replicas being used to compute a single step. As a result, using `tf.math.reduce_mean` will give the wrong answer, resulting in gradients that can be many times too big. ### Variable Constraints All TF-Keras optimizers respect variable constraints. If constraint function is passed to any variable, the constraint will be applied to the variable after the gradient has been applied to the variable. 
Important: If gradient is sparse tensor, variable constraint is not supported. ### Thread Compatibility The entire optimizer is currently thread compatible, not thread-safe. The user needs to perform synchronization if necessary. ### Slots Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage additional variables associated with the variables to train. These are called <i>Slots</i>. Slots have names and you can ask the optimizer for the names of the slots that it uses. Once you have a slot name you can ask the optimizer for the variable it created to hold the slot value. This can be useful if you want to log debug a training algorithm, report stats about the slots, etc. ### Hyperparameters These are arguments passed to the optimizer subclass constructor (the `__init__` method), and then passed to `self._set_hyper()`. They can be either regular Python values (like 1.0), tensors, or callables. If they are callable, the callable will be called during `apply_gradients()` to get the value for the hyper parameter. Hyperparameters can be overwritten through user code: Example: ```python # Create an optimizer with the desired parameters. opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. loss = lambda: 3 * var1 + 2 * var2 # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) # update learning rate opt.learning_rate = 0.05 opt.minimize(loss, var_list=[var1, var2]) ``` ### Callable learning rate Optimizer accepts a callable learning rate in two ways. The first way is through built-in or customized `tf.keras.optimizers.schedules.LearningRateSchedule`. The schedule will be called on each iteration with `schedule(iteration)`, a `tf.Variable` owned by the optimizer. Example: >>> var = tf.Variable(np.random.random(size=(1,))) >>> learning_rate = tf.keras.optimizers.schedules.ExponentialDecay( ... initial_learning_rate=.01, decay_steps=20, decay_rate=.1) >>> opt = tf.keras.optimizers.legacy.SGD(learning_rate=learning_rate) >>> loss = lambda: 3 * var >>> opt.minimize(loss, var_list=[var]) <tf.Variable... The second way is through a callable function that does not accept any arguments. Example: >>> var = tf.Variable(np.random.random(size=(1,))) >>> def lr_callable(): ... return .1 >>> opt = tf.keras.optimizers.legacy.SGD(learning_rate=lr_callable) >>> loss = lambda: 3 * var >>> opt.minimize(loss, var_list=[var]) <tf.Variable... ### Creating a custom optimizer If you intend to create your own optimization algorithm, simply inherit from this class and override the following methods: - `_resource_apply_dense` (update variable given gradient tensor is a dense `tf.Tensor`) - `_resource_apply_sparse` (update variable given gradient tensor is a sparse `tf.IndexedSlices`. The most common way for this to happen is if you are taking the gradient through a `tf.gather`.) - `_create_slots` (if your optimizer algorithm requires additional variables) - `get_config` (serialization of the optimizer, include all hyper parameters) """ # Subclasses should set this to True unless they override `apply_gradients` # with a version that does not have the `experimental_aggregate_gradients` # argument. Older versions of TF-Keras did not have this argument so custom # optimizers may have overridden `apply_gradients` without the # `experimental_aggregate_gradients` argument. TF-Keras only passes # `experimental_aggregate_gradients` if this attribute is True. 
# Note: This attribute will likely be removed in an upcoming release. _HAS_AGGREGATE_GRAD = False def __init__( self, name, gradient_aggregator=None, gradient_transformers=None, **kwargs, ): """Create a new Optimizer. This must be called by the constructors of subclasses. Note that Optimizer instances should not bind to a single graph, and so shouldn't keep Tensors as member variables. Generally you should be able to use the _set_hyper()/state.get_hyper() facility instead. This class is stateful and thread-compatible. Example of custom gradient transformations: ```python def my_gradient_transformer(grads_and_vars): # Simple example, double the gradients. return [(2. * g, v) for g, v in grads_and_vars] optimizer = tf.keras.optimizers.legacy.SGD( 1e-3, gradient_transformers=[my_gradient_transformer]) ``` Args: name: String. The name to use for momentum accumulator weights created by the optimizer. gradient_aggregator: The function to use to aggregate gradients across devices (when using `tf.distribute.Strategy`). If `None`, defaults to summing the gradients across devices. The function should accept and return a list of `(gradient, variable)` tuples. gradient_transformers: Optional. List of functions to use to transform gradients before applying updates to Variables. The functions are applied after `gradient_aggregator`. The functions should accept and return a list of `(gradient, variable)` tuples. **kwargs: keyword arguments. Allowed arguments are `clipvalue`, `clipnorm`, `global_clipnorm`. If `clipvalue` (float) is set, the gradient of each weight is clipped to be no higher than this value. If `clipnorm` (float) is set, the gradient of each weight is individually clipped so that its norm is no higher than this value. If `global_clipnorm` (float) is set the gradient of all weights is clipped so that their global norm is no higher than this value. Raises: ValueError: in case of any invalid argument. """ allowed_kwargs = { "clipnorm", "clipvalue", "lr", "decay", "global_clipnorm", } for k in kwargs: if k not in allowed_kwargs: raise TypeError( "Unexpected keyword argument " f"passed to optimizer: {str(k)}. Allowed kwargs are " f"{allowed_kwargs}." ) # checks that all keyword arguments are non-negative. if kwargs[k] is not None and kwargs[k] < 0: raise ValueError(f"Expected {k} >= 0, received: {kwargs[k]}") if k == "lr": warnings.warn( "The `lr` argument is deprecated, " "use `learning_rate` instead.", stacklevel=2, ) self._use_locking = True self._init_set_name(name) self._hyper = {} # dict: {variable name : {slot name : variable}} self._slots = {} self._slot_names = [] self._weights = [] self._iterations = None # For implementing Trackable. Stores information about how to restore # slot variables which have not yet been created # (trackable._CheckpointPosition objects). # {slot_name : # {_var_key(variable_to_train): [checkpoint_position, ... ], ... }, # ... } self._deferred_slot_restorations = {} decay = kwargs.pop("decay", 0.0) if decay < 0.0: raise ValueError( f"decay cannot be less than 0. Received: decay={decay}." ) self._initial_decay = decay self._hypers_created = False # Store the distribution strategy object if the optimizer is created # inside strategy scope, so it could be used to create variables later. if tf.distribute.has_strategy(): self._distribution_strategy = tf.distribute.get_strategy() else: self._distribution_strategy = None # Configure gradient transformations. 
if gradient_aggregator is None: gradient_aggregator = optimizer_utils.all_reduce_sum_gradients self.gradient_aggregator = gradient_aggregator if gradient_transformers is None: gradient_transformers = [] self.gradient_transformers = gradient_transformers self.clipnorm = kwargs.pop("clipnorm", None) self.global_clipnorm = kwargs.pop("global_clipnorm", None) if self.clipnorm is not None and self.global_clipnorm is not None: raise ValueError( "Cannot accept both `clipnorm` and `global_clipnorm`. " "Received: `clipnorm`={}, `global_clipnorm`={}.".format( self.clipnorm, self.global_clipnorm ) ) self.clipvalue = kwargs.pop("clipvalue", None) def __deepcopy__(self, memo): cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): # DistributionStrategy singleton cannot be serialized if k == "_distribution_strategy": continue setattr(result, k, deepcopy(v, memo)) result._distribution_strategy = self._distribution_strategy return result @property def clipnorm(self): """`float` or `None`. If set, clips gradients to a maximum norm.""" return self._clipnorm @property def global_clipnorm(self): """`float` or `None`. If set, clips gradients to a maximum norm. Check `tf.clip_by_global_norm` for more details. """ return self._global_clipnorm @clipnorm.setter def clipnorm(self, val): if val is not None and self.gradient_transformers: raise ValueError( "`clipnorm` cannot be set when `gradient_transformers` " "is set. Instead, use the `gradient_transformers` to " "specify clipping and other transformations. Received: " f"val={val}, " f"gradient_transformers={self.gradient_transformers}." ) self._clipnorm = val self._clipnorm_fn = optimizer_utils.make_gradient_clipnorm_fn( self._clipnorm ) @global_clipnorm.setter def global_clipnorm(self, val): if val is not None and self.gradient_transformers: raise ValueError( "`global_clipnorm` cannot be set when " "`gradient_transformers` " "is set. Instead, use the `gradient_transformers` to " "specify clipping and other transformations. Received: " f"val={val}, " f"gradient_transformers={self.gradient_transformers}." ) self._global_clipnorm = val self._global_clipnorm_fn = ( optimizer_utils.make_global_gradient_clipnorm_fn( self._global_clipnorm ) ) @property def clipvalue(self): """`float` or `None`. If set, clips gradients to a maximum value.""" return self._clipvalue @clipvalue.setter def clipvalue(self, val): if val is not None and self.gradient_transformers: raise ValueError( "`clipvalue` cannot be set when `gradient_transformers` " "is set. Instead, use the `gradient_transformers` to " "specify clipping and other transformations. Received: " f"val={val}, " f"gradient_transformers={self.gradient_transformers}." ) self._clipvalue = val self._clipvalue_fn = optimizer_utils.make_gradient_clipvalue_fn( self._clipvalue ) def _transform_loss(self, loss): """Called in `.minimize` to transform loss before computing gradients.""" return loss def _get_gradients(self, tape, loss, var_list, grad_loss=None): """Called in `minimize` to compute gradients from loss.""" grads = tape.gradient(loss, var_list, grad_loss) return list(zip(grads, var_list)) def _transform_unaggregated_gradients(self, grads_and_vars): """Called in `apply_gradients` before gradient aggregation.""" return grads_and_vars def _aggregate_gradients(self, grads_and_vars): """Called in `apply_gradients` to aggregate gradients across devices. Note that user subclasses may override this, so the interface should not be changed. 
Args: grads_and_vars: List of (gradient, variable) pairs. Returns: A list of (aggregrated_gradient, variable) pairs. By default, this calls `self.gradient_aggregator`. """ return self.gradient_aggregator(grads_and_vars) def _transform_gradients(self, grads_and_vars): """Called in `apply_gradients` after aggregation.""" if self._clipvalue is not None: grads_and_vars = self._clipvalue_fn(grads_and_vars) if self._clipnorm is not None: grads_and_vars = self._clipnorm_fn(grads_and_vars) if self._global_clipnorm is not None: grads_and_vars = self._global_clipnorm_fn(grads_and_vars) for fn in self.gradient_transformers: grads_and_vars = fn(grads_and_vars) return grads_and_vars def minimize(self, loss, var_list, grad_loss=None, name=None, tape=None): """Minimize `loss` by updating `var_list`. This method simply computes gradient using `tf.GradientTape` and calls `apply_gradients()`. If you want to process the gradient before applying then call `tf.GradientTape` and `apply_gradients()` explicitly instead of using this function. Args: loss: `Tensor` or callable. If a callable, `loss` should take no arguments and return the value to minimize. If a `Tensor`, the `tape` argument must be passed. var_list: list or tuple of `Variable` objects to update to minimize `loss`, or a callable returning the list or tuple of `Variable` objects. Use callable when the variable list would otherwise be incomplete before `minimize` since the variables are created at the first time `loss` is called. grad_loss: (Optional). A `Tensor` holding the gradient computed for `loss`. name: (Optional) str. Name for the returned operation. tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`, the tape that computed the `loss` must be provided. Returns: An `Operation` that updates the variables in `var_list`. The `iterations` will be automatically increased by 1. Raises: ValueError: If some of the variables are not `Variable` objects. """ grads_and_vars = self._compute_gradients( loss, var_list=var_list, grad_loss=grad_loss, tape=tape ) return self.apply_gradients(grads_and_vars, name=name) def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None): """Compute gradients of `loss` for the variables in `var_list`. This is the first part of `minimize()`. It returns a list of (gradient, variable) pairs where "gradient" is the gradient for "variable". Note that "gradient" can be a `Tensor`, an `IndexedSlices`, or `None` if there is no gradient for the given variable. Args: loss: `Tensor` or callable. If a callable, `loss` should take no arguments and return the value to minimize. If a `Tensor`, the `tape` argument must be passed. var_list: list or tuple of `Variable` objects to update to minimize `loss`, or a callable returning the list or tuple of `Variable` objects. Use callable when the variable list would otherwise be incomplete before `minimize` and the variables are created at the first time when `loss` is called. grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`. tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`, the tape that computed the `loss` must be provided. Returns: A list of (gradient, variable) pairs. Variable is always present, but gradient can be `None`. Raises: TypeError: If `var_list` contains anything else than `Variable` objects. ValueError: If some arguments are invalid, or var_list is None. """ # TODO(joshl): Test that we handle weight decay in a reasonable way. 
if not callable(loss) and tape is None: raise ValueError( "`tape` is required when a `Tensor` loss is passed. " f"Received: loss={loss}, tape={tape}." ) tape = tape if tape is not None else tf.GradientTape() if callable(loss): with tape: if not callable(var_list): tape.watch(var_list) loss = loss() if callable(var_list): var_list = var_list() with tape: loss = self._transform_loss(loss) var_list = tf.nest.flatten(var_list) with tf.name_scope(self._name + "/gradients"): grads_and_vars = self._get_gradients( tape, loss, var_list, grad_loss ) self._assert_valid_dtypes( [ v for g, v in grads_and_vars if g is not None and v.dtype != tf.resource ] ) return grads_and_vars def apply_gradients( self, grads_and_vars, name=None, experimental_aggregate_gradients=True ): """Apply gradients to variables. This is the second part of `minimize()`. It returns an `Operation` that applies gradients. The method sums gradients from all replicas in the presence of `tf.distribute.Strategy` by default. You can aggregate gradients yourself by passing `experimental_aggregate_gradients=False`. Example: ```python grads = tape.gradient(loss, vars) grads = tf.distribute.get_replica_context().all_reduce('sum', grads) # Processing aggregated gradients. optimizer.apply_gradients(zip(grads, vars), experimental_aggregate_gradients=False) ``` Args: grads_and_vars: List of (gradient, variable) pairs. name: Optional name for the returned operation. When `None`, uses the name passed to the `Optimizer` constructor. Defaults to `None`. experimental_aggregate_gradients: Whether to sum gradients from different replicas in the presence of `tf.distribute.Strategy`. If False, it's user responsibility to aggregate the gradients. Default to `True`. Returns: An `Operation` that applies the specified gradients. The `iterations` will be automatically increased by 1. Raises: TypeError: If `grads_and_vars` is malformed. ValueError: If none of the variables have gradients. RuntimeError: If called in a cross-replica context. """ grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars) var_list = [v for (_, v) in grads_and_vars] with tf.name_scope(self._name): # Create iteration if necessary. with tf.init_scope(): self._create_all_weights(var_list) if not grads_and_vars: # Distribution strategy does not support reducing an empty list # of gradients return tf.no_op() if tf.distribute.in_cross_replica_context(): raise RuntimeError( "`apply_gradients() cannot be called in cross-replica " "context. Use `tf.distribute.Strategy.run` to enter " "replica context. For more information, please see the " "docstring of `tf.distribute.get_replica_context`." ) strategy = tf.distribute.get_strategy() if ( not experimental_aggregate_gradients and strategy and isinstance( strategy, ( tf.compat.v1.distribute.experimental.ParameterServerStrategy, # noqa: E501 tf.distribute.experimental.ParameterServerStrategy, tf.distribute.experimental.CentralStorageStrategy, tf.compat.v1.distribute.experimental.CentralStorageStrategy, # noqa: E501 ), ) ): raise NotImplementedError( "`experimental_aggregate_gradients=False is not supported " "for ParameterServerStrategy and CentralStorageStrategy. " f"Used: strategy={strategy}." 
) apply_state = self._prepare(var_list) if experimental_aggregate_gradients: grads_and_vars = self._transform_unaggregated_gradients( grads_and_vars ) grads_and_vars = self._aggregate_gradients(grads_and_vars) grads_and_vars = self._transform_gradients(grads_and_vars) return tf.__internal__.distribute.interim.maybe_merge_call( functools.partial( self._distributed_apply, apply_state=apply_state ), strategy, grads_and_vars, name=name, ) def _distributed_apply( self, distribution, grads_and_vars, apply_state, name ): """`apply_gradients` using a `DistributionStrategy`.""" def apply_grad_to_update_var(var, grad): """Apply gradient to variable.""" if isinstance(var, tf.Tensor): raise NotImplementedError( "Updating a `Tensor` is not implemented. " f"Received: var={var}." ) apply_kwargs = {} if isinstance(grad, tf.IndexedSlices): if var.constraint is not None: raise RuntimeError( "Cannot use a constraint function on a sparse " f"variable. Received: grad={grad}, " f"var.constraint={var.constraint}." ) if "apply_state" in self._sparse_apply_args: apply_kwargs["apply_state"] = apply_state return self._resource_apply_sparse_duplicate_indices( grad.values, var, grad.indices, **apply_kwargs ) if "apply_state" in self._dense_apply_args: apply_kwargs["apply_state"] = apply_state update_op = self._resource_apply_dense(grad, var, **apply_kwargs) if var.constraint is not None: with tf.control_dependencies([update_op]): return var.assign(var.constraint(var)) else: return update_op eagerly_outside_functions = ( tf.compat.v1.executing_eagerly_outside_functions() ) update_ops = [] with name_scope_only_in_function_or_graph(name or self._name): for grad, var in grads_and_vars: # Colocate the update with variables to avoid unnecessary # communication delays. See b/136304694. with distribution.extended.colocate_vars_with(var): with name_scope_only_in_function_or_graph( "update" if eagerly_outside_functions else "update_" + var.op.name ): update_op = distribution.extended.update( var, apply_grad_to_update_var, args=(grad,), group=False, ) if tf.distribute.in_cross_replica_context(): # In cross-replica context, extended.update returns # a list of update ops from all replicas # (group=False). update_ops.extend(update_op) else: # In replica context, extended.update return the # single update op of current replica. update_ops.append(update_op) any_symbolic = any( isinstance(i, tf.Operation) or tf_utils.is_symbolic_tensor(i) for i in update_ops ) if not tf.executing_eagerly() or any_symbolic: # If the current context is graph mode or any of the update ops # are symbolic then the step update should be carried out under # a graph context. (eager updates execute immediately) with backend._current_graph(update_ops).as_default(): with tf.control_dependencies([tf.group(update_ops)]): return self.iterations.assign_add(1, read_value=False) return self.iterations.assign_add(1) def get_gradients(self, loss, params): """Returns gradients of `loss` with respect to `params`. Should be used only in legacy v1 graph mode. Args: loss: Loss tensor. params: List of variables. Returns: List of gradient tensors. Raises: ValueError: In case any gradient cannot be computed (e.g. if gradient function not implemented). """ params = tf.nest.flatten(params) with backend.get_graph().as_default(), backend.name_scope( self._name + "/gradients" ): grads = tf.compat.v1.gradients(loss, params) for grad, param in zip(grads, params): if grad is None: raise ValueError( "Variable {} has `None` for gradient. 
" "Please make sure that all of your ops have a " "gradient defined (i.e. are differentiable). " "Common ops without gradient: " "K.argmax, K.round, K.eval.".format(param) ) return grads def get_updates(self, loss, params): grads = self.get_gradients(loss, params) grads_and_vars = list(zip(grads, params)) self._assert_valid_dtypes( [ v for g, v in grads_and_vars if g is not None and v.dtype != tf.resource ] ) return [self.apply_gradients(grads_and_vars)] def _set_hyper(self, name, value): """set hyper `name` to value. value can be callable, tensor, numeric.""" if isinstance(value, tf.__internal__.tracking.Trackable): self._track_trackable(value, name, overwrite=True) if name not in self._hyper: self._hyper[name] = value else: prev_value = self._hyper[name] if ( callable(prev_value) or isinstance( prev_value, ( tf.Tensor, int, float, learning_rate_schedule.LearningRateSchedule, ), ) or isinstance( value, learning_rate_schedule.LearningRateSchedule ) ): self._hyper[name] = value else: backend.set_value(self._hyper[name], value) def _get_hyper(self, name, dtype=None): if not self._hypers_created: self._create_hypers() value = self._hyper[name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return value if callable(value): value = value() if dtype: return tf.cast(value, dtype) else: return value def _create_slots(self, var_list): pass def _create_slots_for_sharded_variables(self, var_list): """Add ShardedVariables to slots to later reconstruct for checkpointing. ShardedVariables don't have slot variables created for them; their shards do. This function allows users to call get_slot with a ShardedVariable input and receive a ShardedVariable output containing the appropriate slot vars. Iterate over the variables to find shards, and aggregate the sharded containers in a set. Add these ShardedVariables to _slots so that get_slot can retrieve the proper slot variables for their component shards, and reconstruct those into a ShardedVariable. Args: var_list: list or tuple of `Variable` objects that will be minimized using this optimizer. """ sharded_vars = set() for var in var_list: if getattr(var, "_sharded_container", False): sharded_vars.add(var._sharded_container()) for sharded_var in sharded_vars: sharded_key = _var_key(sharded_var) slot_dict = {} for slot in self.get_slot_names(): slot_dict[slot] = sharded_var self._slots[sharded_key] = slot_dict def _create_all_weights(self, var_list): """Creates all weights, including iterations, hyperparameters and slot vars. This will add newly created variables to `optimizer.weights`. New variables are only created when this method is called the first time, or when called with different variables in the var_list. Args: var_list: list or tuple of `Variable` objects that will be minimized using this optimizer. """ _ = self.iterations self._create_hypers() self._create_slots(var_list) self._create_slots_for_sharded_variables(var_list) def __getattribute__(self, name): """Overridden to support hyperparameter access.""" try: return super().__getattribute__(name) except AttributeError as e: # Needed to avoid infinite recursion with __setattr__. if name == "_hyper": raise e # Backwards compatibility with TF-Keras optimizers. 
if name == "lr": name = "learning_rate" if name in self._hyper: return self._get_hyper(name) raise e def __dir__(self): result = set(super().__dir__()) if "_hyper" in result: result |= self._hyper.keys() if "learning_rate" in self._hyper.keys(): result.add("lr") return list(result) def __setattr__(self, name, value): """Override setattr to support dynamic hyperparameter setting.""" # Backwards compatibility with TF-Keras optimizers. if name == "lr": name = "learning_rate" if hasattr(self, "_hyper") and name in self._hyper: self._set_hyper(name, value) else: super().__setattr__(name, value) def get_slot_names(self): """A list of names for this optimizer's slots.""" return self._slot_names def add_slot(self, var, slot_name, initializer="zeros", shape=None): """Add a new slot variable for `var`. A slot variable is an additional variable associated with `var` to train. It is allocated and managed by optimizers, e.g. `Adam`. Args: var: a `Variable` object. slot_name: name of the slot variable. initializer: initializer of the slot variable shape: (Optional) shape of the slot variable. If not set, it will default to the shape of `var`. Returns: A slot variable. """ if slot_name not in self._slot_names: self._slot_names.append(slot_name) var_key = _var_key(var) slot_dict = self._slots.setdefault(var_key, {}) weight = slot_dict.get(slot_name, None) if weight is None: if isinstance(initializer, str) or callable(initializer): initializer = initializers.get(initializer) if isinstance( initializer, tf.__internal__.tracking.CheckpointInitialValueCallable, ) or (shape is not None): slot_shape = shape else: slot_shape = var.shape initial_value = functools.partial( initializer, shape=slot_shape, dtype=var.dtype ) else: initial_value = initializer with self._distribution_strategy_scope(): strategy = tf.distribute.get_strategy() if not strategy.extended.variable_created_in_scope(var): raise ValueError( "Trying to create optimizer slot variable under the " "scope for tf.distribute.Strategy ({}), which is " "different from the scope used for the original " "variable ({}). Make sure the slot variables are " "created under the same strategy scope. This may " "happen if you're restoring from a checkpoint " "outside the scope.".format(strategy, var) ) with strategy.extended.colocate_vars_with(var): weight = tf.Variable( name=f"{var._shared_name}/{slot_name}", dtype=var.dtype, trainable=False, initial_value=initial_value, ) backend.track_variable(weight) slot_dict[slot_name] = weight self._restore_slot_variable( slot_name=slot_name, variable=var, slot_variable=weight ) self._weights.append(weight) return weight def get_slot(self, var, slot_name): var_key = _var_key(var) slot_dict = self._slots[var_key] slot_variable = slot_dict[slot_name] if isinstance( slot_variable, tf.__internal__.distribute.ShardedVariable ): # Construct a ShardedVariable that points to the input # ShardedVariable's component shard's slot variables. 
shard_vars = [] for shard in slot_variable.variables: slot_shard = self.get_slot(shard, slot_name) shard_vars.append(slot_shard) slot_variable = tf.__internal__.distribute.ShardedVariable( shard_vars, name=slot_variable.name ) return slot_variable def _prepare(self, var_list): keys = set() for var in var_list: if isinstance(var, tf.distribute.DistributedValues): var_devices = var._devices else: var_devices = [var.device] var_dtype = var.dtype.base_dtype for var_device in var_devices: keys.add((var_device, var_dtype)) apply_state = {} for var_device, var_dtype in keys: apply_state[(var_device, var_dtype)] = {} with tf.device(var_device): self._prepare_local(var_device, var_dtype, apply_state) return apply_state def _prepare_local(self, var_device, var_dtype, apply_state): if "learning_rate" in self._hyper: lr_t = tf.identity(self._decayed_lr(var_dtype)) apply_state[(var_device, var_dtype)]["lr_t"] = lr_t def _fallback_apply_state(self, var_device, var_dtype): """Compatibility for subclasses that don't pass apply_state through.""" apply_state = {(var_device, var_dtype): {}} self._prepare_local(var_device, var_dtype, apply_state) return apply_state[(var_device, var_dtype)] def _create_hypers(self): if self._hypers_created: return with self._distribution_strategy_scope(): # Iterate hyper values deterministically. for name, value in sorted(self._hyper.items()): if isinstance(value, (tf.Tensor, tf.Variable)) or callable( value ): # The check for `callable` covers the usage when `value` is # a `LearningRateSchedule`, in which case it does not need # to create a variable. continue else: self._hyper[name] = self.add_weight( name, shape=[], trainable=False, initializer=value, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, ) self._hypers_created = True @property def iterations(self): """Variable. The number of training steps this Optimizer has run.""" if self._iterations is None: with self._distribution_strategy_scope(): self._iterations = self.add_weight( "iter", shape=[], dtype=tf.int64, trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, ) self._weights.append(self._iterations) return self._iterations @iterations.setter def iterations(self, variable): if self._iterations is not None: raise RuntimeError( "Cannot set `iterations` to a new Variable after " "the Optimizer weights have been created. Here it is " f"attempting to set `iterations` to {variable}." ) self._iterations = variable self._weights.append(self._iterations) def _decayed_lr(self, var_dtype): """Get decayed learning rate as a Tensor with dtype=var_dtype.""" lr_t = self._get_hyper("learning_rate", var_dtype) if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule): local_step = tf.cast(self.iterations, var_dtype) lr_t = tf.cast(lr_t(local_step), var_dtype) if self._initial_decay > 0.0: local_step = tf.cast(self.iterations, var_dtype) decay_t = tf.cast(self._initial_decay, var_dtype) lr_t = lr_t / (1.0 + decay_t * local_step) return lr_t @abc.abstractmethod def get_config(self): """Returns the config of the optimizer. An optimizer config is a Python dictionary (serializable) containing the configuration of an optimizer. The same optimizer can be reinstantiated later (without any saved state) from this configuration. Returns: Python dictionary. 
""" config = {"name": self._name} if self.clipnorm is not None: config["clipnorm"] = self.clipnorm if self.clipvalue is not None: config["clipvalue"] = self.clipvalue if self.global_clipnorm is not None: config["global_clipnorm"] = self.global_clipnorm return config @classmethod def from_config(cls, config, custom_objects=None): """Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance. """ if "lr" in config: config["learning_rate"] = config.pop("lr") if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"], custom_objects=custom_objects ) return cls(**config) def _serialize_hyperparameter(self, hyperparameter_name): """Serialize a hyperparameter that can be a float, callable, or Tensor.""" value = self._hyper[hyperparameter_name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return learning_rate_schedule.serialize(value) if callable(value): return value() if tf.is_tensor(value): return backend.get_value(value) return value def variables(self): """Returns variables of this Optimizer based on the order created.""" return self._weights @property def weights(self): """Returns variables of this Optimizer based on the order created.""" return self._weights def get_weights(self): """Returns the current weights of the optimizer. The weights of an optimizer are its state (ie, variables). This function returns the weight values associated with this optimizer as a list of Numpy arrays. The first value is always the iterations count of the optimizer, followed by the optimizer's state variables in the order they were created. The returned list can in turn be used to load state into similarly parameterized optimizers. For example, the RMSprop optimizer for this simple model returns a list of three values-- the iteration count, followed by the root-mean-square value of the kernel and bias of the single Dense layer: >>> opt = tf.keras.optimizers.legacy.RMSprop() >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> m.compile(opt, loss='mse') >>> data = np.arange(100).reshape(5, 20) >>> labels = np.zeros(5) >>> results = m.fit(data, labels) # Training. >>> len(opt.get_weights()) 3 Returns: Weights values as a list of numpy arrays. """ params = self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe share this logic with base_layer. def set_weights(self, weights): """Set the weights of the optimizer. The weights of an optimizer are its state (ie, variables). This function takes the weight values associated with this optimizer as a list of Numpy arrays. The first value is always the iterations count of the optimizer, followed by the optimizer's state variables in the order they are created. The passed values are used to set the new state of the optimizer. 
For example, the RMSprop optimizer for this simple model takes a list of three values-- the iteration count, followed by the root-mean-square value of the kernel and bias of the single Dense layer: >>> opt = tf.keras.optimizers.legacy.RMSprop() >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> m.compile(opt, loss='mse') >>> data = np.arange(100).reshape(5, 20) >>> labels = np.zeros(5) >>> results = m.fit(data, labels) # Training. >>> new_weights = [np.array(10), np.ones([20, 10]), np.zeros([10])] >>> opt.set_weights(new_weights) >>> opt.iterations <tf.Variable 'RMSprop/iter:0' shape=() dtype=int64, numpy=10> Args: weights: weight values as a list of numpy arrays. """ params = self.weights if len(params) != len(weights): raise ValueError( f"You called `set_weights(weights)` on optimizer {self._name} " f"with a weight list of length {str(len(weights))}, " f"but the optimizer was expecting {str(len(params))} " f"weights. Provided weights: {str(weights)[:50]}..." ) if not params: return weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError( f"Optimizer weight shape {str(pv.shape)} " "not compatible with " f"provided weight shape {str(w.shape)}." ) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) def add_weight( self, name, shape, dtype=None, initializer="zeros", trainable=None, synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.VariableAggregation.NONE, ): if dtype is None: dtype = tf.float32 if isinstance(initializer, str) or callable(initializer): initializer = initializers.get(initializer) if synchronization == tf.VariableSynchronization.ON_READ: if trainable: raise ValueError( "Synchronization value can be set to " "VariableSynchronization.ON_READ only for non-trainable " "variables. You have specified trainable=True and " "synchronization=VariableSynchronization.ON_READ." ) else: # Set trainable to be false when variable is to be synced on # read. trainable = False elif trainable is None: trainable = True variable = self._add_variable_with_custom_getter( name=name, shape=shape, getter=base_layer_utils.make_variable, overwrite=True, initializer=initializer, dtype=dtype, trainable=trainable, use_resource=True, synchronization=synchronization, aggregation=aggregation, ) backend.track_variable(variable) return variable def _init_set_name(self, name, zero_based=True): if not name: self._name = backend.unique_object_name( generic_utils.to_snake_case(self.__class__.__name__), zero_based=zero_based, ) else: self._name = name def _assert_valid_dtypes(self, tensors): """Asserts tensors are all valid types (see `_valid_dtypes`). Args: tensors: Tensors to check. Raises: ValueError: If any tensor is not a valid type. """ valid_dtypes = self._valid_dtypes() for t in tensors: dtype = t.dtype.base_dtype if dtype not in valid_dtypes: raise ValueError( "Invalid type {} for {}, expected: {}.".format( dtype, t.name, [v for v in valid_dtypes] ) ) def _valid_dtypes(self): """Valid types for loss, variables and gradients. Subclasses should override to allow other float types. Returns: Valid types for loss, variables and gradients. """ return _DEFAULT_VALID_DTYPES def _call_if_callable(self, param): """Call the function if param is callable.""" return param() if callable(param) else param def _resource_apply_dense(self, grad, handle, apply_state): """Add ops to apply dense gradients to the variable `handle`. 
Args: grad: a `Tensor` representing the gradient. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. apply_state: A dict which is used across multiple apply calls. Returns: An `Operation` which updates the value of the variable. """ raise NotImplementedError( "`_resource_apply_dense` must be implemented in subclasses." ) def _resource_apply_sparse_duplicate_indices( self, grad, handle, indices, **kwargs ): """Add ops to apply sparse gradients to `handle`, with repeated indices. Optimizers which override this method must deal with repeated indices. See the docstring of `_apply_sparse_duplicate_indices` for details. By default the correct behavior, to sum non-unique indices and their associated gradients, is enforced by first pre-processing `grad` and `indices` and passing them on to `_resource_apply_sparse`. Optimizers which deal correctly with duplicate indices may instead override this method to avoid the overhead of summing. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices may be repeated. **kwargs: May optionally contain `apply_state` Returns: An `Operation` which updates the value of the variable. """ summed_grad, unique_indices = _deduplicate_indexed_slices( values=grad, indices=indices ) return self._resource_apply_sparse( summed_grad, handle, unique_indices, **kwargs ) def _resource_apply_sparse(self, grad, handle, indices, apply_state): """Add ops to apply sparse gradients to the variable `handle`. Similar to `_apply_sparse`, the `indices` argument to this method has been de-duplicated. Optimizers which deal correctly with non-unique indices may instead override `_resource_apply_sparse_duplicate_indices` to avoid this overhead. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices are unique. apply_state: A dict which is used across multiple apply calls. Returns: An `Operation` which updates the value of the variable. """ raise NotImplementedError( "`_resource_apply_sparse` Must be implemented in subclasses." ) def _resource_scatter_add(self, x, i, v): with tf.control_dependencies( [ tf.raw_ops.ResourceScatterAdd( resource=x.handle, indices=i, updates=v ) ] ): return x.value() def _resource_scatter_update(self, x, i, v): with tf.control_dependencies( [ tf.raw_ops.ResourceScatterUpdate( resource=x.handle, indices=i, updates=v ) ] ): return x.value() @property @layer_utils.cached_per_instance def _dense_apply_args(self): return tf_inspect.getfullargspec(self._resource_apply_dense).args @property @layer_utils.cached_per_instance def _sparse_apply_args(self): return tf_inspect.getfullargspec(self._resource_apply_sparse).args # --------------- # For implementing the trackable interface # --------------- def _restore_slot_variable(self, slot_name, variable, slot_variable): """Restore a newly created slot variable's value.""" variable_key = _var_key(variable) deferred_restorations = self._deferred_slot_restorations.get( slot_name, {} ).pop(variable_key, []) # Iterate over restores, highest restore UID first to minimize the # number of assignments. 
deferred_restorations.sort( key=lambda position: position.restore_uid, reverse=True ) for checkpoint_position in deferred_restorations: checkpoint_position.restore(slot_variable) def _create_or_restore_slot_variable( self, slot_variable_position, slot_name, variable ): """Returns the slot variable that should have a value restored into it. It is up to the caller to restore the value into the slot variable if a valid slot variable is returned. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. This method is nonetheless important when graph building for the case when a slot variable has already been created but `variable` has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A `trackable._CheckpointPosition` object indicating the slot variable `Trackable` object to be restored. slot_name: The name of this `Optimizer`'s slot to restore into. variable: The variable object this slot is being created for. Returns: A slot variable that should have a value restored into it, or None if a slot variable should not be restored at this time. """ variable_key = _var_key(variable) slot_dict = self._slots.get(variable_key, {}) slot_variable = slot_dict.get(slot_name, None) if ( slot_variable is None and tf.executing_eagerly() and slot_variable_position.is_simple_variable() # Defer slot variable creation if there is an active variable # creator scope. Generally we'd like to eagerly create/restore slot # variables when possible, but this may mean that scopes intended to # catch `variable` also catch its eagerly created slot variable # unintentionally (specifically make_template would add a dependency # on a slot variable if not for this case). Deferring is mostly # harmless (aside from double initialization), and makes variable # creator scopes behave the same way they do when graph building. # # One notable case is with distribution strategy, which uses # variable creator scope but always desires the `variable` and the # slot to use the same scope, thus we can safely eagerly # create/restore slot variables. and ( not tf.compat.v1.get_default_graph()._variable_creator_stack or self._distribution_strategy ) ): initializer = ( tf.__internal__.tracking.CheckpointInitialValueCallable( checkpoint_position=slot_variable_position ) ) slot_variable = self.add_slot( var=variable, initializer=initializer, slot_name=slot_name, shape=slot_variable_position.value_shape(), ) # Slot variables are not owned by any one object (because we don't # want to save the slot variable if the optimizer is saved without # the non-slot variable, or if the non-slot variable is saved # without the optimizer; it's a dependency hypergraph with edges of # the form (optimizer, non-slot variable, variable)). So we don't # _track_ slot variables anywhere, and instead special-case this # dependency and otherwise pretend it's a normal graph. 
if slot_variable is not None: # For sharded variables, we need the logic in get_slot to combine # slot variables for its shards if (slot_variable is variable) and ( isinstance(variable, tf.__internal__.distribute.ShardedVariable) ): return self.get_slot(variable, slot_name) # If we've either made this slot variable, or if we've pulled out an # existing slot variable, we should restore it. return slot_variable else: # We didn't make the slot variable. Defer restoring until it gets # created normally. We keep a list rather than the one with the # highest restore UID in case slot variables have their own # dependencies, in which case those could differ between restores. self._deferred_slot_restorations.setdefault( slot_name, {} ).setdefault(variable_key, []).append(slot_variable_position) return None @contextlib.contextmanager def _distribution_strategy_scope(self): """Returns the `tf.distribute.Strategy` this optimizer was created under.""" if self._distribution_strategy and not tf.distribute.has_strategy(): with self._distribution_strategy.scope(): yield self._distribution_strategy.scope() else: yield def _var_key(var): """Key for representing a primary variable, for looking up slots. In graph mode the name is derived from the var shared name. In eager mode the name is derived from the var unique id. If distribution strategy exists, get the primary variable first. Args: var: the variable. Returns: the unique name of the variable. """ # Get the distributed variable if it exists. if hasattr(var, "_distributed_container"): var = var._distributed_container() elif ( tf_utils.is_extension_type(var) and hasattr(var, "handle") and hasattr(var.handle, "_distributed_container") ): # For ResourceVariables, the _distributed_container attribute # is added to their handle tensors. var = var.handle._distributed_container() if getattr(var, "_in_graph_mode", False): return var._shared_name return var._unique_id def _get_slot_key_from_var(var, slot_name): """Get the slot key for the variable: var_name/slot_name.""" name = _var_key(var) return name + "/" + slot_name class RestoredOptimizer(OptimizerV2): """A non-functional Optimizer implementation for checkpoint compatibility. Holds slot variables and hyperparameters when an optimizer is restored from a SavedModel. These variables may be referenced in functions along with ops created by the original optimizer, but currently we do not support using the optimizer object itself (e.g. through `apply_gradients`). """ # TODO(allenl): Make the restored optimizer functional by tracing its apply # methods. def __init__(self): super().__init__("RestoredOptimizer") self._hypers_created = True def get_config(self): # TODO(allenl): Save and restore the Optimizer's config raise NotImplementedError( "Restoring functional Optimizers from SavedModels is not currently " "supported. Please file a feature request if this limitation " "bothers you." ) # When `keras_2` is installed in same env, it raises assertion for duplicate # registration with same name. Rename the symbol in this case. 
try:
    tf.__internal__.saved_model.load.register_revived_type(
        "optimizer",
        lambda obj: isinstance(obj, OptimizerV2),
        versions=[
            tf.__internal__.saved_model.load.VersionedTypeRegistration(
                object_factory=lambda proto: RestoredOptimizer(),
                version=2,
                min_producer_version=1,
                min_consumer_version=1,
                setter=RestoredOptimizer._set_hyper,
            )
        ],
    )
except AssertionError:
    tf.__internal__.saved_model.load.register_revived_type(
        "tf_keras_optimizer",
        lambda obj: isinstance(obj, OptimizerV2),
        versions=[
            tf.__internal__.saved_model.load.VersionedTypeRegistration(
                object_factory=lambda proto: RestoredOptimizer(),
                version=2,
                min_producer_version=1,
                min_consumer_version=1,
                setter=RestoredOptimizer._set_hyper,
            )
        ],
    )
tf-keras/tf_keras/optimizers/legacy/optimizer_v2.py/0
{ "file_path": "tf-keras/tf_keras/optimizers/legacy/optimizer_v2.py", "repo_id": "tf-keras", "token_count": 29935 }
185
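# --- Illustrative sketch (not part of the original dataset row above) ---
# Shows the `get_config`/`from_config` and `get_weights`/`set_weights` round
# trip documented in optimizer_v2.py. Assumes a TF 2.x install that still
# ships the legacy optimizer namespace; the model and data are placeholders.
import numpy as np
import tensorflow as tf

opt = tf.keras.optimizers.legacy.RMSprop(learning_rate=0.01)
model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
model.compile(opt, loss="mse")
data = np.arange(100, dtype="float32").reshape(5, 20)
labels = np.zeros(5)
model.fit(data, labels, verbose=0)  # creates the iteration count + slot variables

# Rebuild the same kind of optimizer from its serialized config ...
clone = tf.keras.optimizers.legacy.RMSprop.from_config(opt.get_config())
# ... then, once its slots exist (after a compile/fit), copy the state over.
model.compile(clone, loss="mse")
model.fit(data, labels, verbose=0)
clone.set_weights(opt.get_weights())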
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for sequence data preprocessing utils.""" import math import numpy as np import tensorflow.compat.v2 as tf from tf_keras.preprocessing import sequence class TestSequence(tf.test.TestCase): def test_make_sampling_table(self): a = sequence.make_sampling_table(3) self.assertAllClose( a, np.asarray([0.00315225, 0.00315225, 0.00547597]), rtol=0.1 ) def test_skipgrams(self): # test with no window size and binary labels couples, labels = sequence.skipgrams(np.arange(3), vocabulary_size=3) for couple in couples: self.assertIn(couple[0], [0, 1, 2]) self.assertIn(couple[1], [0, 1, 2]) # test window size and categorical labels couples, labels = sequence.skipgrams( np.arange(5), vocabulary_size=5, window_size=1, categorical=True ) for couple in couples: self.assertLessEqual(couple[0] - couple[1], 3) for label in labels: self.assertLen(label, 2) def test_remove_long_seq(self): maxlen = 5 seq = [ [1, 2, 3], [1, 2, 3, 4, 5, 6], ] label = ["a", "b"] new_seq, new_label = sequence._remove_long_seq(maxlen, seq, label) self.assertEqual(new_seq, [[1, 2, 3]]) self.assertEqual(new_label, ["a"]) def test_TimeseriesGenerator(self): data = np.array([[i] for i in range(50)]) targets = np.array([[i] for i in range(50)]) data_gen = sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, batch_size=2 ) self.assertLen(data_gen, 20) self.assertAllClose( data_gen[0][0], np.array([[[0], [2], [4], [6], [8]], [[1], [3], [5], [7], [9]]]), ) self.assertAllClose(data_gen[0][1], np.array([[10], [11]])) self.assertAllClose( data_gen[1][0], np.array([[[2], [4], [6], [8], [10]], [[3], [5], [7], [9], [11]]]), ) self.assertAllClose(data_gen[1][1], np.array([[12], [13]])) data_gen = sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, reverse=True, batch_size=2, ) self.assertLen(data_gen, 20) self.assertAllClose( data_gen[0][0], np.array([[[8], [6], [4], [2], [0]], [[9], [7], [5], [3], [1]]]), ) self.assertAllClose(data_gen[0][1], np.array([[10], [11]])) data_gen = sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, shuffle=True, batch_size=1, ) batch = data_gen[0] r = batch[1][0][0] self.assertAllClose( batch[0], np.array([[[r - 10], [r - 8], [r - 6], [r - 4], [r - 2]]]) ) self.assertAllClose( batch[1], np.array( [ [r], ] ), ) data_gen = sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, stride=2, batch_size=2 ) self.assertLen(data_gen, 10) self.assertAllClose( data_gen[1][0], np.array( [[[4], [6], [8], [10], [12]], [[6], [8], [10], [12], [14]]] ), ) self.assertAllClose(data_gen[1][1], np.array([[14], [16]])) data_gen = sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, start_index=10, end_index=30, batch_size=2, ) self.assertLen(data_gen, 6) self.assertAllClose( data_gen[0][0], np.array( [[[10], [12], [14], [16], [18]], [[11], [13], [15], [17], [19]]] 
), ) self.assertAllClose(data_gen[0][1], np.array([[20], [21]])) data = np.array( [np.random.random_sample((1, 2, 3, 4)) for i in range(50)] ) targets = np.array( [np.random.random_sample((3, 2, 1)) for i in range(50)] ) data_gen = sequence.TimeseriesGenerator( data, targets, length=10, sampling_rate=2, start_index=10, end_index=30, batch_size=2, ) self.assertLen(data_gen, 6) self.assertAllClose( data_gen[0][0], np.array([np.array(data[10:19:2]), np.array(data[11:20:2])]), ) self.assertAllClose( data_gen[0][1], np.array([targets[20], targets[21]]) ) with self.assertRaisesRegex( ValueError, r"`start_index\+length=50 > end_index=49` is disallowed" ): sequence.TimeseriesGenerator(data, targets, length=50) def test_TimeSeriesGenerator_doesnt_miss_any_sample(self): x = np.array([[i] for i in range(10)]) for length in range(3, 10): g = sequence.TimeseriesGenerator(x, x, length=length, batch_size=1) expected = max(0, len(x) - length) actual = len(g) self.assertEqual(expected, actual) if len(g) > 0: # All elements in range(length, 10) should be used as current # step expected = np.arange(length, 10).reshape(-1, 1) y = np.concatenate([g[ix][1] for ix in range(len(g))], axis=0) self.assertAllClose(y, expected) x = np.array([[i] for i in range(23)]) strides = (1, 1, 5, 7, 3, 5, 3) lengths = (3, 3, 4, 3, 1, 3, 7) batch_sizes = (6, 6, 6, 5, 6, 6, 6) shuffles = (False, True, True, False, False, False, False) for stride, length, batch_size, shuffle in zip( strides, lengths, batch_sizes, shuffles ): g = sequence.TimeseriesGenerator( x, x, length=length, sampling_rate=1, stride=stride, start_index=0, end_index=None, shuffle=shuffle, reverse=False, batch_size=batch_size, ) if shuffle: # all batches have the same size when shuffle is True. expected_sequences = ( math.ceil((23 - length) / float(batch_size * stride)) * batch_size ) else: # last batch will be different if `(samples - length) / stride` # is not a multiple of `batch_size`. expected_sequences = math.ceil((23 - length) / float(stride)) expected_batches = math.ceil(expected_sequences / float(batch_size)) y = [g[ix][1] for ix in range(len(g))] actual_sequences = sum(len(y_) for y_ in y) actual_batches = len(y) self.assertEqual(expected_sequences, actual_sequences) self.assertEqual(expected_batches, actual_batches) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/preprocessing/sequence_test.py/0
{ "file_path": "tf-keras/tf_keras/preprocessing/sequence_test.py", "repo_id": "tf-keras", "token_count": 4088 }
186
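# --- Illustrative sketch (not part of the original dataset row above) ---
# Mirrors the windowing behaviour exercised in sequence_test.py: with
# length=10 and sampling_rate=2 each sample holds 5 timesteps, and the
# target is the value that immediately follows the window.
import numpy as np
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator

series = np.array([[i] for i in range(50)])
gen = TimeseriesGenerator(series, series, length=10, sampling_rate=2, batch_size=2)
inputs, targets = gen[0]
# inputs[0] -> [[0], [2], [4], [6], [8]]; targets[0] -> [10]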
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras model saving code.""" import os import tensorflow.compat.v2 as tf from tf_keras import backend from tf_keras.saving import object_registration from tf_keras.saving.legacy import hdf5_format from tf_keras.saving.legacy import saving_utils from tf_keras.saving.legacy import serialization from tf_keras.saving.legacy.saved_model import load as saved_model_load from tf_keras.saving.legacy.saved_model import save as saved_model_save from tf_keras.saving.legacy.saved_model.utils import keras_option_scope from tf_keras.utils import io_utils from tf_keras.utils import traceback_utils try: import h5py except ImportError: h5py = None @traceback_utils.filter_traceback def save_model( model, filepath, overwrite=True, include_optimizer=True, save_format=None, signatures=None, options=None, save_traces=True, ): """Saves a model as a TensorFlow SavedModel or HDF5 file. See the [Serialization and Saving guide](https://keras.io/guides/serialization_and_saving/) for details. Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> model.save('/tmp/model') >>> loaded_model = tf.keras.models.load_model('/tmp/model') >>> x = tf.random.uniform((10, 3)) >>> assert np.allclose(model.predict(x), loaded_model.predict(x)) Note that `model.save()` is an alias for `tf.keras.models.save_model()`. The SavedModel and HDF5 file contains: - the model's configuration (topology) - the model's weights - the model's optimizer's state (if any) Thus models can be reinstantiated in the exact same state, without any of the code used for model definition or training. Note that the model weights may have different scoped names after being loaded. Scoped names include the model/layer names, such as `"dense_1/kernel:0"`. It is recommended that you use the layer properties to access specific variables, e.g. `model.get_layer("dense_1").kernel`. __SavedModel serialization format__ TF-Keras SavedModel uses `tf.saved_model.save` to save the model and all trackable objects attached to the model (e.g. layers and variables). The model config, weights, and optimizer are saved in the SavedModel. Additionally, for every TF-Keras layer attached to the model, the SavedModel stores: * the config and metadata -- e.g. name, dtype, trainable status * traced call and loss functions, which are stored as TensorFlow subgraphs. The traced functions allow the SavedModel format to save and load custom layers without the original class definition. You can choose to not save the traced functions by disabling the `save_traces` option. This will decrease the time it takes to save the model and the amount of disk space occupied by the output SavedModel. If you enable this option, then you _must_ provide all custom class definitions when loading the model. 
See the `custom_objects` argument in `tf.keras.models.load_model`. Args: model: TF-Keras model instance to be saved. filepath: One of the following: - String or `pathlib.Path` object, path where to save the model - `h5py.File` object where to save the model overwrite: Whether we should overwrite any existing model at the target location, or instead ask the user with a manual prompt. include_optimizer: If True, save optimizer's state together. save_format: Either 'tf' or 'h5', indicating whether to save the model to Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and 'h5' in TF 1.X. signatures: Signatures to save with the SavedModel. Applicable to the 'tf' format only. Please see the `signatures` argument in `tf.saved_model.save` for details. options: (only applies to SavedModel format) `tf.saved_model.SaveOptions` object that specifies options for saving to SavedModel. save_traces: (only applies to SavedModel format) When enabled, the SavedModel will store the function traces for each layer. This can be disabled, so that only the configs of each layer are stored. Defaults to `True`. Disabling this will decrease serialization time and reduce file size, but it requires that all custom layers/models implement a `get_config()` method. Raises: ImportError: If save format is hdf5, and h5py is not available. """ from tf_keras.engine import sequential default_format = "tf" if tf.__internal__.tf2.enabled() else "h5" save_format = save_format or default_format filepath = io_utils.path_to_string(filepath) # If the user has not already called fit or built the underlying metrics, we # should do that before saving to ensure the metric names have all # appropriate name transformations applied. saving_utils.try_build_compiled_arguments(model) if ( save_format == "h5" or (h5py is not None and isinstance(filepath, h5py.File)) or saving_utils.is_hdf5_filepath(filepath) ): # TODO(b/130258301): add utility method for detecting model type. if not model._is_graph_network and not isinstance( model, sequential.Sequential ): raise NotImplementedError( "Saving the model to HDF5 format requires the model to be a " "Functional model or a Sequential model. It does not work for " "subclassed models, because such models are defined via the " "body of a Python method, which isn't safely serializable. " "Consider saving to the Tensorflow SavedModel format (by " 'setting save_format="tf") or using `save_weights`.' ) hdf5_format.save_model_to_hdf5( model, filepath, overwrite, include_optimizer ) else: with serialization.SharedObjectSavingScope(): with keras_option_scope( save_traces=save_traces, in_tf_saved_model_scope=True ): saved_model_save.save( model, filepath, overwrite, include_optimizer, signatures, options, save_traces, ) @traceback_utils.filter_traceback def load_model(filepath, custom_objects=None, compile=True, options=None): """Loads a model saved via `model.save()`. Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> model.save('/tmp/model') >>> loaded_model = tf.keras.models.load_model('/tmp/model') >>> x = tf.random.uniform((10, 3)) >>> assert np.allclose(model.predict(x), loaded_model.predict(x)) Note that the model weights may have different scoped names after being loaded. Scoped names include the model/layer names, such as `"dense_1/kernel:0"`. It is recommended that you use the layer properties to access specific variables, e.g. `model.get_layer("dense_1").kernel`. 
Args: filepath: One of the following: - String or `pathlib.Path` object, path to the saved model - `h5py.File` object from which to load the model custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. compile: Boolean, whether to compile the model after loading. options: Optional `tf.saved_model.LoadOptions` object that specifies options for loading from SavedModel. Returns: A TF-Keras model instance. If the original model was compiled, and saved with the optimizer, then the returned model will be compiled. Otherwise, the model will be left uncompiled. In the case that an uncompiled model is returned, a warning is displayed if the `compile` argument is set to `True`. Raises: ImportError: if loading from an hdf5 file and h5py is not available. IOError: In case of an invalid savefile. """ with serialization.SharedObjectLoadingScope(): custom_objects = custom_objects or {} tlco = object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__ gco = object_registration._GLOBAL_CUSTOM_OBJECTS custom_objects = {**custom_objects, **tlco, **gco} with object_registration.CustomObjectScope(custom_objects): with keras_option_scope( save_traces=False, in_tf_saved_model_scope=True ): with tf.__internal__.load_context(options): filepath_str = io_utils.path_to_string(filepath) if isinstance(filepath_str, str): if not tf.io.gfile.exists(filepath_str): raise IOError( f"No file or directory found at {filepath_str}" ) if tf.io.gfile.isdir(filepath_str): return saved_model_load.load( filepath_str, compile, options ) else: if h5py is None: raise ImportError( "Filepath looks like a hdf5 file but h5py" "is not available." f" filepath={filepath_str}" ) return hdf5_format.load_model_from_hdf5( tf.io.gfile.GFile(filepath_str, mode="rb"), custom_objects, compile, ) elif h5py is not None and isinstance(filepath, h5py.File): return hdf5_format.load_model_from_hdf5( filepath, custom_objects, compile ) raise IOError( "Unable to load model. Filepath is not an hdf5 file (or h5py is not " f"available) or SavedModel. Received: filepath={filepath}" ) def save_weights( model, filepath, overwrite=True, save_format=None, options=None ): """Saves all layer weights. Either saves in HDF5 or in TensorFlow format based on the `save_format` argument. When saving in HDF5 format, the weight file has: - `layer_names` (attribute), a list of strings (ordered names of model layers). - For every layer, a `group` named `layer.name` - For every such layer group, a group attribute `weight_names`, a list of strings (ordered names of weights tensor of the layer). - For every weight in the layer, a dataset storing the weight value, named after the weight tensor. When saving in TensorFlow format, all objects referenced by the network are saved in the same format as `tf.train.Checkpoint`, including any `Layer` instances or `Optimizer` instances assigned to object attributes. For networks constructed from inputs and outputs using `tf.keras.Model(inputs, outputs)`, `Layer` instances used by the network are tracked/saved automatically. For user-defined classes which inherit from `tf.keras.Model`, `Layer` instances must be assigned to object attributes, typically in the constructor. See the documentation of `tf.train.Checkpoint` and `tf.keras.Model` for details. While the formats are the same, do not mix `save_weights` and `tf.train.Checkpoint`. Checkpoints saved by `Model.save_weights` should be loaded using `Model.load_weights`. 
Checkpoints saved using `tf.train.Checkpoint.save` should be restored using the corresponding `tf.train.Checkpoint.restore`. Prefer `tf.train.Checkpoint` over `save_weights` for training checkpoints. The TensorFlow format matches objects and variables by starting at a root object, `self` for `save_weights`, and greedily matching attribute names. For `Model.save` this is the `Model`, and for `Checkpoint.save` this is the `Checkpoint` even if the `Checkpoint` has a model attached. This means saving a `tf.keras.Model` using `save_weights` and loading into a `tf.train.Checkpoint` with a `Model` attached (or vice versa) will not match the `Model`'s variables. See the [guide to training checkpoints]( https://www.tensorflow.org/guide/checkpoint) for details on the TensorFlow format. Args: filepath: String or PathLike, path to the file to save the weights to. When saving in TensorFlow format, this is the prefix used for checkpoint files (multiple files are generated). Note that the '.h5' suffix causes weights to be saved in HDF5 format. overwrite: Whether to silently overwrite any existing file at the target location, or provide the user with a manual prompt. save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or '.keras' will default to HDF5 if `save_format` is `None`. Otherwise `None` defaults to 'tf'. options: Optional `tf.train.CheckpointOptions` object that specifies options for saving weights. Raises: ImportError: If `h5py` is not available when attempting to save in HDF5 format. """ model._assert_weights_created() filepath = io_utils.path_to_string(filepath) filepath_is_h5 = saving_utils.is_hdf5_filepath(filepath) if save_format is None: if filepath_is_h5: save_format = "h5" else: save_format = "tf" else: user_format = save_format.lower().strip() if user_format in ("tensorflow", "tf"): save_format = "tf" elif user_format in ("hdf5", "h5", "keras"): save_format = "h5" else: raise ValueError( f"Unknown format. Received: `save_format`={save_format}. " 'Was expecting one of {"tf", "h5"}.' ) if save_format == "tf" and filepath_is_h5: raise ValueError( 'save_weights got save_format="tf"/"tensorflow", but the ' f"filepath ({filepath}) looks like an HDF5 file. " 'Omit the ".h5"/".keras" when saving in TensorFlow format.' ) if save_format == "h5" and h5py is None: raise ImportError( "`save_weights` requires h5py when saving in hdf5, but h5py is " "not available. Try installing h5py package." ) if save_format == "tf": check_filepath = filepath + ".index" else: check_filepath = filepath # If file exists and should not be overwritten: if not overwrite and os.path.isfile(check_filepath): proceed = io_utils.ask_to_proceed_with_overwrite(check_filepath) if not proceed: return if save_format == "h5": with h5py.File(filepath, "w") as f: hdf5_format.save_weights_to_hdf5_group(f, model) else: if not tf.executing_eagerly(): # Call `get_session` to initialize any uninitialized variables. backend.get_session() model._checkpoint.write(filepath, options=options) # Record this checkpoint so it's visible from # tf.train.latest_checkpoint. tf.__internal__.train.update_checkpoint_state( save_dir=os.path.dirname(filepath), model_checkpoint_path=filepath, save_relative_paths=True, all_model_checkpoint_paths=[filepath], ) def load_weights( model, filepath, by_name=False, skip_mismatch=False, options=None ): """Loads all layer weights, either from a SavedModel or H5 weights file. If `by_name` is False weights are loaded based on the network's topology. 
This means the architecture should be the same as when the weights were saved. Note that layers that don't have weights are not taken into account in the topological ordering, so adding or removing layers is fine as long as they don't have weights. If `by_name` is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed. Only topological loading (`by_name=False`) is supported when loading weights from the TensorFlow format. Note that topological loading differs slightly between TensorFlow and HDF5 formats for user-defined classes inheriting from `tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the TensorFlow format loads based on the object-local names of attributes to which layers are assigned in the `Model`'s constructor. Args: filepath: String, path to the weights file to load. For weight files in TensorFlow format, this is the file prefix (the same as was passed to `save_weights`). This can also be a path to a SavedModel saved from `model.save`. by_name: Boolean, whether to load weights by name or by topological order. Only topological loading is supported for weight files in TensorFlow format. skip_mismatch: Boolean, whether to skip loading of layers where there is a mismatch in the number of weights, or a mismatch in the shape of the weight (only valid when `by_name=True`). options: Optional `tf.train.CheckpointOptions` object that specifies options for loading weights. Returns: When loading a weight file in TensorFlow format, returns the same status object as `tf.train.Checkpoint.restore`. When graph building, restore ops are run automatically as soon as the network is built (on first call for user-defined classes inheriting from `Model`, immediately if it is already built). When loading weights in HDF5 format, returns `None`. Raises: ImportError: If `h5py` is not available and the weight file is in HDF5 format. ValueError: If `skip_mismatch` is set to `True` when `by_name` is `False`. """ if backend.is_tpu_strategy(model._distribution_strategy): if model._distribution_strategy.extended.steps_per_run > 1 and ( not saving_utils.is_hdf5_filepath(filepath) ): spr = model._distribution_strategy.extended.steps_per_run raise ValueError( "Load weights is not implemented with TPUStrategy " "with `steps_per_run` greater than 1. The " f"`steps_per_run` is {spr}" ) if skip_mismatch and not by_name: raise ValueError( "When calling model.load_weights, skip_mismatch can only be " "set to True when by_name is True." ) filepath, save_format = _detect_save_format(filepath) if save_format == "tf": status = model._checkpoint.read(filepath, options) if by_name: raise NotImplementedError( "Weights may only be loaded based on topology into Models " "when loading TensorFlow-formatted weights " "(got by_name=True to load_weights)." ) if not tf.executing_eagerly(): session = backend.get_session() # Restore existing variables (if any) immediately, and set up a # streaming restore for any variables created in the future. tf.__internal__.tracking.streaming_restore( status=status, session=session ) status.assert_nontrivial_match() else: status = None if h5py is None: raise ImportError( "`load_weights` requires h5py package when loading weights " "from HDF5. Try installing h5py." ) if not model._is_graph_network and not model.built: raise ValueError( "Unable to load weights saved in HDF5 format into a " "subclassed Model which has not created its variables yet. 
" "Call the Model first, then load the weights." ) model._assert_weights_created() with h5py.File(filepath, "r") as f: if "layer_names" not in f.attrs and "model_weights" in f: f = f["model_weights"] if by_name: hdf5_format.load_weights_from_hdf5_group_by_name( f, model, skip_mismatch ) else: hdf5_format.load_weights_from_hdf5_group(f, model) # Perform any layer defined finalization of the layer state. for layer in model.layers: layer.finalize_state() return status def _detect_save_format(filepath): """Returns path to weights file and save format.""" filepath = io_utils.path_to_string(filepath) if saving_utils.is_hdf5_filepath(filepath): return filepath, "h5" # Filepath could be a TensorFlow checkpoint file prefix or SavedModel # directory. It's possible for filepath to be both a prefix and directory. # Prioritize checkpoint over SavedModel. if _is_readable_tf_checkpoint(filepath): save_format = "tf" elif tf.saved_model.contains_saved_model(filepath): ckpt_path = os.path.join( filepath, tf.saved_model.VARIABLES_DIRECTORY, tf.saved_model.VARIABLES_FILENAME, ) if _is_readable_tf_checkpoint(ckpt_path): filepath = ckpt_path save_format = "tf" else: raise ValueError( "Unable to load weights. filepath {} appears to be a " "SavedModel directory, but checkpoint either doesn't " "exist, or is incorrectly formatted.".format(filepath) ) else: # Not a TensorFlow checkpoint. This filepath is likely an H5 file that # doesn't have the hdf5/keras extensions. save_format = "h5" return filepath, save_format def _is_readable_tf_checkpoint(filepath): try: tf.compat.v1.train.NewCheckpointReader(filepath) return True except tf.errors.DataLossError: # The checkpoint is not readable in TensorFlow format. return False # Inject the load_model function to keras_deps to remove the dependency # from TFLite to TF-Keras. tf.__internal__.register_load_model_function(load_model)
tf-keras/tf_keras/saving/legacy/save.py/0
{ "file_path": "tf-keras/tf_keras/saving/legacy/save.py", "repo_id": "tf-keras", "token_count": 9559 }
187
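# --- Illustrative sketch (not part of the original dataset row above) ---
# Round-trips a small model through the two formats handled by the legacy
# save/load helpers in save.py. Assumes a TF 2.x / tf-keras environment
# (plus h5py for the HDF5 path); the /tmp paths are placeholders.
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(5, input_shape=(3,)), tf.keras.layers.Softmax()]
)
x = np.random.random((10, 3)).astype("float32")

model.save("/tmp/saved_model_dir")  # TensorFlow SavedModel directory
model.save("/tmp/model.h5")         # single HDF5 file

restored = tf.keras.models.load_model("/tmp/saved_model_dir")
assert np.allclose(model.predict(x), restored.predict(x))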
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions implementing Network SavedModel serialization."""

from tf_keras.saving.legacy.saved_model import constants
from tf_keras.saving.legacy.saved_model import model_serialization


# FunctionalModel serialization is pretty much the same as Model serialization.
class NetworkSavedModelSaver(model_serialization.ModelSavedModelSaver):
    """Network serialization."""

    @property
    def object_identifier(self):
        return constants.NETWORK_IDENTIFIER
tf-keras/tf_keras/saving/legacy/saved_model/network_serialization.py/0
{ "file_path": "tf-keras/tf_keras/saving/legacy/saved_model/network_serialization.py", "repo_id": "tf-keras", "token_count": 300 }
188
# Copyright 2022 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python-based idempotent model-saving functionality.""" import datetime import io import json import re import tempfile import threading import warnings import zipfile import numpy as np import tensorflow.compat.v2 as tf import tf_keras as keras from tf_keras import losses from tf_keras.engine import base_layer from tf_keras.optimizers import optimizer from tf_keras.saving.serialization_lib import ObjectSharingScope from tf_keras.saving.serialization_lib import deserialize_keras_object from tf_keras.saving.serialization_lib import serialize_keras_object from tf_keras.utils import generic_utils from tf_keras.utils import io_utils try: import h5py except ImportError: h5py = None # isort: off _CONFIG_FILENAME = "config.json" _METADATA_FILENAME = "metadata.json" _VARS_FNAME = "model.weights" # Will become e.g. "model.weights.h5" _ASSETS_DIRNAME = "assets" # A temporary flag to enable the new idempotent saving framework. _SAVING_V3_ENABLED = threading.local() _SAVING_V3_ENABLED.value = True ATTR_SKIPLIST = frozenset( { "_callable_losses", "_captured_weight_regularizer", "_checkpoint_dependencies", "_layer_checkpoint_dependencies", "_deferred_dependencies", "_eager_losses", "_inbound_nodes", "_inbound_nodes_value", "_output_layers", "_input_layers", "_keras_api_names", "_keras_api_names_v1", "_name_based_restores", "_outbound_nodes", "_outbound_nodes_value", "_saved_model_arg_spec", "_self_name_based_restores", "_self_saveable_object_factories", "_self_tracked_trackables", "_saved_model_inputs_spec", "_self_unconditional_checkpoint_dependencies", "_self_unconditional_deferred_dependencies", "_self_unconditional_dependency_names", "_tf_api_names", "_tf_api_names_v1", "_trainable_weights", "_non_trainable_weights", "_unconditional_checkpoint_dependencies", "_unconditional_dependency_names", "_updates", "_layer_call_argspecs", "inbound_nodes", "outbound_nodes", "input_shape", "output_shape", "submodules", "weights", "non_trainable_weights", "trainable_weights", "variables", "non_trainable_variables", "trainable_variables", "updates", # Would raise a warning if visited. "state_updates", # Would raise a warning if visited. } ) def save_model(model, filepath, weights_format="h5"): """Save a zip-archive representing a TF-Keras model to the given filepath. The zip-based archive contains the following structure: - JSON-based configuration file (config.json): Records of model, layer, and other trackables' configuration. - NPZ-based trackable state files, found in respective directories, such as model/states.npz, model/dense_layer/states.npz, etc. - Metadata file. The states of TF-Keras trackables (layers, optimizers, loss, and metrics) are automatically saved as long as they can be discovered through the attributes returned by `dir(Model)`. 
Typically, the state includes the variables associated with the trackable, but some specially purposed layers may contain more such as the vocabularies stored in the hashmaps. The trackables define how their states are saved by exposing `save_state()` and `load_state()` APIs. For the case of layer states, the variables will be visited as long as they are either 1) referenced via layer attributes, or 2) referenced via a container (list, tuple, or dict), and the container is referenced via a layer attribute. """ filepath = str(filepath) if not filepath.endswith(".keras"): raise ValueError( "Invalid `filepath` argument: expected a `.keras` extension. " f"Received: filepath={filepath}" ) if weights_format == "h5" and h5py is None: raise ImportError("h5py must be installed in order to save a model.") if not model.built: warnings.warn( "You are saving a model that has not yet been built. " "It might not contain any weights yet. " "Consider building the model first by calling it " "on some data.", stacklevel=2, ) saving_v3_enabled_value = getattr(_SAVING_V3_ENABLED, "value", False) _SAVING_V3_ENABLED.value = True with ObjectSharingScope(): serialized_model_dict = serialize_keras_object(model) config_json = json.dumps(serialized_model_dict) metadata_json = json.dumps( { "keras_version": keras.__version__, "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"), } ) # TODO(rameshsampath): Need a better logic for local vs remote path if is_remote_path(filepath): # Remote path. Zip to local memory byte io and copy to remote zip_filepath = io.BytesIO() else: zip_filepath = filepath try: with zipfile.ZipFile(zip_filepath, "w") as zf: with zf.open(_METADATA_FILENAME, "w") as f: f.write(metadata_json.encode()) with zf.open(_CONFIG_FILENAME, "w") as f: f.write(config_json.encode()) if weights_format == "h5": weights_store = H5IOStore( _VARS_FNAME + ".h5", archive=zf, mode="w" ) elif weights_format == "npz": weights_store = NpzIOStore( _VARS_FNAME + ".npz", archive=zf, mode="w" ) else: raise ValueError( "Unknown `weights_format` argument. " "Expected 'h5' or 'npz'. " f"Received: weights_format={weights_format}" ) asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="w") _save_state( model, weights_store=weights_store, assets_store=asset_store, inner_path="", visited_trackables=set(), ) weights_store.close() asset_store.close() if is_remote_path(filepath): with tf.io.gfile.GFile(filepath, "wb") as f: f.write(zip_filepath.getvalue()) except Exception as e: raise e finally: _SAVING_V3_ENABLED.value = saving_v3_enabled_value def load_model(filepath, custom_objects=None, compile=True, safe_mode=True): """Load a zip archive representing a TF-Keras model.""" filepath = str(filepath) if not filepath.endswith(".keras"): raise ValueError( "Invalid filename: expected a `.keras` extension. " f"Received: filepath={filepath}" ) saving_v3_enabled_value = getattr(_SAVING_V3_ENABLED, "value", False) _SAVING_V3_ENABLED.value = True try: with tf.io.gfile.GFile( filepath, mode="r+b" ) as gfile_handle, zipfile.ZipFile(gfile_handle, "r") as zf: with zf.open(_CONFIG_FILENAME, "r") as f: config_json = f.read() # Note: we should NOT use a custom JSON decoder. Anything that # needs custom decoding must be handled in deserialize_keras_object. config_dict = json.loads(config_json) if not compile: # Disable compilation config_dict["compile_config"] = None # Construct the model from the configuration file in the archive. 
with ObjectSharingScope(): model = deserialize_keras_object( config_dict, custom_objects, safe_mode=safe_mode ) all_filenames = zf.namelist() if _VARS_FNAME + ".h5" in all_filenames: weights_store = H5IOStore( _VARS_FNAME + ".h5", archive=zf, mode="r" ) elif _VARS_FNAME + ".npz" in all_filenames: weights_store = NpzIOStore( _VARS_FNAME + ".npz", archive=zf, mode="r" ) else: raise ValueError( f"Expected a {_VARS_FNAME}.h5 or {_VARS_FNAME}.npz file." ) if len(all_filenames) > 3: asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="r") else: asset_store = None _load_state( model, weights_store=weights_store, assets_store=asset_store, inner_path="", visited_trackables=set(), ) weights_store.close() if asset_store: asset_store.close() except Exception as e: raise e else: return model finally: _SAVING_V3_ENABLED.value = saving_v3_enabled_value def save_weights_only(model, filepath): """Save only the weights of a model to a target filepath (.weights.h5). Note: only supports h5 for now. """ # TODO: if h5 filepath is remote, create the file in a temporary directory # then upload it filepath = str(filepath) if not filepath.endswith(".weights.h5"): raise ValueError( "Invalid `filepath` argument: expected a `.weights.h5` extension. " f"Received: filepath={filepath}" ) weights_store = H5IOStore(filepath, mode="w") _save_state( model, weights_store=weights_store, assets_store=None, inner_path="", visited_trackables=set(), ) weights_store.close() def load_weights_only(model, filepath, skip_mismatch=False): """Load the weights of a model from a filepath (.keras or .weights.h5). Note: only supports h5 for now. """ temp_dir = None archive = None filepath = str(filepath) if filepath.endswith(".weights.h5"): # TODO: download file if h5 filepath is remote weights_store = H5IOStore(filepath, mode="r") elif filepath.endswith(".keras"): archive = zipfile.ZipFile(filepath, "r") weights_store = H5IOStore( _VARS_FNAME + ".h5", archive=archive, mode="r" ) _load_state( model, weights_store=weights_store, assets_store=None, inner_path="", skip_mismatch=skip_mismatch, visited_trackables=set(), ) weights_store.close() if temp_dir and tf.io.gfile.exists(temp_dir): tf.io.gfile.rmtree(temp_dir) if archive: archive.close() def is_remote_path(filepath): if re.match(r"^(/cns|/cfs|/gcs|.*://).*$", str(filepath)): return True return False def _write_to_zip_recursively(zipfile_to_save, system_path, zip_path): if not tf.io.gfile.isdir(system_path): zipfile_to_save.write(system_path, zip_path) else: for file_name in tf.io.gfile.listdir(system_path): system_file_path = tf.io.gfile.join(system_path, file_name) zip_file_path = tf.io.gfile.join(zip_path, file_name) _write_to_zip_recursively( zipfile_to_save, system_file_path, zip_file_path ) def _walk_trackable(trackable): for child_attr in sorted(dir(trackable), reverse=True): if child_attr.startswith("__") or child_attr in ATTR_SKIPLIST: continue try: child_obj = getattr(trackable, child_attr) except Exception: # Avoid raising the exception when visiting the attributes. continue yield child_attr, child_obj def _save_state( trackable, weights_store, assets_store, inner_path, visited_trackables ): # If the trackable has already been saved, skip it. 
if id(trackable) in visited_trackables: return if hasattr(trackable, "save_own_variables") and weights_store: trackable.save_own_variables(weights_store.make(inner_path)) if hasattr(trackable, "save_assets") and assets_store: trackable.save_assets(assets_store.make(inner_path)) visited_trackables.add(id(trackable)) # Recursively save state of children trackables (layers, optimizers, etc.) for child_attr, child_obj in _walk_trackable(trackable): if _is_keras_trackable(child_obj): _save_state( child_obj, weights_store, assets_store, inner_path=tf.io.gfile.join(inner_path, child_attr), visited_trackables=visited_trackables, ) elif isinstance(child_obj, (list, dict, tuple, set)): _save_container_state( child_obj, weights_store, assets_store, inner_path=tf.io.gfile.join(inner_path, child_attr), visited_trackables=visited_trackables, ) def _load_state( trackable, weights_store, assets_store, inner_path, skip_mismatch=False, visited_trackables=None, ): if visited_trackables and id(trackable) in visited_trackables: return if hasattr(trackable, "load_own_variables") and weights_store: if skip_mismatch: try: trackable.load_own_variables(weights_store.get(inner_path)) except Exception as e: warnings.warn( f"Could not load weights in object {trackable}. " "Skipping object. " f"Exception encountered: {e}", stacklevel=2, ) else: trackable.load_own_variables(weights_store.get(inner_path)) if hasattr(trackable, "load_assets") and assets_store: if skip_mismatch: try: trackable.load_assets(assets_store.get(inner_path)) except Exception as e: warnings.warn( f"Could not load assets in object {trackable}. " "Skipping object. " f"Exception encountered: {e}", stacklevel=2, ) else: trackable.load_assets(assets_store.get(inner_path)) if visited_trackables is not None: visited_trackables.add(id(trackable)) # Recursively load states for TF-Keras trackables such as layers/optimizers. for child_attr, child_obj in _walk_trackable(trackable): if _is_keras_trackable(child_obj): _load_state( child_obj, weights_store, assets_store, inner_path=tf.io.gfile.join(inner_path, child_attr), skip_mismatch=skip_mismatch, visited_trackables=visited_trackables, ) elif isinstance(child_obj, (list, dict, tuple, set)): _load_container_state( child_obj, weights_store, assets_store, inner_path=tf.io.gfile.join(inner_path, child_attr), skip_mismatch=skip_mismatch, visited_trackables=visited_trackables, ) def _save_container_state( container, weights_store, assets_store, inner_path, visited_trackables ): used_names = {} if isinstance(container, dict): container = list(container.values()) for trackable in container: if _is_keras_trackable(trackable): # Keeps layer name indexing in proper order # when duplicate layers are in container. if id(trackable) in visited_trackables: continue # Do NOT address the trackable via `trackable.name`, since # names are usually autogenerated and thus not reproducible # (i.e. they may vary across two instances of the same model). 
name = generic_utils.to_snake_case(trackable.__class__.__name__) if name in used_names: used_names[name] += 1 name = f"{name}_{used_names[name]}" else: used_names[name] = 0 _save_state( trackable, weights_store, assets_store, inner_path=tf.io.gfile.join(inner_path, name), visited_trackables=visited_trackables, ) def _load_container_state( container, weights_store, assets_store, inner_path, skip_mismatch, visited_trackables, ): used_names = {} if isinstance(container, dict): container = list(container.values()) for trackable in container: if _is_keras_trackable(trackable): # Keeps layer name indexing in proper order # when duplicate layers are in container. if visited_trackables and id(trackable) in visited_trackables: continue # Do NOT address the trackable via `trackable.name`, since # names are usually autogenerated and thus not reproducible # (i.e. they may vary across two instances of the same model). name = generic_utils.to_snake_case(trackable.__class__.__name__) if name in used_names: used_names[name] += 1 name = f"{name}_{used_names[name]}" else: used_names[name] = 0 _load_state( trackable, weights_store, assets_store, inner_path=tf.io.gfile.join(inner_path, name), skip_mismatch=skip_mismatch, visited_trackables=visited_trackables, ) class DiskIOStore: """Asset store backed by disk storage. If `archive` is specified, then `root_path` refers to the filename inside the archive. If `archive` is not specified, then `root_path` refers to the full path of the target directory. """ def __init__(self, root_path, archive=None, mode=None): self.mode = mode self.root_path = root_path self.archive = archive self.tmp_dir = None if self.archive: self.tmp_dir = get_temp_dir() if self.mode == "r": self.archive.extractall(path=self.tmp_dir) self.working_dir = tf.io.gfile.join(self.tmp_dir, self.root_path) if self.mode == "w": tf.io.gfile.makedirs(self.working_dir) else: if mode == "r": self.working_dir = root_path else: self.tmp_dir = get_temp_dir() self.working_dir = tf.io.gfile.join( self.tmp_dir, self.root_path ) tf.io.gfile.makedirs(self.working_dir) def make(self, path): if not path: return self.working_dir path = tf.io.gfile.join(self.working_dir, path) if not tf.io.gfile.exists(path): tf.io.gfile.makedirs(path) return path def get(self, path): if not path: return self.working_dir path = tf.io.gfile.join(self.working_dir, path) if tf.io.gfile.exists(path): return path return None def close(self): if self.mode == "w" and self.archive: _write_to_zip_recursively( self.archive, self.working_dir, self.root_path ) if self.tmp_dir and tf.io.gfile.exists(self.tmp_dir): tf.io.gfile.rmtree(self.tmp_dir) class H5IOStore: def __init__(self, root_path, archive=None, mode="r"): """Numerical variable store backed by HDF5. If `archive` is specified, then `root_path` refers to the filename inside the archive. If `archive` is not specified, then `root_path` refers to the path of the h5 file on disk. 
""" self.root_path = root_path self.mode = mode self.archive = archive self.io_file = None if self.archive: if self.mode == "w": self.io_file = io.BytesIO() else: self.io_file = self.archive.open(self.root_path, "r") self.h5_file = h5py.File(self.io_file, mode=self.mode) else: self.h5_file = h5py.File(root_path, mode=self.mode) def make(self, path): if not path: return self.h5_file.create_group("vars") return self.h5_file.create_group(path).create_group("vars") def get(self, path): if not path: return self.h5_file["vars"] if path in self.h5_file and "vars" in self.h5_file[path]: return self.h5_file[path]["vars"] return {} def close(self): self.h5_file.close() if self.mode == "w" and self.archive: self.archive.writestr(self.root_path, self.io_file.getvalue()) if self.io_file: self.io_file.close() class NpzIOStore: def __init__(self, root_path, archive=None, mode="r"): """Numerical variable store backed by NumPy.savez/load. If `archive` is specified, then `root_path` refers to the filename inside the archive. If `archive` is not specified, then `root_path` refers to the path of the npz file on disk. """ self.root_path = root_path self.mode = mode self.archive = archive if mode == "w": self.contents = {} else: if self.archive: self.f = archive.open(root_path, mode="r") else: self.f = open(root_path, mode="rb") self.contents = np.load(self.f, allow_pickle=True) def make(self, path): if not path: self.contents["__root__"] = {} return self.contents["__root__"] self.contents[path] = {} return self.contents[path] def get(self, path): if not path: if "__root__" in self.contents: return dict(self.contents["__root__"]) return {} if path in self.contents: return self.contents[path].tolist() return {} def close(self): if self.mode == "w": if self.archive: self.f = self.archive.open( self.root_path, mode="w", force_zip64=True ) else: self.f = open(self.root_path, mode="wb") np.savez(self.f, **self.contents) self.f.close() def get_temp_dir(): temp_dir = tempfile.mkdtemp() testfile = tempfile.TemporaryFile(dir=temp_dir) testfile.close() return temp_dir def _is_keras_trackable(obj): from tf_keras.metrics import base_metric # To avoid circular import return isinstance( obj, ( base_layer.Layer, optimizer.Optimizer, base_metric.Metric, losses.Loss, ), ) def saving_v3_enabled(): return getattr(_SAVING_V3_ENABLED, "value", True) # Some debugging utilities. def _print_h5_file(h5_file, prefix="", action=None): if not prefix: print(f"Keras weights file ({h5_file}) {action}:") if not hasattr(h5_file, "keys"): return for key in h5_file.keys(): print(f"...{prefix}{key}") _print_h5_file(h5_file[key], prefix=prefix + "...") def _print_zip_file(zipfile, action): io_utils.print_msg(f"Keras model archive {action}:") # Same as `ZipFile.printdir()` except for using Keras' printing utility. io_utils.print_msg( "%-46s %19s %12s" % ("File Name", "Modified ", "Size") ) for zinfo in zipfile.filelist: date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] io_utils.print_msg( "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size) )
tf-keras/tf_keras/saving/saving_lib.py/0
{ "file_path": "tf-keras/tf_keras/saving/saving_lib.py", "repo_id": "tf-keras", "token_count": 11633 }
189
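# --- Illustrative sketch (not part of the original dataset row above) ---
# Exercises the `.keras` zip archive and the weights-only `.weights.h5`
# store implemented in saving_lib.py. Assumes TF >= 2.12 with tf-keras;
# the /tmp paths are placeholders.
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])

model.save("/tmp/model.keras")  # zip with config.json, metadata.json, model.weights.h5
restored = tf.keras.models.load_model("/tmp/model.keras")

model.save_weights("/tmp/model.weights.h5")  # weights-only HDF5 store
restored.load_weights("/tmp/model.weights.h5")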
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for custom training loops.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils class LayerWithLosses(keras.layers.Layer): def build(self, input_shape): self.v = self.add_weight( name="hey", shape=(), initializer="ones", regularizer=keras.regularizers.l1(100), ) def call(self, inputs): self.add_loss(tf.reduce_sum(inputs)) return self.v * inputs class LayerWithMetrics(keras.layers.Layer): def build(self, input_shape): self.mean = keras.metrics.Mean(name="mean_object") def call(self, inputs): self.add_metric( tf.reduce_mean(inputs), name="mean_tensor", aggregation="mean" ) self.add_metric(self.mean(inputs)) return inputs class LayerWithTrainingArg(keras.layers.Layer): def call(self, inputs, training=None): self.training = training if training: return inputs else: return 0.0 * inputs def add_loss_step(defun): optimizer = keras.optimizers.legacy.adam.Adam() model = test_utils.get_model_from_layers( [LayerWithLosses()], input_shape=(10,) ) def train_step(x): with tf.GradientTape() as tape: model(x) assert len(model.losses) == 2 loss = tf.reduce_sum(model.losses) gradients = tape.gradient(loss, model.trainable_weights) optimizer.apply_gradients(zip(gradients, model.trainable_weights)) return loss if defun: train_step = tf.function(train_step) x = tf.ones((10, 10)) return train_step(x) def batch_norm_step(defun): optimizer = keras.optimizers.legacy.adadelta.Adadelta() model = test_utils.get_model_from_layers( [ keras.layers.BatchNormalization(momentum=0.9), keras.layers.Dense( 1, kernel_initializer="zeros", activation="softmax" ), ], input_shape=(10,), ) def train_step(x, y): with tf.GradientTape() as tape: y_pred = model(x, training=True) loss = keras.losses.binary_crossentropy(y, y_pred) gradients = tape.gradient(loss, model.trainable_weights) optimizer.apply_gradients(zip(gradients, model.trainable_weights)) return loss, model(x, training=False) if defun: train_step = tf.function(train_step) x, y = tf.ones((10, 10)), tf.ones((10, 1)) return train_step(x, y) def add_metric_step(defun): optimizer = keras.optimizers.legacy.rmsprop.RMSprop() model = test_utils.get_model_from_layers( [ LayerWithMetrics(), keras.layers.Dense( 1, kernel_initializer="zeros", activation="softmax" ), ], input_shape=(10,), ) def train_step(x, y): with tf.GradientTape() as tape: y_pred_1 = model(x) y_pred_2 = model(2 * x) y_pred = y_pred_1 + y_pred_2 loss = keras.losses.mean_squared_error(y, y_pred) gradients = tape.gradient(loss, model.trainable_weights) optimizer.apply_gradients(zip(gradients, model.trainable_weights)) assert len(model.metrics) == 2 return [m.result() for m in model.metrics] if defun: train_step = tf.function(train_step) x, y = tf.ones((10, 10)), tf.zeros((10, 
1)) metrics = train_step(x, y) assert np.allclose(metrics[0], 1.5) assert np.allclose(metrics[1], 1.5) return metrics @test_combinations.run_with_all_model_types class CustomTrainingLoopTest(test_combinations.TestCase): @parameterized.named_parameters( ("add_loss_step", add_loss_step), ("add_metric_step", add_metric_step), ("batch_norm_step", batch_norm_step), ) def test_eager_and_tf_function(self, train_step): eager_result = train_step(defun=False) fn_result = train_step(defun=True) self.assertAllClose(eager_result, fn_result) @parameterized.named_parameters(("eager", False), ("defun", True)) def test_training_arg_propagation(self, defun): model = test_utils.get_model_from_layers( [LayerWithTrainingArg()], input_shape=(1,) ) def train_step(x): return model(x), model(x, training=False), model(x, training=True) if defun: train_step = tf.function(train_step) x = tf.ones((1, 1)) results = train_step(x) self.assertAllClose(results[0], tf.zeros((1, 1))) self.assertAllClose(results[1], tf.zeros((1, 1))) self.assertAllClose(results[2], tf.ones((1, 1))) @parameterized.named_parameters(("eager", False), ("defun", True)) def test_learning_phase_propagation(self, defun): class MyModel(keras.layers.Layer): def __init__(self): super().__init__() self.layer = LayerWithTrainingArg() def call(self, inputs): return self.layer(inputs) model = MyModel() def train_step(x): no_learning_phase_out = model(x) self.assertFalse(model.layer.training) with keras.backend.learning_phase_scope(0): inf_learning_phase_out = model(x) self.assertEqual(model.layer.training, 0) with keras.backend.learning_phase_scope(1): train_learning_phase_out = model(x) self.assertEqual(model.layer.training, 1) return [ no_learning_phase_out, inf_learning_phase_out, train_learning_phase_out, ] if defun: train_step = tf.function(train_step) x = tf.ones((1, 1)) results = train_step(x) self.assertAllClose(results[0], tf.zeros((1, 1))) self.assertAllClose(results[1], tf.zeros((1, 1))) self.assertAllClose(results[2], tf.ones((1, 1))) @parameterized.named_parameters(("eager", False), ("defun", True)) def test_training_arg_priorities(self, defun): class MyModel(keras.layers.Layer): def __init__(self): super().__init__() self.layer = LayerWithTrainingArg() def call(self, inputs, training=False): return self.layer(inputs) model = MyModel() def train_step(x): explicit_out = model(x, training=True) default_out = model(x) with keras.backend.learning_phase_scope(1): parent_out = model(x, training=False) lr_out = model(x) return [explicit_out, default_out, parent_out, lr_out] if defun: train_step = tf.function(train_step) x = tf.ones((1, 1)) results = train_step(x) self.assertAllClose(results[0], tf.ones((1, 1))) self.assertAllClose(results[1], tf.zeros((1, 1))) self.assertAllClose(results[2], tf.zeros((1, 1))) self.assertAllClose(results[3], tf.ones((1, 1))) if __name__ == "__main__": tf.compat.v1.enable_eager_execution() tf.test.main()
tf-keras/tf_keras/tests/custom_training_loop_test.py/0
{ "file_path": "tf-keras/tf_keras/tests/custom_training_loop_test.py", "repo_id": "tf-keras", "token_count": 3638 }
190
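The test file above exercises the standard TF2 custom-training-loop pattern: run the forward pass under a `tf.GradientTape`, fold any layer-created losses from `model.losses` into the objective, and apply gradients manually. A minimal standalone sketch of that pattern follows (the toy model, regularizer, shapes, and learning rate are illustrative, not taken from the test):

```python
import tensorflow as tf

# Toy setup; the layer, regularizer, and learning rate are illustrative.
model = tf.keras.Sequential(
    [tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(0.01))]
)
optimizer = tf.keras.optimizers.SGD(0.1)


@tf.function
def train_step(x, y):
    with tf.GradientTape() as tape:
        y_pred = model(x, training=True)
        # Fold layer-created losses (here, the L2 penalty) into the objective,
        # just as add_loss_step does via tf.reduce_sum(model.losses).
        loss = tf.reduce_mean(tf.square(y - y_pred)) + tf.reduce_sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))
    return loss


print(train_step(tf.ones((8, 10)), tf.ones((8, 1))).numpy())
```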
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests temporal sample weights correctness using TF-Keras model.""" import numpy as np import tensorflow.compat.v2 as tf from tf_keras import layers from tf_keras import metrics from tf_keras.optimizers import legacy as optimizer_legacy from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils class Bias(layers.Layer): """Layer that add a bias to its inputs.""" def build(self, input_shape): self.bias = self.add_weight("bias", (1,), initializer="zeros") def call(self, inputs): return inputs + self.bias def compute_output_shape(self, input_shape): return input_shape def get_multi_io_temporal_model(): timesteps = 2 inp_1 = layers.Input(shape=(1,), name="input_1") inp_2 = layers.Input(shape=(1,), name="input_2") x = layers.RepeatVector(timesteps) out_1 = layers.TimeDistributed(Bias(), name="output_1") out_2 = layers.TimeDistributed(Bias(), name="output_2") branch_a = [inp_1, x, out_1] branch_b = [inp_2, x, out_2] return test_utils.get_multi_io_model(branch_a, branch_b) def get_compiled_multi_io_model_temporal(sample_weight_mode): model = get_multi_io_temporal_model() model.compile( optimizer=optimizer_legacy.gradient_descent.SGD(0.1), loss="mae", metrics=[metrics.MeanAbsoluteError(name="mae")], weighted_metrics=[metrics.MeanAbsoluteError(name="mae_2")], sample_weight_mode=sample_weight_mode, run_eagerly=test_utils.should_run_eagerly(), ) return model def run_with_different_sample_weight_mode_inputs(fn, partial_sw=True): """Executes the given function with different sample weight mode inputs. Args: fn: Training or eval function to execute. partial_sw: Boolean flag to indicate whether temporal sample weight mode should be set partially just for one output. """ model = get_compiled_multi_io_model_temporal(sample_weight_mode="temporal") fn(model) model = get_compiled_multi_io_model_temporal( sample_weight_mode=["temporal", "temporal"] ) fn(model) model = get_compiled_multi_io_model_temporal( sample_weight_mode={"output_1": "temporal", "output_2": "temporal"} ) fn(model) if partial_sw: model = get_compiled_multi_io_model_temporal( sample_weight_mode=[None, "temporal"] ) fn(model) # TODO(b/129700800): Enable after bug is fixed. # model = get_compiled_multi_io_model_temporal(sample_weight_mode={ # 'output_2': 'temporal' # }) # fn(model) @test_combinations.run_with_all_model_types(exclude_models=["sequential"]) @test_combinations.run_all_keras_modes(always_skip_v1=True) class TestMetricsCorrectnessMultiIOTemporal(test_combinations.TestCase): def custom_generator_multi_io_temporal(self, sample_weights=None): """Generator for getting data for temporal multi io model. Args: sample_weights: List of sample_weights. Yields: Tuple of inputs, label, sample weights data. 
""" batch_size = 3 num_samples = 3 iteration = 0 while True: batch_index = iteration * batch_size % num_samples iteration += 1 start = batch_index end = start + batch_size x = [self.x[start:end], self.x[start:end]] y = [self.y1[start:end], self.y2[start:end]] if sample_weights: sw = tf.nest.map_structure( lambda w: w[start:end], sample_weights ) else: sw = None yield x, y, sw def setUp(self): super(TestMetricsCorrectnessMultiIOTemporal, self).setUp() self.x = np.asarray([[0.0], [1.0], [2.0]]) self.y1 = np.asarray([[[0.5], [1.0]], [[2.0], [2.5]], [[3.5], [2.5]]]) self.y2 = np.asarray([[[0.5], [1.5]], [[2.0], [1.5]], [[3.5], [3.0]]]) # Without weights: # Epoch 1 - bias = 0 # y_pred_1 = [[[0.], [0.]], [[1.], [1.]], [[2.], [2.]]] # y_pred_2 = [[[0.], [0.]], [[1.], [1.]], [[2.], [2.]]] # mae (y1 - y_pred_1) = [[[.5], [1.]], [[1.], [1.5]], [[1.5], [.5]]] # mae = [[3/3, 3/3]] = [[1, 1]] = 2/2 = 1 # mae_2 (y2 - y_pred_2) = [[[.5], [1.5]], [[1.], [.5]], [[1.5], [1.]]] # mae_2 = [[3/3, 3/3]] = [[1, 1]] = 2/2 = 1 # Epoch 2 - bias = 0.1 (2/2 * 0.1) # y_pred_1 = [[[.1], [.1]], [[1.1], [1.1]], [[2.1], [2.1]]] # y_pred_2 = [[[.1], [.1]], [[1.1], [1.1]], [[2.1], [2.1]]] # mae (y1 - y_pred_1) = [[[.4], [.9]], [[.9], [1.4]], [[1.4], [.4]]] # mae = [[2.7/3, 2.7/3]] = [[0.9, 0.9]] = 1.8/2 = 0.9 # mae_2 (y2 - y_pred_2) = [[[.4], [1.4]], [[.9], [.4]], [[1.4], [.9]]] # mae_2 = [[2.7/3, 2.7/3]] = [[0.9, 0.9]] = 1.8/2 = # 0.9 self.expected_fit_result = { "output_1_mae": [1, 0.9], "output_2_mae": [1, 0.9], "output_1_mae_2": [1, 0.9], "output_2_mae_2": [1, 0.9], "loss": [2.0, 1.8], "output_1_loss": [1, 0.9], "output_2_loss": [1, 0.9], } self.sample_weight_1 = np.asarray([[0.5, 2.0], [0.5, 2.0], [0.5, 2.0]]) self.sample_weight_2 = np.asarray([[2.0, 0.5], [2.0, 0.5], [2.0, 0.5]]) # With weights: # Epoch 1 # y_pred_1 = [[[0.], [0.]], [[1.], [1.]], [[2.], [2.]]] # y_pred_2 = [[[0.], [0.]], [[1.], [1.]], [[2.], [2.]]] # mae (y1 - y_pred_1) = [[[.5], [1.]], [[1.], [1.5]], [[1.5], [.5]]] # with weights = [[[.5 * .5], [1 * 2]], # [[1 * .5], [1.5 * 2]], # [[1.5 * .5], [.5 * 2]]] # mae (w/o weights) = [[3/3, 3/3]] = [[1, 1]] = 2/2 = 1 # mae (weighted mean) = [[1.5/1.5, 6/6]] = [[1, 1]] = 2/2 = 1 # mae (sum over bs) = [[1.5/3, 6/3]] = [[.5, 2]] = 2.5/2 = 1.25 # mae_2 (y2 - y_pred_2) = [[[.5], [1.5]], [[1.], [.5]], [[1.5], [1.]]] # with weights = [[[.5 * 2], [1.5 * .5]], # [[1. * 2], [.5 * .5]], # [[1.5 * 2], [1. 
* .5]]] # mae_2 (w/o weights) = [[3/3, 3/3]] = [[1, 1]] = 2/2 = 1 # mae_2 (weighted mean) = [[6/6, 1.5/1.5]] = [[1, 1]] = 2/2 = 1 # mae_2 (sum over bs) = [[6/3, 1.5/3]] = [[2, .5]] = 2.5/2 = 1.25 # Epoch 2 - bias = 0.125 (2.5/2 * 0.1) # y_pred_1 = [[[0.125], [0.125]], [[1.125], [1.125]], [[2.125], # [2.125]]] # y_pred_2 = [[[0.125], [0.125]], [[1.125], [1.125]], [[2.125], # [2.125]]] # mae (y1 - y_pred_1) = [[[.375], [.875]], # [[.875], [1.375]], # [[1.375], [.375]]] # with weights = [[[.375 * .5], [.875 * 2.]], # [[.875 * .5], [1.375 * 2.]], # [[1.375 * .5], [.375 * 2.]]] # mae (w/o weights) = [[2.625/3, 2.625/3]] = (.875+.875)/2 = .875 # mae (weighted mean) = [[1.3125/1.5, 5.25/6]] = (.875+.875)/2 = .875 # mae (sum over bs) = [[1.3125/3, 5.25/3]] = (0.4375+1.75)/2 = # 1.09375 # mae_2 (y2 - y_pred_2) = [[[.375], [1.375]], # [[.875], [.375]], # [[1.375], [.875]]] # with weights = [[[.375 * 2.], [1.375 * .5]], # [[.875 * 2.], [.375 * .5]], # [[1.375 * 2.], [.875 * .5]]] # mae_2 (w/o weights) = [[2.625/3, 2.625/3]] = (.875+.875)/2 = .875 # mae_2 (weighted mean) = [[5.25/6, 1.3125/1.5]] = (.875+.875)/2 = # .875 # mae_2 (sum over bs) = [[5.25/3, 1.3125/3]] = (1.75+0.4375)/2 = # 1.09375 self.expected_fit_result_with_weights = { "output_1_mae": [1, 0.875], "output_2_mae": [1, 0.875], "output_1_mae_2": [1, 0.875], "output_2_mae_2": [1, 0.875], "loss": [2.5, 2.1875], "output_1_loss": [1.25, 1.09375], "output_2_loss": [1.25, 1.09375], } self.expected_fit_result_with_weights_output_2 = { "output_1_mae": [1.0, 0.9], "output_2_mae": [1, 0.875], "output_1_mae_2": [1.0, 0.9], "output_2_mae_2": [1.0, 0.875], "loss": [2.25, 1.99375], "output_1_loss": [1.0, 0.9], "output_2_loss": [1.25, 1.09375], } # In the order: 'loss', 'output_1_loss', 'output_2_loss', # 'output_1_mae', 'output_1_mae_2', # 'output_2_mae', 'output_2_mae_2' self.expected_batch_result_with_weights = [ 2.1875, 1.09375, 1.09375, 0.875, 0.875, 0.875, 0.875, ] self.expected_batch_result_with_weights_output_2 = [ 1.99375, 0.9, 1.09375, 0.9, 0.9, 0.875, 0.875, ] self.expected_batch_result = [1.8, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9] def test_fit(self): def _train_and_assert(model): history = model.fit( [self.x, self.x], [self.y1, self.y2], batch_size=3, epochs=2, shuffle=False, ) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) run_with_different_sample_weight_mode_inputs(_train_and_assert) def test_fit_with_sample_weight(self): def _train_and_assert(model): history = model.fit( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, batch_size=3, epochs=2, shuffle=False, ) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) run_with_different_sample_weight_mode_inputs( _train_and_assert, partial_sw=False ) def test_fit_with_partial_sample_weight(self): def _train_and_assert(model): history = model.fit( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_2": self.sample_weight_2, }, batch_size=3, epochs=2, shuffle=False, ) for ( key, value, ) in self.expected_fit_result_with_weights_output_2.items(): self.assertAllClose(history.history[key], value, 1e-3) run_with_different_sample_weight_mode_inputs(_train_and_assert) def test_eval(self): def _eval_and_assert(model): model.train_on_batch([self.x, self.x], [self.y1, self.y2]) eval_result = model.evaluate( [self.x, self.x], [self.y1, self.y2], batch_size=3 ) self.assertAllClose(eval_result, 
self.expected_batch_result, 1e-3) run_with_different_sample_weight_mode_inputs(_eval_and_assert) def test_eval_with_sample_weight(self): def _eval_and_assert(model): model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) eval_result = model.evaluate( [self.x, self.x], [self.y1, self.y2], batch_size=3, sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights, 1e-3 ) run_with_different_sample_weight_mode_inputs( _eval_and_assert, partial_sw=False ) def test_eval_with_partial_sample_weight(self): def _eval_and_assert(model): model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_2": self.sample_weight_2, }, ) eval_result = model.evaluate( [self.x, self.x], [self.y1, self.y2], batch_size=3, sample_weight={ "output_2": self.sample_weight_2, }, ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights_output_2, 1e-3, ) run_with_different_sample_weight_mode_inputs(_eval_and_assert) def test_train_on_batch(self): def _train_and_assert(model): for _ in range(2): result = model.train_on_batch( [self.x, self.x], [self.y1, self.y2] ) self.assertAllClose(result, self.expected_batch_result, 1e-3) run_with_different_sample_weight_mode_inputs(_train_and_assert) def test_train_on_batch_with_sample_weight(self): def _train_and_assert(model): for _ in range(2): result = model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) self.assertAllClose( result, self.expected_batch_result_with_weights, 1e-3 ) run_with_different_sample_weight_mode_inputs( _train_and_assert, partial_sw=False ) def test_train_on_batch_with_partial_sample_weight(self): def _train_and_assert(model): for _ in range(2): result = model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_2": self.sample_weight_2, }, ) self.assertAllClose( result, self.expected_batch_result_with_weights_output_2, 1e-3 ) run_with_different_sample_weight_mode_inputs(_train_and_assert) def test_test_on_batch(self): def _test_and_assert(model): model.train_on_batch([self.x, self.x], [self.y1, self.y2]) result = model.test_on_batch([self.x, self.x], [self.y1, self.y2]) self.assertAllClose(result, self.expected_batch_result, 1e-3) run_with_different_sample_weight_mode_inputs(_test_and_assert) def test_test_on_batch_with_sample_weight(self): def _test_and_assert(model): model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) result = model.test_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) self.assertAllClose( result, self.expected_batch_result_with_weights, 1e-3 ) run_with_different_sample_weight_mode_inputs( _test_and_assert, partial_sw=False ) def test_test_on_batch_with_partial_sample_weight(self): def _test_and_assert(model): model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_2": self.sample_weight_2, }, ) result = model.test_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_2": self.sample_weight_2, }, ) self.assertAllClose( result, self.expected_batch_result_with_weights_output_2, 1e-3 ) run_with_different_sample_weight_mode_inputs(_test_and_assert) def 
test_fit_generator(self): def _train_and_assert(model): history = model.fit_generator( self.custom_generator_multi_io_temporal(), steps_per_epoch=1, epochs=2, ) for key, value in self.expected_fit_result.items(): self.assertAllClose(history.history[key], value, 1e-3) run_with_different_sample_weight_mode_inputs(_train_and_assert) def test_fit_generator_with_sample_weight(self): def _train_and_assert(model): history = model.fit_generator( self.custom_generator_multi_io_temporal( sample_weights=[self.sample_weight_1, self.sample_weight_2] ), steps_per_epoch=1, epochs=2, ) for key, value in self.expected_fit_result_with_weights.items(): self.assertAllClose(history.history[key], value, 1e-3) run_with_different_sample_weight_mode_inputs( _train_and_assert, partial_sw=False ) def test_fit_generator_with_partial_sample_weight(self): def _train_and_assert(model): history = model.fit_generator( self.custom_generator_multi_io_temporal( sample_weights={"output_2": self.sample_weight_2} ), steps_per_epoch=1, epochs=2, ) for ( key, value, ) in self.expected_fit_result_with_weights_output_2.items(): self.assertAllClose(history.history[key], value, 1e-3) run_with_different_sample_weight_mode_inputs(_train_and_assert) def test_eval_generator(self): def _test_and_assert(model): model.train_on_batch([self.x, self.x], [self.y1, self.y2]) eval_result = model.evaluate_generator( self.custom_generator_multi_io_temporal(), steps=1 ) self.assertAllClose(eval_result, self.expected_batch_result, 1e-3) run_with_different_sample_weight_mode_inputs(_test_and_assert) def test_eval_generator_with_sample_weight(self): def _test_and_assert(model): model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_1": self.sample_weight_1, "output_2": self.sample_weight_2, }, ) eval_result = model.evaluate_generator( self.custom_generator_multi_io_temporal( sample_weights=[self.sample_weight_1, self.sample_weight_2] ), steps=2, ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights, 1e-3 ) run_with_different_sample_weight_mode_inputs( _test_and_assert, partial_sw=False ) def test_eval_generator_with_partial_sample_weight(self): def _test_and_assert(model): model.train_on_batch( [self.x, self.x], [self.y1, self.y2], sample_weight={ "output_2": self.sample_weight_2, }, ) eval_result = model.evaluate_generator( self.custom_generator_multi_io_temporal( sample_weights={"output_2": self.sample_weight_2} ), steps=2, ) self.assertAllClose( eval_result, self.expected_batch_result_with_weights_output_2, 1e-3, ) run_with_different_sample_weight_mode_inputs(_test_and_assert) def test_error_on_fit_with_class_weight(self): def _train_and_assert(model): with self.assertRaises(ValueError): model.fit( [self.x, self.x], [self.y1, self.y2], class_weight={"output_1": {0.5: 0.5, 2.0: 0.5, 3.5: 0.5}}, batch_size=3, epochs=2, shuffle=False, ) run_with_different_sample_weight_mode_inputs(_train_and_assert) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/tests/temporal_sample_weights_correctness_test.py/0
{ "file_path": "tf-keras/tf_keras/tests/temporal_sample_weights_correctness_test.py", "repo_id": "tf-keras", "token_count": 12449 }
191
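The long inline comments in the test above derive the expected weighted metrics by hand. A small standalone NumPy check of the epoch-1 numbers for `output_1` (the values are copied from the test's `setUp`; the script itself is illustrative only):

```python
import numpy as np

# Values copied from setUp: y1, epoch-1 predictions (bias 0), sample_weight_1.
y_true = np.array([[[0.5], [1.0]], [[2.0], [2.5]], [[3.5], [2.5]]])
y_pred = np.array([[[0.0], [0.0]], [[1.0], [1.0]], [[2.0], [2.0]]])
weights = np.array([[0.5, 2.0], [0.5, 2.0], [0.5, 2.0]])

abs_err = np.abs(y_true - y_pred)[..., 0]              # shape (3, 2)
print(abs_err.mean())                                  # 1.0  -> output_1_mae
print((abs_err * weights).sum() / weights.sum())       # 1.0  -> output_1_mae_2
print(((abs_err * weights).sum(axis=0) / 3).mean())    # 1.25 -> output_1_loss
```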
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for metrics_utils.""" import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized from tf_keras import backend from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils from tf_keras.utils import metrics_utils @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class RaggedSizeOpTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters( [ {"x_list": [1], "y_list": [2]}, {"x_list": [1, 2], "y_list": [2, 3]}, {"x_list": [1, 2, 4], "y_list": [2, 3, 5]}, {"x_list": [[1, 2], [3, 4]], "y_list": [[2, 3], [5, 6]]}, ] ) def test_passing_dense_tensors(self, x_list, y_list): x = tf.constant(x_list) y = tf.constant(y_list) [x, y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values( [x, y] ) x.shape.assert_is_compatible_with(y.shape) @parameterized.parameters( [ { "x_list": [1], }, { "x_list": [1, 2], }, { "x_list": [1, 2, 4], }, { "x_list": [[1, 2], [3, 4]], }, ] ) def test_passing_one_dense_tensor(self, x_list): x = tf.constant(x_list) [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x]) @parameterized.parameters( [ {"x_list": [1], "y_list": [2]}, {"x_list": [1, 2], "y_list": [2, 3]}, {"x_list": [1, 2, 4], "y_list": [2, 3, 5]}, {"x_list": [[1, 2], [3, 4]], "y_list": [[2, 3], [5, 6]]}, {"x_list": [[1, 2], [3, 4], [1]], "y_list": [[2, 3], [5, 6], [3]]}, {"x_list": [[1, 2], [], [1]], "y_list": [[2, 3], [], [3]]}, ] ) def test_passing_both_ragged(self, x_list, y_list): x = tf.ragged.constant(x_list) y = tf.ragged.constant(y_list) [x, y], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values( [x, y] ) x.shape.assert_is_compatible_with(y.shape) @parameterized.parameters( [ { "x_list": [1], }, { "x_list": [1, 2], }, { "x_list": [1, 2, 4], }, { "x_list": [[1, 2], [3, 4]], }, { "x_list": [[1, 2], [3, 4], [1]], }, { "x_list": [[1, 2], [], [1]], }, ] ) def test_passing_one_ragged(self, x_list): x = tf.ragged.constant(x_list) [x], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values([x]) @parameterized.parameters( [ {"x_list": [1], "y_list": [2], "mask_list": [0]}, {"x_list": [1, 2], "y_list": [2, 3], "mask_list": [0, 1]}, {"x_list": [1, 2, 4], "y_list": [2, 3, 5], "mask_list": [1, 1, 1]}, { "x_list": [[1, 2], [3, 4]], "y_list": [[2, 3], [5, 6]], "mask_list": [[1, 1], [0, 1]], }, { "x_list": [[1, 2], [3, 4], [1]], "y_list": [[2, 3], [5, 6], [3]], "mask_list": [[1, 1], [0, 0], [1]], }, { "x_list": [[1, 2], [], [1]], "y_list": [[2, 3], [], [3]], "mask_list": [[1, 1], [], [0]], }, ] ) def test_passing_both_ragged_with_mask(self, x_list, y_list, mask_list): x = tf.ragged.constant(x_list) y = tf.ragged.constant(y_list) mask = tf.ragged.constant(mask_list) [ x, y, ], mask = metrics_utils.ragged_assert_compatible_and_get_flat_values( [x, y], mask ) 
x.shape.assert_is_compatible_with(y.shape) y.shape.assert_is_compatible_with(mask.shape) @parameterized.parameters( [ {"x_list": [1], "mask_list": [0]}, {"x_list": [1, 2], "mask_list": [0, 1]}, {"x_list": [1, 2, 4], "mask_list": [1, 1, 1]}, {"x_list": [[1, 2], [3, 4]], "mask_list": [[1, 1], [0, 1]]}, { "x_list": [[1, 2], [3, 4], [1]], "mask_list": [[1, 1], [0, 0], [1]], }, {"x_list": [[1, 2], [], [1]], "mask_list": [[1, 1], [], [0]]}, ] ) def test_passing_one_ragged_with_mask(self, x_list, mask_list): x = tf.ragged.constant(x_list) mask = tf.ragged.constant(mask_list) [x], mask = metrics_utils.ragged_assert_compatible_and_get_flat_values( [x], mask ) x.shape.assert_is_compatible_with(mask.shape) @parameterized.parameters( [ {"x_list": [[[1, 3]]], "y_list": [[2, 3]]}, ] ) def test_failing_different_ragged_and_dense_ranks(self, x_list, y_list): x = tf.ragged.constant(x_list) y = tf.ragged.constant(y_list) with self.assertRaises(ValueError): [ x, y, ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values( [x, y] ) @parameterized.parameters( [ {"x_list": [[[1, 3]]], "y_list": [[[2, 3]]], "mask_list": [[0, 1]]}, ] ) def test_failing_different_mask_ranks(self, x_list, y_list, mask_list): x = tf.ragged.constant(x_list) y = tf.ragged.constant(y_list) mask = tf.ragged.constant(mask_list) with self.assertRaises(ValueError): [ x, y, ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values( [x, y], mask ) # we do not support such cases that ragged_ranks are different but overall # dimension shapes and sizes are identical due to adding too much # performance overheads to the overall use cases. def test_failing_different_ragged_ranks(self): dt = tf.constant([[[1, 2]]]) # adding a ragged dimension x = tf.RaggedTensor.from_row_splits(dt, row_splits=[0, 1]) y = tf.ragged.constant([[[[1, 2]]]]) with self.assertRaises(ValueError): [ x, y, ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values( [x, y] ) @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"])) class FilterTopKTest(tf.test.TestCase, parameterized.TestCase): def test_one_dimensional(self): x = tf.constant([0.3, 0.1, 0.2, -0.5, 42.0]) top_1 = self.evaluate(metrics_utils._filter_top_k(x=x, k=1)) top_2 = self.evaluate(metrics_utils._filter_top_k(x=x, k=2)) top_3 = self.evaluate(metrics_utils._filter_top_k(x=x, k=3)) self.assertAllClose( top_1, [ metrics_utils.NEG_INF, metrics_utils.NEG_INF, metrics_utils.NEG_INF, metrics_utils.NEG_INF, 42.0, ], ) self.assertAllClose( top_2, [ 0.3, metrics_utils.NEG_INF, metrics_utils.NEG_INF, metrics_utils.NEG_INF, 42.0, ], ) self.assertAllClose( top_3, [0.3, metrics_utils.NEG_INF, 0.2, metrics_utils.NEG_INF, 42.0], ) def test_three_dimensional(self): x = tf.constant( [ [[0.3, 0.1, 0.2], [-0.3, -0.2, -0.1]], [[5.0, 0.2, 42.0], [-0.3, -0.6, -0.99]], ] ) top_2 = self.evaluate(metrics_utils._filter_top_k(x=x, k=2)) self.assertAllClose( top_2, [ [ [0.3, metrics_utils.NEG_INF, 0.2], [metrics_utils.NEG_INF, -0.2, -0.1], ], [ [5.0, metrics_utils.NEG_INF, 42.0], [-0.3, -0.6, metrics_utils.NEG_INF], ], ], ) def test_handles_dynamic_shapes(self): # See b/150281686. # GOOGLE_INTERNAL def _identity(x): return x def _filter_top_k(x): # This loses the static shape. 
x = tf.numpy_function(_identity, (x,), tf.float32) return metrics_utils._filter_top_k(x=x, k=2) x = tf.constant([0.3, 0.1, 0.2, -0.5, 42.0]) top_2 = self.evaluate(_filter_top_k(x)) self.assertAllClose( top_2, [ 0.3, metrics_utils.NEG_INF, metrics_utils.NEG_INF, metrics_utils.NEG_INF, 42.0, ], ) class MatchesMethodsTest(tf.test.TestCase, parameterized.TestCase): def test_sparse_categorical_matches(self): matches_method = metrics_utils.sparse_categorical_matches # Test return tensor is type float y_true = tf.constant(np.random.randint(0, 7, (6,))) y_pred = tf.constant(np.random.random((6, 7))) self.assertEqual(matches_method(y_true, y_pred).dtype, backend.floatx()) # Tests that resulting Tensor always has same shape as y_true. Tests # from 1 dim to 4 dims dims = [] for _ in range(4): dims.append(np.random.randint(1, 7)) y_true = tf.constant(np.random.randint(0, 7, dims)) y_pred = tf.constant(np.random.random(dims + [3])) self.assertEqual(matches_method(y_true, y_pred).shape, y_true.shape) # Test correctness if the shape of y_true is (num_samples,) y_true = tf.constant([1.0, 0.0, 0.0, 0.0]) y_pred = tf.constant([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]]) self.assertAllEqual( matches_method(y_true, y_pred), [0.0, 1.0, 1.0, 1.0] ) # Test correctness if the shape of y_true is (num_samples, 1) y_true = tf.constant([[1.0], [0.0], [0.0], [0.0]]) y_pred = tf.constant([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]]) self.assertAllEqual( matches_method(y_true, y_pred), [[0.0], [1.0], [1.0], [1.0]] ) # Test correctness if the shape of y_true is (batch_size, seq_length) # and y_pred is (batch_size, seq_length, num_classes) y_pred = tf.constant( [ [[0.2, 0.3, 0.1], [0.1, 0.2, 0.7]], [[0.3, 0.2, 0.1], [0.7, 0.2, 0.1]], ] ) y_true = tf.constant([[1, 0], [1, 0]]) self.assertAllEqual( matches_method(y_true, y_pred), [[1.0, 0.0], [0.0, 1.0]] ) def test_sparse_top_k_categorical_matches(self): matches_method = metrics_utils.sparse_top_k_categorical_matches # Test return tensor is type float y_true = tf.constant(np.random.randint(0, 7, (6,))) y_pred = tf.constant(np.random.random((6, 7)), dtype=tf.float32) self.assertEqual( matches_method(y_true, y_pred, 1).dtype, backend.floatx() ) # Tests that resulting Tensor always has same shape as y_true. 
Tests # from 1 dim to 4 dims dims = [] for _ in range(4): dims.append(np.random.randint(1, 7)) y_true = tf.constant(np.random.randint(0, 7, dims)) y_pred = tf.constant(np.random.random(dims + [3]), dtype=tf.float32) self.assertEqual( matches_method(y_true, y_pred, 1).shape, y_true.shape ) # Test correctness if the shape of y_true is (num_samples,) for k = # 1,2,3 y_true = tf.constant([1.0, 0.0, 0.0, 0.0]) y_pred = tf.constant( [[0.7, 0.2, 0.1], [0.5, 0.3, 0.2], [0.6, 0.3, 0.1], [0.0, 0.1, 0.9]] ) self.assertAllEqual( matches_method(y_true, y_pred, 1), [0.0, 1.0, 1.0, 0.0] ) self.assertAllEqual( matches_method(y_true, y_pred, 2), [1.0, 1.0, 1.0, 0.0] ) self.assertAllEqual( matches_method(y_true, y_pred, 3), [1.0, 1.0, 1.0, 1.0] ) # Test correctness if the shape of y_true is (num_samples, 1) # for k = 1,2,3 y_true = tf.constant([[1.0], [0.0], [0.0], [0.0]]) y_pred = tf.constant( [[0.7, 0.2, 0.1], [0.5, 0.3, 0.2], [0.6, 0.3, 0.1], [0.0, 0.1, 0.9]] ) self.assertAllEqual( matches_method(y_true, y_pred, 1), [[0.0], [1.0], [1.0], [0.0]] ) self.assertAllEqual( matches_method(y_true, y_pred, 2), [[1.0], [1.0], [1.0], [0.0]] ) self.assertAllEqual( matches_method(y_true, y_pred, 3), [[1.0], [1.0], [1.0], [1.0]] ) # Test correctness if the shape of y_true is (batch_size, seq_length) # and y_pred is (batch_size, seq_length, num_classes) for k = 1,2,3 y_pred = tf.constant( [ [[0.2, 0.3, 0.1], [0.1, 0.2, 0.7]], [[0.3, 0.2, 0.1], [0.7, 0.2, 0.1]], ] ) y_true = tf.constant([[1, 0], [1, 0]]) self.assertAllEqual( matches_method(y_true, y_pred, 1), [[1.0, 0.0], [0.0, 1.0]] ) self.assertAllEqual( matches_method(y_true, y_pred, 2), [[1.0, 0.0], [1.0, 1.0]] ) self.assertAllEqual( matches_method(y_true, y_pred, 3), [[1.0, 1.0], [1.0, 1.0]] ) def test_binary_matches(self): matches_method = metrics_utils.binary_matches # Test return tensor is type float y_true = tf.constant(np.random.random((6, 7))) y_pred = tf.constant(np.random.random((6, 7))) self.assertEqual( matches_method(y_true, y_pred, 0.5).dtype, backend.floatx() ) # Tests that resulting Tensor always has same shape as y_true. Tests # from 1 dim to 4 dims. 
dims = [] for _ in range(4): dims.append(np.random.randint(1, 7)) y_true = y_pred = tf.constant(np.random.random(dims)) self.assertEqual( matches_method(y_true, y_pred, 0.0).shape, y_true.shape ) # Testing for correctness shape (num_samples, 1) y_true = tf.constant([[1.0], [0.0], [1.0], [1.0]]) y_pred = tf.constant([[0.75], [0.2], [0.2], [0.75]]) self.assertAllEqual( matches_method(y_true, y_pred, 0.5), [[1.0], [1.0], [0.0], [1.0]] ) # Testing for correctness shape (num_samples,) y_true = tf.constant([1.0, 0.0, 1.0, 1.0]) y_pred = tf.constant([0.75, 0.2, 0.2, 0.75]) self.assertAllEqual( matches_method(y_true, y_pred, 0.5), [1.0, 1.0, 0.0, 1.0] ) # Testing for correctness batches of sequences # shape (num_samples, seq_len) y_true = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0]]) y_pred = tf.constant( [[0.75, 0.2], [0.2, 0.75], [0.2, 0.75], [0.75, 0.2]] ) self.assertAllEqual( matches_method(y_true, y_pred, 0.5), [[1.0, 1.0], [1.0, 1.0], [0.0, 0.0], [1.0, 1.0]], ) @test_utils.run_v2_only class UpdateConfusionMatrixVarTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): self.tp = metrics_utils.ConfusionMatrix.TRUE_POSITIVES self.tn = metrics_utils.ConfusionMatrix.TRUE_NEGATIVES self.fp = metrics_utils.ConfusionMatrix.FALSE_POSITIVES self.fn = metrics_utils.ConfusionMatrix.FALSE_NEGATIVES self.variables_to_update = { self.tp: tf.Variable([0], dtype=tf.float32), self.tn: tf.Variable([0], dtype=tf.float32), self.fp: tf.Variable([0], dtype=tf.float32), self.fn: tf.Variable([0], dtype=tf.float32), } def test_without_sample_weight(self): y_true = tf.constant([[1, 1, 0], [0, 0, 1]]) y_pred = tf.constant([[0.8, 0.7, 0.1], [0.1, 0.6, 0.4]]) thresholds = [0.5] metrics_utils.update_confusion_matrix_variables( variables_to_update=self.variables_to_update, y_true=y_true, y_pred=y_pred, thresholds=thresholds, ) self.assertEqual(self.variables_to_update[self.tp].numpy()[0], 2) self.assertEqual(self.variables_to_update[self.tn].numpy()[0], 2) self.assertEqual(self.variables_to_update[self.fp].numpy()[0], 1) self.assertEqual(self.variables_to_update[self.fn].numpy()[0], 1) def test_with_sample_weight(self): y_true = tf.constant([[1, 1, 0], [0, 0, 1]]) y_pred = tf.constant([[0.8, 0.7, 0.1], [0.1, 0.6, 0.4]]) thresholds = [0.5] sample_weight = [2, 1] metrics_utils.update_confusion_matrix_variables( variables_to_update=self.variables_to_update, y_true=y_true, y_pred=y_pred, thresholds=thresholds, sample_weight=sample_weight, ) self.assertEqual(self.variables_to_update[self.tp].numpy()[0], 4) self.assertEqual(self.variables_to_update[self.tn].numpy()[0], 3) self.assertEqual(self.variables_to_update[self.fp].numpy()[0], 1) self.assertEqual(self.variables_to_update[self.fn].numpy()[0], 1) def test_with_class_id(self): y_true = tf.constant([[1, 1, 0], [0, 0, 1]]) y_pred = tf.constant([[0.8, 0.7, 0.1], [0.1, 0.6, 0.4]]) thresholds = [0.5] class_id = 2 metrics_utils.update_confusion_matrix_variables( variables_to_update=self.variables_to_update, y_true=y_true, y_pred=y_pred, thresholds=thresholds, class_id=class_id, ) self.assertEqual(self.variables_to_update[self.tp].numpy()[0], 0) self.assertEqual(self.variables_to_update[self.tn].numpy()[0], 1) self.assertEqual(self.variables_to_update[self.fp].numpy()[0], 0) self.assertEqual(self.variables_to_update[self.fn].numpy()[0], 1) def test_with_sample_weight_and_classid(self): y_true = tf.constant([[1, 1, 0], [0, 0, 1]]) y_pred = tf.constant([[0.8, 0.7, 0.1], [0.1, 0.6, 0.4]]) thresholds = [0.5] sample_weight = [2, 1] class_id = 2 
metrics_utils.update_confusion_matrix_variables( variables_to_update=self.variables_to_update, y_true=y_true, y_pred=y_pred, thresholds=thresholds, sample_weight=sample_weight, class_id=class_id, ) self.assertEqual(self.variables_to_update[self.tp].numpy()[0], 0) self.assertEqual(self.variables_to_update[self.tn].numpy()[0], 2) self.assertEqual(self.variables_to_update[self.fp].numpy()[0], 0) self.assertEqual(self.variables_to_update[self.fn].numpy()[0], 1) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/utils/metrics_utils_test.py/0
{ "file_path": "tf-keras/tf_keras/utils/metrics_utils_test.py", "repo_id": "tf-keras", "token_count": 11043 }
192
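The confusion-matrix tests above expect `tp=2, tn=2, fp=1, fn=1` at a 0.5 threshold without sample weights. Those counts can be reproduced with plain TensorFlow ops as a cross-check (illustrative only, not how `update_confusion_matrix_variables` is implemented internally):

```python
import tensorflow as tf

y_true = tf.cast(tf.constant([[1, 1, 0], [0, 0, 1]]), tf.bool)
y_pred = tf.constant([[0.8, 0.7, 0.1], [0.1, 0.6, 0.4]]) > 0.5  # threshold

tp = tf.reduce_sum(tf.cast(y_true & y_pred, tf.int32))    # -> 2
tn = tf.reduce_sum(tf.cast(~y_true & ~y_pred, tf.int32))  # -> 2
fp = tf.reduce_sum(tf.cast(~y_true & y_pred, tf.int32))   # -> 1
fn = tf.reduce_sum(tf.cast(y_true & ~y_pred, tf.int32))   # -> 1
print(tp.numpy(), tn.numpy(), fp.numpy(), fn.numpy())
```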
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for timed_threads.""" import time import tensorflow.compat.v2 as tf from absl import logging import tf_keras as keras from tf_keras.testing_infra import test_combinations from tf_keras.testing_infra import test_utils from tf_keras.utils import timed_threads @test_utils.run_v2_only class TimedThreadTest(test_combinations.TestCase): def test_timed_thread_run(self): class LogThread(timed_threads.TimedThread): def on_interval(self): logging.info("Thread Run") log_thread = LogThread(interval=0.1) with self.assertLogs(level="INFO") as logs: log_thread.start() time.sleep(1) self.assertTrue(log_thread.is_alive()) log_thread.stop() self.assertIn("INFO:absl:Thread Run", logs.output) time.sleep(0.1) self.assertFalse(log_thread.is_alive()) def test_timed_thread_restart(self): # Verfiy that thread can be started and stopped multiple times. class LogThread(timed_threads.TimedThread): def on_interval(self): logging.info("Thread Run") log_thread = LogThread(interval=0.1) for _ in range(2): self.assertFalse(log_thread.is_alive()) with self.assertLogs(level="INFO") as logs: log_thread.start() time.sleep(1) self.assertTrue(log_thread.is_alive()) log_thread.stop() self.assertIn("INFO:absl:Thread Run", logs.output) time.sleep(0.1) self.assertFalse(log_thread.is_alive()) def test_timed_thread_running_warning(self): # Verfiy thread start warning if its already running class LogThread(timed_threads.TimedThread): def on_interval(self): logging.info("Thread Run") log_thread = LogThread(interval=0.1) self.assertFalse(log_thread.is_alive()) with self.assertLogs(level="INFO") as logs: log_thread.start() time.sleep(1) self.assertTrue(log_thread.is_alive()) self.assertIn("INFO:absl:Thread Run", logs.output) with self.assertLogs(level="WARNING") as logs: log_thread.start() self.assertIn( "WARNING:absl:Thread is already running.", logs.output ) self.assertTrue(log_thread.is_alive()) log_thread.stop() time.sleep(0.1) self.assertFalse(log_thread.is_alive()) def test_timed_thread_callback_model_fit(self): class LogThreadCallback( timed_threads.TimedThread, keras.callbacks.Callback ): def __init__(self, interval): self._epoch = 0 timed_threads.TimedThread.__init__(self, interval=interval) keras.callbacks.Callback.__init__(self) def on_interval(self): if self._epoch: # Verify that `model` is accessible. 
_ = self.model.optimizer.iterations.numpy() logging.info(f"Thread Run Epoch: {self._epoch}") def on_epoch_begin(self, epoch, logs=None): self._epoch = epoch time.sleep(1) x = tf.random.normal((32, 2)) y = tf.ones((32, 1), dtype=tf.float32) model = keras.Sequential([keras.layers.Dense(1)]) model.compile(loss="mse") with self.assertLogs(level="INFO") as logs, LogThreadCallback( interval=0.1 ) as log_thread_callback: self.assertIsNone(log_thread_callback.model) model.fit(x, y, epochs=2, callbacks=[log_thread_callback]) self.assertIsNotNone(log_thread_callback.model) self.assertIn("INFO:absl:Thread Run Epoch: 1", logs.output) if __name__ == "__main__": tf.test.main()
tf-keras/tf_keras/utils/timed_threads_test.py/0
{ "file_path": "tf-keras/tf_keras/utils/timed_threads_test.py", "repo_id": "tf-keras", "token_count": 2035 }
193
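The tests above exercise `TimedThread` both as a plain background thread and mixed into a Keras callback. A minimal usage sketch (the interval and printed message are illustrative; the import path follows the test's own import):

```python
from tf_keras.utils import timed_threads


class HeartbeatThread(timed_threads.TimedThread):
    def on_interval(self):
        # Called roughly every `interval` seconds on the background thread.
        print("heartbeat")


thread = HeartbeatThread(interval=30)
thread.start()
# ... long-running work (e.g. model.fit) goes here ...
thread.stop()
```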
# Release v1.1.0 ## Breaking changes * This only affects you if you use `BertTokenizer` or `BertEncoder` in AutoKeras explicitly. You are not affected if you only use `BertBlock`, `TextClassifier` or `TextRegressor`. Removed the AutoKeras implementation of `BertTokenizer` and `BertEncoder`. Use the `keras-nlp` implementation instead. ## New features ## Bug fixes * Now also supports `numpy>=1.24`.
autokeras/RELEASE.md/0
{ "file_path": "autokeras/RELEASE.md", "repo_id": "autokeras", "token_count": 134 }
0
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import keras_tuner import pytest import tensorflow as tf from tensorflow import keras from tensorflow import nest from autokeras import blocks from autokeras import test_utils def test_resnet_build_return_tensor(): block = blocks.ResNetBlock() outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32, 32, 3), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_resnet_v1_return_tensor(): block = blocks.ResNetBlock(version="v1") outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32, 32, 3), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_efficientnet_b0_return_tensor(): block = blocks.EfficientNetBlock(version="b0", pretrained=False) outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32, 32, 3), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_resnet_pretrained_build_return_tensor(): block = blocks.ResNetBlock(pretrained=True) outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32, 32, 3), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_resnet_pretrained_with_one_channel_input(): block = blocks.ResNetBlock(pretrained=True) outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(28, 28, 1), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_resnet_pretrained_error_with_two_channels(): block = blocks.ResNetBlock(pretrained=True) with pytest.raises(ValueError) as info: block.build( keras_tuner.HyperParameters(), keras.Input(shape=(224, 224, 2), dtype=tf.float32), ) assert "When pretrained is set to True" in str(info.value) def test_resnet_deserialize_to_resnet(): serialized_block = blocks.serialize(blocks.ResNetBlock()) block = blocks.deserialize(serialized_block) assert isinstance(block, blocks.ResNetBlock) def test_resnet_get_config_has_all_attributes(): block = blocks.ResNetBlock() config = block.get_config() assert test_utils.get_func_args(blocks.ResNetBlock.__init__).issubset( config.keys() ) def test_resnet_wrong_version_error(): with pytest.raises(ValueError) as info: blocks.ResNetBlock(version="abc") assert "Expect version to be" in str(info.value) def test_efficientnet_wrong_version_error(): with pytest.raises(ValueError) as info: blocks.EfficientNetBlock(version="abc") assert "Expect version to be" in str(info.value) def test_xception_build_return_tensor(): block = blocks.XceptionBlock() outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32, 32, 2), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_xception_pretrained_build_return_tensor(): block = blocks.XceptionBlock(pretrained=True) outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32, 32, 3), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_xception_pretrained_with_one_channel_input(): block = blocks.XceptionBlock(pretrained=True) outputs = block.build( keras_tuner.HyperParameters(), 
keras.Input(shape=(224, 224, 1), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_xception_pretrained_error_with_two_channels(): block = blocks.XceptionBlock(pretrained=True) with pytest.raises(ValueError) as info: block.build( keras_tuner.HyperParameters(), keras.Input(shape=(224, 224, 2), dtype=tf.float32), ) assert "When pretrained is set to True" in str(info.value) def test_xception_deserialize_to_xception(): serialized_block = blocks.serialize(blocks.XceptionBlock()) block = blocks.deserialize(serialized_block) assert isinstance(block, blocks.XceptionBlock) def test_xception_get_config_has_all_attributes(): block = blocks.XceptionBlock() config = block.get_config() assert test_utils.get_func_args(blocks.XceptionBlock.__init__).issubset( config.keys() ) def test_conv_build_return_tensor(): block = blocks.ConvBlock() outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32, 32, 3), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_conv_with_small_image_size_return_tensor(): block = blocks.ConvBlock() outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(10, 10, 3), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_conv_build_with_dropout_return_tensor(): block = blocks.ConvBlock(dropout=0.5) outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32, 32, 3), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_conv_deserialize_to_conv(): serialized_block = blocks.serialize(blocks.ConvBlock()) block = blocks.deserialize(serialized_block) assert isinstance(block, blocks.ConvBlock) def test_conv_get_config_has_all_attributes(): block = blocks.ConvBlock() config = block.get_config() assert test_utils.get_func_args(blocks.ConvBlock.__init__).issubset( config.keys() ) def test_rnn_build_return_tensor(): block = blocks.RNNBlock(bidirectional=False) outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32, 10), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_rnn_input_shape_one_dim_error(): block = blocks.RNNBlock() with pytest.raises(ValueError) as info: block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32,), dtype=tf.float32), ) assert "Expect the input tensor of RNNBlock" in str(info.value) def test_rnn_deserialize_to_rnn(): serialized_block = blocks.serialize(blocks.RNNBlock()) block = blocks.deserialize(serialized_block) assert isinstance(block, blocks.RNNBlock) def test_rnn_get_config_has_all_attributes(): block = blocks.RNNBlock() config = block.get_config() assert test_utils.get_func_args(blocks.RNNBlock.__init__).issubset( config.keys() ) def test_dense_build_return_tensor(): block = blocks.DenseBlock( num_units=keras_tuner.engine.hyperparameters.Choice( "num_units", [10, 20] ) ) outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32,), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_dense_build_with_dropout_return_tensor(): block = blocks.DenseBlock(dropout=0.5) outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32,), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_dense_build_with_bn_return_tensor(): block = blocks.DenseBlock(use_batchnorm=True) outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32,), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_dense_deserialize_to_dense(): serialized_block = blocks.serialize(blocks.DenseBlock()) block = blocks.deserialize(serialized_block) 
assert isinstance(block, blocks.DenseBlock) def test_dense_get_config_has_all_attributes(): block = blocks.DenseBlock() config = block.get_config() assert test_utils.get_func_args(blocks.DenseBlock.__init__).issubset( config.keys() ) def test_embed_build_return_tensor(): block = blocks.Embedding() outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(32,), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_embed_deserialize_to_embed(): serialized_block = blocks.serialize(blocks.Embedding()) block = blocks.deserialize(serialized_block) assert isinstance(block, blocks.Embedding) def test_embed_get_config_has_all_attributes(): block = blocks.Embedding() config = block.get_config() assert test_utils.get_func_args(blocks.Embedding.__init__).issubset( config.keys() ) def test_transformer_build_return_tensor(): block = blocks.Transformer() outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(64,), dtype=tf.float32), ) assert len(nest.flatten(outputs)) == 1 def test_transformer_deserialize_to_transformer(): serialized_block = blocks.serialize(blocks.Transformer()) block = blocks.deserialize(serialized_block) assert isinstance(block, blocks.Transformer) def test_transformer_get_config_has_all_attributes(): block = blocks.Transformer() config = block.get_config() assert test_utils.get_func_args(blocks.Transformer.__init__).issubset( config.keys() ) def test_multi_head_restore_head_size(): block = blocks.basic.MultiHeadSelfAttention(head_size=16) block = blocks.basic.MultiHeadSelfAttention.from_config(block.get_config()) assert block.head_size == 16 def test_bert_build_return_tensor(): block = blocks.BertBlock() outputs = block.build( keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string) ) assert len(nest.flatten(outputs)) == 1 def test_bert_deserialize_to_transformer(): serialized_block = blocks.serialize(blocks.BertBlock()) block = blocks.deserialize(serialized_block) assert isinstance(block, blocks.BertBlock) def test_bert_get_config_has_all_attributes(): block = blocks.BertBlock() config = block.get_config() assert test_utils.get_func_args(blocks.BertBlock.__init__).issubset( config.keys() )
autokeras/autokeras/blocks/basic_test.py/0
{ "file_path": "autokeras/autokeras/blocks/basic_test.py", "repo_id": "autokeras", "token_count": 4154 }
1
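The tests above all follow the same pattern for exercising a block in isolation: call `block.build()` with a fresh `HyperParameters` object and a symbolic `keras.Input`. A standalone sketch of that pattern using `ConvBlock` (the input shape is illustrative):

```python
import keras_tuner
import tensorflow as tf
from tensorflow import keras

import autokeras as ak

block = ak.ConvBlock()
outputs = block.build(
    keras_tuner.HyperParameters(),
    keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
print(tf.nest.flatten(outputs))  # a single symbolic output tensor
```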
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from tensorflow import keras from autokeras.engine import io_hypermodel from autokeras.utils import types def serialize_metrics(metrics): serialized = [] for metric in metrics: if isinstance(metric, str): serialized.append([metric]) else: serialized.append(keras.metrics.serialize(metric)) return serialized def deserialize_metrics(metrics): deserialized = [] for metric in metrics: if isinstance(metric, list): deserialized.append(metric[0]) else: deserialized.append(keras.metrics.deserialize(metric)) return deserialized def serialize_loss(loss): if isinstance(loss, str): return [loss] return keras.losses.serialize(loss) def deserialize_loss(loss): if isinstance(loss, list): return loss[0] return keras.losses.deserialize(loss) class Head(io_hypermodel.IOHyperModel): """Base class for the heads, e.g. classification, regression. # Arguments loss: A Keras loss function. Defaults to None. If None, the loss will be inferred from the AutoModel. metrics: A list of Keras metrics. Defaults to None. If None, the metrics will be inferred from the AutoModel. """ def __init__( self, loss: Optional[types.LossType] = None, metrics: Optional[types.MetricsType] = None, **kwargs ): super().__init__(**kwargs) self.loss = loss if metrics is None: metrics = [] self.metrics = metrics # Mark if the head should directly output the input tensor. def get_config(self): config = super().get_config() config.update( { "loss": serialize_loss(self.loss), "metrics": serialize_metrics(self.metrics), } ) return config @classmethod def from_config(cls, config): config["loss"] = deserialize_loss(config["loss"]) config["metrics"] = deserialize_metrics(config["metrics"]) return super().from_config(config) def build(self, hp, inputs=None): raise NotImplementedError
autokeras/autokeras/engine/head.py/0
{ "file_path": "autokeras/autokeras/engine/head.py", "repo_id": "autokeras", "token_count": 1094 }
2
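A quick illustrative round-trip of the `serialize_loss`/`deserialize_loss` helpers defined above (the `autokeras.engine.head` import path follows the file location; the exact serialized form of Keras objects depends on the installed Keras version):

```python
from tensorflow import keras

from autokeras.engine import head as head_module

# String identifiers are wrapped in a one-element list so from_config()
# can tell them apart from serialized Keras objects.
assert head_module.serialize_loss("mae") == ["mae"]
assert head_module.deserialize_loss(["mae"]) == "mae"

# Keras loss objects round-trip through keras.losses.serialize/deserialize.
config = head_module.serialize_loss(keras.losses.MeanSquaredError())
print(type(head_module.deserialize_loss(config)).__name__)  # MeanSquaredError
```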
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pandas as pd import autokeras as ak from autokeras import test_utils def test_text_and_structured_data(tmp_path): # Prepare the data. num_instances = 80 x_text = test_utils.generate_text_data(num_instances) x_structured_data = pd.read_csv(test_utils.TRAIN_CSV_PATH) x_structured_data = x_structured_data[:num_instances] y_classification = test_utils.generate_one_hot_labels( num_instances=num_instances, num_classes=3 ) y_regression = test_utils.generate_data( num_instances=num_instances, shape=(1,) ) # Build model and train. structured_data_input = ak.StructuredDataInput() structured_data_output = ak.CategoricalToNumerical()(structured_data_input) structured_data_output = ak.DenseBlock()(structured_data_output) text_input = ak.TextInput() outputs1 = ak.TextToIntSequence()(text_input) outputs1 = ak.Embedding()(outputs1) outputs1 = ak.ConvBlock(separable=True)(outputs1) outputs1 = ak.SpatialReduction()(outputs1) outputs2 = ak.TextToNgramVector()(text_input) outputs2 = ak.DenseBlock()(outputs2) text_output = ak.Merge()((outputs1, outputs2)) merged_outputs = ak.Merge()((structured_data_output, text_output)) regression_outputs = ak.RegressionHead()(merged_outputs) classification_outputs = ak.ClassificationHead()(merged_outputs) automodel = ak.AutoModel( inputs=[text_input, structured_data_input], directory=tmp_path, outputs=[regression_outputs, classification_outputs], max_trials=2, tuner=ak.Hyperband, seed=test_utils.SEED, ) automodel.fit( (x_text, x_structured_data), (y_regression, y_classification), validation_split=0.2, epochs=1, ) def test_image_blocks(tmp_path): num_instances = 10 x_train = test_utils.generate_data( num_instances=num_instances, shape=(28, 28) ) y_train = np.random.randint(0, 10, num_instances) input_node = ak.ImageInput() output = ak.Normalization()(input_node) output = ak.ImageAugmentation()(output) outputs1 = ak.ResNetBlock(version="v2")(output) outputs2 = ak.XceptionBlock()(output) output_node = ak.Merge()((outputs1, outputs2)) output_node = ak.ClassificationHead()(output_node) automodel = ak.AutoModel( inputs=input_node, outputs=output_node, directory=tmp_path, max_trials=1, seed=test_utils.SEED, ) automodel.fit( x_train, y_train, validation_data=(x_train, y_train), epochs=1 )
autokeras/autokeras/integration_tests/functional_api_test.py/0
{ "file_path": "autokeras/autokeras/integration_tests/functional_api_test.py", "repo_id": "autokeras", "token_count": 1253 }
3
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from typing import Optional from typing import Type from typing import Union from autokeras import auto_model from autokeras import blocks from autokeras import nodes as input_module from autokeras.engine import tuner from autokeras.tuners import greedy from autokeras.tuners import task_specific from autokeras.utils import types class SupervisedTextPipeline(auto_model.AutoModel): def __init__(self, outputs, **kwargs): super().__init__( inputs=input_module.TextInput(), outputs=outputs, **kwargs ) class TextClassifier(SupervisedTextPipeline): """AutoKeras text classification class. # Arguments num_classes: Int. Defaults to None. If None, it will be inferred from the data. multi_label: Boolean. Defaults to False. loss: A Keras loss function. Defaults to use 'binary_crossentropy' or 'categorical_crossentropy' based on the number of classes. metrics: A list of Keras metrics. Defaults to use 'accuracy'. project_name: String. The name of the AutoModel. Defaults to 'text_classifier'. max_trials: Int. The maximum number of different Keras Models to try. The search may finish before reaching the max_trials. Defaults to 100. directory: String. The path to a directory for storing the search outputs. Defaults to None, which would create a folder with the name of the AutoModel in the current directory. objective: String. Name of model metric to minimize or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'. tuner: String or subclass of AutoTuner. If string, it should be one of 'greedy', 'bayesian', 'hyperband' or 'random'. It can also be a subclass of AutoTuner. If left unspecified, it uses a task specific tuner, which first evaluates the most commonly used models for the task before exploring other models. overwrite: Boolean. Defaults to `False`. If `False`, reloads an existing project of the same name if one is found. Otherwise, overwrites the project. seed: Int. Random seed. max_model_size: Int. Maximum number of scalars in the parameters of a model. Models larger than this are rejected. **kwargs: Any arguments supported by AutoModel. 
""" def __init__( self, num_classes: Optional[int] = None, multi_label: bool = False, loss: types.LossType = None, metrics: Optional[types.MetricsType] = None, project_name: str = "text_classifier", max_trials: int = 100, directory: Union[str, Path, None] = None, objective: str = "val_loss", tuner: Union[str, Type[tuner.AutoTuner]] = None, overwrite: bool = False, seed: Optional[int] = None, max_model_size: Optional[int] = None, **kwargs ): if tuner is None: tuner = task_specific.TextClassifierTuner super().__init__( outputs=blocks.ClassificationHead( num_classes=num_classes, multi_label=multi_label, loss=loss, metrics=metrics, ), max_trials=max_trials, directory=directory, project_name=project_name, objective=objective, tuner=tuner, overwrite=overwrite, seed=seed, max_model_size=max_model_size, **kwargs ) def fit( self, x=None, y=None, epochs=None, callbacks=None, validation_split=0.2, validation_data=None, **kwargs ): """Search for the best model and hyperparameters for the AutoModel. It will search for the best model based on the performances on validation data. # Arguments x: numpy.ndarray or tensorflow.Dataset. Training data x. The input data should be numpy.ndarray or tf.data.Dataset. The data should be one dimensional. Each element in the data should be a string which is a full sentence. y: numpy.ndarray or tensorflow.Dataset. Training data y. It can be raw labels, one-hot encoded if more than two classes, or binary encoded for binary classification. epochs: Int. The number of epochs to train each model during the search. If unspecified, by default we train for a maximum of 1000 epochs, but we stop training if the validation loss stops improving for 10 epochs (unless you specified an EarlyStopping callback as part of the callbacks argument, in which case the EarlyStopping callback you specified will determine early stopping). callbacks: List of Keras callbacks to apply during training and validation. validation_split: Float between 0 and 1. Defaults to 0.2. Fraction of the training data to be used as validation data. The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. The validation data is selected from the last samples in the `x` and `y` data provided, before shuffling. This argument is not supported when `x` is a dataset. The best model found would be fit on the entire dataset including the validation data. validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. `validation_data` will override `validation_split`. The type of the validation data should be the same as the training data. The best model found would be fit on the training dataset without the validation data. **kwargs: Any arguments supported by [keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit). # Returns history: A Keras History object corresponding to the best model. Its History.history attribute is a record of training loss values and metrics values at successive epochs, as well as validation loss values and validation metrics values (if applicable). """ history = super().fit( x=x, y=y, epochs=epochs, callbacks=callbacks, validation_split=validation_split, validation_data=validation_data, **kwargs ) return history class TextRegressor(SupervisedTextPipeline): """AutoKeras text regression class. # Arguments output_dim: Int. The number of output dimensions. Defaults to None. 
If None, it will be inferred from the data. loss: A Keras loss function. Defaults to use 'mean_squared_error'. metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'. project_name: String. The name of the AutoModel. Defaults to 'text_regressor'. max_trials: Int. The maximum number of different Keras Models to try. The search may finish before reaching the max_trials. Defaults to 100. directory: String. The path to a directory for storing the search outputs. Defaults to None, which would create a folder with the name of the AutoModel in the current directory. objective: String. Name of model metric to minimize or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'. tuner: String or subclass of AutoTuner. If string, it should be one of 'greedy', 'bayesian', 'hyperband' or 'random'. It can also be a subclass of AutoTuner. If left unspecified, it uses a task specific tuner, which first evaluates the most commonly used models for the task before exploring other models. overwrite: Boolean. Defaults to `False`. If `False`, reloads an existing project of the same name if one is found. Otherwise, overwrites the project. seed: Int. Random seed. max_model_size: Int. Maximum number of scalars in the parameters of a model. Models larger than this are rejected. **kwargs: Any arguments supported by AutoModel. """ def __init__( self, output_dim: Optional[int] = None, loss: types.LossType = "mean_squared_error", metrics: Optional[types.MetricsType] = None, project_name: str = "text_regressor", max_trials: int = 100, directory: Union[str, Path, None] = None, objective: str = "val_loss", tuner: Union[str, Type[tuner.AutoTuner]] = None, overwrite: bool = False, seed: Optional[int] = None, max_model_size: Optional[int] = None, **kwargs ): if tuner is None: tuner = greedy.Greedy super().__init__( outputs=blocks.RegressionHead( output_dim=output_dim, loss=loss, metrics=metrics ), max_trials=max_trials, directory=directory, project_name=project_name, objective=objective, tuner=tuner, overwrite=overwrite, seed=seed, max_model_size=max_model_size, **kwargs ) def fit( self, x=None, y=None, epochs=None, callbacks=None, validation_split=0.2, validation_data=None, **kwargs ): """Search for the best model and hyperparameters for the AutoModel. It will search for the best model based on the performances on validation data. # Arguments x: numpy.ndarray or tensorflow.Dataset. Training data x. The input data should be numpy.ndarray or tf.data.Dataset. The data should be one dimensional. Each element in the data should be a string which is a full sentence. y: numpy.ndarray or tensorflow.Dataset. Training data y. The targets passing to the head would have to be tf.data.Dataset, np.ndarray, pd.DataFrame or pd.Series. It can be single-column or multi-column. The values should all be numerical. epochs: Int. The number of epochs to train each model during the search. If unspecified, by default we train for a maximum of 1000 epochs, but we stop training if the validation loss stops improving for 10 epochs (unless you specified an EarlyStopping callback as part of the callbacks argument, in which case the EarlyStopping callback you specified will determine early stopping). callbacks: List of Keras callbacks to apply during training and validation. validation_split: Float between 0 and 1. Defaults to 0.2. Fraction of the training data to be used as validation data. 
The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. The validation data is selected from the last samples in the `x` and `y` data provided, before shuffling. This argument is not supported when `x` is a dataset. The best model found would be fit on the entire dataset including the validation data. validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. `validation_data` will override `validation_split`. The type of the validation data should be the same as the training data. The best model found would be fit on the training dataset without the validation data. **kwargs: Any arguments supported by [keras.Model.fit](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit). # Returns history: A Keras History object corresponding to the best model. Its History.history attribute is a record of training loss values and metrics values at successive epochs, as well as validation loss values and validation metrics values (if applicable). """ history = super().fit( x=x, y=y, epochs=epochs, callbacks=callbacks, validation_split=validation_split, validation_data=validation_data, **kwargs ) return history
autokeras/autokeras/tasks/text.py/0
{ "file_path": "autokeras/autokeras/tasks/text.py", "repo_id": "autokeras", "token_count": 5700 }
4
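The `TextClassifier` docstrings above spell out the expected data layout: one full-sentence string per sample, with plain or encoded labels. As a quick illustration, here is a minimal hedged sketch that drives the class end to end; the sentences, labels, and the tiny `max_trials`/`epochs` settings are made up to keep the example cheap.

```python
import numpy as np
import autokeras as ak

# Toy data: one full sentence per sample, integer class labels.
x_train = np.array(
    ["great movie, loved it", "terrible plot", "fantastic acting", "boring and slow"]
)
y_train = np.array([1, 0, 1, 0])

# Keep the search tiny so the sketch finishes quickly.
clf = ak.TextClassifier(max_trials=1, overwrite=True)
clf.fit(x_train, y_train, epochs=1, validation_split=0.5)
print(clf.predict(np.array(["an instant classic"])))
```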
# Copyright 2020 The AutoKeras Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import numpy as np import tensorflow as tf from sklearn.datasets import load_files from tensorflow.keras.datasets import cifar10 from tensorflow.keras.datasets import mnist import autokeras as ak def imdb_raw(num_instances=100): dataset = tf.keras.utils.get_file( fname="aclImdb.tar.gz", origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", extract=True, ) # set path to dataset IMDB_DATADIR = os.path.join(os.path.dirname(dataset), "aclImdb") classes = ["pos", "neg"] train_data = load_files( os.path.join(IMDB_DATADIR, "train"), shuffle=True, categories=classes ) test_data = load_files( os.path.join(IMDB_DATADIR, "test"), shuffle=False, categories=classes ) x_train = np.array(train_data.data) y_train = np.array(train_data.target) x_test = np.array(test_data.data) y_test = np.array(test_data.target) if num_instances is not None: x_train = x_train[:num_instances] y_train = y_train[:num_instances] x_test = x_test[:num_instances] y_test = y_test[:num_instances] return (x_train, y_train), (x_test, y_test) def test_mnist_accuracy_over_98(tmp_path): (x_train, y_train), (x_test, y_test) = mnist.load_data() clf = ak.ImageClassifier(max_trials=1, directory=tmp_path) clf.fit(x_train, y_train, epochs=10) accuracy = clf.evaluate(x_test, y_test)[1] assert accuracy >= 0.98 def test_cifar10_accuracy_over_93(tmp_path): (x_train, y_train), (x_test, y_test) = cifar10.load_data() clf = ak.ImageClassifier(max_trials=3, directory=tmp_path) clf.fit(x_train, y_train, epochs=5) accuracy = clf.evaluate(x_test, y_test)[1] assert accuracy >= 0.93 def test_imdb_accuracy_over_92(tmp_path): (x_train, y_train), (x_test, y_test) = imdb_raw(num_instances=None) clf = ak.TextClassifier(max_trials=3, directory=tmp_path) clf.fit(x_train, y_train, batch_size=6, epochs=1) accuracy = clf.evaluate(x_test, y_test)[1] assert accuracy >= 0.92 def test_titaninc_accuracy_over_77(tmp_path): TRAIN_DATA_URL = ( "https://storage.googleapis.com/tf-datasets/titanic/train.csv" ) TEST_DATA_URL = ( "https://storage.googleapis.com/tf-datasets/titanic/eval.csv" ) train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL) test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL) clf = ak.StructuredDataClassifier(max_trials=10, directory=tmp_path) clf.fit(train_file_path, "survived") accuracy = clf.evaluate(test_file_path, "survived")[1] assert accuracy >= 0.77
autokeras/benchmark/performance.py/0
{ "file_path": "autokeras/benchmark/performance.py", "repo_id": "autokeras", "token_count": 1342 }
5
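The accuracy checks above are ordinary pytest functions, so a single benchmark can be run in isolation. A hedged sketch, assuming the file lives at `benchmark/performance.py` inside the AutoKeras repository, as the row's path indicates:

```python
import pytest

# Run only the MNIST benchmark; -s streams the search log to the console.
pytest.main(["-s", "benchmark/performance.py::test_mnist_accuracy_over_98"])
```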
<jupyter_start><jupyter_code>!pip install autokeras import numpy as np import autokeras as ak<jupyter_output><empty_output><jupyter_text>In this tutorial we are making use of the[AutoModel](/auto_model/automodel-class) API to show how to handle multi-modal data and multi-task. What is multi-modal?Multi-modal data means each data instance has multiple forms of information.For example, a photo can be saved as a image. Besides the image, it may alsohave when and where it was taken as its attributes, which can be represented asstructured data. What is multi-task?Multi-task here we refer to we want to predict multiple targets with the sameinput features. For example, we not only want to classify an image according toits content, but we also want to regress its quality as a float number between0 and 1.The following diagram shows an example of multi-modal and multi-task neuralnetwork model.graph TD id1(ImageInput) --> id3(Some Neural Network Model) id2(StructuredDataInput) --> id3 id3 --> id4(ClassificationHead) id3 --> id5(RegressionHead)It has two inputs the images and the structured data. Each image is associatedwith a set of attributes in the structured data. From these data, we are tryingto predict the classification label and the regression value at the same time. Data PreparationTo illustrate our idea, we generate some random image and structured data asthe multi-modal data.<jupyter_code>num_instances = 100 # Generate image data. image_data = np.random.rand(num_instances, 32, 32, 3).astype(np.float32) # Generate structured data. structured_data = np.random.rand(num_instances, 20).astype(np.float32)<jupyter_output><empty_output><jupyter_text>We also generate some multi-task targets for classification and regression.<jupyter_code># Generate regression targets. regression_target = np.random.rand(num_instances, 1).astype(np.float32) # Generate classification labels of five classes. classification_target = np.random.randint(5, size=num_instances)<jupyter_output><empty_output><jupyter_text>Build and Train the ModelThen we initialize the multi-modal and multi-task model with[AutoModel](/auto_model/automodel-class).Since this is just a demo, we use small amount of `max_trials` and `epochs`.<jupyter_code># Initialize the multi with multiple inputs and outputs. model = ak.AutoModel( inputs=[ak.ImageInput(), ak.StructuredDataInput()], outputs=[ ak.RegressionHead(metrics=["mae"]), ak.ClassificationHead( loss="categorical_crossentropy", metrics=["accuracy"] ), ], overwrite=True, max_trials=2, ) # Fit the model with prepared data. model.fit( [image_data, structured_data], [regression_target, classification_target], epochs=3, )<jupyter_output><empty_output><jupyter_text>Validation DataBy default, AutoKeras use the last 20% of training data as validation data.As shown in the example below, you can use `validation_split` to specify thepercentage.<jupyter_code>model.fit( [image_data, structured_data], [regression_target, classification_target], # Split the training data and use the last 15% as validation data. 
validation_split=0.15, epochs=2, )<jupyter_output><empty_output><jupyter_text>You can also use your own validation setinstead of splitting it from the training data with `validation_data`.<jupyter_code>split = 20 image_val = image_data[split:] structured_val = structured_data[split:] regression_val = regression_target[split:] classification_val = classification_target[split:] image_data = image_data[:split] structured_data = structured_data[:split] regression_target = regression_target[:split] classification_target = classification_target[:split] model.fit( [image_data, structured_data], [regression_target, classification_target], # Use your own validation set. validation_data=( [image_val, structured_val], [regression_val, classification_val], ), epochs=2, )<jupyter_output><empty_output><jupyter_text>Customized Search SpaceYou can customize your search space.The following figure shows the search space we want to define.graph LR id1(ImageInput) --> id2(Normalization) id2 --> id3(Image Augmentation) id3 --> id4(Convolutional) id3 --> id5(ResNet V2) id4 --> id6(Merge) id5 --> id6 id7(StructuredDataInput) --> id8(CategoricalToNumerical) id8 --> id9(DenseBlock) id6 --> id10(Merge) id9 --> id10 id10 --> id11(Classification Head) id10 --> id12(Regression Head)<jupyter_code>input_node1 = ak.ImageInput() output_node = ak.Normalization()(input_node1) output_node = ak.ImageAugmentation()(output_node) output_node1 = ak.ConvBlock()(output_node) output_node2 = ak.ResNetBlock(version="v2")(output_node) output_node1 = ak.Merge()([output_node1, output_node2]) input_node2 = ak.StructuredDataInput() output_node = ak.CategoricalToNumerical()(input_node2) output_node2 = ak.DenseBlock()(output_node) output_node = ak.Merge()([output_node1, output_node2]) output_node1 = ak.ClassificationHead()(output_node) output_node2 = ak.RegressionHead()(output_node) auto_model = ak.AutoModel( inputs=[input_node1, input_node2], outputs=[output_node1, output_node2], overwrite=True, max_trials=2, ) image_data = np.random.rand(num_instances, 32, 32, 3).astype(np.float32) structured_data = np.random.rand(num_instances, 20).astype(np.float32) regression_target = np.random.rand(num_instances, 1).astype(np.float32) classification_target = np.random.randint(5, size=num_instances) auto_model.fit( [image_data, structured_data], [classification_target, regression_target], batch_size=32, epochs=3, )<jupyter_output><empty_output>
autokeras/docs/ipynb/multi.ipynb/0
{ "file_path": "autokeras/docs/ipynb/multi.ipynb", "repo_id": "autokeras", "token_count": 1893 }
6
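Once the multi-modal, multi-task `AutoModel` in the notebook above has been fit, inference mirrors the graph structure: one array per input node goes in, one array per head comes out. A minimal hedged sketch that reuses the `auto_model` and data arrays defined at the end of the notebook:

```python
# One array per input node in, one array per head out.
predictions = auto_model.predict([image_data, structured_data])
print([p.shape for p in predictions])

# evaluate() returns the total loss plus per-head losses and metrics.
print(
    auto_model.evaluate(
        [image_data, structured_data],
        [classification_target, regression_target],
    )
)
```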
"""shell pip install autokeras """ import numpy as np import tensorflow as tf from tensorflow.keras.datasets import mnist import autokeras as ak """ ## A Simple Example The first step is to prepare your data. Here we use the MNIST dataset as an example """ (x_train, y_train), (x_test, y_test) = mnist.load_data() print(x_train.shape) # (60000, 28, 28) print(y_train.shape) # (60000,) print(y_train[:3]) # array([7, 2, 1], dtype=uint8) """ The second step is to run the ImageClassifier. It is recommended have more trials for more complicated datasets. This is just a quick demo of MNIST, so we set max_trials to 1. For the same reason, we set epochs to 10. You can also leave the epochs unspecified for an adaptive number of epochs. """ # Initialize the image classifier. clf = ak.ImageClassifier(overwrite=True, max_trials=1) # Feed the image classifier with training data. clf.fit(x_train, y_train, epochs=10) # Predict with the best model. predicted_y = clf.predict(x_test) print(predicted_y) # Evaluate the best model with testing data. print(clf.evaluate(x_test, y_test)) """ ## Validation Data By default, AutoKeras use the last 20% of training data as validation data. As shown in the example below, you can use validation_split to specify the percentage. """ clf.fit( x_train, y_train, # Split the training data and use the last 15% as validation data. validation_split=0.15, epochs=10, ) """ You can also use your own validation set instead of splitting it from the training data with validation_data. """ split = 50000 x_val = x_train[split:] y_val = y_train[split:] x_train = x_train[:split] y_train = y_train[:split] clf.fit( x_train, y_train, # Use your own validation set. validation_data=(x_val, y_val), epochs=10, ) """ ## Customized Search Space For advanced users, you may customize your search space by using AutoModel instead of ImageClassifier. You can configure the ImageBlock for some high-level configurations, e.g., block_type for the type of neural network to search, normalize for whether to do data normalization, augment for whether to do data augmentation. You can also do not specify these arguments, which would leave the different choices to be tuned automatically. See the following example for detail. """ input_node = ak.ImageInput() output_node = ak.ImageBlock( # Only search ResNet architectures. block_type="resnet", # Normalize the dataset. normalize=True, # Do not do data augmentation. augment=False, )(input_node) output_node = ak.ClassificationHead()(output_node) clf = ak.AutoModel( inputs=input_node, outputs=output_node, overwrite=True, max_trials=1 ) clf.fit(x_train, y_train, epochs=10) """ The usage of AutoModel is similar to the functional API of Keras. Basically, you are building a graph, whose edges are blocks and the nodes are intermediate outputs of blocks. To add an edge from input_node to output_node with output_node = ak.[some_block]([block_args])(input_node). You can even also use more fine grained blocks to customize the search space even further. See the following example. """ input_node = ak.ImageInput() output_node = ak.Normalization()(input_node) output_node = ak.ImageAugmentation(horizontal_flip=False)(output_node) output_node = ak.ResNetBlock(version="v2")(output_node) output_node = ak.ClassificationHead()(output_node) clf = ak.AutoModel( inputs=input_node, outputs=output_node, overwrite=True, max_trials=1 ) clf.fit(x_train, y_train, epochs=10) """ ## Data Format The AutoKeras ImageClassifier is quite flexible for the data format. 
For the image, it accepts data formats both with and without the channel dimension. The images in the MNIST dataset do not have the channel dimension. Each image is a matrix with shape (28, 28). AutoKeras also accepts images of three dimensions with the channel dimension at last, e.g., (32, 32, 3), (28, 28, 1). For the classification labels, AutoKeras accepts both plain labels, i.e. strings or integers, and one-hot encoded encoded labels, i.e. vectors of 0s and 1s. So if you prepare your data in the following way, the ImageClassifier should still work. """ (x_train, y_train), (x_test, y_test) = mnist.load_data() # Reshape the images to have the channel dimension. x_train = x_train.reshape(x_train.shape + (1,)) x_test = x_test.reshape(x_test.shape + (1,)) # One-hot encode the labels. eye = np.eye(10) y_train = eye[y_train] y_test = eye[y_test] print(x_train.shape) # (60000, 28, 28, 1) print(y_train.shape) # (60000, 10) print(y_train[:3]) # array([[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], # [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], # [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.]]) """ We also support using tf.data.Dataset format for the training data. """ train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,))) test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,))) clf = ak.ImageClassifier(overwrite=True, max_trials=1) # Feed the tensorflow Dataset to the classifier. clf.fit(train_set, epochs=10) # Predict with the best model. predicted_y = clf.predict(test_set) # Evaluate the best model with testing data. print(clf.evaluate(test_set)) """ ## Reference [ImageClassifier](/image_classifier), [AutoModel](/auto_model/#automodel-class), [ImageBlock](/block/#imageblock-class), [Normalization](/block/#normalization-class), [ImageAugmentation](/block/#image-augmentation-class), [ResNetBlock](/block/#resnetblock-class), [ImageInput](/node/#imageinput-class), [ClassificationHead](/block/#classificationhead-class). """
autokeras/docs/py/image_classification.py/0
{ "file_path": "autokeras/docs/py/image_classification.py", "repo_id": "autokeras", "token_count": 1905 }
7
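A common follow-up to the tutorial above is exporting the best model found by the search as a plain Keras model, so it can be saved and reloaded without rerunning AutoKeras. The sketch below is hedged but sticks to documented AutoKeras API (`export_model` and `ak.CUSTOM_OBJECTS`); it continues from the `clf` and `x_test` defined in the tutorial.

```python
import autokeras as ak
from tensorflow.keras.models import load_model

# Export the best Keras model found during the search and save it to disk.
model = clf.export_model()
model.save("model_autokeras", save_format="tf")

# Reload it later; AutoKeras custom layers must be registered on load.
loaded_model = load_model("model_autokeras", custom_objects=ak.CUSTOM_OBJECTS)
print(loaded_model.predict(x_test[:1]))
```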
""" Search for a good model for the [MNIST](https://keras.io/datasets/#mnist-database-of-handwritten-digits) dataset. """ from tensorflow.keras.datasets import mnist import autokeras as ak # Prepare the dataset. (x_train, y_train), (x_test, y_test) = mnist.load_data() print(x_train.shape) # (60000, 28, 28) print(y_train.shape) # (60000,) print(y_train[:3]) # array([7, 2, 1], dtype=uint8) # Initialize the ImageClassifier. clf = ak.ImageClassifier(max_trials=3) # Search for the best model. clf.fit(x_train, y_train, epochs=10) # Evaluate on the testing data. print("Accuracy: {accuracy}".format(accuracy=clf.evaluate(x_test, y_test)))
autokeras/examples/mnist.py/0
{ "file_path": "autokeras/examples/mnist.py", "repo_id": "autokeras", "token_count": 258 }
8
isort -c . if ! [ $? -eq 0 ] then echo "Please run \"sh shell/format.sh\" to format the code." exit 1 fi flake8 . if ! [ $? -eq 0 ] then echo "Please fix the code style issue." exit 1 fi black --check . if ! [ $? -eq 0 ] then echo "Please run \"sh shell/format.sh\" to format the code." exit 1 fi for i in $(find autokeras benchmark -name '*.py') # or whatever other pattern... do if ! grep -q Copyright $i then echo "Please run \"sh shell/format.sh\" to format the code." exit 1 fi done
autokeras/shell/lint.sh/0
{ "file_path": "autokeras/shell/lint.sh", "repo_id": "autokeras", "token_count": 187 }
9
# Best Practices for Managing Keras Projects on GitHub This document describes the best practices for managing the projects under "keras-team" on GitHub which use GitHub as the source of truth, including [keras-tuner](https://github.com/keras-team/keras-tuner), [autokeras](https://github.com/keras-team/autokeras), [keras-cv](https://github.com/keras-team/keras-cv), [keras-nlp](https://github.com/keras-team/keras-nlp), and maybe more in the future. It covers linting, formating, testing, continuous integration, issues and pull requests tagging, and so on. The goal of this document is to: * Improve the overall quality of the projects. The fact that projects all follow the same standard for dev process, which may evolve through time, will ensure the quality from all aspects. * Unify the external contributing experience. The external open-source contributors may contribute to multiple Keras projects by submitting issues or pull requests. They don't need to learn from different contributing guides. * Save time for the project leads. They save time by copying and pasting the same setup and by avoiding the listed caveats. ## Testing ### Testing framework We use [pytest](https://docs.pytest.org/en/6.2.x/) for writing tests for the projects, which is the most widely used testing framework for Python in the OSS world. The configuration of pytest is [here](https://github.com/keras-team/keras-tuner/blob/1.1.0/setup.cfg#L4-L16). ### File locations for the tests Unit tests should be contained in sibling files, relative to the class or utility files they are testing. The name of a test file should follow the pattern of `*_test.py`. For example, the tests for `/keras_tuner/engine/hyperparameters.py` are in `/keras_tuner/engine/hyperparameters_tests.py`. Integration tests may be contained in their own `/keras_tuner/integration_tests` directory, as they may require extra files such as data. While our unit test placement is not suggested in the [good practices of pytest](https://docs.pytest.org/en/6.2.x/goodpractices.html) doc, we recommend this approach to improve the discoverability of the unit tests for new contributors. This discoverability doubles up as a method of documentation; when users want to see what `util.utility_function()` does, they can simply open the conveniently located sibling file, `util_test.py`. ### Test Coverage We use [CodeCov](https://about.codecov.io/) to track the test coverage.You may also refer to [these settings](https://github.com/keras-team/keras-tuner/blob/1.1.0/setup.cfg#L24-L28) in `setup.cfg`. We will see more about it in the continuous integration section. Pytest CodeCov supports a wildcard exclude field, which should be set to include `*_test.py`, as to ensure that tests are not included in the code coverage count. ### Useful code snippets Fix the random seed for all tests: [Link1](https://github.com/keras-team/keras-tuner/blob/1.1.0/tests/conftest.py#L8-L17), [Link2](https://github.com/keras-team/keras-tuner/blob/master/tests/unit_tests/randomness_test.py), [Link3](https://www.tensorflow.org/api_docs/python/tf/keras/utils/set_random_seed). Create a temporary path for testing: [Link](https://docs.pytest.org/en/6.2.x/tmpdir.html). ## Code styles ### Importing Keras modules For projects based on Keras and TensorFlow, top-level imports are encouraged, like shows in the following example. ```py import tensorflow as tf from tensorflow import keras ``` Exceptions may be acceptable when the module appeared too many times in the code, like `keras.layers`. 
### Linting and formatting We use [black](https://black.readthedocs.io/en/stable/), [isort](https://pycqa.github.io/isort/), [flake8](https://flake8.pycqa.org/en/latest/) to lint and format the code. black is to generally format the code. isort is to sort the imports. flake8 is for some additional checks that black doesn't do, like the long lines with a single string. You can see the relevant sections of [setup.cfg](https://github.com/keras-team/keras-tuner/blob/1.1.0/setup.cfg) for the detailed configuration of these tools. The user does not need to know how to use these tools to lint or format the code. We provide them with two shell scripts: [`/shell/lint.sh`](https://github.com/keras-team/keras-tuner/blob/master/shell/lint.sh) and [`/shell/format.sh`](https://github.com/keras-team/keras-tuner/blob/master/shell/format.sh). In these scripts, we also check and add the Apache 2.0 License head to every file. ## Releasing ### Release setups The version number of the package is stored only in `/package_name/__init__.py` with a single line of `__version__ = 'master'` on the master branch. [example](https://github.com/keras-team/keras-tuner/blob/1e13aabe5b6659340a8ee81328805479a57b2105/keras_tuner/__init__.py#L35) We also need the `setup.py` file for the PyPI release. [example](https://github.com/keras-team/keras-tuner/blob/1e13aabe5b6659340a8ee81328805479a57b2105/setup.py) For the `setup.py` file to grab the current version number from `/package_name/__init__.py`, we need additional lines in `setup.cfg`. [example](https://github.com/keras-team/keras-tuner/blob/1.1.0/setup.cfg#L1-L2) ### Draft a new release For releasing a new version of the package, please following these steps: * Create a new branch from the master branch. * Modify the `__version__` value in the new branch. * Create a new release on GitHub. [Official tutorial](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository) Note that the continuous integration will upload it to PyPI automatically. ### Excluding Sibling Test Unit tests are hosted in sibling files relative to the files containing the code they are testing. `SetupTools.find_packages()` supports an [exclude field](https://github.com/pypa/setuptools/blob/f838bc6a170046c9fdfc2251e5466040a669ca12/setuptools/__init__.py#L52). This field should contain `*_test.py` to ensure that tests are not packaged with the release. ## Continuous integration We use [GitHub Actions](https://github.com/features/actions) for continuous integrations. It automates running tests, checking the code styles, uploading test coverages to CodeCov, and uploading new releases to PyPI. You can refer to [this file](https://github.com/keras-team/keras-tuner/blob/master/.github/workflows/actions.yml) for how to set it up. We use a single YAML file for all the GitHub Actions to avoid installing the dependencies multiple times. To use this setup, you also need to upload your CodeCov and PyPI credentials to the project. Here is the [official tutorial](https://docs.github.com/en/actions/security-guides/encrypted-secrets#creating-encrypted-secrets-for-a-repository). Make sure you follow the naming of the following secrets for the GitHub Actions YAML file to work. Name the CodeCov token as `CODECOV_TOKEN`. Name the PyPI username and password as `PYPI_USERNAME` and `PYPI_PASSWORD`. We should also test against tf-nightly every day to discover bugs and incompatible issues early and well before the stable release of TensorFlow. 
The CI setup for it is [here](https://github.com/keras-team/keras-tuner/blob/master/.github/workflows/nightly.yml). ## Contributing experience We will have a common CONTRIBUTING.md in `keras-team/governance` to be distributed to the other repos. This [GitHub Action](https://github.com/marketplace/actions/file-sync) may be a good way to sync a centralized contributing guide to different repos. We should also have [this directory](https://github.com/keras-team/keras-tuner/tree/master/.devcontainer) to support GitHub Codespaces, which is a trend on GitHub. It provides a web-based IDE to save the contributors from setting up their own dev environment, which would attract more contributors. ## Issues and pull requests We will have the same issue and pull request [templates](https://github.com/keras-team/keras/tree/master/.github/ISSUE_TEMPLATE) across projects in `keras-team`. They will also be stored in `keras-team/governance` and be distributed to the other repos. Also need to confirm if there is a way to unify the taggings between the repos.
governance/project_setup_best_practices.md/0
{ "file_path": "governance/project_setup_best_practices.md", "repo_id": "governance", "token_count": 2478 }
10
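The testing section in the row above recommends pinning random seeds for every test via a shared pytest fixture. One way such a `conftest.py` could look, as a hedged sketch that assumes Python's `random`, NumPy, and TensorFlow are the relevant sources of randomness:

```python
# conftest.py -- picked up automatically by pytest for every test in the package.
import random

import numpy as np
import pytest
import tensorflow as tf


@pytest.fixture(autouse=True)
def fix_random_seeds():
    """Pin all common RNGs so test failures are reproducible."""
    random.seed(0)
    np.random.seed(0)
    tf.random.set_seed(0)
```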
"""Utilities for ImageNet data preprocessing & prediction decoding. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import warnings import numpy as np from . import get_submodules_from_kwargs CLASS_INDEX = None CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/' 'data/imagenet_class_index.json') def _preprocess_numpy_input(x, data_format, mode, **kwargs): """Preprocesses a Numpy array encoding a batch of images. # Arguments x: Input array, 3D or 4D. data_format: Data format of the image array. mode: One of "caffe", "tf" or "torch". - caffe: will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. - torch: will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet dataset. # Returns Preprocessed Numpy array. """ backend, _, _, _ = get_submodules_from_kwargs(kwargs) if not issubclass(x.dtype.type, np.floating): x = x.astype(backend.floatx(), copy=False) if mode == 'tf': x /= 127.5 x -= 1. return x if mode == 'torch': x /= 255. mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] else: if data_format == 'channels_first': # 'RGB'->'BGR' if x.ndim == 3: x = x[::-1, ...] else: x = x[:, ::-1, ...] else: # 'RGB'->'BGR' x = x[..., ::-1] mean = [103.939, 116.779, 123.68] std = None # Zero-center by mean pixel if data_format == 'channels_first': if x.ndim == 3: x[0, :, :] -= mean[0] x[1, :, :] -= mean[1] x[2, :, :] -= mean[2] if std is not None: x[0, :, :] /= std[0] x[1, :, :] /= std[1] x[2, :, :] /= std[2] else: x[:, 0, :, :] -= mean[0] x[:, 1, :, :] -= mean[1] x[:, 2, :, :] -= mean[2] if std is not None: x[:, 0, :, :] /= std[0] x[:, 1, :, :] /= std[1] x[:, 2, :, :] /= std[2] else: x[..., 0] -= mean[0] x[..., 1] -= mean[1] x[..., 2] -= mean[2] if std is not None: x[..., 0] /= std[0] x[..., 1] /= std[1] x[..., 2] /= std[2] return x def _preprocess_symbolic_input(x, data_format, mode, **kwargs): """Preprocesses a tensor encoding a batch of images. # Arguments x: Input tensor, 3D or 4D. data_format: Data format of the image tensor. mode: One of "caffe", "tf" or "torch". - caffe: will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. - torch: will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet dataset. # Returns Preprocessed tensor. """ backend, _, _, _ = get_submodules_from_kwargs(kwargs) if mode == 'tf': x /= 127.5 x -= 1. return x if mode == 'torch': x /= 255. mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] else: if data_format == 'channels_first': # 'RGB'->'BGR' if backend.ndim(x) == 3: x = x[::-1, ...] else: x = x[:, ::-1, ...] else: # 'RGB'->'BGR' x = x[..., ::-1] mean = [103.939, 116.779, 123.68] std = None mean_tensor = backend.constant(-np.array(mean)) # Zero-center by mean pixel if backend.dtype(x) != backend.dtype(mean_tensor): x = backend.bias_add( x, backend.cast(mean_tensor, backend.dtype(x)), data_format=data_format) else: x = backend.bias_add(x, mean_tensor, data_format) if std is not None: x /= std return x def preprocess_input(x, data_format=None, mode='caffe', **kwargs): """Preprocesses a tensor or Numpy array encoding a batch of images. # Arguments x: Input Numpy or symbolic tensor, 3D or 4D. 
The preprocessed data is written over the input data if the data types are compatible. To avoid this behaviour, `numpy.copy(x)` can be used. data_format: Data format of the image tensor/array. mode: One of "caffe", "tf" or "torch". - caffe: will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. - torch: will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet dataset. # Returns Preprocessed tensor or Numpy array. # Raises ValueError: In case of unknown `data_format` argument. """ backend, _, _, _ = get_submodules_from_kwargs(kwargs) if data_format is None: data_format = backend.image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if isinstance(x, np.ndarray): return _preprocess_numpy_input(x, data_format=data_format, mode=mode, **kwargs) else: return _preprocess_symbolic_input(x, data_format=data_format, mode=mode, **kwargs) def decode_predictions(preds, top=5, **kwargs): """Decodes the prediction of an ImageNet model. # Arguments preds: Numpy tensor encoding a batch of predictions. top: Integer, how many top-guesses to return. # Returns A list of lists of top class prediction tuples `(class_name, class_description, score)`. One list of tuples per sample in batch input. # Raises ValueError: In case of invalid shape of the `pred` array (must be 2D). """ global CLASS_INDEX backend, _, _, keras_utils = get_submodules_from_kwargs(kwargs) if len(preds.shape) != 2 or preds.shape[1] != 1000: raise ValueError('`decode_predictions` expects ' 'a batch of predictions ' '(i.e. a 2D array of shape (samples, 1000)). ' 'Found array with shape: ' + str(preds.shape)) if CLASS_INDEX is None: fpath = keras_utils.get_file( 'imagenet_class_index.json', CLASS_INDEX_PATH, cache_subdir='models', file_hash='c2c37ea517e94d9795004a39431a14cb') with open(fpath) as f: CLASS_INDEX = json.load(f) results = [] for pred in preds: top_indices = pred.argsort()[-top:][::-1] result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices] result.sort(key=lambda x: x[2], reverse=True) results.append(result) return results def _obtain_input_shape(input_shape, default_size, min_size, data_format, require_flatten, weights=None): """Internal utility to compute/validate a model's input shape. # Arguments input_shape: Either None (will return the default network input shape), or a user-provided shape to be validated. default_size: Default input width/height for the model. min_size: Minimum input width/height accepted by the model. data_format: Image data format to use. require_flatten: Whether the model is expected to be linked to a classifier via a Flatten layer. weights: One of `None` (random initialization) or 'imagenet' (pre-training on ImageNet). If weights='imagenet' input channels must be equal to 3. # Returns An integer shape tuple (may include None entries). # Raises ValueError: In case of invalid argument values. """ if weights != 'imagenet' and input_shape and len(input_shape) == 3: if data_format == 'channels_first': if input_shape[0] not in {1, 3}: warnings.warn( 'This model usually expects 1 or 3 input channels. 
' 'However, it was passed an input_shape with ' + str(input_shape[0]) + ' input channels.') default_shape = (input_shape[0], default_size, default_size) else: if input_shape[-1] not in {1, 3}: warnings.warn( 'This model usually expects 1 or 3 input channels. ' 'However, it was passed an input_shape with ' + str(input_shape[-1]) + ' input channels.') default_shape = (default_size, default_size, input_shape[-1]) else: if data_format == 'channels_first': default_shape = (3, default_size, default_size) else: default_shape = (default_size, default_size, 3) if weights == 'imagenet' and require_flatten: if input_shape is not None: if input_shape != default_shape: raise ValueError('When setting `include_top=True` ' 'and loading `imagenet` weights, ' '`input_shape` should be ' + str(default_shape) + '.') return default_shape if input_shape: if data_format == 'channels_first': if input_shape is not None: if len(input_shape) != 3: raise ValueError( '`input_shape` must be a tuple of three integers.') if input_shape[0] != 3 and weights == 'imagenet': raise ValueError('The input must have 3 channels; got ' '`input_shape=' + str(input_shape) + '`') if ((input_shape[1] is not None and input_shape[1] < min_size) or (input_shape[2] is not None and input_shape[2] < min_size)): raise ValueError('Input size must be at least ' + str(min_size) + 'x' + str(min_size) + '; got `input_shape=' + str(input_shape) + '`') else: if input_shape is not None: if len(input_shape) != 3: raise ValueError( '`input_shape` must be a tuple of three integers.') if input_shape[-1] != 3 and weights == 'imagenet': raise ValueError('The input must have 3 channels; got ' '`input_shape=' + str(input_shape) + '`') if ((input_shape[0] is not None and input_shape[0] < min_size) or (input_shape[1] is not None and input_shape[1] < min_size)): raise ValueError('Input size must be at least ' + str(min_size) + 'x' + str(min_size) + '; got `input_shape=' + str(input_shape) + '`') else: if require_flatten: input_shape = default_shape else: if data_format == 'channels_first': input_shape = (3, None, None) else: input_shape = (None, None, 3) if require_flatten: if None in input_shape: raise ValueError('If `include_top` is True, ' 'you should specify a static `input_shape`. ' 'Got `input_shape=' + str(input_shape) + '`') return input_shape
keras-applications/keras_applications/imagenet_utils.py/0
{ "file_path": "keras-applications/keras_applications/imagenet_utils.py", "repo_id": "keras-applications", "token_count": 6278 }
11
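`preprocess_input` above dispatches to one of three modes; the arithmetic each mode applies is easy to restate directly in NumPy. The sketch below re-derives the `'tf'` and `'torch'` modes on a random channels-last batch using the constants hard-coded in the module; it illustrates the math and is not a replacement for calling the function.

```python
import numpy as np

x = np.random.uniform(0, 255, size=(2, 224, 224, 3)).astype("float32")

# mode='tf': scale pixels to the [-1, 1] range, sample-wise.
x_tf = x / 127.5 - 1.0

# mode='torch': scale to [0, 1], then normalize per channel with ImageNet stats.
mean = np.array([0.485, 0.456, 0.406], dtype="float32")
std = np.array([0.229, 0.224, 0.225], dtype="float32")
x_torch = (x / 255.0 - mean) / std

print(x_tf.min(), x_tf.max())        # roughly -1.0 .. 1.0
print(x_torch.mean(axis=(0, 1, 2)))  # roughly centered per channel
```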
from setuptools import setup from setuptools import find_packages long_description = ''' Keras Applications is the `applications` module of the Keras deep learning library. It provides model definitions and pre-trained weights for a number of popular architectures, such as VGG16, ResNet50, Xception, MobileNet, and more. Read the documentation at: https://keras.io/applications/ Keras Applications may be imported directly from an up-to-date installation of Keras: ``` from keras import applications ``` Keras Applications is compatible with Python 2.7-3.6 and is distributed under the MIT license. ''' setup(name='Keras_Applications', version='1.0.8', description='Reference implementations of popular deep learning models', long_description=long_description, author='Keras Team', url='https://github.com/keras-team/keras-applications', download_url='https://github.com/keras-team/' 'keras-applications/tarball/1.0.8', license='MIT', install_requires=['numpy>=1.9.1', 'h5py'], extras_require={ 'tests': ['pytest', 'pytest-pep8', 'pytest-xdist', 'pytest-cov'], }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules' ], packages=find_packages())
keras-applications/setup.py/0
{ "file_path": "keras-applications/setup.py", "repo_id": "keras-applications", "token_count": 745 }
12
sudo: required dist: trusty language: python cache: directories: - $HOME/.theano matrix: include: - python: 3.6 env: KERAS_BACKEND=tensorflow TEST_MODE=PEP8_DOC - python: 2.7 env: KERAS_BACKEND=tensorflow - python: 3.6 env: KERAS_BACKEND=tensorflow - python: 3.6 env: KERAS_BACKEND=tensorflow USE_TF_KERAS=1 PYTEST_IGNORE='--ignore=tests/test_doc_auto_generation.py --ignore=tests/keras_contrib/backend --ignore=tests/keras_contrib/utils/save_load_utils_test.py' - python: 3.6 env: KERAS_BACKEND=theano THEANO_FLAGS=optimizer=fast_compile # - python: 3.6 # env: KERAS_BACKEND=cntk PYTHONWARNINGS=ignore install: # code below is taken from http://conda.pydata.org/docs/travis.html # We do this conditionally because it saves us some downloading if the # version is the same. - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh; else wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; fi - bash miniconda.sh -b -p $HOME/miniconda - export PATH="$HOME/miniconda/bin:$PATH" - hash -r - conda config --set always_yes yes --set changeps1 no - conda update -q conda # Useful for debugging any issues with conda - conda info -a - travis_retry conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION - source activate test-environment - travis_retry pip install --only-binary=numpy,scipy,pandas numpy nose scipy h5py theano pydoc-markdown pytest pytest-pep8 pandas pygithub --progress-bar off - if [[ "$USE_TF_KERAS" == "" ]]; then pip install git+https://github.com/keras-team/keras.git --progress-bar off; fi # set library path - export LD_LIBRARY_PATH=$HOME/miniconda/envs/test-environment/lib/:$LD_LIBRARY_PATH - if [[ "$KERAS_BACKEND" == "theano" ]]; then travis_retry conda install -q mkl mkl-service; fi - if [[ "$USE_TF_KERAS" == "1" ]]; then python convert_to_tf_keras.py; fi - pip install -e .[tests] --progress-bar off # install TensorFlow (CPU version). - pip install tensorflow==1.12 --progress-bar off # install cntk - ./.travis/install_cntk.sh # Remove the current backend from the coverage exclusion. - sed -i "\/keras\/backend\/${KERAS_BACKEND}_backend.py/d" .coveragerc # command to run tests script: - export MKL_THREADING_LAYER="GNU" # run keras backend init to initialize backend config - python -c "import keras_contrib.backend" # create dataset directory to avoid concurrent directory creation at runtime - mkdir ~/.keras/datasets # set up keras backend - sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json; - echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)" - if [[ "$TEST_MODE" == "PEP8_DOC" ]]; then PYTHONPATH=$PWD:$PYTHONPATH py.test --pep8 -m pep8 -n0 && py.test tests/tooling/ convert_to_tf_keras.py && cd contrib_docs && pydocmd build; else PYTHONPATH=$PWD:$PYTHONPATH py.test tests/ $PYTEST_IGNORE --ignore=tests/tooling/ --cov-config .coveragerc --cov=keras_contrib tests/; fi
keras-contrib/.travis.yml/0
{ "file_path": "keras-contrib/.travis.yml", "repo_id": "keras-contrib", "token_count": 1348 }
13
''' Trains a WRN-28-8 model on the CIFAR-10 Dataset. Performance is slightly less than the paper, since they use WRN-28-10 model (95.83%). Gets a 95.54% accuracy score after 300 epochs. ''' from __future__ import absolute_import from __future__ import print_function from __future__ import division from keras.datasets import cifar10 import keras.callbacks as callbacks import keras.utils.np_utils as kutils from keras.preprocessing.image import ImageDataGenerator from keras_contrib.applications.wide_resnet import WideResidualNetwork batch_size = 64 epochs = 300 img_rows, img_cols = 32, 32 (trainX, trainY), (testX, testY) = cifar10.load_data() trainX = trainX.astype('float32') trainX /= 255.0 testX = testX.astype('float32') testX /= 255.0 tempY = testY trainY = kutils.to_categorical(trainY) testY = kutils.to_categorical(testY) generator = ImageDataGenerator(rotation_range=10, width_shift_range=5. / 32, height_shift_range=5. / 32, horizontal_flip=True) generator.fit(trainX, seed=0, augment=True) # We will be training the model, therefore no need to load weights model = WideResidualNetwork(depth=28, width=8, dropout_rate=0.0, weights=None) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc']) print('Finished compiling') model_checkpoint = callbacks.ModelCheckpoint('WRN-28-8 Weights.h5', monitor='val_acc', save_best_only=True, save_weights_only=True) model.fit_generator(generator.flow(trainX, trainY, batch_size=batch_size), steps_per_epoch=len(trainX) // batch_size, epochs=epochs, callbacks=[model_checkpoint], validation_data=(testX, testY)) scores = model.evaluate(testX, testY, batch_size) print('Test loss : %0.5f' % (scores[0])) print('Test accuracy = %0.5f' % (scores[1]))
keras-contrib/examples/cifar10_wide_resnet.py/0
{ "file_path": "keras-contrib/examples/cifar10_wide_resnet.py", "repo_id": "keras-contrib", "token_count": 938 }
14
from theano import tensor as T from theano.sandbox.neighbours import images2neibs try: import theano.sparse as th_sparse_module except ImportError: th_sparse_module = None try: from theano.tensor.nnet.nnet import softsign as T_softsign except ImportError: from theano.sandbox.softsign import softsign as T_softsign from keras.backend import theano_backend as KTH from keras.backend.common import image_data_format from keras.backend.theano_backend import _preprocess_conv2d_input from keras.backend.theano_backend import _postprocess_conv2d_output py_all = all def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format='channels_first', image_shape=None, filter_shape=None): ''' padding: string, "same" or "valid". ''' if data_format not in {'channels_first', 'channels_last'}: raise Exception('Unknown data_format ' + str(data_format)) if data_format == 'channels_last': # TF uses the last dimension as channel dimension, # instead of the 2nd one. # TH input shape: (samples, input_depth, rows, cols) # TF input shape: (samples, rows, cols, input_depth) # TH kernel shape: (depth, input_depth, rows, cols) # TF kernel shape: (rows, cols, input_depth, depth) x = x.dimshuffle((0, 3, 1, 2)) kernel = kernel.dimshuffle((3, 2, 0, 1)) if image_shape: image_shape = (image_shape[0], image_shape[3], image_shape[1], image_shape[2]) if filter_shape: filter_shape = (filter_shape[3], filter_shape[2], filter_shape[0], filter_shape[1]) if padding == 'same': th_padding = 'half' np_kernel = kernel.eval() elif padding == 'valid': th_padding = 'valid' else: raise Exception('Border mode not supported: ' + str(padding)) # Theano might not accept long type def int_or_none(value): try: return int(value) except TypeError: return None if image_shape is not None: image_shape = tuple(int_or_none(v) for v in image_shape) if filter_shape is not None: filter_shape = tuple(int_or_none(v) for v in filter_shape) conv_out = T.nnet.conv2d(x, kernel, border_mode=th_padding, subsample=strides, input_shape=image_shape, filter_shape=filter_shape) if padding == 'same': if np_kernel.shape[2] % 2 == 0: end = (x.shape[2] + strides[0] - 1) // strides[0] conv_out = conv_out[:, :, :end, :] if np_kernel.shape[3] % 2 == 0: end = (x.shape[3] + strides[1] - 1) // strides[1] conv_out = conv_out[:, :, :, :end] if data_format == 'channels_last': conv_out = conv_out.dimshuffle((0, 2, 3, 1)) return conv_out def extract_image_patches(X, ksizes, strides, padding='valid', data_format='channels_first'): ''' Extract the patches from an image Parameters ---------- X : The input image ksizes : 2-d tuple with the kernel size strides : 2-d tuple with the strides size padding : 'same' or 'valid' data_format : 'channels_last' or 'channels_first' Returns ------- The (k_w,k_h) patches extracted TF ==> (batch_size,w,h,k_w,k_h,c) TH ==> (batch_size,w,h,c,k_w,k_h) ''' patch_size = ksizes[1] if padding == 'same': padding = 'ignore_borders' if data_format == 'channels_last': X = KTH.permute_dimensions(X, [0, 3, 1, 2]) # Thanks to https://github.com/awentzonline for the help! 
batch, c, w, h = KTH.shape(X) xs = KTH.shape(X) num_rows = 1 + (xs[-2] - patch_size) // strides[1] num_cols = 1 + (xs[-1] - patch_size) // strides[1] num_channels = xs[-3] patches = images2neibs(X, ksizes, strides, padding) # Theano is sorting by channel new_shape = (batch, num_channels, num_rows * num_cols, patch_size, patch_size) patches = KTH.reshape(patches, new_shape) patches = KTH.permute_dimensions(patches, (0, 2, 1, 3, 4)) # arrange in a 2d-grid (rows, cols, channels, px, py) new_shape = (batch, num_rows, num_cols, num_channels, patch_size, patch_size) patches = KTH.reshape(patches, new_shape) if data_format == 'channels_last': patches = KTH.permute_dimensions(patches, [0, 1, 2, 4, 5, 3]) return patches def depth_to_space(input, scale, data_format=None): """Uses phase shift algorithm to convert channels/depth for spatial resolution """ if data_format is None: data_format = image_data_format() data_format = data_format.lower() input = _preprocess_conv2d_input(input, data_format) b, k, row, col = input.shape out_channels = k // (scale ** 2) x = T.reshape(input, (b, scale, scale, out_channels, row, col)) x = T.transpose(x, (0, 3, 4, 1, 5, 2)) out = T.reshape(x, (b, out_channels, row * scale, col * scale)) out = _postprocess_conv2d_output(out, input, None, None, None, data_format) return out def moments(x, axes, shift=None, keep_dims=False): ''' Calculates and returns the mean and variance of the input ''' mean_batch = KTH.mean(x, axis=axes, keepdims=keep_dims) var_batch = KTH.var(x, axis=axes, keepdims=keep_dims) return mean_batch, var_batch
keras-contrib/keras_contrib/backend/theano_backend.py/0
{ "file_path": "keras-contrib/keras_contrib/backend/theano_backend.py", "repo_id": "keras-contrib", "token_count": 2435 }
15
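`depth_to_space` above relies on the phase-shift trick: split the channel axis into a `(scale, scale, out_channels)` block, interleave the two scale axes with the spatial axes, then flatten back. The same sequence of reshapes and transposes is easier to follow in plain NumPy; this is a hedged re-implementation for channels-first input only.

```python
import numpy as np


def depth_to_space_np(x, scale):
    """Phase-shift upsampling for a (batch, channels, rows, cols) array."""
    b, k, rows, cols = x.shape
    out_channels = k // (scale ** 2)
    x = x.reshape(b, scale, scale, out_channels, rows, cols)
    x = x.transpose(0, 3, 4, 1, 5, 2)
    return x.reshape(b, out_channels, rows * scale, cols * scale)


x = np.arange(16).reshape(1, 4, 2, 2)
print(depth_to_space_np(x, 2).shape)  # (1, 1, 4, 4)
```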
from keras.layers import Layer, InputSpec from keras import initializers, regularizers, constraints import keras.backend as K from keras_contrib.utils.test_utils import to_tuple class PELU(Layer): """Parametric Exponential Linear Unit. It follows: `f(x) = alphas * (exp(x / betas) - 1) for x < 0`, `f(x) = (alphas / betas) * x for x >= 0`, where `alphas` & `betas` are learned arrays with the same shape as x. # Input shape Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. # Output shape Same shape as the input. # Arguments alphas_initializer: initialization function for the alpha variable weights. betas_initializer: initialization function for the beta variable weights. weights: initial weights, as a list of a single Numpy array. shared_axes: the axes along which to share learnable parameters for the activation function. For example, if the incoming feature maps are from a 2D convolution with output shape `(batch, height, width, channels)`, and you wish to share parameters across space so that each filter only has one set of parameters, set `shared_axes=[1, 2]`. # References - [Parametric exponential linear unit for deep convolutional neural networks]( https://arxiv.org/abs/1605.09332v3) """ def __init__(self, alpha_initializer='ones', alpha_regularizer=None, alpha_constraint=None, beta_initializer='ones', beta_regularizer=None, beta_constraint=None, shared_axes=None, **kwargs): super(PELU, self).__init__(**kwargs) self.supports_masking = True self.alpha_initializer = initializers.get(alpha_initializer) self.alpha_regularizer = regularizers.get(alpha_regularizer) self.alpha_constraint = constraints.get(alpha_constraint) self.beta_initializer = initializers.get(beta_initializer) self.beta_regularizer = regularizers.get(beta_regularizer) self.beta_constraint = constraints.get(beta_constraint) if shared_axes is None: self.shared_axes = None elif not isinstance(shared_axes, (list, tuple)): self.shared_axes = [shared_axes] else: self.shared_axes = list(shared_axes) def build(self, input_shape): input_shape = to_tuple(input_shape) param_shape = list(input_shape[1:]) self.param_broadcast = [False] * len(param_shape) if self.shared_axes is not None: for i in self.shared_axes: param_shape[i - 1] = 1 self.param_broadcast[i - 1] = True param_shape = tuple(param_shape) # Initialised as ones to emulate the default ELU self.alpha = self.add_weight(shape=param_shape, name='alpha', initializer=self.alpha_initializer, regularizer=self.alpha_regularizer, constraint=self.alpha_constraint) self.beta = self.add_weight(shape=param_shape, name='beta', initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint) # Set input spec axes = {} if self.shared_axes: for i in range(1, len(input_shape)): if i not in self.shared_axes: axes[i] = input_shape[i] self.input_spec = InputSpec(ndim=len(input_shape), axes=axes) self.built = True def call(self, x, mask=None): if K.backend() == 'theano': pos = K.relu(x) * (K.pattern_broadcast(self.alpha, self.param_broadcast) / K.pattern_broadcast(self.beta, self.param_broadcast)) neg = (K.pattern_broadcast(self.alpha, self.param_broadcast) * (K.exp((-K.relu(-x)) / K.pattern_broadcast(self.beta, self.param_broadcast)) - 1)) else: pos = K.relu(x) * self.alpha / self.beta neg = self.alpha * (K.exp((-K.relu(-x)) / self.beta) - 1) return neg + pos def get_config(self): config = { 'alpha_initializer': 
initializers.serialize(self.alpha_initializer), 'alpha_regularizer': regularizers.serialize(self.alpha_regularizer), 'alpha_constraint': constraints.serialize(self.alpha_constraint), 'beta_initializer': initializers.serialize(self.beta_initializer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'beta_constraint': constraints.serialize(self.beta_constraint), 'shared_axes': self.shared_axes } base_config = super(PELU, self).get_config() return dict(list(base_config.items()) + list(config.items())) def compute_output_shape(self, input_shape): return input_shape
keras-contrib/keras_contrib/layers/advanced_activations/pelu.py/0
{ "file_path": "keras-contrib/keras_contrib/layers/advanced_activations/pelu.py", "repo_id": "keras-contrib", "token_count": 2493 }
16
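The piecewise PELU definition in the docstring above is easy to sanity-check numerically. Below is a hedged NumPy sketch of the forward pass with scalar parameters; with `alpha = beta = 1` it reduces to a standard ELU, matching the layer's default initialization.

```python
import numpy as np


def pelu_np(x, alpha=1.0, beta=1.0):
    # f(x) = (alpha / beta) * x            for x >= 0
    # f(x) = alpha * (exp(x / beta) - 1)   for x <  0
    pos = np.maximum(x, 0.0) * alpha / beta
    neg = alpha * (np.exp(-np.maximum(-x, 0.0) / beta) - 1.0)
    return pos + neg


print(pelu_np(np.array([-2.0, 0.0, 3.0])))            # ELU-like behaviour
print(pelu_np(np.array([-2.0, 0.0, 3.0]), 2.0, 0.5))  # effect of learned parameters
```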
from keras import backend as K def jaccard_distance(y_true, y_pred, smooth=100): """Jaccard distance for semantic segmentation. Also known as the intersection-over-union loss. This loss is useful when you have unbalanced numbers of pixels within an image because it gives all classes equal weight. However, it is not the de facto standard for image segmentation. For example, assume you are trying to predict if each pixel is cat, dog, or background. You have 80% background pixels, 10% dog, and 10% cat. If the model predicts 100% background should it be 80% right (as with categorical cross entropy) or 30% (with this loss)? The loss has been modified to have a smooth gradient as it converges on zero. This has been shifted so it converges on 0 and is smoothed to avoid exploding or disappearing gradient. Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|)) # Arguments y_true: The ground truth tensor. y_pred: The predicted tensor. smooth: Smoothing factor. Default is 100. # Returns The Jaccard distance between the two tensors. # References - [What is a good evaluation measure for semantic segmentation?]( http://www.bmva.org/bmvc/2013/Papers/paper0032/paper0032.pdf) """ intersection = K.sum(K.abs(y_true * y_pred), axis=-1) sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1) jac = (intersection + smooth) / (sum_ - intersection + smooth) return (1 - jac) * smooth
keras-contrib/keras_contrib/losses/jaccard.py/0
{ "file_path": "keras-contrib/keras_contrib/losses/jaccard.py", "repo_id": "keras-contrib", "token_count": 569 }
17
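The smoothing term in `jaccard_distance` above keeps the gradient finite and makes the loss reach zero only for a perfect match. A small worked example in NumPy that mirrors the backend ops line by line:

```python
import numpy as np

y_true = np.array([0.0, 1.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.9, 0.4, 0.0])
smooth = 100

intersection = np.sum(np.abs(y_true * y_pred))  # 1.3
sum_ = np.sum(np.abs(y_true) + np.abs(y_pred))  # 3.4
jac = (intersection + smooth) / (sum_ - intersection + smooth)
print((1 - jac) * smooth)  # ~0.78; exactly 0 when y_pred equals the binary y_true
```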
import keras.backend as K def conv_output_length(input_length, filter_size, padding, stride, dilation=1): """Determines output length of a convolution given input length. Copy of the function of keras-team/keras because it's not in the public API So we can't use the function in keras-team/keras to test tf.keras # Arguments input_length: integer. filter_size: integer. padding: one of `"same"`, `"valid"`, `"full"`. stride: integer. dilation: dilation rate, integer. # Returns The output length (integer). """ if input_length is None: return None assert padding in {'same', 'valid', 'full', 'causal'} dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1) if padding == 'same': output_length = input_length elif padding == 'valid': output_length = input_length - dilated_filter_size + 1 elif padding == 'causal': output_length = input_length elif padding == 'full': output_length = input_length + dilated_filter_size - 1 return (output_length + stride - 1) // stride def normalize_data_format(value): """Checks that the value correspond to a valid data format. Copy of the function in keras-team/keras because it's not public API. # Arguments value: String or None. `'channels_first'` or `'channels_last'`. # Returns A string, either `'channels_first'` or `'channels_last'` # Example ```python >>> from keras import backend as K >>> K.normalize_data_format(None) 'channels_first' >>> K.normalize_data_format('channels_last') 'channels_last' ``` # Raises ValueError: if `value` or the global `data_format` invalid. """ if value is None: value = K.image_data_format() data_format = value.lower() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('The `data_format` argument must be one of ' '"channels_first", "channels_last". Received: ' + str(value)) return data_format
keras-contrib/keras_contrib/utils/conv_utils.py/0
{ "file_path": "keras-contrib/keras_contrib/utils/conv_utils.py", "repo_id": "keras-contrib", "token_count": 885 }
18
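A few worked values for `conv_output_length`, assuming keras-contrib is installed so the function can be imported from the path shown in the entry above; the numbers follow directly from the formula in the code.

```python
from keras_contrib.utils.conv_utils import conv_output_length

print(conv_output_length(28, 3, "valid", 1))              # 26: 28 - 3 + 1
print(conv_output_length(28, 3, "same", 2))               # 14: ceil(28 / 2)
print(conv_output_length(28, 3, "valid", 1, dilation=2))  # 24: effective filter size is 5
```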
import pytest from keras_contrib.utils.test_utils import layer_test from keras_contrib.layers import PELU @pytest.mark.parametrize('kwargs', [{}, {'shared_axes': 1}]) def test_pelu(kwargs): layer_test(PELU, kwargs=kwargs, input_shape=(2, 3, 4)) if __name__ == '__main__': pytest.main([__file__])
keras-contrib/tests/keras_contrib/layers/advanced_activations/test_pelu.py/0
{ "file_path": "keras-contrib/tests/keras_contrib/layers/advanced_activations/test_pelu.py", "repo_id": "keras-contrib", "token_count": 143 }
19
from __future__ import print_function from keras_contrib.tests import optimizers from keras_contrib.optimizers import Padam def test_padam(): optimizers._test_optimizer(Padam()) optimizers._test_optimizer(Padam(decay=1e-3))
keras-contrib/tests/keras_contrib/optimizers/padam_test.py/0
{ "file_path": "keras-contrib/tests/keras_contrib/optimizers/padam_test.py", "repo_id": "keras-contrib", "token_count": 80 }
20
"""Benchmark merge layers. To run benchmarks, see the following command for an example, please change the flag to your custom value: ``` python3 -m benchmarks.layer_benchmark.merge_benchmark \ --benchmark_name=benchmark_add \ --num_samples=2048 \ --batch_size=256 \ --jit_compile=True ``` """ from absl import app from absl import flags from benchmarks.layer_benchmark.base_benchmark import LayerBenchmark FLAGS = flags.FLAGS def benchmark_add( num_samples, batch_size, jit_compile=True, ): layer_name = "Add" init_args = {} benchmark = LayerBenchmark( layer_name, init_args, input_shape=[[256, 256], [256, 256]], flat_call_inputs=False, jit_compile=jit_compile, ) benchmark.benchmark_predict( num_samples=num_samples, batch_size=batch_size, ) benchmark.benchmark_train( num_samples=num_samples, batch_size=batch_size, ) def benchmark_average( num_samples, batch_size, jit_compile=True, ): layer_name = "Average" init_args = {} benchmark = LayerBenchmark( layer_name, init_args, input_shape=[[256, 256], [256, 256]], flat_call_inputs=False, jit_compile=jit_compile, ) benchmark.benchmark_predict( num_samples=num_samples, batch_size=batch_size, ) benchmark.benchmark_train( num_samples=num_samples, batch_size=batch_size, ) def benchmark_concatenate( num_samples, batch_size, jit_compile=True, ): layer_name = "Concatenate" init_args = {} benchmark = LayerBenchmark( layer_name, init_args, input_shape=[[256, 256], [256, 256]], flat_call_inputs=False, jit_compile=jit_compile, ) benchmark.benchmark_predict( num_samples=num_samples, batch_size=batch_size, ) benchmark.benchmark_train( num_samples=num_samples, batch_size=batch_size, ) def benchmark_dot( num_samples, batch_size, jit_compile=True, ): layer_name = "Dot" init_args = {"axes": [2, 1]} benchmark = LayerBenchmark( layer_name, init_args, input_shape=[[256, 32], [32, 64]], flat_call_inputs=False, jit_compile=jit_compile, ) benchmark.benchmark_predict( num_samples=num_samples, batch_size=batch_size, ) benchmark.benchmark_train( num_samples=num_samples, batch_size=batch_size, ) def benchmark_maximum( num_samples, batch_size, jit_compile=True, ): layer_name = "Maximum" init_args = {} benchmark = LayerBenchmark( layer_name, init_args, input_shape=[[256, 256], [256, 256]], flat_call_inputs=False, jit_compile=jit_compile, ) benchmark.benchmark_predict( num_samples=num_samples, batch_size=batch_size, ) benchmark.benchmark_train( num_samples=num_samples, batch_size=batch_size, ) def benchmark_minimum( num_samples, batch_size, jit_compile=True, ): layer_name = "Minimum" init_args = {} benchmark = LayerBenchmark( layer_name, init_args, input_shape=[[256, 256], [256, 256]], flat_call_inputs=False, jit_compile=jit_compile, ) benchmark.benchmark_predict( num_samples=num_samples, batch_size=batch_size, ) benchmark.benchmark_train( num_samples=num_samples, batch_size=batch_size, ) def benchmark_multiply( num_samples, batch_size, jit_compile=True, ): layer_name = "Multiply" init_args = {} benchmark = LayerBenchmark( layer_name, init_args, input_shape=[[256, 64], [256, 64]], flat_call_inputs=False, jit_compile=jit_compile, ) benchmark.benchmark_predict( num_samples=num_samples, batch_size=batch_size, ) benchmark.benchmark_train( num_samples=num_samples, batch_size=batch_size, ) def benchmark_subtract( num_samples, batch_size, jit_compile=True, ): layer_name = "Subtract" init_args = {} benchmark = LayerBenchmark( layer_name, init_args, input_shape=[[256, 256], [256, 256]], flat_call_inputs=False, jit_compile=jit_compile, ) benchmark.benchmark_predict( num_samples=num_samples, 
batch_size=batch_size, ) benchmark.benchmark_train( num_samples=num_samples, batch_size=batch_size, ) BENCHMARK_NAMES = { "benchmark_add": benchmark_add, "benchmark_average": benchmark_average, "benchmark_concatenate": benchmark_concatenate, "benchmark_dot": benchmark_dot, "benchmark_maximum": benchmark_maximum, "benchmark_minimum": benchmark_minimum, "benchmark_multiply": benchmark_multiply, "benchmark_subtract": benchmark_subtract, } def main(_): benchmark_name = FLAGS.benchmark_name num_samples = FLAGS.num_samples batch_size = FLAGS.batch_size jit_compile = FLAGS.jit_compile if benchmark_name is None: for name, benchmark_fn in BENCHMARK_NAMES.items(): benchmark_fn(num_samples, batch_size, jit_compile) return if benchmark_name not in BENCHMARK_NAMES: raise ValueError( f"Invalid benchmark name: {benchmark_name}, `benchmark_name` must " f"be one of {BENCHMARK_NAMES.keys()}" ) benchmark_fn = BENCHMARK_NAMES[benchmark_name] benchmark_fn(num_samples, batch_size, jit_compile) if __name__ == "__main__": app.run(main)
keras-core/benchmarks/layer_benchmark/merge_benchmark.py/0
{ "file_path": "keras-core/benchmarks/layer_benchmark/merge_benchmark.py", "repo_id": "keras-core", "token_count": 2682 }
21
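The benchmark functions above can also be invoked directly, bypassing the absl flag parsing. This is a hypothetical call that assumes the working directory is the keras-core repository root so the `benchmarks` package is importable, as in the module docstring's CLI example.

```python
from benchmarks.layer_benchmark.merge_benchmark import benchmark_add

# Same parameters as the CLI example in the module docstring.
benchmark_add(num_samples=2048, batch_size=256, jit_compile=True)
```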
try: # When using torch and tensorflow, torch needs to be imported first, # otherwise it will segfault upon import. This should force the torch # import to happen first for all tests. import torch # noqa: F401 except ImportError: pass import pytest from keras_core.backend import backend def pytest_configure(config): config.addinivalue_line( "markers", "requires_trainable_backend: mark test for trainable backend only", ) def pytest_collection_modifyitems(config, items): requires_trainable_backend = pytest.mark.skipif( backend() == "numpy", reason="Trainer not implemented for NumPy backend.", ) for item in items: if "requires_trainable_backend" in item.keywords: item.add_marker(requires_trainable_backend)
keras-core/conftest.py/0
{ "file_path": "keras-core/conftest.py", "repo_id": "keras-core", "token_count": 296 }
22
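A minimal sketch (not from the source) of how the `requires_trainable_backend` marker registered in this conftest would be applied in a test module; the test body is illustrative.

```python
import numpy as np
import pytest

import keras_core


@pytest.mark.requires_trainable_backend
def test_tiny_fit():
    # Skipped automatically on the numpy backend by pytest_collection_modifyitems.
    model = keras_core.Sequential([keras_core.layers.Dense(1)])
    model.compile(optimizer="sgd", loss="mse")
    model.fit(np.zeros((4, 3)), np.zeros((4, 1)), verbose=0)
```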
""" Title: WGAN-GP overriding `Model.train_step` Author: [A_K_Nain](https://twitter.com/A_K_Nain) Date created: 2020/05/9 Last modified: 2020/05/9 Description: Implementation of Wasserstein GAN with Gradient Penalty. Accelerator: GPU """ """ ## Wasserstein GAN (WGAN) with Gradient Penalty (GP) The original [Wasserstein GAN](https://arxiv.org/abs/1701.07875) leverages the Wasserstein distance to produce a value function that has better theoretical properties than the value function used in the original GAN paper. WGAN requires that the discriminator (aka the critic) lie within the space of 1-Lipschitz functions. The authors proposed the idea of weight clipping to achieve this constraint. Though weight clipping works, it can be a problematic way to enforce 1-Lipschitz constraint and can cause undesirable behavior, e.g. a very deep WGAN discriminator (critic) often fails to converge. The [WGAN-GP](https://arxiv.org/abs/1704.00028) method proposes an alternative to weight clipping to ensure smooth training. Instead of clipping the weights, the authors proposed a "gradient penalty" by adding a loss term that keeps the L2 norm of the discriminator gradients close to 1. """ """ ## Setup """ import tensorflow as tf import keras_core as keras from keras_core import layers """ ## Prepare the Fashion-MNIST data To demonstrate how to train WGAN-GP, we will be using the [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset. Each sample in this dataset is a 28x28 grayscale image associated with a label from 10 classes (e.g. trouser, pullover, sneaker, etc.) """ IMG_SHAPE = (28, 28, 1) BATCH_SIZE = 512 # Size of the noise vector noise_dim = 128 fashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), ( test_images, test_labels, ) = fashion_mnist.load_data() print(f"Number of examples: {len(train_images)}") print(f"Shape of the images in the dataset: {train_images.shape[1:]}") # Reshape each sample to (28, 28, 1) and normalize the pixel values in the [-1, 1] range train_images = train_images.reshape(train_images.shape[0], *IMG_SHAPE).astype( "float32" ) train_images = (train_images - 127.5) / 127.5 """ ## Create the discriminator (the critic in the original WGAN) The samples in the dataset have a (28, 28, 1) shape. Because we will be using strided convolutions, this can result in a shape with odd dimensions. For example, `(28, 28) -> Conv_s2 -> (14, 14) -> Conv_s2 -> (7, 7) -> Conv_s2 ->(3, 3)`. While peforming upsampling in the generator part of the network, we won't get the same input shape as the original images if we aren't careful. To avoid this, we will do something much simpler: - In the discriminator: "zero pad" the input to change the shape to `(32, 32, 1)` for each sample; and - Ihe generator: crop the final output to match the shape with input shape. """ def conv_block( x, filters, activation, kernel_size=(3, 3), strides=(1, 1), padding="same", use_bias=True, use_bn=False, use_dropout=False, drop_value=0.5, ): x = layers.Conv2D( filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias, )(x) if use_bn: x = layers.BatchNormalization()(x) x = activation(x) if use_dropout: x = layers.Dropout(drop_value)(x) return x def get_discriminator_model(): img_input = layers.Input(shape=IMG_SHAPE) # Zero pad the input to make the input images size to (32, 32, 1). 
x = layers.ZeroPadding2D((2, 2))(img_input) x = conv_block( x, 64, kernel_size=(5, 5), strides=(2, 2), use_bn=False, use_bias=True, activation=layers.LeakyReLU(0.2), use_dropout=False, drop_value=0.3, ) x = conv_block( x, 128, kernel_size=(5, 5), strides=(2, 2), use_bn=False, activation=layers.LeakyReLU(0.2), use_bias=True, use_dropout=True, drop_value=0.3, ) x = conv_block( x, 256, kernel_size=(5, 5), strides=(2, 2), use_bn=False, activation=layers.LeakyReLU(0.2), use_bias=True, use_dropout=True, drop_value=0.3, ) x = conv_block( x, 512, kernel_size=(5, 5), strides=(2, 2), use_bn=False, activation=layers.LeakyReLU(0.2), use_bias=True, use_dropout=False, drop_value=0.3, ) x = layers.Flatten()(x) x = layers.Dropout(0.2)(x) x = layers.Dense(1)(x) d_model = keras.models.Model(img_input, x, name="discriminator") return d_model d_model = get_discriminator_model() d_model.summary() """ ## Create the generator """ def upsample_block( x, filters, activation, kernel_size=(3, 3), strides=(1, 1), up_size=(2, 2), padding="same", use_bn=False, use_bias=True, use_dropout=False, drop_value=0.3, ): x = layers.UpSampling2D(up_size)(x) x = layers.Conv2D( filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias, )(x) if use_bn: x = layers.BatchNormalization()(x) if activation: x = activation(x) if use_dropout: x = layers.Dropout(drop_value)(x) return x def get_generator_model(): noise = layers.Input(shape=(noise_dim,)) x = layers.Dense(4 * 4 * 256, use_bias=False)(noise) x = layers.BatchNormalization()(x) x = layers.LeakyReLU(0.2)(x) x = layers.Reshape((4, 4, 256))(x) x = upsample_block( x, 128, layers.LeakyReLU(0.2), strides=(1, 1), use_bias=False, use_bn=True, padding="same", use_dropout=False, ) x = upsample_block( x, 64, layers.LeakyReLU(0.2), strides=(1, 1), use_bias=False, use_bn=True, padding="same", use_dropout=False, ) x = upsample_block( x, 1, layers.Activation("tanh"), strides=(1, 1), use_bias=False, use_bn=True, ) # At this point, we have an output which has the same shape as the input, (32, 32, 1). # We will use a Cropping2D layer to make it (28, 28, 1). x = layers.Cropping2D((2, 2))(x) g_model = keras.models.Model(noise, x, name="generator") return g_model g_model = get_generator_model() g_model.summary() """ ## Create the WGAN-GP model Now that we have defined our generator and discriminator, it's time to implement the WGAN-GP model. We will also override the `train_step` for training. """ class WGAN(keras.Model): def __init__( self, discriminator, generator, latent_dim, discriminator_extra_steps=3, gp_weight=10.0, ): super().__init__() self.discriminator = discriminator self.generator = generator self.latent_dim = latent_dim self.d_steps = discriminator_extra_steps self.gp_weight = gp_weight def compile(self, d_optimizer, g_optimizer, d_loss_fn, g_loss_fn): super().compile() self.d_optimizer = d_optimizer self.g_optimizer = g_optimizer self.d_loss_fn = d_loss_fn self.g_loss_fn = g_loss_fn def gradient_penalty(self, batch_size, real_images, fake_images): """Calculates the gradient penalty. This loss is calculated on an interpolated image and added to the discriminator loss. """ # Get the interpolated image alpha = tf.random.normal([batch_size, 1, 1, 1], 0.0, 1.0) diff = fake_images - real_images interpolated = real_images + alpha * diff with tf.GradientTape() as gp_tape: gp_tape.watch(interpolated) # 1. Get the discriminator output for this interpolated image. pred = self.discriminator(interpolated, training=True) # 2. Calculate the gradients w.r.t to this interpolated image. 
grads = gp_tape.gradient(pred, [interpolated])[0] # 3. Calculate the norm of the gradients. norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3])) gp = tf.reduce_mean((norm - 1.0) ** 2) return gp def train_step(self, real_images): if isinstance(real_images, tuple): real_images = real_images[0] # Get the batch size batch_size = tf.shape(real_images)[0] # For each batch, we are going to perform the # following steps as laid out in the original paper: # 1. Train the generator and get the generator loss # 2. Train the discriminator and get the discriminator loss # 3. Calculate the gradient penalty # 4. Multiply this gradient penalty with a constant weight factor # 5. Add the gradient penalty to the discriminator loss # 6. Return the generator and discriminator losses as a loss dictionary # Train the discriminator first. The original paper recommends training # the discriminator for `x` more steps (typically 5) as compared to # one step of the generator. Here we will train it for 3 extra steps # as compared to 5 to reduce the training time. for i in range(self.d_steps): # Get the latent vector random_latent_vectors = tf.random.normal( shape=(batch_size, self.latent_dim) ) with tf.GradientTape() as tape: # Generate fake images from the latent vector fake_images = self.generator( random_latent_vectors, training=True ) # Get the logits for the fake images fake_logits = self.discriminator(fake_images, training=True) # Get the logits for the real images real_logits = self.discriminator(real_images, training=True) # Calculate the discriminator loss using the fake and real image logits d_cost = self.d_loss_fn( real_img=real_logits, fake_img=fake_logits ) # Calculate the gradient penalty gp = self.gradient_penalty(batch_size, real_images, fake_images) # Add the gradient penalty to the original discriminator loss d_loss = d_cost + gp * self.gp_weight # Get the gradients w.r.t the discriminator loss d_gradient = tape.gradient( d_loss, self.discriminator.trainable_variables ) # Update the weights of the discriminator using the discriminator optimizer self.d_optimizer.apply_gradients( zip(d_gradient, self.discriminator.trainable_variables) ) # Train the generator # Get the latent vector random_latent_vectors = tf.random.normal( shape=(batch_size, self.latent_dim) ) with tf.GradientTape() as tape: # Generate fake images using the generator generated_images = self.generator( random_latent_vectors, training=True ) # Get the discriminator logits for fake images gen_img_logits = self.discriminator(generated_images, training=True) # Calculate the generator loss g_loss = self.g_loss_fn(gen_img_logits) # Get the gradients w.r.t the generator loss gen_gradient = tape.gradient(g_loss, self.generator.trainable_variables) # Update the weights of the generator using the generator optimizer self.g_optimizer.apply_gradients( zip(gen_gradient, self.generator.trainable_variables) ) return {"d_loss": d_loss, "g_loss": g_loss} """ ## Create a Keras callback that periodically saves generated images """ class GANMonitor(keras.callbacks.Callback): def __init__(self, num_img=6, latent_dim=128): self.num_img = num_img self.latent_dim = latent_dim def on_epoch_end(self, epoch, logs=None): random_latent_vectors = tf.random.normal( shape=(self.num_img, self.latent_dim) ) generated_images = self.model.generator(random_latent_vectors) generated_images = (generated_images * 127.5) + 127.5 for i in range(self.num_img): img = generated_images[i].numpy() img = keras.utils.array_to_img(img) 
img.save("generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch)) """ ## Train the end-to-end model """ # Instantiate the optimizer for both networks # (learning_rate=0.0002, beta_1=0.5 are recommended) generator_optimizer = keras.optimizers.Adam( learning_rate=0.0002, beta_1=0.5, beta_2=0.9 ) discriminator_optimizer = keras.optimizers.Adam( learning_rate=0.0002, beta_1=0.5, beta_2=0.9 ) # Define the loss functions for the discriminator, # which should be (fake_loss - real_loss). # We will add the gradient penalty later to this loss function. def discriminator_loss(real_img, fake_img): real_loss = tf.reduce_mean(real_img) fake_loss = tf.reduce_mean(fake_img) return fake_loss - real_loss # Define the loss functions for the generator. def generator_loss(fake_img): return -tf.reduce_mean(fake_img) # Set the number of epochs for trainining. epochs = 20 # Instantiate the customer `GANMonitor` Keras callback. cbk = GANMonitor(num_img=3, latent_dim=noise_dim) # Get the wgan model wgan = WGAN( discriminator=d_model, generator=g_model, latent_dim=noise_dim, discriminator_extra_steps=3, ) # Compile the wgan model wgan.compile( d_optimizer=discriminator_optimizer, g_optimizer=generator_optimizer, g_loss_fn=generator_loss, d_loss_fn=discriminator_loss, ) # Start training wgan.fit(train_images, batch_size=BATCH_SIZE, epochs=epochs, callbacks=[cbk]) """ Display the last generated images: """ from IPython.display import Image, display display(Image("generated_img_0_19.png")) display(Image("generated_img_1_19.png")) display(Image("generated_img_2_19.png")) """ Example available on HuggingFace. | Trained Model | Demo | | :--: | :--: | | [![Generic badge](https://img.shields.io/badge/🤗%20Model-WGAN%20GP-black.svg)](https://huggingface.co/keras-io/WGAN-GP) | [![Generic badge](https://img.shields.io/badge/🤗%20Spaces-WGAN%20GP-black.svg)](https://huggingface.co/spaces/keras-io/WGAN-GP) | """
keras-core/examples/keras_io/tensorflow/generative/wgan_gp.py/0
{ "file_path": "keras-core/examples/keras_io/tensorflow/generative/wgan_gp.py", "repo_id": "keras-core", "token_count": 6216 }
23
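A small numeric check (not from the source) of the Wasserstein critic and generator losses used in the example above, with hand-picked logits.

```python
import tensorflow as tf

real_logits = tf.constant([1.5, 2.0, 0.5])
fake_logits = tf.constant([-1.0, 0.0, -0.5])

# Critic wants real scores high and fake scores low; generator wants fake scores high.
d_loss = tf.reduce_mean(fake_logits) - tf.reduce_mean(real_logits)
g_loss = -tf.reduce_mean(fake_logits)
print(float(d_loss), float(g_loss))  # approximately -1.83 and 0.5
```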
import keras_core from keras_core.utils import plot_model def plot_sequential_model(): model = keras_core.Sequential( [ keras_core.Input((3,)), keras_core.layers.Dense(4, activation="relu"), keras_core.layers.Dense(1, activation="sigmoid"), ] ) plot_model(model, "sequential.png") plot_model(model, "sequential-show_shapes.png", show_shapes=True) plot_model( model, "sequential-show_shapes-show_dtype.png", show_shapes=True, show_dtype=True, ) plot_model( model, "sequential-show_shapes-show_dtype-show_layer_names.png", show_shapes=True, show_dtype=True, show_layer_names=True, ) plot_model( model, "sequential-show_shapes-show_dtype-show_layer_names-show_layer_activations.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, ) plot_model( model, "sequential-show_shapes-show_dtype-show_layer_names-show_layer_activations-show_trainable.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, show_trainable=True, ) plot_model( model, "sequential-show_shapes-show_dtype-show_layer_names-show_layer_activations-show_trainable-LR.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, show_trainable=True, rankdir="LR", ) plot_model( model, "sequential-show_layer_activations-show_trainable.png", show_layer_activations=True, show_trainable=True, ) def plot_functional_model(): inputs = keras_core.Input((3,)) x = keras_core.layers.Dense(4, activation="relu", trainable=False)(inputs) residual = x x = keras_core.layers.Dense(4, activation="relu")(x) x = keras_core.layers.Dense(4, activation="relu")(x) x = keras_core.layers.Dense(4, activation="relu")(x) x += residual residual = x x = keras_core.layers.Dense(4, activation="relu")(x) x = keras_core.layers.Dense(4, activation="relu")(x) x = keras_core.layers.Dense(4, activation="relu")(x) x += residual x = keras_core.layers.Dropout(0.5)(x) outputs = keras_core.layers.Dense(1, activation="sigmoid")(x) model = keras_core.Model(inputs, outputs) plot_model(model, "functional.png") plot_model(model, "functional-show_shapes.png", show_shapes=True) plot_model( model, "functional-show_shapes-show_dtype.png", show_shapes=True, show_dtype=True, ) plot_model( model, "functional-show_shapes-show_dtype-show_layer_names.png", show_shapes=True, show_dtype=True, show_layer_names=True, ) plot_model( model, "functional-show_shapes-show_dtype-show_layer_names-show_layer_activations.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, ) plot_model( model, "functional-show_shapes-show_dtype-show_layer_names-show_layer_activations-show_trainable.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, show_trainable=True, ) plot_model( model, "functional-show_shapes-show_dtype-show_layer_names-show_layer_activations-show_trainable-LR.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, show_trainable=True, rankdir="LR", ) plot_model( model, "functional-show_layer_activations-show_trainable.png", show_layer_activations=True, show_trainable=True, ) plot_model( model, "functional-show_shapes-show_layer_activations-show_trainable.png", show_shapes=True, show_layer_activations=True, show_trainable=True, ) def plot_subclassed_model(): class MyModel(keras_core.Model): def __init__(self, **kwargs): super().__init__(**kwargs) self.dense_1 = keras_core.layers.Dense(3, activation="relu") 
self.dense_2 = keras_core.layers.Dense(1, activation="sigmoid") def call(self, x): return self.dense_2(self.dense_1(x)) model = MyModel() model.build((None, 3)) plot_model(model, "subclassed.png") plot_model(model, "subclassed-show_shapes.png", show_shapes=True) plot_model( model, "subclassed-show_shapes-show_dtype.png", show_shapes=True, show_dtype=True, ) plot_model( model, "subclassed-show_shapes-show_dtype-show_layer_names.png", show_shapes=True, show_dtype=True, show_layer_names=True, ) plot_model( model, "subclassed-show_shapes-show_dtype-show_layer_names-show_layer_activations.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, ) plot_model( model, "subclassed-show_shapes-show_dtype-show_layer_names-show_layer_activations-show_trainable.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, show_trainable=True, ) plot_model( model, "subclassed-show_shapes-show_dtype-show_layer_names-show_layer_activations-show_trainable-LR.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, show_trainable=True, rankdir="LR", ) plot_model( model, "subclassed-show_layer_activations-show_trainable.png", show_layer_activations=True, show_trainable=True, ) plot_model( model, "subclassed-show_shapes-show_layer_activations-show_trainable.png", show_shapes=True, show_layer_activations=True, show_trainable=True, ) def plot_nested_functional_model(): inputs = keras_core.Input((3,)) x = keras_core.layers.Dense(4, activation="relu")(inputs) x = keras_core.layers.Dense(4, activation="relu")(x) outputs = keras_core.layers.Dense(3, activation="relu")(x) inner_model = keras_core.Model(inputs, outputs) inputs = keras_core.Input((3,)) x = keras_core.layers.Dense(3, activation="relu", trainable=False)(inputs) residual = x x = inner_model(x) x += residual residual = x x = keras_core.layers.Dense(4, activation="relu")(x) x = keras_core.layers.Dense(4, activation="relu")(x) x = keras_core.layers.Dense(3, activation="relu")(x) x += residual x = keras_core.layers.Dropout(0.5)(x) outputs = keras_core.layers.Dense(1, activation="sigmoid")(x) model = keras_core.Model(inputs, outputs) plot_model(model, "nested-functional.png", expand_nested=True) plot_model( model, "nested-functional-show_shapes.png", show_shapes=True, expand_nested=True, ) plot_model( model, "nested-functional-show_shapes-show_dtype.png", show_shapes=True, show_dtype=True, expand_nested=True, ) plot_model( model, "nested-functional-show_shapes-show_dtype-show_layer_names.png", show_shapes=True, show_dtype=True, show_layer_names=True, expand_nested=True, ) plot_model( model, "nested-functional-show_shapes-show_dtype-show_layer_names-show_layer_activations.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, expand_nested=True, ) plot_model( model, "nested-functional-show_shapes-show_dtype-show_layer_names-show_layer_activations-show_trainable.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, show_trainable=True, expand_nested=True, ) plot_model( model, "nested-functional-show_shapes-show_dtype-show_layer_names-show_layer_activations-show_trainable-LR.png", # noqa: E501 show_shapes=True, show_dtype=True, show_layer_names=True, show_layer_activations=True, show_trainable=True, rankdir="LR", expand_nested=True, ) plot_model( model, "nested-functional-show_layer_activations-show_trainable.png", 
show_layer_activations=True, show_trainable=True, expand_nested=True, ) plot_model( model, "nested-functional-show_shapes-show_layer_activations-show_trainable.png", # noqa: E501 show_shapes=True, show_layer_activations=True, show_trainable=True, expand_nested=True, ) if __name__ == "__main__": plot_sequential_model() plot_functional_model() plot_subclassed_model() plot_nested_functional_model()
keras-core/integration_tests/model_visualization_test.py/0
{ "file_path": "keras-core/integration_tests/model_visualization_test.py", "repo_id": "keras-core", "token_count": 4565 }
24
import threading from keras_core import backend from keras_core.api_export import keras_core_export GLOBAL_STATE_TRACKER = threading.local() GLOBAL_SETTINGS_TRACKER = threading.local() def set_global_attribute(name, value): setattr(GLOBAL_STATE_TRACKER, name, value) def get_global_attribute(name, default=None, set_to_default=False): attr = getattr(GLOBAL_STATE_TRACKER, name, None) if attr is None and default is not None: attr = default if set_to_default: set_global_attribute(name, attr) return attr @keras_core_export( ["keras_core.utils.clear_session", "keras_core.backend.clear_session"] ) def clear_session(): """Resets all state generated by Keras. Keras manages a global state, which it uses to implement the Functional model-building API and to uniquify autogenerated layer names. If you are creating many models in a loop, this global state will consume an increasing amount of memory over time, and you may want to clear it. Calling `clear_session()` releases the global state: this helps avoid clutter from old models and layers, especially when memory is limited. Example 1: calling `clear_session()` when creating models in a loop ```python for _ in range(100): # Without `clear_session()`, each iteration of this loop will # slightly increase the size of the global state managed by Keras model = keras_core.Sequential([ keras_core.layers.Dense(10) for _ in range(10)]) for _ in range(100): # With `clear_session()` called at the beginning, # Keras starts with a blank state at each iteration # and memory consumption is constant over time. keras_core.backend.clear_session() model = keras_core.Sequential([ keras_core.layers.Dense(10) for _ in range(10)]) ``` Example 2: resetting the layer name generation counter >>> layers = [keras_core.layers.Dense(10) for _ in range(10)] >>> new_layer = keras_core.layers.Dense(10) >>> print(new_layer.name) dense_10 >>> keras_core.backend.clear_session() >>> new_layer = keras_core.layers.Dense(10) >>> print(new_layer.name) dense """ global GLOBAL_STATE_TRACKER global GLOBAL_SETTINGS_TRACKER GLOBAL_STATE_TRACKER = threading.local() GLOBAL_SETTINGS_TRACKER = threading.local() if backend.backend() == "tensorflow": from keras_core.utils.module_utils import tensorflow as tf tf.compat.v1.reset_default_graph() if tf.executing_eagerly(): # Clear pending nodes in eager executors, kernel caches and # step_containers. from tensorflow.python.eager import context context.context().clear_kernel_cache()
keras-core/keras_core/backend/common/global_state.py/0
{ "file_path": "keras-core/keras_core/backend/common/global_state.py", "repo_id": "keras-core", "token_count": 1033 }
25
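Illustrative use of the module-level helpers defined above (internal API); the attribute names are made up for the example.

```python
from keras_core.backend.common import global_state

global_state.set_global_attribute("my_flag", 123)        # "my_flag" is a hypothetical name
print(global_state.get_global_attribute("my_flag"))      # 123
print(global_state.get_global_attribute("missing", 0))   # 0, falls back to the default
```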
class JaxLayer: pass
keras-core/keras_core/backend/jax/layer.py/0
{ "file_path": "keras-core/keras_core/backend/jax/layer.py", "repo_id": "keras-core", "token_count": 11 }
26
import numpy as np import tree from keras_core import backend from keras_core import callbacks as callbacks_module from keras_core.backend.common import standardize_dtype from keras_core.backend.common.keras_tensor import KerasTensor from keras_core.backend.numpy.core import is_tensor from keras_core.trainers import trainer as base_trainer from keras_core.trainers.data_adapters import data_adapter_utils from keras_core.trainers.epoch_iterator import EpochIterator from keras_core.utils import traceback_utils class NumpyTrainer(base_trainer.Trainer): def __init__(self): super().__init__() self.test_function = None self.predict_function = None def test_step(self, data): ( x, y, sample_weight, ) = data_adapter_utils.unpack_x_y_sample_weight(data) if self._call_has_training_arg: y_pred = self(x, training=False) else: y_pred = self(x) loss = self.compute_loss( x=x, y=y, y_pred=y_pred, sample_weight=sample_weight ) self._loss_tracker.update_state(loss) return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight) def predict_step(self, data): x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data) if self._call_has_training_arg: y_pred = self(x, training=False) else: y_pred = self(x) return y_pred def make_test_function(self, force=False): if self.test_function is not None and not force: return self.test_function def one_test_step(data): data = data[0] return self.test_step(data) def multi_test_steps(data): for single_step_data in data: logs = one_test_step([single_step_data]) return logs if self.steps_per_execution > 1: test_step = multi_test_steps else: test_step = one_test_step self.test_function = test_step def make_predict_function(self, force=False): if self.predict_function is not None and not force: return self.predict_function def one_predict_step(data): data = data[0] return self.predict_step(data) def multi_predict_steps(data): outputs = one_predict_step(data[:1]) for single_step_data in data[1:]: step_outputs = one_predict_step([single_step_data]) outputs = tree.map_structure( lambda t1, t2: np.concatenate([t1, t2]), outputs, step_outputs, ) return outputs if self.steps_per_execution > 1: predict_step = multi_predict_steps else: predict_step = one_predict_step self.predict_function = predict_step def _symbolic_build(self, data_batch): model_unbuilt = not all(layer.built for layer in self._flatten_layers()) compile_metrics_unbuilt = ( self._compile_metrics is not None and not self._compile_metrics.built ) if model_unbuilt or compile_metrics_unbuilt: # Create symbolic tensors matching an input batch. def to_symbolic_input(v): if is_tensor(v): return KerasTensor(v.shape, standardize_dtype(v.dtype)) return v data_batch = tree.map_structure(to_symbolic_input, data_batch) ( x, y, sample_weight, ) = data_adapter_utils.unpack_x_y_sample_weight(data_batch) # Build all model state with `backend.compute_output_spec`. try: y_pred = backend.compute_output_spec(self, x) except: raise RuntimeError( "Unable to automatically build the model. " "Please build it yourself before calling " "fit/evaluate/predict. " "A model is 'built' when its variables have " "been created and its `self.built` attribute " "is True. Usually, calling the model on a batch " "of data is the right way to build it." ) if compile_metrics_unbuilt: # Build all metric state with `backend.compute_output_spec`. 
backend.compute_output_spec( self.compute_metrics, x, y, y_pred, sample_weight=sample_weight, ) self._post_build() def fit( self, x=None, y=None, batch_size=None, epochs=1, verbose="auto", callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_batch_size=None, validation_freq=1, ): raise NotImplementedError("fit not implemented for NumPy backend.") @traceback_utils.filter_traceback def predict( self, x, batch_size=None, verbose="auto", steps=None, callbacks=None ): # Create an iterator that yields batches of input data. epoch_iterator = EpochIterator( x=x, batch_size=batch_size, steps_per_epoch=steps, shuffle=False, steps_per_execution=self.steps_per_execution, ) # Container that configures and calls callbacks. if not isinstance(callbacks, callbacks_module.CallbackList): callbacks = callbacks_module.CallbackList( callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self, ) def append_to_outputs(batch_outputs, outputs): if outputs is None: outputs = tree.map_structure( lambda batch_output: [batch_output], batch_outputs, ) else: tree.map_structure_up_to( batch_outputs, lambda output, batch_output: output.append(batch_output), outputs, batch_outputs, ) return outputs self.make_predict_function() callbacks.on_predict_begin() outputs = None for step, data in epoch_iterator.enumerate_epoch(return_type="np"): callbacks.on_predict_batch_begin(step) batch_outputs = self.predict_function(data) outputs = append_to_outputs(batch_outputs, outputs) callbacks.on_predict_batch_end(step, {"outputs": batch_outputs}) callbacks.on_predict_end() return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs) @traceback_utils.filter_traceback def evaluate( self, x=None, y=None, batch_size=None, verbose="auto", sample_weight=None, steps=None, callbacks=None, return_dict=False, **kwargs, ): # TODO: respect compiled trainable state use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False) if kwargs: raise ValueError(f"Arguments not recognized: {kwargs}") if use_cached_eval_dataset: epoch_iterator = self._eval_epoch_iterator else: # Create an iterator that yields batches of input/target data. epoch_iterator = EpochIterator( x=x, y=y, sample_weight=sample_weight, batch_size=batch_size, steps_per_epoch=steps, shuffle=False, steps_per_execution=self.steps_per_execution, ) if not all(layer.built for layer in self._flatten_layers()): # Build the model on one batch of data. for _, data in epoch_iterator.enumerate_epoch(return_type="np"): data_batch = data[0] self._symbolic_build(data_batch) break # Container that configures and calls callbacks. 
if not isinstance(callbacks, callbacks_module.CallbackList): callbacks = callbacks_module.CallbackList( callbacks, add_history=True, add_progbar=verbose != 0, verbose=verbose, epochs=1, steps=epoch_iterator.num_batches, model=self, ) self.make_test_function() callbacks.on_test_begin() logs = None self.reset_metrics() for step, data in epoch_iterator.enumerate_epoch(return_type="np"): callbacks.on_test_batch_begin(step) logs = self.test_function(data) callbacks.on_test_batch_end(step, self._pythonify_logs(logs)) logs = self.get_metrics_result() callbacks.on_test_end(logs) if return_dict: return logs return self._flatten_metrics_in_order(logs) def train_on_batch( self, x, y=None, sample_weight=None, class_weight=None, return_dict=False, ): raise NotImplementedError( "train_on_batch not implemented for NumPy backend." ) def test_on_batch( self, x, y=None, sample_weight=None, return_dict=False, ): self._assert_compile_called("test_on_batch") data = (x, y, sample_weight) # Maybe build model self._symbolic_build(data) self.make_test_function() logs = self.test_function([data]) logs = tree.map_structure(lambda x: np.array(x), logs) if return_dict: return logs return self._flatten_metrics_in_order(logs) def predict_on_batch(self, x): self.make_predict_function() batch_outputs = self.predict_function([(x,)]) batch_outputs = tree.map_structure( backend.convert_to_numpy, batch_outputs ) return batch_outputs
keras-core/keras_core/backend/numpy/trainer.py/0
{ "file_path": "keras-core/keras_core/backend/numpy/trainer.py", "repo_id": "keras-core", "token_count": 5339 }
27
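A sketch of exercising this trainer, assuming the `KERAS_BACKEND` environment variable is the backend selector and is set before the first `keras_core` import; only the evaluate/predict paths are implemented here, so `fit` is deliberately not called.

```python
import os
os.environ["KERAS_BACKEND"] = "numpy"  # assumption: must be set before importing keras_core

import numpy as np
import keras_core

model = keras_core.Sequential([keras_core.layers.Dense(2)])
model.compile(loss="mse")
model.predict(np.random.rand(8, 4), verbose=0)                          # supported
model.evaluate(np.random.rand(8, 4), np.random.rand(8, 2), verbose=0)   # supported
# model.fit(...) raises NotImplementedError on this backend (see `fit` above).
```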
import warnings from keras_core import ops from keras_core.api_export import keras_core_export from keras_core.callbacks.callback import Callback from keras_core.utils import io_utils @keras_core_export("keras_core.callbacks.EarlyStopping") class EarlyStopping(Callback): """Stop training when a monitored metric has stopped improving. Assuming the goal of a training is to minimize the loss. With this, the metric to be monitored would be `'loss'`, and mode would be `'min'`. A `model.fit()` training loop will check at end of every epoch whether the loss is no longer decreasing, considering the `min_delta` and `patience` if applicable. Once it's found no longer decreasing, `model.stop_training` is marked True and the training terminates. The quantity to be monitored needs to be available in `logs` dict. To make it so, pass the loss or metrics at `model.compile()`. Args: monitor: Quantity to be monitored. Defaults to `"val_loss"`. min_delta: Minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute change of less than min_delta, will count as no improvement. Defaults to `0`. patience: Number of epochs with no improvement after which training will be stopped. Defaults to `0`. verbose: Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1 displays messages when the callback takes an action. Defaults to `0`. mode: One of `{"auto", "min", "max"}`. In `min` mode, training will stop when the quantity monitored has stopped decreasing; in `"max"` mode it will stop when the quantity monitored has stopped increasing; in `"auto"` mode, the direction is automatically inferred from the name of the monitored quantity. Defaults to `"auto"`. baseline: Baseline value for the monitored quantity. If not `None`, training will stop if the model doesn't show improvement over the baseline. Defaults to `None`. restore_best_weights: Whether to restore model weights from the epoch with the best value of the monitored quantity. If `False`, the model weights obtained at the last step of training are used. An epoch will be restored regardless of the performance relative to the `baseline`. If no epoch improves on `baseline`, training will run for `patience` epochs and restore weights from the best epoch in that set. Defaults to `False`. start_from_epoch: Number of epochs to wait before starting to monitor improvement. This allows for a warm-up period in which no improvement is expected and thus training will not be stopped. Defaults to `0`. Example: >>> callback = keras_core.callbacks.EarlyStopping(monitor='loss', ... patience=3) >>> # This callback will stop the training when there is no improvement in >>> # the loss for three consecutive epochs. >>> model = keras_core.models.Sequential([keras_core.layers.Dense(10)]) >>> model.compile(keras_core.optimizers.SGD(), loss='mse') >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), ... epochs=10, batch_size=1, callbacks=[callback], ... verbose=0) >>> len(history.history['loss']) # Only 4 epochs are run. 
4 """ def __init__( self, monitor="val_loss", min_delta=0, patience=0, verbose=0, mode="auto", baseline=None, restore_best_weights=False, start_from_epoch=0, ): super().__init__() self.monitor = monitor self.patience = patience self.verbose = verbose self.baseline = baseline self.min_delta = abs(min_delta) self.wait = 0 self.stopped_epoch = 0 self.restore_best_weights = restore_best_weights self.best_weights = None self.start_from_epoch = start_from_epoch if mode not in ["auto", "min", "max"]: warnings.warn( f"EarlyStopping mode {mode} is unknown, fallback to auto mode.", stacklevel=2, ) mode = "auto" if mode == "min": self.monitor_op = ops.less elif mode == "max": self.monitor_op = ops.greater else: if ( self.monitor.endswith("acc") or self.monitor.endswith("accuracy") or self.monitor.endswith("auc") ): self.monitor_op = ops.greater else: self.monitor_op = ops.less if self.monitor_op == ops.greater: self.min_delta *= 1 else: self.min_delta *= -1 def on_train_begin(self, logs=None): # Allow instances to be re-used self.wait = 0 self.stopped_epoch = 0 self.best = ( float("inf") if self.monitor_op == ops.less else -float("inf") ) self.best_weights = None self.best_epoch = 0 def on_epoch_end(self, epoch, logs=None): current = self.get_monitor_value(logs) if current is None or epoch < self.start_from_epoch: # If no monitor value exists or still in initial warm-up stage. return if self.restore_best_weights and self.best_weights is None: # Restore the weights after first epoch if no progress is ever made. self.best_weights = self.model.get_weights() self.wait += 1 if self._is_improvement(current, self.best): self.best = current self.best_epoch = epoch if self.restore_best_weights: self.best_weights = self.model.get_weights() # Only restart wait if we beat both the baseline and our previous # best. if self.baseline is None or self._is_improvement( current, self.baseline ): self.wait = 0 return # Only check after the first epoch. if self.wait >= self.patience and epoch > 0: self.stopped_epoch = epoch self.model.stop_training = True if self.restore_best_weights and self.best_weights is not None: if self.verbose > 0: io_utils.print_msg( "Restoring model weights from " "the end of the best epoch: " f"{self.best_epoch + 1}." ) self.model.set_weights(self.best_weights) def on_train_end(self, logs=None): if self.stopped_epoch > 0 and self.verbose > 0: io_utils.print_msg( f"Epoch {self.stopped_epoch + 1}: early stopping" ) def get_monitor_value(self, logs): logs = logs or {} monitor_value = logs.get(self.monitor) if monitor_value is None: warnings.warn( ( f"Early stopping conditioned on metric `{self.monitor}` " "which is not available. " f"Available metrics are: {','.join(list(logs.keys()))}" ), stacklevel=2, ) return monitor_value def _is_improvement(self, monitor_value, reference_value): return self.monitor_op(monitor_value - self.min_delta, reference_value)
keras-core/keras_core/callbacks/early_stopping.py/0
{ "file_path": "keras-core/keras_core/callbacks/early_stopping.py", "repo_id": "keras-core", "token_count": 3341 }
28
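A hedged sketch (toy data, hypothetical model) combining `restore_best_weights` with a validation split, complementing the docstring example above.

```python
import numpy as np
import keras_core

model = keras_core.Sequential([keras_core.layers.Dense(1)])
model.compile(optimizer="adam", loss="mse")

early_stop = keras_core.callbacks.EarlyStopping(
    monitor="val_loss", patience=2, restore_best_weights=True, verbose=1
)
x, y = np.random.rand(64, 4), np.random.rand(64, 1)
model.fit(x, y, validation_split=0.25, epochs=20, callbacks=[early_stop], verbose=0)
```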
import numpy as np from keras_core.api_export import keras_core_export from keras_core.callbacks.callback import Callback from keras_core.utils import io_utils @keras_core_export("keras_core.callbacks.TerminateOnNaN") class TerminateOnNaN(Callback): """Callback that terminates training when a NaN loss is encountered.""" def on_batch_end(self, batch, logs=None): logs = logs or {} loss = logs.get("loss") if loss is not None: if np.isnan(loss) or np.isinf(loss): io_utils.print_msg( f"Batch {batch}: Invalid loss, terminating training" ) self.model.stop_training = True
keras-core/keras_core/callbacks/terminate_on_nan.py/0
{ "file_path": "keras-core/keras_core/callbacks/terminate_on_nan.py", "repo_id": "keras-core", "token_count": 292 }
29
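Illustrative usage (not from the source): attaching `TerminateOnNaN` to `fit` so training halts as soon as a NaN or Inf loss appears; the toy model and data are placeholders.

```python
import numpy as np
import keras_core

model = keras_core.Sequential([keras_core.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse")
model.fit(
    np.random.rand(32, 4),
    np.random.rand(32, 1),
    epochs=2,
    callbacks=[keras_core.callbacks.TerminateOnNaN()],
    verbose=0,
)
```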
"""Unified high level distribution APIs across backends. !!!DO NOT USE!!! Currently under development and APIs are not final. Currently only the JAX backend has been implemented. The TensorFlow backend will be implemented in the future (via tf.dtensor API). """ import collections import contextlib import re import warnings import numpy as np from keras_core.api_export import keras_core_export from keras_core.backend import distribution_lib from keras_core.backend.common import global_state DEFAULT_BATCH_DIM_NAME = "batch" GLOBAL_ATTRIBUTE_NAME = "distribution" @keras_core_export("keras_core.distribution.list_devices") def list_devices(device_type=None): """Return all the available devices based on the device type. Note: in a distributed setting, global devices are returned. Args: device_type: string, one of `"cpu"`, `"gpu"` or `"tpu"`. Defaults to `"gpu"` or `"tpu"` if available when `device_type` is not provided. Otherwise will return the `"cpu"` devices. Return: List of devices that are available for distribute computation. """ return distribution_lib.list_devices(device_type) @keras_core_export("keras_core.distribution.DeviceMesh") class DeviceMesh: """A cluster of computation devices for distributed computation. This API is aligned with `jax.sharding.Mesh` and `tf.dtensor.Mesh`, which represents the computation devices in the global context. See more details in [jax.sharding.Mesh]( https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.Mesh) and [tf.dtensor.Mesh]( https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Mesh). Args: shape: tuple of list of integers. The shape of the overall `DeviceMesh`, e.g. `(8,)` for a data parallel only distribution, or `(4, 2)` for a model+data parallel distribution. axis_names: List of string. The logical name of the each axis for the `DeviceMesh`. The length of the `axis_names` should match to the rank of the `shape`. The `axis_names` will be used to match/create the `TensorLayout` when distribute the data and variables. devices: Optional list of devices. Defaults to all the available devices locally from `keras_core.distribution.list_devices()`. """ def __init__( self, shape, axis_names, devices=None, ): if not shape or not axis_names: raise ValueError( "Shape and axis_names cannot be empty. Received: " f"shape={shape}, axis_names={axis_names}" ) if len(shape) != len(axis_names): raise ValueError( "Shape and axis_names should have same size. " f"Received: shape={shape}, axis_names={axis_names}" ) if devices is None: devices = list_devices() devices = np.array(devices) if np.prod(shape) != np.prod(devices.shape): raise ValueError( "Shape does not match the number of devices. " f"Received: shape={shape}; devices.shape=" f"{devices.shape}" ) self._shape = shape self._axis_names = axis_names self._devices = np.reshape(devices, shape) @property def shape(self): return self._shape @property def axis_names(self): return self._axis_names @property def devices(self): return self._devices @keras_core_export("keras_core.distribution.TensorLayout") class TensorLayout: """A layout to apply to a tensor. This API is aligned with `jax.sharding.NamedSharding` and `tf.dtensor.Layout`. See more details in [jax.sharding.NamedSharding]( https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.NamedSharding) and [tf.dtensor.Layout]( https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Layout). Args: axes: list of strings that should map to the `axis_names` in a `DeviceMesh`. 
For any dimentions that doesn't need any sharding, A `None` can be used a placeholder. device_mesh: Optional `DeviceMesh` that will be used to create the layout. The actual mapping of tensor to physical device is not known until the mesh is specified. """ def __init__(self, axes, device_mesh=None): self._axes = axes self._device_mesh = device_mesh self._validate_axes() @property def axes(self): return self._axes @property def device_mesh(self): return self._device_mesh @device_mesh.setter def device_mesh(self, device_mesh): if self._device_mesh is not None: raise ValueError( "Cannot override device mesh value. Existing " f"value is {self._device_mesh}" ) self._device_mesh = device_mesh self._validate_axes() def _validate_axes(self): if self._device_mesh: valid_axis_names = set(self._device_mesh.axis_names) axis_names = set(self._axes) - set([None]) if axis_names - valid_axis_names: raise ValueError( "Invalid axis names for Layout. Valid axis " f"names: {valid_axis_names}, Got {axis_names}" ) class Distribution: """Base class for variable distribution strategies. A `Distribution` has following key functionalities: 1. Distribute the model variables to a `DeviceMesh`. 2. Distribute the input data to a `DeviceMesh`. It can create a context scope so that the framework to properly detect the `Distribution` and distribute the variable/data accordingly. Args: device_mesh: A `DeviceMesh` instance. """ def __init__(self, device_mesh): self._device_mesh = device_mesh def get_data_layout(self, data_shape): """Retrieve the `TensorLayout` for the input data. Args: data_shape: shape for the input data in list or tuple format. Returns: The `TensorLayout` for the data, which can be used by `backend.distribute_value()` to redistribute a input data. """ raise NotImplementedError() def get_variable_layout(self, variable): """Retrieve the `TensorLayout` for the variable. Args: variable: A `KerasVariable` instance. return: The `TensorLayout` for the variable, which can be used by `backend.distribute_value()` to redistribute a variable. """ raise NotImplementedError() @contextlib.contextmanager def scope(self): """Context manager to make the `Distribution` current.""" original_scope = distribution() set_distribution(self) try: yield finally: set_distribution(original_scope) @property def device_mesh(self): return self._device_mesh @keras_core_export("keras_core.distribution.DataParallel") class DataParallel(Distribution): """Distribution for data parallelism. You can choose to create this instance by either specifing the `device_mesh` or `devices` arguments (but not both). The `device_mesh` argument is expected to be a `DeviceMesh` instance, and is expected to be 1D only. In case that the mesh has multiple axes, then the first axis will be treated as the data parallel dimension (and a warning will be raised). When a list of `devices` are provided, they will be used to construct a 1D mesh. When both `mesh` and `devices` are absent, then `list_devices()` will be used to detect any available devices and create a 1D mesh from them. Args: device_mesh: Optional `DeviceMesh` instance. devices: Optional list of devices. 
""" def __init__(self, device_mesh=None, devices=None): if device_mesh: self._initialize_with_device_mesh(device_mesh) elif devices: self._initialize_mesh_from_devices(devices) else: self._initialize_mesh_from_list_devices() self._batch_dim_name = self.device_mesh.axis_names[0] def _initialize_with_device_mesh(self, device_mesh): if not isinstance(device_mesh, DeviceMesh): raise ValueError( "Expect `mesh` to be an instance of `DeviceMesh`. " f"Received: mesh={device_mesh} (of type {type(device_mesh)})" ) super().__init__(device_mesh) if self.device_mesh.devices.ndim != 1: warnings.warn( "Expect the input mesh to be 1D, but received " "mesh.devices.ndim=%d. " "The first axis will be used for data-parallel sharding.", device_mesh.devices.ndim, ) def _initialize_mesh_from_devices(self, devices): devices = np.array(devices) device_mesh = DeviceMesh( shape=devices.shape, axis_names=[DEFAULT_BATCH_DIM_NAME], devices=devices, ) super().__init__(device_mesh) def _initialize_mesh_from_list_devices(self): devices = np.array(list_devices()) device_mesh = DeviceMesh( shape=devices.shape, axis_names=[DEFAULT_BATCH_DIM_NAME], devices=devices, ) super().__init__(device_mesh) def get_data_layout(self, data_shape): data_shard_spec = [None] * len(data_shape) data_shard_spec[0] = self._batch_dim_name # Shard on the first dim return TensorLayout(data_shard_spec, self.device_mesh) def get_variable_layout(self, variable): variable_shard_spec = [None] * len(variable.shape) return TensorLayout(variable_shard_spec, self.device_mesh) @keras_core_export("keras_core.distribution.ModelParallel") class ModelParallel(Distribution): """Distribution that shards model variables. Compare to `DataParallel` which replicates the variables across all devices, `ModelParallel` allows you to shard variables in addition to the input data. To construct a `ModelParallel` distribution, you need to provide a `DeviceMesh` and a `LayoutMap`. 1. `DeviceMesh` contains physcial device information. The axis names in the mesh will be used to map the variable and data layout. 2. `LayoutMap` contains the mapping between variable paths to their corresponding `TensorLayout`. Example: ```python devices = list_devices() # Assume there are 8 devices. # Create a mesh with 2 devices for data parallelism and 4 devices for # model parallelism. device_mesh = DeviceMesh(shape=(2, 4), axis_names=('batch', 'model'), devices=devices) # Create a layout map that shard the `Dense` layer and `Conv2D` # layer variables on the last dimension. # Based on the `device_mesh`, this means the variables # will be split across 4 devices. Any other variable that doesn't # match any key in the layout map will be fully replicated. layout_map = LayoutMap(device_mesh) layout_map['.*dense.*kernel'] = TensorLayout([None, 'model']) layout_map['.*dense.*bias'] = TensorLayout(['model']) layout_map['.*conv2d.*kernel'] = TensorLayout([None, None, None, 'model']) layout_map['.*conv2d.*bias'] = TensorLayout(['model']) distribution = ModelParallel(device_mesh=device_mesh, layout_map=layout_map, batch_dim_name='batch') # Set the global distribution, or via `with distribution.scope():` set_distribution(distribution) model = model_creation() model.compile() model.fit(data) ``` You can quickly update the device mesh shape to change the sharding factor of the variables. E.g. ``` # With only the shape change for the device mesh, the variables will be # sharded across 8 devices instead of 4, which further reduces the memory # footprint of variables on each of the device. 
device_mesh = DeviceMesh(shape=(1, 8), axis_names=('batch', 'model'), devices=devices) ``` To figure out a proper layout mapping rule for all the model variables, you can first list out all the model variable paths, which will be used as the key to map the variables to `TensorLayout`. e.g. ``` model = create_model() for v in model.variables: print(v.path) ``` Args: device_mesh: `DeviceMesh` instance for physical device and its logical mapping. layout_map: `LayoutMap` instance which map the variable path to the corresponding `TensorLayout`. The axis names of the `TensorLayout`s should match to the axis names in the device_mesh, or exception will be raised. batch_dim_name: optional string, the axis name in the `device_mesh` that will be used to distribute data. If unspecified, the first axis from the `device_mesh` will be used. """ def __init__(self, device_mesh, layout_map, batch_dim_name=None): super().__init__(device_mesh) self._layout_map = layout_map self._batch_dim_name = batch_dim_name or self.device_mesh.axis_names[0] def get_data_layout(self, data_shape): data_shard_spec = [None] * len(data_shape) data_shard_spec[0] = self._batch_dim_name # Shard on the first dim return TensorLayout(data_shard_spec, self.device_mesh) def get_variable_layout(self, variable): variable_layout = self._layout_map[variable.path] if variable_layout is not None: return variable_layout variable_shard_spec = [None] * len(variable.shape) return TensorLayout(variable_shard_spec, self.device_mesh) @keras_core_export("keras_core.distribution.LayoutMap") class LayoutMap(collections.abc.MutableMapping): """A dict-like object that maps string to `TensorLayout` instances. `LayoutMap` uses a string as key and a `TensorLayout` as value. There is a behavior difference between a normal Python dict and this class. The string key will be treated as a regex when retrieving the value. See the docstring of `get` for more details. See below for a usage example. You can define the naming schema of the `TensorLayout`, and then retrieve the corresponding `TensorLayout` instance. In the normal case, the key to query is usually the `variable.path`, which is the idenifier of the variable. ```python layout_map = LayoutMap(device_mesh=None) layout_map['.*dense.*kernel'] = layout_2d layout_map['.*dense.*bias'] = layout_1d layout_map['.*conv2d.*kernel'] = layout_4d layout_map['.*conv2d.*bias'] = layout_1d layout_1 = layout_map['dense_1.kernel'] # layout_1 == layout_2d layout_2 = layout_map['dense_1.bias'] # layout_2 == layout_1d layout_3 = layout_map['dense_2.kernel'] # layout_3 == layout_2d layout_4 = layout_map['dense_2.bias'] # layout_4 == layout_1d layout_5 = layout_map['my_model/conv2d_123/kernel'] # layout_5 == layout_4d layout_6 = layout_map['my_model/conv2d_123/bias'] # layout_6 == layout_1d layout_7 = layout_map['my_model/conv3d_1/kernel'] # layout_7 == None layout_8 = layout_map['my_model/conv3d_1/bias'] # layout_8 == None ``` Args: device_mesh: An optional `DeviceMesh` that can be used to populate the `TensorLayout.device_mesh` if `TensorLayout.device_mesh` is not set. """ def __init__(self, device_mesh=None): self._layout_map = collections.OrderedDict() self._device_mesh = device_mesh def __getitem__(self, key): """Retrieves the corresponding layout by the string key. When there isn't an exact match, all the existing keys in the layout map will be treated as a regex and map against the input key again. The first match will be returned, based on the key insertion order. Returns `None` if there isn't any match found. 
Args: key: String key to query a layout. Returns: Corresponding layout based on the query. """ if key in self._layout_map: return self._layout_map[key] for k in self._layout_map: if re.match(k, key): return self._layout_map[k] return None def __setitem__(self, key, layout): if key in self._layout_map: raise ValueError( f"{key} already exist in the LayoutMap with " f"value {self._layout_map[key]}. Please make sure to " "not use duplicated keys." ) if not isinstance(layout, TensorLayout): raise ValueError( f"{layout} should be a TensorLayout type, got {type(layout)}" ) self._maybe_populate_device_mesh(layout) self._layout_map[key] = layout def __delitem__(self, key): # let the dict to handle the key missing error return self._layout_map.pop(key) def __len__(self): return len(self._layout_map) def __iter__(self): return iter(self._layout_map) @property def device_mesh(self): return self._device_mesh def _maybe_populate_device_mesh(self, layout): if layout.device_mesh is None and self.device_mesh is not None: layout.device_mesh = self.device_mesh @keras_core_export("keras_core.distribution.distribution") def distribution(): """Retrieve the current distribution from global context.""" return global_state.get_global_attribute(GLOBAL_ATTRIBUTE_NAME) @keras_core_export("keras_core.distribution.set_distribution") def set_distribution(value): """Set the distribution as the global distribution setting. Args: value: a `Distribution` instance. """ global_state.set_global_attribute(GLOBAL_ATTRIBUTE_NAME, value)
keras-core/keras_core/distribution/distribution_lib.py/0
{ "file_path": "keras-core/keras_core/distribution/distribution_lib.py", "repo_id": "keras-core", "token_count": 7389 }
30
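A minimal standalone sketch of the lookup rule implemented by `LayoutMap.__getitem__` above: exact match first, then the first regex key (in insertion order) that matches, else `None`. The `rules` mapping and the queried paths below are made-up illustrations, not part of the file.

```python
import collections
import re

# Hypothetical path-pattern -> layout-description mapping, mimicking
# LayoutMap's insertion-ordered regex fallback.
rules = collections.OrderedDict(
    [
        (r".*dense.*kernel", "shard last dim on 'model'"),
        (r".*dense.*bias", "shard on 'model'"),
    ]
)


def lookup(key):
    # Exact match wins first.
    if key in rules:
        return rules[key]
    # Otherwise the first regex (by insertion order) that matches wins.
    for pattern, value in rules.items():
        if re.match(pattern, key):
            return value
    return None


print(lookup("my_model/dense_1/kernel"))   # shard last dim on 'model'
print(lookup("my_model/conv2d_1/kernel"))  # None (no rule matches)
```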
import numpy as np
import pytest

from keras_core import testing
from keras_core.layers.activations import elu


class ELUTest(testing.TestCase):
    def test_config(self):
        elu_layer = elu.ELU()
        self.run_class_serialization_test(elu_layer)

    @pytest.mark.requires_trainable_backend
    def test_elu(self):
        self.run_layer_test(
            elu.ELU,
            init_kwargs={},
            input_shape=(2, 3, 4),
            supports_masking=True,
        )

    def test_correctness(self):
        def np_elu(x, alpha=1.0):
            return (x > 0) * x + (x <= 0) * alpha * (np.exp(x) - 1)

        x = np.random.random((2, 2, 5))
        elu_layer = elu.ELU()
        self.assertAllClose(elu_layer(x), np_elu(x))

        elu_layer = elu.ELU(alpha=0.7)
        self.assertAllClose(elu_layer(x), np_elu(x, alpha=0.7))
keras-core/keras_core/layers/activations/elu_test.py/0
{ "file_path": "keras-core/keras_core/layers/activations/elu_test.py", "repo_id": "keras-core", "token_count": 432 }
31
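A standalone check of the ELU reference formula used by `np_elu` in the test above (x for x > 0, alpha * (exp(x) - 1) otherwise); the sample values are arbitrary.

```python
import numpy as np


def np_elu(x, alpha=1.0):
    # Same reference formula as in the test above.
    return (x > 0) * x + (x <= 0) * alpha * (np.exp(x) - 1)


# Positive inputs pass through unchanged; negative inputs saturate at -alpha.
print(np_elu(np.array([2.0])))         # [2.]
print(np_elu(np.array([-1.0])))        # [-0.6321...]  == exp(-1) - 1
print(np_elu(np.array([-1.0]), 0.7))   # [-0.4425...]  == 0.7 * (exp(-1) - 1)
```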
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.merging.base_merge import Merge


@keras_core_export("keras_core.layers.Multiply")
class Multiply(Merge):
    """Performs elementwise multiplication.

    It takes as input a list of tensors, all of the same shape,
    and returns a single tensor (also of the same shape).

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras_core.layers.Multiply()([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras_core.layers.Input(shape=(16,))
    >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras_core.layers.Input(shape=(32,))
    >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
    >>> # equivalent to `y = keras_core.layers.multiply([x1, x2])`
    >>> y = keras_core.layers.Multiply()([x1, x2])
    >>> out = keras_core.layers.Dense(4)(y)
    >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)

    """

    def _merge_function(self, inputs):
        output = inputs[0]
        for i in range(1, len(inputs)):
            output = ops.multiply(output, inputs[i])
        return output


@keras_core_export("keras_core.layers.multiply")
def multiply(inputs, **kwargs):
    """Functional interface to the `keras_core.layers.Multiply` layer.

    Args:
        inputs: A list of input tensors, all of the same shape.
        **kwargs: Standard layer keyword arguments.

    Returns:
        A tensor as the elementwise product of the inputs with the same
        shape as the inputs.

    Examples:

    >>> input_shape = (2, 3, 4)
    >>> x1 = np.random.rand(*input_shape)
    >>> x2 = np.random.rand(*input_shape)
    >>> y = keras_core.layers.multiply([x1, x2])

    Usage in a Keras model:

    >>> input1 = keras_core.layers.Input(shape=(16,))
    >>> x1 = keras_core.layers.Dense(8, activation='relu')(input1)
    >>> input2 = keras_core.layers.Input(shape=(32,))
    >>> x2 = keras_core.layers.Dense(8, activation='relu')(input2)
    >>> y = keras_core.layers.multiply([x1, x2])
    >>> out = keras_core.layers.Dense(4)(y)
    >>> model = keras_core.models.Model(inputs=[input1, input2], outputs=out)

    """
    return Multiply(**kwargs)(inputs)
keras-core/keras_core/layers/merging/multiply.py/0
{ "file_path": "keras-core/keras_core/layers/merging/multiply.py", "repo_id": "keras-core", "token_count": 965 }
32
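A minimal NumPy sketch of the reduction performed by `Multiply._merge_function` above, an elementwise product folded over a list of equal-shape inputs; the arrays are arbitrary examples.

```python
import numpy as np


def merge_multiply(inputs):
    # Elementwise product of a list of equal-shape arrays, mirroring the
    # loop in Multiply._merge_function.
    output = inputs[0]
    for x in inputs[1:]:
        output = np.multiply(output, x)
    return output


a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[2.0, 0.5], [1.0, 2.0]])
print(merge_multiply([a, b]))  # [[2. 1.] [3. 8.]]
```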
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data

from keras_core import backend
from keras_core import layers
from keras_core import testing


class RandomZoomTest(testing.TestCase, parameterized.TestCase):
    @parameterized.named_parameters(
        ("random_zoom_in_4_by_6", -0.4, -0.6),
        ("random_zoom_in_2_by_3", -0.2, -0.3),
        ("random_zoom_in_tuple_factor", (-0.4, -0.5), (-0.2, -0.3)),
        ("random_zoom_out_4_by_6", 0.4, 0.6),
        ("random_zoom_out_2_by_3", 0.2, 0.3),
        ("random_zoom_out_tuple_factor", (0.4, 0.5), (0.2, 0.3)),
    )
    def test_random_zoom(self, height_factor, width_factor):
        self.run_layer_test(
            layers.RandomZoom,
            init_kwargs={
                "height_factor": height_factor,
                "width_factor": width_factor,
            },
            input_shape=(2, 3, 4),
            expected_output_shape=(2, 3, 4),
            supports_masking=False,
            run_training_check=False,
        )

    def test_random_zoom_out_correctness(self):
        input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
        expected_output = np.asarray(
            [
                [0, 0, 0, 0, 0],
                [0, 2.7, 4.5, 6.3, 0],
                [0, 10.2, 12.0, 13.8, 0],
                [0, 17.7, 19.5, 21.3, 0],
                [0, 0, 0, 0, 0],
            ]
        )
        expected_output = backend.convert_to_tensor(
            np.reshape(expected_output, (1, 5, 5, 1))
        )
        self.run_layer_test(
            layers.RandomZoom,
            init_kwargs={
                "height_factor": (0.5, 0.5),
                "width_factor": (0.8, 0.8),
                "interpolation": "bilinear",
                "fill_mode": "constant",
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    def test_random_zoom_in_correctness(self):
        input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
        expected_output = np.asarray(
            [
                [6.0, 6.5, 7.0, 7.5, 8.0],
                [8.5, 9.0, 9.5, 10.0, 10.5],
                [11.0, 11.5, 12.0, 12.5, 13.0],
                [13.5, 14.0, 14.5, 15.0, 15.5],
                [16.0, 16.5, 17.0, 17.5, 18.0],
            ]
        )
        expected_output = backend.convert_to_tensor(
            np.reshape(expected_output, (1, 5, 5, 1))
        )
        self.run_layer_test(
            layers.RandomZoom,
            init_kwargs={
                "height_factor": (-0.5, -0.5),
                "width_factor": (-0.5, -0.5),
                "interpolation": "bilinear",
                "fill_mode": "constant",
            },
            input_shape=None,
            input_data=input_image,
            expected_output=expected_output,
            supports_masking=False,
            run_training_check=False,
        )

    def test_tf_data_compatibility(self):
        input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
        layer = layers.RandomZoom(
            height_factor=(0.5, 0.5),
            width_factor=(0.8, 0.8),
            interpolation="nearest",
            fill_mode="constant",
        )
        ds = tf_data.Dataset.from_tensor_slices(input_image).batch(1).map(layer)
        expected_output = np.asarray(
            [
                [0, 0, 0, 0, 0],
                [0, 5, 7, 9, 0],
                [0, 10, 12, 14, 0],
                [0, 20, 22, 24, 0],
                [0, 0, 0, 0, 0],
            ]
        ).reshape((1, 5, 5, 1))
        for output in ds.take(1):
            output = output.numpy()
        self.assertAllClose(expected_output, output)
keras-core/keras_core/layers/preprocessing/random_zoom_test.py/0
{ "file_path": "keras-core/keras_core/layers/preprocessing/random_zoom_test.py", "repo_id": "keras-core", "token_count": 2215 }
33
import numpy as np
import pytest

from keras_core import backend
from keras_core import layers
from keras_core import testing


class GaussianDropoutTest(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_gaussian_dropout_basics(self):
        self.run_layer_test(
            layers.GaussianDropout,
            init_kwargs={
                "rate": 0.2,
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=1,
            expected_num_losses=0,
            supports_masking=True,
        )

    def test_gaussian_dropout_correctness(self):
        inputs = np.ones((20, 500))
        layer = layers.GaussianDropout(0.3, seed=1337)
        outputs = layer(inputs, training=True)
        self.assertAllClose(
            np.std(backend.convert_to_numpy(outputs)),
            np.sqrt(0.3 / (1 - 0.3)),
            atol=0.02,
        )
keras-core/keras_core/layers/regularization/gaussian_dropout_test.py/0
{ "file_path": "keras-core/keras_core/layers/regularization/gaussian_dropout_test.py", "repo_id": "keras-core", "token_count": 515 }
34
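The correctness test above asserts that the output of `GaussianDropout` on an all-ones input has standard deviation near `sqrt(rate / (1 - rate))`, which is what 1-centered multiplicative Gaussian noise with that stddev produces. A NumPy-only sketch of that expectation; the sample shape and seed are arbitrary.

```python
import numpy as np

rate = 0.3
rng = np.random.default_rng(0)

# Multiplicative noise ~ N(1, sqrt(rate / (1 - rate))), as implied by the
# test's assertion on the output standard deviation.
stddev = np.sqrt(rate / (1 - rate))
noisy = np.ones((20, 500)) * rng.normal(1.0, stddev, size=(20, 500))

print(np.std(noisy))  # close to the value below
print(stddev)         # 0.6546...
```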
import numpy as np from keras_core import testing from keras_core.metrics import accuracy_metrics class AccuracyTest(testing.TestCase): def test_config(self): acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32") self.assertEqual(acc_obj.name, "accuracy") self.assertEqual(len(acc_obj.variables), 2) self.assertEqual(acc_obj._dtype, "float32") # Test get_config acc_obj_config = acc_obj.get_config() self.assertEqual(acc_obj_config["name"], "accuracy") self.assertEqual(acc_obj_config["dtype"], "float32") # TODO: Check save and restore config def test_unweighted(self): acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32") y_true = np.array([[1], [2], [3], [4]]) y_pred = np.array([[0], [2], [3], [4]]) acc_obj.update_state(y_true, y_pred) result = acc_obj.result() self.assertAllClose(result, 0.75, atol=1e-3) def test_weighted(self): acc_obj = accuracy_metrics.Accuracy(name="accuracy", dtype="float32") y_true = np.array([[1], [2], [3], [4]]) y_pred = np.array([[0], [2], [3], [4]]) sample_weight = np.array([1, 1, 0, 0]) acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight) result = acc_obj.result() self.assertAllClose(result, 0.5, atol=1e-3) class BinaryAccuracyTest(testing.TestCase): def test_config(self): bin_acc_obj = accuracy_metrics.BinaryAccuracy( name="binary_accuracy", dtype="float32" ) self.assertEqual(bin_acc_obj.name, "binary_accuracy") self.assertEqual(len(bin_acc_obj.variables), 2) self.assertEqual(bin_acc_obj._dtype, "float32") # Test get_config bin_acc_obj_config = bin_acc_obj.get_config() self.assertEqual(bin_acc_obj_config["name"], "binary_accuracy") self.assertEqual(bin_acc_obj_config["dtype"], "float32") # TODO: Check save and restore config def test_unweighted(self): bin_acc_obj = accuracy_metrics.BinaryAccuracy( name="binary_accuracy", dtype="float32" ) y_true = np.array([[1], [1], [0], [0]]) y_pred = np.array([[0.98], [1], [0], [0.6]]) bin_acc_obj.update_state(y_true, y_pred) result = bin_acc_obj.result() self.assertAllClose(result, 0.75, atol=1e-3) # Test broadcasting case bin_acc_obj = accuracy_metrics.BinaryAccuracy( name="binary_accuracy", dtype="float32" ) y_true = np.array([1, 1, 0, 0]) y_pred = np.array([[0.98], [1], [0], [0.6]]) bin_acc_obj.update_state(y_true, y_pred) result = bin_acc_obj.result() self.assertAllClose(result, 0.75, atol=1e-3) def test_weighted(self): bin_acc_obj = accuracy_metrics.BinaryAccuracy( name="binary_accuracy", dtype="float32" ) y_true = np.array([[1], [1], [0], [0]]) y_pred = np.array([[0.98], [1], [0], [0.6]]) sample_weight = np.array([1, 0, 0, 1]) bin_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight) result = bin_acc_obj.result() self.assertAllClose(result, 0.5, atol=1e-3) class CategoricalAccuracyTest(testing.TestCase): def test_config(self): cat_acc_obj = accuracy_metrics.CategoricalAccuracy( name="categorical_accuracy", dtype="float32" ) self.assertEqual(cat_acc_obj.name, "categorical_accuracy") self.assertEqual(len(cat_acc_obj.variables), 2) self.assertEqual(cat_acc_obj._dtype, "float32") # Test get_config cat_acc_obj_config = cat_acc_obj.get_config() self.assertEqual(cat_acc_obj_config["name"], "categorical_accuracy") self.assertEqual(cat_acc_obj_config["dtype"], "float32") # TODO: Check save and restore config def test_unweighted(self): cat_acc_obj = accuracy_metrics.CategoricalAccuracy( name="categorical_accuracy", dtype="float32" ) y_true = np.array([[0, 0, 1], [0, 1, 0]]) y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) cat_acc_obj.update_state(y_true, y_pred) result 
= cat_acc_obj.result() self.assertAllClose(result, 0.5, atol=1e-3) def test_weighted(self): cat_acc_obj = accuracy_metrics.CategoricalAccuracy( name="categorical_accuracy", dtype="float32" ) y_true = np.array([[0, 0, 1], [0, 1, 0]]) y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) sample_weight = np.array([0.7, 0.3]) cat_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight) result = cat_acc_obj.result() self.assertAllClose(result, 0.3, atol=1e-3) class SparseCategoricalAccuracyTest(testing.TestCase): def test_config(self): sp_cat_acc_obj = accuracy_metrics.SparseCategoricalAccuracy( name="sparse_categorical_accuracy", dtype="float32" ) self.assertEqual(sp_cat_acc_obj.name, "sparse_categorical_accuracy") self.assertEqual(len(sp_cat_acc_obj.variables), 2) self.assertEqual(sp_cat_acc_obj._dtype, "float32") # Test get_config sp_cat_acc_obj_config = sp_cat_acc_obj.get_config() self.assertEqual( sp_cat_acc_obj_config["name"], "sparse_categorical_accuracy" ) self.assertEqual(sp_cat_acc_obj_config["dtype"], "float32") # TODO: Check save and restore config def test_unweighted(self): sp_cat_acc_obj = accuracy_metrics.SparseCategoricalAccuracy( name="sparse_categorical_accuracy", dtype="float32" ) y_true = np.array([[2], [1]]) y_pred = np.array([[0.1, 0.6, 0.3], [0.05, 0.95, 0]]) sp_cat_acc_obj.update_state(y_true, y_pred) result = sp_cat_acc_obj.result() self.assertAllClose(result, 0.5, atol=1e-3) def test_weighted(self): sp_cat_acc_obj = accuracy_metrics.SparseCategoricalAccuracy( name="sparse_categorical_accuracy", dtype="float32" ) y_true = np.array([[2], [1]]) y_pred = np.array([[0.1, 0.6, 0.3], [0.05, 0.95, 0]]) sample_weight = np.array([0.7, 0.3]) sp_cat_acc_obj.update_state(y_true, y_pred, sample_weight=sample_weight) result = sp_cat_acc_obj.result() self.assertAllClose(result, 0.3, atol=1e-3) class TopKCategoricalAccuracyTest(testing.TestCase): def test_config(self): top_k_cat_acc_obj = accuracy_metrics.TopKCategoricalAccuracy( k=1, name="top_k_categorical_accuracy", dtype="float32" ) self.assertEqual(top_k_cat_acc_obj.name, "top_k_categorical_accuracy") self.assertEqual(len(top_k_cat_acc_obj.variables), 2) self.assertEqual(top_k_cat_acc_obj._dtype, "float32") # Test get_config top_k_cat_acc_obj_config = top_k_cat_acc_obj.get_config() self.assertEqual( top_k_cat_acc_obj_config["name"], "top_k_categorical_accuracy" ) self.assertEqual(top_k_cat_acc_obj_config["dtype"], "float32") self.assertEqual(top_k_cat_acc_obj_config["k"], 1) # TODO: Check save and restore config def test_unweighted(self): top_k_cat_acc_obj = accuracy_metrics.TopKCategoricalAccuracy( k=1, name="top_k_categorical_accuracy", dtype="float32" ) y_true = np.array([[0, 0, 1], [0, 1, 0]]) y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32") top_k_cat_acc_obj.update_state(y_true, y_pred) result = top_k_cat_acc_obj.result() self.assertAllClose(result, 0.5, atol=1e-3) def test_weighted(self): top_k_cat_acc_obj = accuracy_metrics.TopKCategoricalAccuracy( k=1, name="top_k_categorical_accuracy", dtype="float32" ) y_true = np.array([[0, 0, 1], [0, 1, 0]]) y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32") sample_weight = np.array([0.7, 0.3]) top_k_cat_acc_obj.update_state( y_true, y_pred, sample_weight=sample_weight ) result = top_k_cat_acc_obj.result() self.assertAllClose(result, 0.3, atol=1e-3) class SparseTopKCategoricalAccuracyTest(testing.TestCase): def test_config(self): sp_top_k_cat_acc_obj = accuracy_metrics.SparseTopKCategoricalAccuracy( k=1, 
name="sparse_top_k_categorical_accuracy", dtype="float32" ) self.assertEqual( sp_top_k_cat_acc_obj.name, "sparse_top_k_categorical_accuracy" ) self.assertEqual(len(sp_top_k_cat_acc_obj.variables), 2) self.assertEqual(sp_top_k_cat_acc_obj._dtype, "float32") # Test get_config sp_top_k_cat_acc_obj_config = sp_top_k_cat_acc_obj.get_config() self.assertEqual( sp_top_k_cat_acc_obj_config["name"], "sparse_top_k_categorical_accuracy", ) self.assertEqual(sp_top_k_cat_acc_obj_config["dtype"], "float32") self.assertEqual(sp_top_k_cat_acc_obj_config["k"], 1) # TODO: Check save and restore config def test_unweighted(self): sp_top_k_cat_acc_obj = accuracy_metrics.SparseTopKCategoricalAccuracy( k=1, name="sparse_top_k_categorical_accuracy", dtype="float32" ) y_true = np.array([2, 1]) y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32") sp_top_k_cat_acc_obj.update_state(y_true, y_pred) result = sp_top_k_cat_acc_obj.result() self.assertAllClose(result, 0.5, atol=1e-3) def test_weighted(self): sp_top_k_cat_acc_obj = accuracy_metrics.SparseTopKCategoricalAccuracy( k=1, name="sparse_top_k_categorical_accuracy", dtype="float32" ) y_true = np.array([2, 1]) y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32") sample_weight = np.array([0.7, 0.3]) sp_top_k_cat_acc_obj.update_state( y_true, y_pred, sample_weight=sample_weight ) result = sp_top_k_cat_acc_obj.result() self.assertAllClose(result, 0.3, atol=1e-3)
keras-core/keras_core/metrics/accuracy_metrics_test.py/0
{ "file_path": "keras-core/keras_core/metrics/accuracy_metrics_test.py", "repo_id": "keras-core", "token_count": 5041 }
35
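A plain-NumPy sketch of the weighted accuracy computation that `AccuracyTest.test_weighted` above asserts: a weighted mean of exact matches between `y_true` and `y_pred`.

```python
import numpy as np

# Same inputs as AccuracyTest.test_weighted:
# accuracy = sum(weight * match) / sum(weight)
y_true = np.array([1, 2, 3, 4])
y_pred = np.array([0, 2, 3, 4])
weights = np.array([1.0, 1.0, 0.0, 0.0])

matches = (y_true == y_pred).astype(float)  # [0., 1., 1., 1.]
accuracy = np.sum(weights * matches) / np.sum(weights)
print(accuracy)  # 0.5, the value the test asserts
```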
import warnings

from keras_core import initializers
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.losses.loss import squeeze_to_same_rank
from keras_core.losses.losses import log_cosh
from keras_core.losses.losses import mean_absolute_error
from keras_core.losses.losses import mean_absolute_percentage_error
from keras_core.losses.losses import mean_squared_error
from keras_core.losses.losses import mean_squared_logarithmic_error
from keras_core.metrics import reduction_metrics
from keras_core.utils.numerical_utils import normalize


@keras_core_export("keras_core.metrics.MeanSquaredError")
class MeanSquaredError(reduction_metrics.MeanMetricWrapper):
    """Computes the mean squared error between `y_true` and `y_pred`.

    Formula:

    ```python
    loss = mean(square(y_true - y_pred))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras_core.metrics.MeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.25
    """

    def __init__(self, name="mean_squared_error", dtype=None):
        super().__init__(fn=mean_squared_error, name=name, dtype=dtype)

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_core_export("keras_core.metrics.MeanAbsoluteError")
class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper):
    """Computes the mean absolute error between the labels and predictions.

    Formula:

    ```python
    loss = mean(abs(y_true - y_pred))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    Standalone usage:

    >>> m = keras_core.metrics.MeanAbsoluteError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.25
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras_core.metrics.MeanAbsoluteError()])
    ```
    """

    def __init__(self, name="mean_absolute_error", dtype=None):
        super().__init__(mean_absolute_error, name, dtype=dtype)

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_core_export("keras_core.metrics.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper):
    """Computes mean absolute percentage error between `y_true` and `y_pred`.

    Formula:

    ```python
    loss = 100 * mean(abs((y_true - y_pred) / y_true))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    Standalone usage:

    >>> m = keras_core.metrics.MeanAbsolutePercentageError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    250000000.0
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    500000000.0

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras_core.metrics.MeanAbsolutePercentageError()])
    ```
    """

    def __init__(self, name="mean_absolute_percentage_error", dtype=None):
        super().__init__(mean_absolute_percentage_error, name, dtype=dtype)

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_core_export("keras_core.metrics.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(reduction_metrics.MeanMetricWrapper):
    """Computes mean squared logarithmic error between `y_true` and `y_pred`.

    Formula:

    ```python
    loss = mean(square(log(y_true + 1) - log(y_pred + 1)))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    Standalone usage:

    >>> m = keras_core.metrics.MeanSquaredLogarithmicError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.12011322
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.24022643

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras_core.metrics.MeanSquaredLogarithmicError()])
    ```
    """

    def __init__(self, name="mean_squared_logarithmic_error", dtype=None):
        super().__init__(mean_squared_logarithmic_error, name, dtype=dtype)

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_core_export("keras_core.metrics.RootMeanSquaredError")
class RootMeanSquaredError(reduction_metrics.Mean):
    """Computes root mean squared error metric between `y_true` and `y_pred`.

    Formula:

    ```python
    loss = sqrt(mean((y_pred - y_true) ** 2))
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    Standalone usage:

    >>> m = keras_core.metrics.RootMeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.5
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.70710677

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras_core.metrics.RootMeanSquaredError()])
    ```
    """

    def __init__(self, name="root_mean_squared_error", dtype=None):
        super().__init__(name, dtype=dtype)

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates root mean squared error statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Can be a
                `Tensor` whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
                Defaults to `1`.

        Returns:
            Update op.
        """
        y_true = ops.convert_to_tensor(y_true, self._dtype)
        y_pred = ops.convert_to_tensor(y_pred, self._dtype)
        y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
        error_sq = ops.square(y_pred - y_true)
        return super().update_state(error_sq, sample_weight=sample_weight)

    def result(self):
        return ops.sqrt(super().result())


@keras_core_export("keras_core.metrics.CosineSimilarity")
class CosineSimilarity(reduction_metrics.MeanMetricWrapper):
    """Computes the cosine similarity between the labels and predictions.

    Formula:

    ```python
    loss = sum(l2_norm(y_true) * l2_norm(y_pred))
    ```

    See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity).
    This metric keeps the average cosine similarity between `predictions`
    and `labels` over a stream of data.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        axis: (Optional) Defaults to `-1`. The dimension along which the
            cosine similarity is computed.

    Examples:

    Standalone usage:

    >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
    >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
    >>> #        = ((0. + 0.) + (0.5 + 0.5)) / 2
    >>> m = keras_core.metrics.CosineSimilarity(axis=1)
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
    >>> m.result()
    0.49999997
    >>> m.reset_state()
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
    ...                sample_weight=[0.3, 0.7])
    >>> m.result()
    0.6999999

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras_core.metrics.CosineSimilarity(axis=1)])
    ```
    """

    def __init__(self, name="cosine_similarity", dtype=None, axis=-1):
        super().__init__(cosine_similarity, name, dtype=dtype, axis=axis)

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_core_export("keras_core.metrics.LogCoshError")
class LogCoshError(reduction_metrics.MeanMetricWrapper):
    """Computes the logarithm of the hyperbolic cosine of the prediction error.

    Formula:

    ```python
    error = y_pred - y_true
    logcosh = mean(log((exp(error) + exp(-error))/2), axis=-1)
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    Standalone usage:

    >>> m = keras_core.metrics.LogCoshError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.10844523
    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.21689045

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[keras_core.metrics.LogCoshError()])
    ```
    """

    def __init__(self, name="logcosh", dtype=None):
        super().__init__(log_cosh, name, dtype=dtype)

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


# Adapted from TF-Addons implementation (RSquare class).
@keras_core_export("keras_core.metrics.R2Score")
class R2Score(reduction_metrics.Metric):
    """Computes R2 score.

    Formula:

    ```python
    sum_squares_residuals = sum((y_true - y_pred) ** 2)
    sum_squares = sum((y_true - mean(y_true)) ** 2)
    R2 = 1 - sum_squares_residuals / sum_squares
    ```

    This is also called the
    [coefficient of determination](
    https://en.wikipedia.org/wiki/Coefficient_of_determination).

    It indicates how close the fitted regression line
    is to ground-truth data.

    - The highest score possible is 1.0. It indicates that the predictors
        perfectly account for variation in the target.
    - A score of 0.0 indicates that the predictors do not
        account for variation in the target.
    - It can also be negative if the model is worse than random.

    This metric can also compute the "Adjusted R2" score.

    Args:
        class_aggregation: Specifies how to aggregate scores corresponding to
            different output classes (or target dimensions),
            i.e. different dimensions on the last axis of the predictions.
            Equivalent to `multioutput` argument in Scikit-Learn.
            Should be one of
            `None` (no aggregation), `"uniform_average"`,
            `"variance_weighted_average"`.
        num_regressors: Number of independent regressors used
            ("Adjusted R2" score). 0 is the standard R2 score.
            Defaults to `0`.
        name: Optional. string name of the metric instance.
        dtype: Optional. data type of the metric result.

    Example:

    >>> y_true = np.array([[1], [4], [3]], dtype=np.float32)
    >>> y_pred = np.array([[2], [4], [4]], dtype=np.float32)
    >>> metric = keras_core.metrics.R2Score()
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    >>> result
    0.57142854
    """

    def __init__(
        self,
        class_aggregation="uniform_average",
        num_regressors=0,
        name="r2_score",
        dtype=None,
    ):
        super().__init__(name=name, dtype=dtype)
        valid_class_aggregation_values = (
            None,
            "uniform_average",
            "variance_weighted_average",
        )
        if class_aggregation not in valid_class_aggregation_values:
            raise ValueError(
                "Invalid value for argument `class_aggregation`. Expected "
                f"one of {valid_class_aggregation_values}. "
                f"Received: class_aggregation={class_aggregation}"
            )
        if num_regressors < 0:
            raise ValueError(
                "Invalid value for argument `num_regressors`. "
                "Expected a value >= 0. "
                f"Received: num_regressors={num_regressors}"
            )
        self.class_aggregation = class_aggregation
        self.num_regressors = num_regressors
        self.num_samples = self.add_variable(
            shape=(),
            initializer=initializers.Zeros(),
            name="num_samples",
        )
        self._built = False

    def _build(self, y_true_shape, y_pred_shape):
        if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
            raise ValueError(
                "R2Score expects 2D inputs with shape "
                "(batch_size, output_dim). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        if y_pred_shape[-1] is None or y_true_shape[-1] is None:
            raise ValueError(
                "R2Score expects 2D inputs with shape "
                "(batch_size, output_dim), with output_dim fully "
                "defined (not None). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        num_classes = y_pred_shape[-1]
        self.squared_sum = self.add_variable(
            name="squared_sum",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        self.sum = self.add_variable(
            name="sum",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        self.total_mse = self.add_variable(
            name="residual",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        self.count = self.add_variable(
            name="count",
            shape=[num_classes],
            initializer=initializers.Zeros(),
        )
        self._built = True

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates R2 score statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Can be a
                `Tensor` whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
                Defaults to `1`.

        Returns:
            Update op.
        """
        y_true = ops.convert_to_tensor(y_true, dtype=self._dtype)
        y_pred = ops.convert_to_tensor(y_pred, dtype=self._dtype)
        y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
        if not self._built:
            self._build(y_true.shape, y_pred.shape)

        if sample_weight is None:
            sample_weight = 1

        sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype)

        if len(sample_weight.shape) == 1:
            # Make sure there's a features dimension
            sample_weight = ops.expand_dims(sample_weight, axis=1)

        sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))

        weighted_y_true = y_true * ops.cast(sample_weight, y_true.dtype)
        self.sum.assign(self.sum + ops.sum(weighted_y_true, axis=0))
        self.squared_sum.assign(
            self.squared_sum + ops.sum(y_true * weighted_y_true, axis=0)
        )
        self.total_mse.assign(
            self.total_mse
            + ops.sum(
                (y_true - y_pred) ** 2 * ops.cast(sample_weight, y_true.dtype),
                axis=0,
            )
        )
        self.count.assign(self.count + ops.sum(sample_weight, axis=0))
        self.num_samples.assign(self.num_samples + ops.size(y_true))

    def result(self):
        mean = self.sum / self.count
        total = self.squared_sum - self.sum * mean
        raw_scores = 1 - (self.total_mse / total)
        raw_scores = ops.where(ops.isinf(raw_scores), 0.0, raw_scores)

        if self.class_aggregation == "uniform_average":
            r2_score = ops.mean(raw_scores)
        elif self.class_aggregation == "variance_weighted_average":
            weighted_sum = ops.sum(total * raw_scores)
            sum_of_weights = ops.sum(total)
            r2_score = weighted_sum / sum_of_weights
        else:
            r2_score = raw_scores

        if self.num_regressors != 0:
            if self.num_regressors > self.num_samples - 1:
                warnings.warn(
                    "More independent predictors than datapoints "
                    "in adjusted R2 score. Falling back to standard R2 score.",
                    stacklevel=2,
                )
            elif self.num_regressors == self.num_samples - 1:
                warnings.warn(
                    "Division by zero in Adjusted R2 score. "
                    "Falling back to standard R2 score.",
                    stacklevel=2,
                )
            else:
                n = ops.convert_to_tensor(self.num_samples, dtype="float32")
                p = ops.convert_to_tensor(self.num_regressors, dtype="float32")
                num = ops.multiply(
                    ops.subtract(1.0, r2_score), ops.subtract(n, 1.0)
                )
                den = ops.subtract(ops.subtract(n, p), 1.0)
                r2_score = ops.subtract(1.0, ops.divide(num, den))
        return r2_score

    def reset_state(self):
        for v in self.variables:
            v.assign(ops.zeros(v.shape, dtype=v.dtype))

    def get_config(self):
        config = {
            "name": self.name,
            "dtype": self.dtype,
            "class_aggregation": self.class_aggregation,
            "num_regressors": self.num_regressors,
        }
        base_config = super().get_config()
        return {**base_config, **config}


def cosine_similarity(y_true, y_pred, axis=-1):
    """Computes the cosine similarity between labels and predictions.

    Formula:

    ```python
    loss = sum(l2_norm(y_true) * l2_norm(y_pred))
    ```

    Args:
        y_true: Tensor of true targets.
        y_pred: Tensor of predicted targets.
        axis: Axis along which to determine similarity. Defaults to `-1`.

    Returns:
        Cosine similarity tensor.

    Example:

    >>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
    >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
    >>> loss = keras_core.losses.cosine_similarity(y_true, y_pred, axis=-1)
    [0., 0.99999994, -0.99999994]
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true, y_pred = squeeze_to_same_rank(y_true, y_pred)
    y_pred = normalize(y_pred, axis=axis)
    y_true = normalize(y_true, axis=axis)
    return ops.sum(y_true * y_pred, axis=axis)
keras-core/keras_core/metrics/regression_metrics.py/0
{ "file_path": "keras-core/keras_core/metrics/regression_metrics.py", "repo_id": "keras-core", "token_count": 8887 }
36
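The R2 formula from the `R2Score` docstring above, applied to the docstring's own example values in plain NumPy.

```python
import numpy as np

# R2 = 1 - sum_squares_residuals / sum_squares, per the docstring.
y_true = np.array([1.0, 4.0, 3.0])
y_pred = np.array([2.0, 4.0, 4.0])

ss_res = np.sum((y_true - y_pred) ** 2)           # 2.0
ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)  # ~4.6667
print(1.0 - ss_res / ss_tot)                      # ~0.5714, matching the docstring
```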
import math

import numpy as np
import tree

from keras_core.api_export import keras_core_export


def compute_pooling_output_shape(
    input_shape,
    pool_size,
    strides,
    padding="valid",
    data_format="channels_last",
):
    """Computes the output shape of pooling operations.

    Args:
        input_shape: Input shape. Must be a tuple of integers.
        pool_size: Size of the pooling operation. Must be a tuple of integers.
        strides: Stride of the pooling operation. Must be a tuple of integers.
            Defaults to `pool_size`.
        padding: Padding method. Available methods are `"valid"` or `"same"`.
            Defaults to `"valid"`.
        data_format: String, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. Defaults to `"channels_last"`.

    Returns:
        Tuple of ints: The output shape of the pooling operation.

    Examples:

    # Basic usage with square pooling on a single image
    >>> compute_pooling_output_shape((1, 4, 4, 1), (2, 2))
    (1, 2, 2, 1)

    # Strided pooling on a single image with strides different from pool_size
    >>> compute_pooling_output_shape((1, 4, 4, 1), (2, 2), strides=(1, 1))
    (1, 3, 3, 1)

    # Pooling on a batch of images
    >>> compute_pooling_output_shape((32, 4, 4, 3), (2, 2))
    (32, 2, 2, 3)
    """
    strides = pool_size if strides is None else strides
    input_shape_origin = list(input_shape)
    input_shape = np.array(input_shape)
    if data_format == "channels_last":
        spatial_shape = input_shape[1:-1]
    else:
        spatial_shape = input_shape[2:]
    none_dims = []
    for i in range(len(spatial_shape)):
        if spatial_shape[i] is None:
            # Set `None` shape to a manual value so that we can run numpy
            # computation on `spatial_shape`.
            spatial_shape[i] = -1
            none_dims.append(i)
    pool_size = np.array(pool_size)
    if padding == "valid":
        output_spatial_shape = (
            np.floor((spatial_shape - pool_size) / strides) + 1
        )
        for i in range(len(output_spatial_shape)):
            if i not in none_dims and output_spatial_shape[i] < 0:
                raise ValueError(
                    "Computed output size would be negative. Received: "
                    f"`inputs.shape={input_shape}` and `pool_size={pool_size}`."
                )
    elif padding == "same":
        output_spatial_shape = np.floor((spatial_shape - 1) / strides) + 1
    else:
        raise ValueError(
            "`padding` must be either `'valid'` or `'same'`. Received "
            f"{padding}."
        )
    output_spatial_shape = [int(i) for i in output_spatial_shape]
    for i in none_dims:
        output_spatial_shape[i] = None
    output_spatial_shape = tuple(output_spatial_shape)
    if data_format == "channels_last":
        output_shape = (
            (input_shape_origin[0],)
            + output_spatial_shape
            + (input_shape_origin[-1],)
        )
    else:
        output_shape = (
            input_shape_origin[0],
            input_shape_origin[1],
        ) + output_spatial_shape
    return output_shape


def compute_conv_output_shape(
    input_shape,
    filters,
    kernel_size,
    strides=1,
    padding="valid",
    data_format="channels_last",
    dilation_rate=1,
):
    """Compute the output shape of conv ops."""
    if data_format == "channels_last":
        spatial_shape = input_shape[1:-1]
        kernel_shape = kernel_size + (input_shape[-1], filters)
    else:
        spatial_shape = input_shape[2:]
        kernel_shape = kernel_size + (input_shape[1], filters)
    if len(kernel_shape) != len(input_shape):
        raise ValueError(
            "Kernel shape must have the same length as input, but received "
            f"kernel of shape {kernel_shape} and "
            f"input of shape {input_shape}."
        )
    if isinstance(dilation_rate, int):
        dilation_rate = (dilation_rate,) * len(spatial_shape)
    if isinstance(strides, int):
        strides = (strides,) * len(spatial_shape)
    if len(dilation_rate) != len(spatial_shape):
        raise ValueError(
            "Dilation must be None, scalar or tuple/list of length of "
            "inputs' spatial shape, but received "
            f"`dilation_rate={dilation_rate}` and "
            f"input of shape {input_shape}."
        )

    none_dims = []
    spatial_shape = np.array(spatial_shape)
    for i in range(len(spatial_shape)):
        if spatial_shape[i] is None:
            # Set `None` shape to a manual value so that we can run numpy
            # computation on `spatial_shape`.
            spatial_shape[i] = -1
            none_dims.append(i)

    kernel_spatial_shape = np.array(kernel_shape[:-2])
    dilation_rate = np.array(dilation_rate)
    if padding == "valid":
        output_spatial_shape = (
            np.floor(
                (spatial_shape - dilation_rate * (kernel_spatial_shape - 1) - 1)
                / strides
            )
            + 1
        )
        for i in range(len(output_spatial_shape)):
            if i not in none_dims and output_spatial_shape[i] < 0:
                raise ValueError(
                    "Computed output size would be negative. Received "
                    f"`inputs shape={input_shape}`, "
                    f"`kernel shape={kernel_shape}`, "
                    f"`dilation_rate={dilation_rate}`."
                )
    elif padding == "same" or padding == "causal":
        output_spatial_shape = np.floor((spatial_shape - 1) / strides) + 1
    output_spatial_shape = [int(i) for i in output_spatial_shape]
    for i in none_dims:
        output_spatial_shape[i] = None
    output_spatial_shape = tuple(output_spatial_shape)
    if data_format == "channels_last":
        output_shape = (
            (input_shape[0],) + output_spatial_shape + (kernel_shape[-1],)
        )
    else:
        output_shape = (input_shape[0], kernel_shape[-1]) + output_spatial_shape
    return output_shape


def compute_reshape_output_shape(input_shape, new_shape, new_shape_arg_name):
    """Converts `-1` in `new_shape` to either an actual dimension or `None`.

    This utility does not special case the 0th dimension (batch size).
    """
    unknown_dim_count = new_shape.count(-1)
    if unknown_dim_count > 1:
        raise ValueError(
            "There must be at most one unknown dimension (-1) in "
            f"{new_shape_arg_name}. Received: {new_shape_arg_name}={new_shape}."
        )

    # If there is a None in input_shape, we can't infer what the -1 is
    if None in input_shape:
        return tuple(dim if dim != -1 else None for dim in new_shape)

    input_size = math.prod(input_shape)
    # If the new_shape fully defined, return it
    if unknown_dim_count == 0:
        if input_size != math.prod(new_shape):
            raise ValueError(
                "The total size of the tensor must be unchanged. Received: "
                f"input_shape={input_shape}, {new_shape_arg_name}={new_shape}"
            )
        return new_shape

    # We have one -1 in new_shape, compute the actual value
    known_output_size = 1
    unknown_dim_index = None
    for index, dim in enumerate(new_shape):
        if dim == -1:
            unknown_dim_index = index
        else:
            known_output_size *= dim

    if known_output_size == 0 or input_size % known_output_size != 0:
        raise ValueError(
            "The total size of the tensor must be unchanged, however, the "
            "input size cannot be divided by the specified dimensions in "
            f"{new_shape_arg_name}. Received: input_shape={input_shape}, "
            f"{new_shape_arg_name}={new_shape}"
        )

    output_shape = list(new_shape)
    output_shape[unknown_dim_index] = input_size // known_output_size

    return tuple(output_shape)


def reduce_shape(shape, axis=None, keepdims=False):
    shape = list(shape)
    if axis is None:
        if keepdims:
            return tuple([1 for _ in shape])
        else:
            return tuple([])

    if keepdims:
        for ax in axis:
            shape[ax] = 1
        return tuple(shape)
    else:
        for ax in sorted(axis, reverse=True):
            del shape[ax]
        return tuple(shape)


@keras_core_export("keras_core.utils.get_source_inputs")
def get_source_inputs(tensor):
    """Returns the list of input tensors necessary to compute `tensor`.

    Output will always be a list of tensors (potentially with 1 element).

    Args:
        tensor: The tensor to start from.

    Returns:
        List of input tensors.
    """
    if not hasattr(tensor, "_keras_history"):
        return tensor

    operation, node_index, _ = tensor._keras_history
    if not operation or not operation._inbound_nodes:
        return [tensor]
    else:
        node = operation._inbound_nodes[node_index]
        if node.is_input:
            # Reached input node, stop recursion.
            return tree.flatten(node.output_tensors)
        else:
            source_tensors = []
            for tensor in node.input_tensors:
                previous_sources = get_source_inputs(tensor)
                # Avoid input redundancy.
                for x in previous_sources:
                    if all(x is not t for t in source_tensors):
                        source_tensors.append(x)
            return source_tensors
keras-core/keras_core/ops/operation_utils.py/0
{ "file_path": "keras-core/keras_core/ops/operation_utils.py", "repo_id": "keras-core", "token_count": 4307 }
37
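A quick NumPy check of the valid-padding formula that `compute_pooling_output_shape` above uses, `floor((spatial - pool) / strides) + 1` applied per spatial dimension, reproducing the two single-image examples from its docstring.

```python
import numpy as np

spatial = np.array([4, 4])
pool = np.array([2, 2])

# pool_size == strides: (1, 4, 4, 1) -> (1, 2, 2, 1)
print(np.floor((spatial - pool) / pool) + 1)              # [2. 2.]

# strides = (1, 1): (1, 4, 4, 1) -> (1, 3, 3, 1)
print(np.floor((spatial - pool) / np.array([1, 1])) + 1)  # [3. 3.]
```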
"""Tests for Keras python-based idempotent saving functions.""" import json import os import warnings import zipfile from pathlib import Path from unittest import mock import numpy as np import pytest import keras_core from keras_core import ops from keras_core import testing from keras_core.saving import saving_lib @keras_core.saving.register_keras_serializable(package="my_custom_package") class MyDense(keras_core.layers.Layer): def __init__(self, units, **kwargs): super().__init__(**kwargs) self.units = units self.nested_layer = keras_core.layers.Dense(self.units, name="dense") def build(self, input_shape): self.additional_weights = [ self.add_weight( shape=(), name="my_additional_weight", initializer="ones", trainable=True, ), self.add_weight( shape=(), name="my_additional_weight_2", initializer="ones", trainable=True, ), ] self.weights_in_dict = { "my_weight": self.add_weight( shape=(), name="my_dict_weight", initializer="ones", trainable=True, ), } self.nested_layer.build(input_shape) def call(self, inputs): return self.nested_layer(inputs) def two(self): return 2 ASSETS_DATA = "These are my assets" VARIABLES_DATA = np.random.random((10,)) @keras_core.saving.register_keras_serializable(package="my_custom_package") class LayerWithCustomSaving(MyDense): def build(self, input_shape): self.assets = ASSETS_DATA self.stored_variables = VARIABLES_DATA return super().build(input_shape) def save_assets(self, inner_path): with open(os.path.join(inner_path, "assets.txt"), "w") as f: f.write(self.assets) def save_own_variables(self, store): store["variables"] = self.stored_variables def load_assets(self, inner_path): with open(os.path.join(inner_path, "assets.txt"), "r") as f: text = f.read() self.assets = text def load_own_variables(self, store): self.stored_variables = np.array(store["variables"]) @keras_core.saving.register_keras_serializable(package="my_custom_package") class CustomModelX(keras_core.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dense1 = MyDense(1, name="my_dense_1") self.dense2 = MyDense(1, name="my_dense_2") def call(self, inputs): out = self.dense1(inputs) return self.dense2(out) def one(self): return 1 @keras_core.saving.register_keras_serializable(package="my_custom_package") class ModelWithCustomSaving(keras_core.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.custom_dense = LayerWithCustomSaving(1) def call(self, inputs): return self.custom_dense(inputs) @keras_core.saving.register_keras_serializable(package="my_custom_package") class CompileOverridingModel(keras_core.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dense1 = MyDense(1) def compile(self, *args, **kwargs): super().compile(*args, **kwargs) def call(self, inputs): return self.dense1(inputs) @keras_core.saving.register_keras_serializable(package="my_custom_package") class CompileOverridingSequential(keras_core.Sequential): def compile(self, *args, **kwargs): super().compile(*args, **kwargs) @keras_core.saving.register_keras_serializable(package="my_custom_package") def my_mean_squared_error(y_true, y_pred): """Identical to built-in `mean_squared_error`, but as a custom fn.""" return ops.mean(ops.square(y_pred - y_true), axis=-1) def _get_subclassed_model(compile=True): subclassed_model = CustomModelX(name="custom_model_x") if compile: subclassed_model.compile( optimizer="adam", loss=my_mean_squared_error, metrics=[keras_core.metrics.Hinge(), "mse"], ) return subclassed_model def 
_get_custom_sequential_model(compile=True): sequential_model = keras_core.Sequential( [MyDense(1), MyDense(1)], name="sequential" ) if compile: sequential_model.compile( optimizer="adam", loss=my_mean_squared_error, metrics=[keras_core.metrics.Hinge(), "mse"], ) return sequential_model def _get_basic_sequential_model(compile=True): sequential_model = keras_core.Sequential( [ keras_core.layers.Dense(1, name="dense_1"), keras_core.layers.Dense(1, name="dense_2"), ], name="sequential", ) if compile: sequential_model.compile( optimizer="adam", loss=my_mean_squared_error, metrics=[keras_core.metrics.Hinge(), "mse"], ) return sequential_model def _get_custom_functional_model(compile=True): inputs = keras_core.Input(shape=(4,), batch_size=2) x = MyDense(1, name="first_dense")(inputs) outputs = MyDense(1, name="second_dense")(x) functional_model = keras_core.Model(inputs, outputs) if compile: functional_model.compile( optimizer="adam", loss=my_mean_squared_error, metrics=[keras_core.metrics.Hinge(), "mse"], ) return functional_model def _get_basic_functional_model(compile=True): inputs = keras_core.Input(shape=(4,), batch_size=2) x = keras_core.layers.Dense(1, name="first_dense")(inputs) outputs = keras_core.layers.Dense(1, name="second_dense")(x) functional_model = keras_core.Model(inputs, outputs) if compile: functional_model.compile( optimizer="adam", loss=my_mean_squared_error, metrics=[keras_core.metrics.Hinge(), "mse"], ) return functional_model @pytest.mark.requires_trainable_backend class SavingTest(testing.TestCase): def _test_inference_after_instantiation(self, model): x_ref = np.random.random((2, 4)) y_ref = model(x_ref) temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras") model.save(temp_filepath) loaded_model = saving_lib.load_model(temp_filepath) self.assertFalse(model.compiled) for w_ref, w in zip(model.variables, loaded_model.variables): self.assertAllClose(w_ref, w) self.assertAllClose(y_ref, loaded_model(x_ref)) def test_inference_after_instantiation_subclassed(self): model = _get_subclassed_model(compile=False) self._test_inference_after_instantiation(model) def test_inference_after_instantiation_basic_sequential(self): model = _get_basic_sequential_model(compile=False) self._test_inference_after_instantiation(model) def test_inference_after_instantiation_basic_functional(self): model = _get_basic_functional_model(compile=False) self._test_inference_after_instantiation(model) def test_inference_after_instantiation_custom_sequential(self): model = _get_custom_sequential_model(compile=False) self._test_inference_after_instantiation(model) def test_inference_after_instantiation_custom_functional(self): model = _get_custom_functional_model(compile=False) self._test_inference_after_instantiation(model) def _test_compile_preserved(self, model): x_ref = np.random.random((2, 4)) y_ref = np.random.random((2, 1)) model.fit(x_ref, y_ref) out_ref = model(x_ref) ref_metrics = model.evaluate(x_ref, y_ref) temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras") model.save(temp_filepath) loaded_model = saving_lib.load_model(temp_filepath) self.assertTrue(model.compiled) self.assertTrue(loaded_model.built) for w_ref, w in zip(model.variables, loaded_model.variables): self.assertAllClose(w_ref, w) self.assertAllClose(out_ref, loaded_model(x_ref)) self.assertEqual( model.optimizer.__class__, loaded_model.optimizer.__class__ ) self.assertEqual( model.optimizer.get_config(), loaded_model.optimizer.get_config() ) for w_ref, w in zip( model.optimizer.variables, 
loaded_model.optimizer.variables ): self.assertAllClose(w_ref, w) new_metrics = loaded_model.evaluate(x_ref, y_ref) for ref_m, m in zip(ref_metrics, new_metrics): self.assertAllClose(ref_m, m) def test_compile_preserved_subclassed(self): model = _get_subclassed_model(compile=True) self._test_compile_preserved(model) def test_compile_preserved_basic_sequential(self): model = _get_basic_sequential_model(compile=True) self._test_compile_preserved(model) def test_compile_preserved_custom_sequential(self): model = _get_custom_sequential_model(compile=True) self._test_compile_preserved(model) def test_compile_preserved_basic_functional(self): model = _get_basic_functional_model(compile=True) self._test_compile_preserved(model) def test_compile_preserved_custom_functional(self): model = _get_custom_functional_model(compile=True) self._test_compile_preserved(model) def test_saving_preserve_unbuilt_state(self): temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras") subclassed_model = CustomModelX() subclassed_model.save(temp_filepath) loaded_model = saving_lib.load_model(temp_filepath) self.assertEqual(subclassed_model.compiled, loaded_model.compiled) self.assertFalse(subclassed_model.built) self.assertFalse(loaded_model.built) def test_saved_module_paths_and_class_names(self): temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras") subclassed_model = _get_subclassed_model() x = np.random.random((100, 32)) y = np.random.random((100, 1)) subclassed_model.fit(x, y, epochs=1) subclassed_model.save(temp_filepath) with zipfile.ZipFile(temp_filepath, "r") as z: with z.open(saving_lib._CONFIG_FILENAME, "r") as c: config_json = c.read() config_dict = json.loads(config_json) self.assertEqual( config_dict["registered_name"], "my_custom_package>CustomModelX" ) self.assertEqual( config_dict["compile_config"]["optimizer"], "adam", ) print(config_dict["compile_config"]) self.assertEqual( config_dict["compile_config"]["loss"]["config"], "my_mean_squared_error", ) def test_saving_custom_assets_and_variables(self): temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras") model = ModelWithCustomSaving() model.compile( optimizer="adam", loss="mse", ) x = np.random.random((100, 32)) y = np.random.random((100, 1)) model.fit(x, y, epochs=1) # Assert that the archive has not been saved. self.assertFalse(os.path.exists(temp_filepath)) model.save(temp_filepath) loaded_model = saving_lib.load_model(temp_filepath) self.assertEqual(loaded_model.custom_dense.assets, ASSETS_DATA) self.assertEqual( loaded_model.custom_dense.stored_variables.tolist(), VARIABLES_DATA.tolist(), ) def _test_compile_overridden_warnings(self, model_type): temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras") model = ( CompileOverridingModel() if model_type == "subclassed" else CompileOverridingSequential( [keras_core.layers.Embedding(4, 1), MyDense(1), MyDense(1)] ) ) model.compile("sgd", "mse") model.save(temp_filepath) with mock.patch.object(warnings, "warn") as mock_warn: saving_lib.load_model(temp_filepath) if not mock_warn.call_args_list: raise AssertionError("Did not warn.") self.assertIn( "`compile()` was not called as part of model loading " "because the model's `compile()` method is custom. 
", mock_warn.call_args_list[0][0][0], ) def test_compile_overridden_warnings_sequential(self): self._test_compile_overridden_warnings("sequential") def test_compile_overridden_warnings_subclassed(self): self._test_compile_overridden_warnings("subclassed") def test_metadata(self): temp_filepath = Path( os.path.join(self.get_temp_dir(), "my_model.keras") ) model = CompileOverridingModel() model.save(temp_filepath) with zipfile.ZipFile(temp_filepath, "r") as z: with z.open(saving_lib._METADATA_FILENAME, "r") as c: metadata_json = c.read() metadata = json.loads(metadata_json) self.assertIn("keras_version", metadata) self.assertIn("date_saved", metadata) # def test_gfile_copy_local_called(self): # temp_filepath = Path( # os.path.join(self.get_temp_dir(), "my_model.keras") # ) # model = CompileOverridingModel() # with mock.patch( # "re.match", autospec=True # ) as mock_re_match, mock.patch( # "tensorflow.compat.v2.io.file_utils.copy", autospec=True # ) as mock_copy: # # Mock Remote Path check to true to test gfile copy logic # mock_re_match.return_value = True # model.save(temp_filepath) # mock_re_match.assert_called() # mock_copy.assert_called() # self.assertIn(str(temp_filepath), mock_re_match.call_args.args) # self.assertIn(str(temp_filepath), mock_copy.call_args.args) def test_save_load_weights_only(self): temp_filepath = Path( os.path.join(self.get_temp_dir(), "mymodel.weights.h5") ) model = _get_basic_functional_model() ref_input = np.random.random((2, 4)) ref_output = model.predict(ref_input) saving_lib.save_weights_only(model, temp_filepath) model = _get_basic_functional_model() saving_lib.load_weights_only(model, temp_filepath) self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6) # Test with Model method model = _get_basic_functional_model() model.load_weights(temp_filepath) self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6) def test_load_weights_only_with_keras_file(self): # Test loading weights from whole saved model temp_filepath = Path(os.path.join(self.get_temp_dir(), "mymodel.keras")) model = _get_basic_functional_model() ref_input = np.random.random((2, 4)) ref_output = model.predict(ref_input) saving_lib.save_model(model, temp_filepath) model = _get_basic_functional_model() saving_lib.load_weights_only(model, temp_filepath) self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6) # Test with Model method model = _get_basic_functional_model() model.load_weights(temp_filepath) self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6) def test_compile_arg(self): temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras") model = _get_basic_functional_model() model.compile("sgd", "mse") model.fit(np.random.random((2, 4)), np.random.random((2, 1))) saving_lib.save_model(model, temp_filepath) model = saving_lib.load_model(temp_filepath) self.assertEqual(model.compiled, True) model = saving_lib.load_model(temp_filepath, compile=False) self.assertEqual(model.compiled, False) # def test_overwrite(self): # temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras") # model = _get_basic_functional_model() # model.save(temp_filepath) # model.save(temp_filepath, overwrite=True) # with self.assertRaises(EOFError): # model.save(temp_filepath, overwrite=False) # temp_filepath = os.path.join( # self.get_temp_dir(), "mymodel.weights.h5" # ) # model = _get_basic_functional_model() # model.save_weights(temp_filepath) # model.save_weights(temp_filepath, overwrite=True) # with self.assertRaises(EOFError): # 
model.save_weights(temp_filepath, overwrite=False) def test_partial_load(self): temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras") original_model = keras_core.Sequential( [ keras_core.Input(shape=(3,), batch_size=2), keras_core.layers.Dense(4), keras_core.layers.Dense(5), ] ) original_model.save(temp_filepath) # Test with a model that has a differently shaped layer new_model = keras_core.Sequential( [ keras_core.Input(shape=(3,), batch_size=2), keras_core.layers.Dense(4), keras_core.layers.Dense(6), ] ) new_layer_kernel_value = np.array(new_model.layers[1].kernel) with self.assertRaisesRegex(ValueError, "must match"): # Doesn't work by default new_model.load_weights(temp_filepath) # Now it works new_model.load_weights(temp_filepath, skip_mismatch=True) ref_weights = original_model.layers[0].get_weights() new_weights = new_model.layers[0].get_weights() self.assertEqual(len(ref_weights), len(new_weights)) for ref_w, w in zip(ref_weights, new_weights): self.assertAllClose(ref_w, w) self.assertAllClose( np.array(new_model.layers[1].kernel), new_layer_kernel_value ) # Test with a model that has a new layer at the end new_model = keras_core.Sequential( [ keras_core.Input(shape=(3,), batch_size=2), keras_core.layers.Dense(4), keras_core.layers.Dense(5), keras_core.layers.Dense(5), ] ) new_layer_kernel_value = np.array(new_model.layers[2].kernel) with self.assertRaisesRegex(ValueError, "received 0 variables"): # Doesn't work by default new_model.load_weights(temp_filepath) # Now it works new_model.load_weights(temp_filepath, skip_mismatch=True) for layer_index in [0, 1]: ref_weights = original_model.layers[layer_index].get_weights() new_weights = new_model.layers[layer_index].get_weights() self.assertEqual(len(ref_weights), len(new_weights)) for ref_w, w in zip(ref_weights, new_weights): self.assertAllClose(ref_w, w) self.assertAllClose( np.array(new_model.layers[2].kernel), new_layer_kernel_value ) @pytest.mark.requires_trainable_backend class SavingAPITest(testing.TestCase): def test_saving_api_errors(self): from keras_core.saving import saving_api model = _get_basic_functional_model() # Saving API errors temp_filepath = os.path.join(self.get_temp_dir(), "mymodel") with self.assertRaisesRegex(ValueError, "argument is deprecated"): saving_api.save_model(model, temp_filepath, save_format="keras") temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.notkeras") with self.assertRaisesRegex(ValueError, "Invalid filepath extension"): saving_api.save_model(model, temp_filepath) temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras") with self.assertRaisesRegex(ValueError, "are not supported"): saving_api.save_model(model, temp_filepath, invalid_arg="hello") # Loading API errors temp_filepath = os.path.join(self.get_temp_dir(), "non_existent.keras") with self.assertRaisesRegex( ValueError, "Please ensure the file is an accessible" ): _ = saving_api.load_model(temp_filepath) temp_filepath = os.path.join(self.get_temp_dir(), "my_saved_model") with self.assertRaisesRegex(ValueError, "File format not supported"): _ = saving_api.load_model(temp_filepath) def test_model_api_endpoint(self): temp_filepath = Path(os.path.join(self.get_temp_dir(), "mymodel.keras")) model = _get_basic_functional_model() ref_input = np.random.random((2, 4)) ref_output = model.predict(ref_input) model.save(temp_filepath) model = keras_core.saving.load_model(temp_filepath) self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6) def test_model_api_endpoint_h5(self): temp_filepath = 
Path(os.path.join(self.get_temp_dir(), "mymodel.h5")) model = _get_basic_functional_model() ref_input = np.random.random((2, 4)) ref_output = model.predict(ref_input) model.save(temp_filepath) model = keras_core.saving.load_model(temp_filepath) self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6) def test_model_api_errors(self): model = _get_basic_functional_model() # Saving API errors temp_filepath = os.path.join(self.get_temp_dir(), "mymodel") with self.assertRaisesRegex(ValueError, "argument is deprecated"): model.save(temp_filepath, save_format="keras") temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.notkeras") with self.assertRaisesRegex(ValueError, "Invalid filepath extension"): model.save(temp_filepath) temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras") with self.assertRaisesRegex(ValueError, "are not supported"): model.save(temp_filepath, invalid_arg="hello") # def test_safe_mode(self): # temp_filepath = os.path.join(self.get_temp_dir(), "unsafe_model.keras") # model = keras_core.Sequential( # [ # keras_core.Input(shape=(3,)), # keras_core.layers.Dense(2, activation=lambda x: x * 2), # ] # ) # model.save(temp_filepath) # with self.assertRaisesRegex(ValueError, "arbitrary code execution"): # model = saving_lib.load_model(temp_filepath) # model = saving_lib.load_model(temp_filepath, safe_mode=False) # def test_normalization_kpl(self): # # With adapt # temp_filepath = os.path.join(self.get_temp_dir(), "norm_model.keras") # model = keras_core.Sequential( # [ # keras_core.Input(shape=(3,)), # keras_core.layers.Normalization(), # ] # ) # data = np.random.random((3, 3)) # model.layers[0].adapt(data) # ref_out = model(data) # model.save(temp_filepath) # model = saving_lib.load_model(temp_filepath) # out = model(data) # self.assertAllClose(ref_out, out, atol=1e-6) # # Without adapt # model = keras_core.Sequential( # [ # keras_core.Input(shape=(3,)), # keras_core.layers.Normalization( # mean=np.random.random((3,)), # variance=np.random.random((3,)), # ), # ] # ) # ref_out = model(data) # model.save(temp_filepath) # model = saving_lib.load_model(temp_filepath) # out = model(data) # self.assertAllClose(ref_out, out, atol=1e-6) # # This custom class lacks custom object registration. # class CustomRNN(keras_core.layers.Layer): # def __init__(self, units): # super(CustomRNN, self).__init__() # self.units = units # self.projection_1 = keras_core.layers.Dense( # units=units, activation="tanh" # ) # self.projection_2 = keras_core.layers.Dense( # units=units, activation="tanh" # ) # self.classifier = keras_core.layers.Dense(1) # def call(self, inputs): # outputs = [] # state = ops.zeros(shape=(inputs.shape[0], self.units)) # for t in range(inputs.shape[1]): # x = inputs[:, t, :] # h = self.projection_1(x) # y = h + self.projection_2(state) # state = y # outputs.append(y) # features = ops.stack(outputs, axis=1) # return self.classifier(features) # # This class is properly registered with a `get_config()` method. # # However, since it does not subclass keras_core.layers.Layer, it lacks # # `from_config()` for deserialization. 
# @keras_core.saving.register_keras_serializable() # class GrowthFactor: # def __init__(self, factor): # self.factor = factor # def __call__(self, inputs): # return inputs * self.factor # def get_config(self): # return {"factor": self.factor} # @keras_core.saving.register_keras_serializable(package="Complex") # class FactorLayer(keras_core.layers.Layer): # def __init__(self, factor): # super().__init__() # self.factor = factor # def call(self, x): # return x * self.factor # def get_config(self): # return {"factor": self.factor} # # This custom model does not explicitly deserialize the layers it includes # # in its `get_config`. Explicit deserialization in a `from_config` override # # or `__init__` is needed here, or an error will be thrown at loading time. # @keras_core.saving.register_keras_serializable(package="Complex") # class ComplexModel(keras_core.layers.Layer): # def __init__(self, first_layer, second_layer=None, **kwargs): # super().__init__(**kwargs) # self.first_layer = first_layer # if second_layer is not None: # self.second_layer = second_layer # else: # self.second_layer = keras_core.layers.Dense(8) # def get_config(self): # config = super().get_config() # config.update( # { # "first_layer": self.first_layer, # "second_layer": self.second_layer, # } # ) # return config # def call(self, inputs): # return self.first_layer(self.second_layer(inputs)) # class SavingBattleTest(testing.TestCase): # def test_custom_model_without_registration_error(self): # temp_filepath = os.path.join( # self.get_temp_dir(), "my_custom_model.keras" # ) # timesteps = 10 # input_dim = 5 # batch_size = 16 # inputs = keras_core.Input( # batch_shape=(batch_size, timesteps, input_dim) # ) # x = keras_core.layers.Conv1D(32, 3)(inputs) # outputs = CustomRNN(32)(x) # model = keras_core.Model(inputs, outputs) # with self.assertRaisesRegex( # TypeError, "is a custom class, please register it" # ): # model.save(temp_filepath) # _ = keras_core.models.load_model(temp_filepath) # def test_custom_object_without_from_config(self): # temp_filepath = os.path.join( # self.get_temp_dir(), "custom_fn_model.keras" # ) # inputs = keras_core.Input(shape=(4, 4)) # outputs = keras_core.layers.Dense( # 1, activation=GrowthFactor(0.5) # )(inputs) # model = keras_core.Model(inputs, outputs) # model.save(temp_filepath) # with self.assertRaisesRegex( # TypeError, "Unable to reconstruct an instance" # ): # _ = keras_core.models.load_model(temp_filepath) # def test_complex_model_without_explicit_deserialization(self): # temp_filepath = os.path.join( # self.get_temp_dir(), "complex_model.keras" # ) # inputs = keras_core.Input((32,)) # outputs = ComplexModel(first_layer=FactorLayer(0.5))(inputs) # model = keras_core.Model(inputs, outputs) # model.save(temp_filepath) # with self.assertRaisesRegex(TypeError, "are explicitly deserialized"): # _ = keras_core.models.load_model(temp_filepath)
keras-core/keras_core/saving/saving_lib_test.py/0
{ "file_path": "keras-core/keras_core/saving/saving_lib_test.py", "repo_id": "keras-core", "token_count": 13317 }
38
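The saving-library tests above revolve around Keras Core's weights-only checkpoints and partial loading. As a rough illustration of the workflow they exercise — a minimal sketch, not taken from the test file; the checkpoint name and layer sizes are arbitrary — the API looks like this:

```python
import keras_core

# Weights-only files use the `.weights.h5` extension.
model = keras_core.Sequential(
    [
        keras_core.Input(shape=(3,)),
        keras_core.layers.Dense(4),
        keras_core.layers.Dense(5),
    ]
)
model.save_weights("my_model.weights.h5")

# Loading into an architecturally identical model restores every variable.
clone = keras_core.Sequential(
    [
        keras_core.Input(shape=(3,)),
        keras_core.layers.Dense(4),
        keras_core.layers.Dense(5),
    ]
)
clone.load_weights("my_model.weights.h5")

# With `skip_mismatch=True`, layers whose shapes differ from the checkpoint
# are skipped and keep their fresh initialization, as the partial-load test
# above demonstrates.
partial = keras_core.Sequential(
    [
        keras_core.Input(shape=(3,)),
        keras_core.layers.Dense(4),
        keras_core.layers.Dense(6),  # mismatched output size vs. the checkpoint
    ]
)
partial.load_weights("my_model.weights.h5", skip_mismatch=True)
```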
CI to run on PRs and on merges to master.
keras-cv/.kokoro/README.md/0
{ "file_path": "keras-cv/.kokoro/README.md", "repo_id": "keras-cv", "token_count": 10 }
39
# KerasCV [![](https://github.com/keras-team/keras-cv/workflows/Tests/badge.svg?branch=master)](https://github.com/keras-team/keras-cv/actions?query=workflow%3ATests+branch%3Amaster) ![Downloads](https://img.shields.io/pypi/dm/keras-cv.svg) ![Python](https://img.shields.io/badge/python-v3.7.0+-success.svg) ![Tensorflow](https://img.shields.io/badge/tensorflow-v2.9.0+-success.svg) [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/keras-team/keras-cv/issues) KerasCV is a library of modular computer vision components that work natively with TensorFlow, JAX, or PyTorch. Built on Keras 3, these models, layers, metrics, callbacks, etc., can be trained and serialized in any framework and re-used in another without costly migrations. See "Configuring your backend" below for more details on multi-framework KerasCV. <img style="width: 440px; max-width: 90%;" src="https://storage.googleapis.com/keras-cv/guides/keras-cv-augmentations.gif"> KerasCV can be understood as a horizontal extension of the Keras API: the components are new first-party Keras objects that are too specialized to be added to core Keras. They receive the same level of polish and backwards compatibility guarantees as the core Keras API, and they are maintained by the Keras team. Our APIs assist in common computer vision tasks such as data augmentation, classification, object detection, segmentation, image generation, and more. Applied computer vision engineers can leverage KerasCV to quickly assemble production-grade, state-of-the-art training and inference pipelines for all of these common tasks. ## Quick Links - [List of available models and presets](https://keras.io/api/keras_cv/models/) - [Developer Guides](https://keras.io/guides/keras_cv/) - [Contributing Guide](.github/CONTRIBUTING.md) - [Call for Contributions](https://github.com/keras-team/keras-cv/issues?q=is%3Aopen+is%3Aissue+label%3Acontribution-welcome) - [API Design Guidelines](.github/API_DESIGN.md) ## Installation KerasCV supports both Keras 2 and Keras 3. We recommend Keras 3 for all new users, as it enables using KerasCV models and layers with JAX, TensorFlow and PyTorch. ### Keras 2 Installation To install the latest KerasCV release with Keras 2, simply run: ``` pip install --upgrade keras-cv tensorflow ``` ### Keras 3 Installation There are currently two ways to install Keras 3 with KerasCV. To install the latest changes for KerasCV and Keras, you can use our nightly package. ``` pip install --upgrade keras-cv-nightly tf-nightly ``` To install the stable versions of KerasCV and Keras 3, you should install Keras 3 **after** installing KerasCV. This is a temporary step while TensorFlow is pinned to Keras 2, and will no longer be necessary after TensorFlow 2.16. ``` pip install --upgrade keras-cv tensorflow pip install --upgrade keras ``` > [!IMPORTANT] > Keras 3 will not function with TensorFlow 2.14 or earlier. ## Configuring your backend If you have Keras 3 installed in your environment (see installation above), you can use KerasCV with any of JAX, TensorFlow and PyTorch. To do so, set the `KERAS_BACKEND` environment variable. For example: ```shell export KERAS_BACKEND=jax ``` Or in Colab, with: ```python import os os.environ["KERAS_BACKEND"] = "jax" import keras_cv ``` > [!IMPORTANT] > Make sure to set the `KERAS_BACKEND` before importing any Keras libraries, as it > will be used to set up Keras when it is first imported.
Once that configuration step is done, you can just import KerasCV and start using it on top of your backend of choice: ```python import keras_cv import keras filepath = keras.utils.get_file(origin="https://i.imgur.com/gCNcJJI.jpg") image = np.array(keras.utils.load_img(filepath)) image_resized = keras.ops.image.resize(image, (640, 640))[None, ...] model = keras_cv.models.YOLOV8Detector.from_preset( "yolo_v8_m_pascalvoc", bounding_box_format="xywh", ) predictions = model.predict(image_resized) ``` ## Quickstart ```python import tensorflow as tf import keras_cv import tensorflow_datasets as tfds import keras # Create a preprocessing pipeline with augmentations BATCH_SIZE = 16 NUM_CLASSES = 3 augmenter = keras_cv.layers.Augmenter( [ keras_cv.layers.RandomFlip(), keras_cv.layers.RandAugment(value_range=(0, 255)), keras_cv.layers.CutMix(), ], ) def preprocess_data(images, labels, augment=False): labels = tf.one_hot(labels, NUM_CLASSES) inputs = {"images": images, "labels": labels} outputs = inputs if augment: outputs = augmenter(outputs) return outputs['images'], outputs['labels'] train_dataset, test_dataset = tfds.load( 'rock_paper_scissors', as_supervised=True, split=['train', 'test'], ) train_dataset = train_dataset.batch(BATCH_SIZE).map( lambda x, y: preprocess_data(x, y, augment=True), num_parallel_calls=tf.data.AUTOTUNE).prefetch( tf.data.AUTOTUNE) test_dataset = test_dataset.batch(BATCH_SIZE).map( preprocess_data, num_parallel_calls=tf.data.AUTOTUNE).prefetch( tf.data.AUTOTUNE) # Create a model using a pretrained backbone backbone = keras_cv.models.EfficientNetV2Backbone.from_preset( "efficientnetv2_b0_imagenet" ) model = keras_cv.models.ImageClassifier( backbone=backbone, num_classes=NUM_CLASSES, activation="softmax", ) model.compile( loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(learning_rate=1e-5), metrics=['accuracy'] ) # Train your model model.fit( train_dataset, validation_data=test_dataset, epochs=8, ) ``` ## Contributors If you'd like to contribute, please see our [contributing guide](.github/CONTRIBUTING.md). To find an issue to tackle, please check our [call for contributions](.github/CALL_FOR_CONTRIBUTIONS.md). We would like to leverage/outsource the Keras community not only for bug reporting, but also for active development for feature delivery. To achieve this, here is the predefined process for how to contribute to this repository: 1) Contributors are always welcome to help us fix an issue, add tests, better documentation. 2) If contributors would like to create a backbone, we usually require a pre-trained weight set with the model for one dataset as the first PR, and a training script as a follow-up. The training script will preferably help us reproduce the results claimed from paper. The backbone should be generic but the training script can contain paper specific parameters such as learning rate schedules and weight decays. The training script will be used to produce leaderboard results. Exceptions apply to large transformer-based models which are difficult to train. If this is the case, contributors should let us know so the team can help in training the model or providing GCP resources. 3) If contributors would like to create a meta arch, please try to be aligned with our roadmap and create a PR for design review to make sure the meta arch is modular. 4) If contributors would like to create a new input formatting which is not in our roadmap for the next 6 months, e.g., keypoint, please create an issue and ask for a sponsor. 
5) If contributors would like to support a new task which is not in our roadmap for the next 6 months, e.g., 3D reconstruction, please create an issue and ask for a sponsor. Thank you to all of our wonderful contributors! <a href="https://github.com/keras-team/keras-cv/graphs/contributors"> <img src="https://contrib.rocks/image?repo=keras-team/keras-cv" /> </a> ## Pretrained Weights Many models in KerasCV come with pre-trained weights. With the exception of StableDiffusion and the standard Vision Transformer, all of these weights are trained using Keras and KerasCV components and training scripts in this repository. While some models are not trained with the same parameters or preprocessing pipeline as defined in their original publications, the KerasCV team ensures strong numerical performance. Performance metrics for the provided pre-trained weights can be found in the training history for each documented task. An example of this can be found in the ImageNet classification training [history for backbone models](examples/training/classification/imagenet/training_history.json). All results are reproducible using the training scripts in this repository. Historically, many models have been trained on image datasets rescaled via manually crafted normalization schemes. The most common variant of manually crafted normalization scheme is subtraction of the imagenet mean pixel followed by standard deviation normalization based on the imagenet pixel standard deviation. This scheme is an artifact of the days of manual feature engineering, but is no longer required to score state of the art scores using modern deep learning architectures. Due to this, KerasCV is standardized to operate on images that have been rescaled using a simple `1/255` rescaling layer. This can be seen in all KerasCV training pipelines and code examples. ## Custom Ops Note that in some of the 3D Object Detection layers, custom TF ops are used. The binaries for these ops are not shipped in our PyPi package in order to keep our wheels pure-Python. If you'd like to use these custom ops, you can install from source using the instructions below. ### Installing KerasCV with Custom Ops from Source Installing custom ops from source requires the [Bazel](https://bazel.build/) build system (version >= 5.4.0). Steps to install Bazel can be [found here](https://github.com/keras-team/keras/blob/v2.11.0/.devcontainer/Dockerfile#L21-L23). ``` git clone https://github.com/keras-team/keras-cv.git cd keras-cv python3 build_deps/configure.py bazel build build_pip_pkg export BUILD_WITH_CUSTOM_OPS=true bazel-bin/build_pip_pkg wheels pip install wheels/keras_cv-*.whl ``` Note that GitHub actions exist to release KerasCV with custom ops, but are currently disabled. You can use these [actions](https://github.com/keras-team/keras-cv/blob/master/.github/workflows/release.yml) in your own fork to create wheels for Linux (manylinux2014), MacOS (both x86 and ARM), and Windows. ## Disclaimer KerasCV provides access to pre-trained models via the `keras_cv.models` API. These pre-trained models are provided on an "as is" basis, without warranties or conditions of any kind. The following underlying models are provided by third parties, and are subject to separate licenses: StableDiffusion, Vision Transformer ## Citing KerasCV If KerasCV helps your research, we appreciate your citations. 
Here is the BibTeX entry: ```bibtex @misc{wood2022kerascv, title={KerasCV}, author={Wood, Luke and Tan, Zhenyu and Stenbit, Ian and Bischof, Jonathan and Zhu, Scott and Chollet, Fran\c{c}ois and Sreepathihalli, Divyashree and Sampath, Ramesh and others}, year={2022}, howpublished={\url{https://github.com/keras-team/keras-cv}}, } ```
keras-cv/README.md/0
{ "file_path": "keras-cv/README.md", "repo_id": "keras-cv", "token_count": 3402 }
40
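The README above notes that KerasCV standardizes on a plain `1/255` rescaling of pixel values rather than ImageNet mean/std normalization. A minimal sketch of what that convention looks like in front of a model — shapes and values here are illustrative, not taken from the README:

```python
import keras

# A single Rescaling layer replaces hand-crafted ImageNet normalization:
# raw uint8-style pixels in [0, 255] are mapped to floats in [0, 1].
pipeline = keras.Sequential(
    [
        keras.Input(shape=(224, 224, 3)),
        keras.layers.Rescaling(1.0 / 255),
    ]
)

images = keras.ops.ones((2, 224, 224, 3)) * 255.0
scaled = pipeline(images)  # every value is now 1.0
```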
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import unittest import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from tensorflow import keras from keras_cv import bounding_box from keras_cv.layers import RandomCrop from keras_cv.layers.preprocessing.base_image_augmentation_layer import ( BaseImageAugmentationLayer, ) H_AXIS = -3 W_AXIS = -2 class OldRandomCrop(BaseImageAugmentationLayer): """A preprocessing layer which randomly crops images during training. During training, this layer will randomly choose a location to crop images down to a target size. The layer will crop all the images in the same batch to the same cropping location. At inference time, and during training if an input image is smaller than the target size, the input will be resized and cropped so as to return the largest possible window in the image that matches the target aspect ratio. If you need to apply random cropping at inference time, set `training` to True when calling the layer. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer or floating point dtype. By default, the layer will output floats. Input shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format. Output shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., target_height, target_width, channels)`. Args: height: Integer, the height of the output shape. width: Integer, the width of the output shape. seed: Integer. Used to create a random seed. """ def __init__( self, height, width, seed=None, bounding_box_format=None, **kwargs ): super().__init__( **kwargs, autocast=False, seed=seed, ) self.height = height self.width = width self.seed = seed self.auto_vectorize = False self.bounding_box_format = bounding_box_format def get_random_transformation(self, image=None, **kwargs): image_shape = tf.shape(image) h_diff = image_shape[H_AXIS] - self.height w_diff = image_shape[W_AXIS] - self.width dtype = image_shape.dtype rands = self._random_generator.uniform([2], 0, dtype.max, dtype) h_start = rands[0] % (h_diff + 1) w_start = rands[1] % (w_diff + 1) return {"top": h_start, "left": w_start} def augment_image(self, image, transformation, **kwargs): image_shape = tf.shape(image) h_diff = image_shape[H_AXIS] - self.height w_diff = image_shape[W_AXIS] - self.width return tf.cond( tf.reduce_all((h_diff >= 0, w_diff >= 0)), lambda: self._crop(image, transformation), lambda: self._resize(image), ) def compute_image_signature(self, images): return tf.TensorSpec( shape=(self.height, self.width, images.shape[-1]), dtype=self.compute_dtype, ) def augment_bounding_boxes( self, bounding_boxes, transformation, image=None, **kwargs ): if self.bounding_box_format is None: raise ValueError( "`RandomCrop()` was called with bounding boxes," "but no `bounding_box_format` was specified in the constructor." "Please specify a bounding box format in the constructor. i.e." 
"`RandomCrop(bounding_box_format='xyxy')`" ) bounding_boxes = bounding_box.convert_format( bounding_boxes, source=self.bounding_box_format, target="xyxy", images=image, ) image_shape = tf.shape(image) h_diff = image_shape[H_AXIS] - self.height w_diff = image_shape[W_AXIS] - self.width bounding_boxes = tf.cond( tf.reduce_all((h_diff >= 0, w_diff >= 0)), lambda: self._crop_bounding_boxes( image, bounding_boxes, transformation ), lambda: self._resize_bounding_boxes( image, bounding_boxes, ), ) bounding_boxes = bounding_box.clip_to_image( bounding_boxes, bounding_box_format="xyxy", image_shape=(self.height, self.width, image_shape[-1]), ) bounding_boxes = bounding_box.convert_format( bounding_boxes, source="xyxy", target=self.bounding_box_format, dtype=self.compute_dtype, images=image, ) return bounding_boxes def _crop(self, image, transformation): top = transformation["top"] left = transformation["left"] return tf.image.crop_to_bounding_box( image, top, left, self.height, self.width ) def _resize(self, image): resizing_layer = keras.layers.Resizing(self.height, self.width) outputs = resizing_layer(image) # smart_resize will always output float32, so we need to re-cast. return tf.cast(outputs, self.compute_dtype) def augment_label(self, label, transformation, **kwargs): return label def get_config(self): config = { "height": self.height, "width": self.width, "seed": self.seed, "bounding_box_format": self.bounding_box_format, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def _crop_bounding_boxes(self, image, bounding_boxes, transformation): top = tf.cast(transformation["top"], dtype=self.compute_dtype) left = tf.cast(transformation["left"], dtype=self.compute_dtype) output = bounding_boxes.copy() x1, y1, x2, y2 = tf.split( bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1 ) output["boxes"] = tf.concat( [ x1 - left, y1 - top, x2 - left, y2 - top, ], axis=-1, ) return output def _resize_bounding_boxes(self, image, bounding_boxes): output = bounding_boxes.copy() image_shape = tf.shape(image) x_scale = tf.cast( self.width / image_shape[W_AXIS], dtype=self.compute_dtype ) y_scale = tf.cast( self.height / image_shape[H_AXIS], dtype=self.compute_dtype ) x1, y1, x2, y2 = tf.split( bounding_boxes["boxes"], [1, 1, 1, 1], axis=-1 ) output["boxes"] = tf.concat( [ x1 * x_scale, y1 * y_scale, x2 * x_scale, y2 * y_scale, ], axis=-1, ) return output class RandomCropTest(tf.test.TestCase): def test_consistency_with_old_impl_crop(self): ori_height, ori_width = 256, 256 height, width = 128, 128 input_image = np.random.random((ori_height, ori_width, 3)).astype( np.float32 ) bboxes = { "boxes": tf.convert_to_tensor([[100, 100, 200, 200]]), "classes": tf.convert_to_tensor([1]), } input = {"images": input_image, "bounding_boxes": bboxes} layer = RandomCrop( height=height, width=width, bounding_box_format="xyxy" ) old_layer = OldRandomCrop( height=height, width=width, bounding_box_format="xyxy" ) # manually set height_offset and width_offset height_offset = 20 width_offset = 30 transformations = { "tops": tf.ones((1, 1)) * (height_offset / (ori_height - height)), "lefts": tf.ones((1, 1)) * (width_offset / (ori_width - width)), } old_transformation = { "top": tf.convert_to_tensor(height_offset, dtype=tf.int32), "left": tf.convert_to_tensor(width_offset, dtype=tf.int32), } with unittest.mock.patch.object( layer, "get_random_transformation_batch", return_value=transformations, ): output = layer(input, training=True) with unittest.mock.patch.object( old_layer, 
"get_random_transformation", return_value=old_transformation, ): old_output = old_layer(input, training=True) self.assertAllClose( output["bounding_boxes"]["boxes"], old_output["bounding_boxes"]["boxes"].to_tensor(-1), ) self.assertAllClose(output["images"], old_output["images"]) def test_consistency_with_old_impl_resize(self): input_image = np.random.random((256, 256, 3)).astype(np.float32) bboxes = { "boxes": tf.convert_to_tensor([[100, 100, 200, 200]]), "classes": tf.convert_to_tensor([1]), } input = {"images": input_image, "bounding_boxes": bboxes} layer = RandomCrop(height=512, width=512, bounding_box_format="xyxy") old_layer = OldRandomCrop( height=512, width=512, bounding_box_format="xyxy" ) output = layer(input, training=True) old_output = old_layer(input, training=True) self.assertAllClose( output["bounding_boxes"]["boxes"], old_output["bounding_boxes"]["boxes"].to_tensor(-1), ) self.assertAllClose(output["images"], old_output["images"]) if __name__ == "__main__": # Run benchmark (x_train, _), _ = keras.datasets.cifar10.load_data() x_train = x_train.astype(np.float32) num_images = [100, 200, 500, 1000] num_classes = 10 results = {} aug_candidates = [RandomCrop, OldRandomCrop] aug_args = {"height": 16, "width": 16} for aug in aug_candidates: # Eager Mode c = aug.__name__ layer = aug(**aug_args) runtimes = [] print(f"Timing {c}") for n_images in num_images: # warmup layer(x_train[:n_images]) t0 = time.time() r1 = layer(x_train[:n_images]) t1 = time.time() runtimes.append(t1 - t0) print(f"Runtime for {c}, n_images={n_images}: {t1-t0}") results[c] = runtimes # Graph Mode c = aug.__name__ + " Graph Mode" layer = aug(**aug_args) @tf.function() def apply_aug(inputs): return layer(inputs) runtimes = [] print(f"Timing {c}") for n_images in num_images: # warmup apply_aug(x_train[:n_images]) t0 = time.time() r1 = apply_aug(x_train[:n_images]) t1 = time.time() runtimes.append(t1 - t0) print(f"Runtime for {c}, n_images={n_images}: {t1-t0}") results[c] = runtimes # XLA Mode # cannot run tf.image.crop_and_resize on XLA plt.figure() for key in results: plt.plot(num_images, results[key], label=key) plt.xlabel("Number images") plt.ylabel("Runtime (seconds)") plt.legend() plt.savefig("comparison.png") # So we can actually see more relevant margins del results[aug_candidates[1].__name__] plt.figure() for key in results: plt.plot(num_images, results[key], label=key) plt.xlabel("Number images") plt.ylabel("Runtime (seconds)") plt.legend() plt.savefig("comparison_no_old_eager.png") # Run unit tests tf.test.main()
keras-cv/benchmarks/vectorized_random_crop.py/0
{ "file_path": "keras-cv/benchmarks/vectorized_random_crop.py", "repo_id": "keras-cv", "token_count": 5656 }
41
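The benchmark above compares the vectorized `RandomCrop` layer against the older per-image implementation. For orientation, here is a minimal usage sketch of the layer with bounding boxes, mirroring the inputs used in the consistency tests (image size and box values are illustrative):

```python
import numpy as np
import tensorflow as tf
import keras_cv

# A 256x256 image with one box in "xyxy" format, as in the tests above.
images = np.random.random((256, 256, 3)).astype(np.float32)
boxes = {
    "boxes": tf.convert_to_tensor([[100, 100, 200, 200]]),
    "classes": tf.convert_to_tensor([1]),
}

layer = keras_cv.layers.RandomCrop(
    height=128, width=128, bounding_box_format="xyxy"
)

# When bounding boxes are present, the layer takes a dict and applies the
# same crop offsets to both the image and the boxes.
outputs = layer({"images": images, "bounding_boxes": boxes}, training=True)
cropped_images = outputs["images"]         # shape (128, 128, 3)
cropped_boxes = outputs["bounding_boxes"]  # boxes shifted and clipped to the crop
```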
# Addons Build Definitions inherited from TensorFlow Core D_GLIBCXX_USE_CXX11_ABI = "%{tf_cx11_abi}" CPLUSPLUS_VERSION = "%{tf_cplusplus_ver}"
keras-cv/build_deps/tf_dependency/build_defs.bzl.tpl/0
{ "file_path": "keras-cv/build_deps/tf_dependency/build_defs.bzl.tpl", "repo_id": "keras-cv", "token_count": 59 }
42
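The template above only defines two Starlark constants that Bazel fills in with TensorFlow's C++ ABI and C++ standard flags. A hypothetical sketch of how a BUILD rule might consume them once the template is expanded into `build_defs.bzl` — the load label, target name, and source file are assumptions for illustration, not taken from the repository:

```starlark
# Hypothetical BUILD snippet consuming the generated constants.
load(
    "//build_deps/tf_dependency:build_defs.bzl",
    "CPLUSPLUS_VERSION",
    "D_GLIBCXX_USE_CXX11_ABI",
)

cc_library(
    name = "custom_op_lib",      # hypothetical target
    srcs = ["custom_op.cc"],     # hypothetical source
    copts = [
        CPLUSPLUS_VERSION,           # C++ standard flag substituted from TF
        D_GLIBCXX_USE_CXX11_ABI,     # keeps the C++ ABI consistent with TF
    ],
)
```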