Columns:
text: string (lengths 5 to 261k)
id: string (lengths 16 to 106)
metadata: dict
__index_level_0__: int64 (0 to 266)
from keras_core import ops from keras_core.api_export import keras_core_export from keras_core.layers.pooling.base_global_pooling import BaseGlobalPooling @keras_core_export( [ "keras_core.layers.GlobalAveragePooling3D", "keras_core.layers.GlobalAvgPool3D", ] ) class GlobalAveragePooling3D(BaseGlobalPooling): """Global average pooling operation for 3D data. Args: data_format: string, either `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `"channels_first"` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be `"channels_last"`. keepdims: A boolean, whether to keep the temporal dimension or not. If `keepdims` is `False` (default), the rank of the tensor is reduced for spatial dimensions. If `keepdims` is `True`, the spatial dimension are retained with length 1. The behavior is the same as for `tf.reduce_mean` or `np.mean`. Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: - If `keepdims=False`: 2D tensor with shape `(batch_size, channels)`. - If `keepdims=True`: - If `data_format="channels_last"`: 5D tensor with shape `(batch_size, 1, 1, 1, channels)` - If `data_format="channels_first"`: 5D tensor with shape `(batch_size, channels, 1, 1, 1)` Example: >>> x = np.random.rand(2, 4, 5, 4, 3) >>> y = keras_core.layers.GlobalAveragePooling3D()(x) >>> y.shape (2, 3) """ def __init__(self, data_format=None, keepdims=False, **kwargs): super().__init__( pool_dimensions=3, data_format=data_format, keepdims=keepdims, **kwargs, ) def call(self, inputs): if self.data_format == "channels_last": return ops.mean(inputs, axis=[1, 2, 3], keepdims=self.keepdims) return ops.mean(inputs, axis=[2, 3, 4], keepdims=self.keepdims)
keras-core/keras_core/layers/pooling/global_average_pooling3d.py/0
{ "file_path": "keras-core/keras_core/layers/pooling/global_average_pooling3d.py", "repo_id": "keras-core", "token_count": 1158 }
43
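A minimal usage sketch of the GlobalAveragePooling3D layer documented in the row above, assuming keras_core and numpy are installed and the default channels_last data format; it shows the keepdims=True variant in addition to the docstring's default case:

    import numpy as np
    import keras_core

    x = np.random.rand(2, 4, 5, 4, 3)  # (batch, dim1, dim2, dim3, channels)
    y = keras_core.layers.GlobalAveragePooling3D(keepdims=True)(x)
    print(y.shape)  # (2, 1, 1, 1, 3): spatial dims are kept with length 1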
import os import numpy as np from tensorflow import data as tf_data from keras_core import backend from keras_core import layers from keras_core import models from keras_core import testing from keras_core.saving import saving_api class DicretizationTest(testing.TestCase): def test_discretization_basics(self): self.run_layer_test( layers.Discretization, init_kwargs={ "bin_boundaries": [0.0, 0.5, 1.0], }, input_shape=(2, 3), expected_output_shape=(2, 3), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, run_training_check=False, ) def test_adapt_flow(self): layer = layers.Discretization(num_bins=4) layer.adapt( np.random.random((32, 3)), ) output = layer(np.array([[0.0, 0.1, 0.3]])) self.assertTrue(output.dtype, "int32") def test_correctness(self): # int mode layer = layers.Discretization( bin_boundaries=[0.0, 0.5, 1.0], output_mode="int" ) output = layer(np.array([[-1.0, 0.0, 0.1, 0.8, 1.2]])) self.assertTrue(backend.is_tensor(output)) self.assertAllClose(output, np.array([[0, 1, 1, 2, 3]])) # one_hot mode layer = layers.Discretization( bin_boundaries=[0.0, 0.5, 1.0], output_mode="one_hot" ) output = layer(np.array([0.1, 0.8])) self.assertTrue(backend.is_tensor(output)) self.assertAllClose(output, np.array([[0, 1, 0, 0], [0, 0, 1, 0]])) # multi_hot mode layer = layers.Discretization( bin_boundaries=[0.0, 0.5, 1.0], output_mode="multi_hot" ) output = layer(np.array([[0.1, 0.8]])) self.assertTrue(backend.is_tensor(output)) self.assertAllClose(output, np.array([[0, 1, 1, 0]])) # count mode layer = layers.Discretization( bin_boundaries=[0.0, 0.5, 1.0], output_mode="count" ) output = layer(np.array([[0.1, 0.8, 0.9]])) self.assertTrue(backend.is_tensor(output)) self.assertAllClose(output, np.array([[0, 1, 2, 0]])) def test_tf_data_compatibility(self): # With fixed bins layer = layers.Discretization( bin_boundaries=[0.0, 0.35, 0.5, 1.0], dtype="float32" ) x = np.array([[-1.0, 0.0, 0.1, 0.2, 0.4, 0.5, 1.0, 1.2, 0.98]]) self.assertAllClose(layer(x), np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]])) ds = tf_data.Dataset.from_tensor_slices(x).batch(1).map(layer) for output in ds.take(1): output = output.numpy() self.assertAllClose(output, np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]])) # With adapt flow layer = layers.Discretization(num_bins=4) layer.adapt( np.random.random((32, 3)), ) x = np.array([[0.0, 0.1, 0.3]]) ds = tf_data.Dataset.from_tensor_slices(x).batch(1).map(layer) for output in ds.take(1): output.numpy() def test_saving(self): # With fixed bins layer = layers.Discretization(bin_boundaries=[0.0, 0.35, 0.5, 1.0]) model = models.Sequential( [ layers.Input((2,)), layer, ] ) fpath = os.path.join(self.get_temp_dir(), "model.keras") model.save(fpath) model = saving_api.load_model(fpath) x = np.array([[-1.0, 0.0, 0.1, 0.2, 0.4, 0.5, 1.0, 1.2, 0.98]]) self.assertAllClose(layer(x), np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]])) # With adapt flow layer = layers.Discretization(num_bins=4) layer.adapt( np.random.random((32, 3)), ) ref_input = np.random.random((1, 2)) ref_output = layer(ref_input) model = models.Sequential( [ layers.Input((2,)), layer, ] ) fpath = os.path.join(self.get_temp_dir(), "model.keras") model.save(fpath) model = saving_api.load_model(fpath) self.assertAllClose(layer(ref_input), ref_output) def test_sparse_inputs(self): # TODO pass
keras-core/keras_core/layers/preprocessing/discretization_test.py/0
{ "file_path": "keras-core/keras_core/layers/preprocessing/discretization_test.py", "repo_id": "keras-core", "token_count": 2284 }
44
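A short sketch of the fixed-bin Discretization behavior exercised by the tests above, assuming keras_core is installed; with output_mode="int" each value is mapped to the index of the bin it falls into:

    import numpy as np
    import keras_core

    layer = keras_core.layers.Discretization(
        bin_boundaries=[0.0, 0.5, 1.0], output_mode="int"
    )
    # values below 0.0 -> bin 0, [0.0, 0.5) -> bin 1, [0.5, 1.0) -> bin 2, >= 1.0 -> bin 3
    print(layer(np.array([[-1.0, 0.0, 0.1, 0.8, 1.2]])))  # [[0 1 1 2 3]]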
import numpy as np import pytest from tensorflow import data as tf_data from keras_core import backend from keras_core import layers from keras_core import testing class RandomContrastTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_layer(self): self.run_layer_test( layers.RandomContrast, init_kwargs={ "factor": 0.75, "seed": 1, }, input_shape=(8, 3, 4, 3), supports_masking=False, expected_output_shape=(8, 3, 4, 3), ) def test_random_contrast(self): seed = 9809 np.random.seed(seed) inputs = np.random.random((12, 8, 16, 3)) layer = layers.RandomContrast(factor=0.5, seed=seed) outputs = layer(inputs) # Actual contrast arithmetic np.random.seed(seed) factor = np.random.uniform(0.5, 1.5) inp_mean = np.mean(inputs, axis=-3, keepdims=True) inp_mean = np.mean(inp_mean, axis=-2, keepdims=True) actual_outputs = (inputs - inp_mean) * factor + inp_mean outputs = backend.convert_to_numpy(outputs) actual_outputs = np.clip(outputs, 0, 255) self.assertAllClose(outputs, actual_outputs) def test_tf_data_compatibility(self): layer = layers.RandomContrast(factor=0.5, seed=1337) input_data = np.random.random((2, 8, 8, 3)) ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer) for output in ds.take(1): output.numpy()
keras-core/keras_core/layers/preprocessing/random_contrast_test.py/0
{ "file_path": "keras-core/keras_core/layers/preprocessing/random_contrast_test.py", "repo_id": "keras-core", "token_count": 737 }
45
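A brief sketch of the RandomContrast layer covered by the tests above, assuming keras_core is installed; the layer randomly rescales per-image contrast while preserving the input shape:

    import numpy as np
    import keras_core

    layer = keras_core.layers.RandomContrast(factor=0.5, seed=1337)
    images = np.random.random((2, 8, 8, 3)).astype("float32")
    print(layer(images).shape)  # (2, 8, 8, 3): shape is unchanged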
import numpy as np from tensorflow import data as tf_data from keras_core import backend from keras_core import layers from keras_core import testing class StringLookupTest(testing.TestCase): # TODO: increase coverage. Most features aren't being tested. def test_config(self): layer = layers.StringLookup( output_mode="int", vocabulary=["a", "b", "c"], oov_token="[OOV]", mask_token="[MASK]", ) self.run_class_serialization_test(layer) def test_adapt_flow(self): layer = layers.StringLookup( output_mode="int", ) layer.adapt(["a", "a", "a", "b", "b", "c"]) input_data = ["b", "c", "d"] output = layer(input_data) self.assertTrue(backend.is_tensor(output)) self.assertAllClose(output, np.array([2, 3, 0])) def test_fixed_vocabulary(self): layer = layers.StringLookup( output_mode="int", vocabulary=["a", "b", "c"], ) input_data = ["b", "c", "d"] output = layer(input_data) self.assertTrue(backend.is_tensor(output)) self.assertAllClose(output, np.array([2, 3, 0])) def test_set_vocabulary(self): layer = layers.StringLookup( output_mode="int", ) layer.set_vocabulary(["a", "b", "c"]) input_data = ["b", "c", "d"] output = layer(input_data) self.assertTrue(backend.is_tensor(output)) self.assertAllClose(output, np.array([2, 3, 0])) def test_tf_data_compatibility(self): layer = layers.StringLookup( output_mode="int", vocabulary=["a", "b", "c"], ) input_data = ["b", "c", "d"] ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer) for output in ds.take(1): output = output.numpy() self.assertAllClose(output, np.array([2, 3, 0]))
keras-core/keras_core/layers/preprocessing/string_lookup_test.py/0
{ "file_path": "keras-core/keras_core/layers/preprocessing/string_lookup_test.py", "repo_id": "keras-core", "token_count": 931 }
46
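A minimal sketch of the fixed-vocabulary StringLookup path tested above, assuming keras_core is installed; in-vocabulary tokens map to 1-based indices and out-of-vocabulary tokens map to the OOV index 0:

    import keras_core

    layer = keras_core.layers.StringLookup(
        output_mode="int", vocabulary=["a", "b", "c"]
    )
    print(layer(["b", "c", "d"]))  # [2 3 0]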
from keras_core.api_export import keras_core_export from keras_core.layers.input_spec import InputSpec from keras_core.layers.layer import Layer from keras_core.utils import argument_validation @keras_core_export("keras_core.layers.Cropping1D") class Cropping1D(Layer): """Cropping layer for 1D input (e.g. temporal sequence). It crops along the time dimension (axis 1). Examples: >>> input_shape = (2, 3, 2) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> x [[[ 0 1] [ 2 3] [ 4 5]] [[ 6 7] [ 8 9] [10 11]]] >>> y = keras_core.layers.Cropping1D(cropping=1)(x) >>> y [[[2 3]] [[8 9]]] Args: cropping: Int, or tuple of int (length 2), or dictionary. - If int: how many units should be trimmed off at the beginning and end of the cropping dimension (axis 1). - If tuple of 2 ints: how many units should be trimmed off at the beginning and end of the cropping dimension (`(left_crop, right_crop)`). Input shape: 3D tensor with shape `(batch_size, axis_to_crop, features)` Output shape: 3D tensor with shape `(batch_size, cropped_axis, features)` """ def __init__(self, cropping=(1, 1), **kwargs): super().__init__(**kwargs) self.cropping = argument_validation.standardize_tuple( cropping, 2, "cropping", allow_zero=True ) self.input_spec = InputSpec(ndim=3) def compute_output_shape(self, input_shape): if input_shape[1] is not None: length = input_shape[1] - self.cropping[0] - self.cropping[1] if length <= 0: raise ValueError( "`cropping` parameter of `Cropping1D` layer must be " "greater than the input length. Received: input_shape=" f"{input_shape}, cropping={self.cropping}" ) else: length = None return (input_shape[0], length, input_shape[2]) def call(self, inputs): if ( inputs.shape[1] is not None and sum(self.cropping) >= inputs.shape[1] ): raise ValueError( "`cropping` parameter of `Cropping1D` layer must be " "greater than the input length. Received: inputs.shape=" f"{inputs.shape}, cropping={self.cropping}" ) if self.cropping[1] == 0: return inputs[:, self.cropping[0] :, :] else: return inputs[:, self.cropping[0] : -self.cropping[1], :] def get_config(self): config = {"cropping": self.cropping} base_config = super().get_config() return {**base_config, **config}
keras-core/keras_core/layers/reshaping/cropping1d.py/0
{ "file_path": "keras-core/keras_core/layers/reshaping/cropping1d.py", "repo_id": "keras-core", "token_count": 1292 }
47
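A small sketch of asymmetric cropping with the Cropping1D layer above, assuming keras_core and numpy are installed; cropping=(1, 0) trims one step from the start of axis 1 and nothing from the end:

    import numpy as np
    import keras_core

    x = np.arange(12).reshape((2, 3, 2))
    y = keras_core.layers.Cropping1D(cropping=(1, 0))(x)
    print(y.shape)  # (2, 2, 2)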
import numpy as np from keras_core import backend from keras_core import ops from keras_core.api_export import keras_core_export from keras_core.layers.input_spec import InputSpec from keras_core.layers.layer import Layer from keras_core.utils import argument_validation @keras_core_export("keras_core.layers.UpSampling2D") class UpSampling2D(Layer): """Upsampling layer for 2D inputs. The implementation uses interpolative resizing, given the resize method (specified by the `interpolation` argument). Use `interpolation=nearest` to repeat the rows and columns of the data. Examples: >>> input_shape = (2, 2, 1, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[[ 0 1 2]] [[ 3 4 5]]] [[[ 6 7 8]] [[ 9 10 11]]]] >>> y = keras_core.layers.UpSampling2D(size=(1, 2))(x) >>> print(y) [[[[ 0 1 2] [ 0 1 2]] [[ 3 4 5] [ 3 4 5]]] [[[ 6 7 8] [ 6 7 8]] [[ 9 10 11] [ 9 10 11]]]] Args: size: Int, or tuple of 2 integers. The upsampling factors for rows and columns. data_format: A string, one of `"channels_last"` (default) or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch_size, height, width, channels)` while `"channels_first"` corresponds to inputs with shape `(batch_size, channels, height, width)`. When unspecified, uses `image_data_format` value found in your Keras config file at `~/.keras/keras.json` (if exists) else `"channels_last"`. Defaults to `"channels_last"`. interpolation: A string, one of `"bicubic"`, `"bilinear"`, `"lanczos3"`, `"lanczos5"`, `"nearest"`. Input shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, rows, cols, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `"channels_last"`: `(batch_size, upsampled_rows, upsampled_cols, channels)` - If `data_format` is `"channels_first"`: `(batch_size, channels, upsampled_rows, upsampled_cols)` """ def __init__( self, size=(2, 2), data_format=None, interpolation="nearest", **kwargs ): super().__init__(**kwargs) self.data_format = backend.config.standardize_data_format(data_format) self.size = argument_validation.standardize_tuple(size, 2, "size") self.interpolation = interpolation.lower() self.input_spec = InputSpec(ndim=4) def compute_output_shape(self, input_shape): if self.data_format == "channels_first": height = ( self.size[0] * input_shape[2] if input_shape[2] is not None else None ) width = ( self.size[1] * input_shape[3] if input_shape[3] is not None else None ) return (input_shape[0], input_shape[1], height, width) else: height = ( self.size[0] * input_shape[1] if input_shape[1] is not None else None ) width = ( self.size[1] * input_shape[2] if input_shape[2] is not None else None ) return (input_shape[0], height, width, input_shape[3]) def call(self, inputs): return self._resize_images( inputs, self.size[0], self.size[1], self.data_format, interpolation=self.interpolation, ) def get_config(self): config = { "size": self.size, "data_format": self.data_format, "interpolation": self.interpolation, } base_config = super().get_config() return {**base_config, **config} def _resize_images( self, x, height_factor, width_factor, data_format, interpolation="nearest", ): """Resizes the images contained in a 4D tensor. Args: x: Tensor or variable to resize. height_factor: Positive integer. width_factor: Positive integer. data_format: One of `"channels_first"`, `"channels_last"`. 
interpolation: A string, one of `"bicubic"`, `"bilinear"`, `"lanczos3"`, `"lanczos5"`, or `"nearest"`. Returns: A tensor. """ if data_format == "channels_first": rows, cols = 2, 3 elif data_format == "channels_last": rows, cols = 1, 2 else: raise ValueError(f"Invalid `data_format` argument: {data_format}") new_shape = x.shape[rows : cols + 1] new_shape *= np.array([height_factor, width_factor]) if data_format == "channels_first": x = ops.transpose(x, [0, 2, 3, 1]) # https://github.com/keras-team/keras-core/issues/294 # Use `ops.repeat` for `nearest` interpolation if interpolation == "nearest": x = ops.repeat(x, height_factor, axis=1) x = ops.repeat(x, width_factor, axis=2) else: x = ops.image.resize(x, new_shape, interpolation=interpolation) if data_format == "channels_first": x = ops.transpose(x, [0, 3, 1, 2]) return x
keras-core/keras_core/layers/reshaping/up_sampling2d.py/0
{ "file_path": "keras-core/keras_core/layers/reshaping/up_sampling2d.py", "repo_id": "keras-core", "token_count": 2787 }
48
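A minimal sketch of the default nearest-neighbor path of the UpSampling2D layer above, assuming keras_core and numpy are installed and channels_last data format; with interpolation="nearest" the rows and columns are simply repeated:

    import numpy as np
    import keras_core

    x = np.arange(4, dtype="float32").reshape((1, 2, 2, 1))
    y = keras_core.layers.UpSampling2D(size=(2, 2), interpolation="nearest")(x)
    print(y.shape)  # (1, 4, 4, 1)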
from keras_core.api_export import keras_core_export from keras_core.layers.rnn.conv_lstm import ConvLSTM @keras_core_export("keras_core.layers.ConvLSTM2D") class ConvLSTM2D(ConvLSTM): """2D Convolutional LSTM. Similar to an LSTM layer, but the input transformations and recurrent transformations are both convolutional. Args: filters: int, the dimension of the output space (the number of filters in the convolution). kernel_size: int or tuple/list of 2 integers, specifying the size of the convolution window. strides: int or tuple/list of 2 integers, specifying the stride length of the convolution. `strides > 1` is incompatible with `dilation_rate > 1`. padding: string, `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: string, either `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, steps, features)` while `"channels_first"` corresponds to inputs with shape `(batch, features, steps)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be `"channels_last"`. dilation_rate: int or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. activation: Activation function to use. By default hyperbolic tangent activation function is applied (`tanh(x)`). recurrent_activation: Activation function to use for the recurrent step. use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If `True`, add 1 to the bias of the forget gate at initialization. Use in combination with `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al., 2015]( http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. seed: Random seed for dropout. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: `False`. return_state: Boolean. Whether to return the last state in addition to the output. Default: `False`. go_backwards: Boolean (default: `False`). If `True`, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). 
If `True`, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default: `False`). If `True`, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed-up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. Call arguments: inputs: A 5D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This is only relevant if `dropout` or `recurrent_dropout` are set. initial_state: List of initial state tensors to be passed to the first call of the cell. Input shape: - If `data_format='channels_first'`: 5D tensor with shape: `(samples, time, channels, rows, cols)` - If `data_format='channels_last'`: 5D tensor with shape: `(samples, time, rows, cols, channels)` Output shape: - If `return_state`: a list of tensors. The first tensor is the output. The remaining tensors are the last states, each 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if `data_format='channels_first'` or shape: `(samples, new_rows, new_cols, filters)` if `data_format='channels_last'`. `rows` and `cols` values might have changed due to padding. - If `return_sequences`: 5D tensor with shape: `(samples, timesteps, filters, new_rows, new_cols)` if data_format='channels_first' or shape: `(samples, timesteps, new_rows, new_cols, filters)` if `data_format='channels_last'`. - Else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if `data_format='channels_first'` or shape: `(samples, new_rows, new_cols, filters)` if `data_format='channels_last'`. References: - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1) (the current implementation does not include the feedback loop on the cells output). """ def __init__( self, filters, kernel_size, strides=1, padding="valid", data_format=None, dilation_rate=1, activation="tanh", recurrent_activation="sigmoid", use_bias=True, kernel_initializer="glorot_uniform", recurrent_initializer="orthogonal", bias_initializer="zeros", unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, **kwargs ): super().__init__( rank=2, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, dropout=dropout, recurrent_dropout=recurrent_dropout, seed=seed, **kwargs )
keras-core/keras_core/layers/rnn/conv_lstm2d.py/0
{ "file_path": "keras-core/keras_core/layers/rnn/conv_lstm2d.py", "repo_id": "keras-core", "token_count": 3471 }
49
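A shape-level sketch of the ConvLSTM2D layer documented above, assuming keras_core and numpy are installed and the default channels_last data format; since return_sequences defaults to False, each frame sequence collapses to a single feature map:

    import numpy as np
    import keras_core

    frames = np.random.rand(4, 5, 16, 16, 3).astype("float32")  # (samples, time, rows, cols, channels)
    layer = keras_core.layers.ConvLSTM2D(filters=8, kernel_size=3, padding="same")
    print(layer(frames).shape)  # (4, 16, 16, 8)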
import numpy as np import pytest from keras_core import layers from keras_core import testing from keras_core.layers.rnn.rnn_test import OneStateRNNCell from keras_core.layers.rnn.rnn_test import TwoStatesRNNCell class StackedRNNTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_basics(self): self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ OneStateRNNCell(3), OneStateRNNCell(4), OneStateRNNCell(5), ], }, input_shape=(2, 3, 4), expected_output_shape=(2, 5), expected_num_trainable_weights=6, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, custom_objects={"OneStateRNNCell": OneStateRNNCell}, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ OneStateRNNCell(3), OneStateRNNCell(4), OneStateRNNCell(5), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=6, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, custom_objects={"OneStateRNNCell": OneStateRNNCell}, ) # Two-state case. self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ TwoStatesRNNCell(3), TwoStatesRNNCell(4), TwoStatesRNNCell(5), ], }, input_shape=(2, 3, 4), expected_output_shape=(2, 5), expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, custom_objects={"TwoStatesRNNCell": TwoStatesRNNCell}, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ TwoStatesRNNCell(3), TwoStatesRNNCell(4), TwoStatesRNNCell(5), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, custom_objects={"TwoStatesRNNCell": TwoStatesRNNCell}, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ layers.SimpleRNNCell(3, dropout=0.1, recurrent_dropout=0.1), layers.SimpleRNNCell(4, dropout=0.1, recurrent_dropout=0.1), layers.SimpleRNNCell(5, dropout=0.1, recurrent_dropout=0.1), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ layers.GRUCell(3, dropout=0.1, recurrent_dropout=0.1), layers.GRUCell(4, dropout=0.1, recurrent_dropout=0.1), layers.GRUCell(5, dropout=0.1, recurrent_dropout=0.1), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ layers.LSTMCell(3, dropout=0.1, recurrent_dropout=0.1), layers.LSTMCell(4, dropout=0.1, recurrent_dropout=0.1), layers.LSTMCell(5, dropout=0.1, recurrent_dropout=0.1), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) def test_correctness_single_state_stack(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.RNN([OneStateRNNCell(3), OneStateRNNCell(2)]) output = layer(sequence) self.assertAllClose( np.array([[786.0, 786.0], [4386.0, 4386.0]]), output ) layer = layers.RNN( [OneStateRNNCell(3), OneStateRNNCell(2)], return_sequences=True ) output = layer(sequence) 
self.assertAllClose( np.array( [ [[18.0, 18.0], [156.0, 156.0], [786.0, 786.0]], [[162.0, 162.0], [1020.0, 1020.0], [4386.0, 4386.0]], ] ), output, ) layer = layers.RNN( [OneStateRNNCell(3), OneStateRNNCell(2)], return_state=True ) output, state_1, state_2 = layer(sequence) self.assertAllClose( np.array([[786.0, 786.0], [4386.0, 4386.0]]), output ) self.assertAllClose( np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]), state_1 ) self.assertAllClose( np.array([[786.0, 786.0], [4386.0, 4386.0]]), state_2 ) layer = layers.RNN( [OneStateRNNCell(3), OneStateRNNCell(2)], return_sequences=True, return_state=True, ) output, state_1, state_2 = layer(sequence) self.assertAllClose( np.array( [ [[18.0, 18.0], [156.0, 156.0], [786.0, 786.0]], [[162.0, 162.0], [1020.0, 1020.0], [4386.0, 4386.0]], ] ), output, ) self.assertAllClose( np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]), state_1 ) self.assertAllClose( np.array([[786.0, 786.0], [4386.0, 4386.0]]), state_2 ) def test_correctness_two_states_stack(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.RNN([TwoStatesRNNCell(3), TwoStatesRNNCell(2)]) output = layer(sequence) self.assertAllClose( np.array([[3144.0, 3144.0], [17544.0, 17544.0]]), output ) layer = layers.RNN( [TwoStatesRNNCell(3), TwoStatesRNNCell(2)], return_sequences=True ) output = layer(sequence) self.assertAllClose( np.array( [ [[72.0, 72.0], [624.0, 624.0], [3144.0, 3144.0]], [[648.0, 648.0], [4080.0, 4080.0], [17544.0, 17544.0]], ] ), output, ) layer = layers.RNN( [TwoStatesRNNCell(3), TwoStatesRNNCell(2)], return_state=True ) output, state_1, state_2 = layer(sequence) self.assertAllClose( np.array([[3144.0, 3144.0], [17544.0, 17544.0]]), output ) self.assertAllClose( np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]), state_1[0] ) self.assertAllClose( np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]), state_1[1] ) self.assertAllClose( np.array([[1572.0, 1572.0], [8772.0, 8772.0]]), state_2[0] ) self.assertAllClose( np.array([[1572.0, 1572.0], [8772.0, 8772.0]]), state_2[1] ) def test_statefullness_single_state_stack(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.RNN( [OneStateRNNCell(3), OneStateRNNCell(2)], stateful=True ) layer(sequence) output = layer(sequence) self.assertAllClose( np.array([[34092.0, 34092.0], [173196.0, 173196.0]]), output ) def test_statefullness_two_states_stack(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.RNN( [TwoStatesRNNCell(3), TwoStatesRNNCell(2)], stateful=True ) layer(sequence) output = layer(sequence) self.assertAllClose( np.array([[136368.0, 136368.0], [692784.0, 692784.0]]), output )
keras-core/keras_core/layers/rnn/stacked_rnn_cells_test.py/0
{ "file_path": "keras-core/keras_core/layers/rnn/stacked_rnn_cells_test.py", "repo_id": "keras-core", "token_count": 5239 }
50
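A short sketch of the stacked-cell usage exercised by the tests above, assuming keras_core is installed; passing a list of cells to layers.RNN stacks them, and the last cell's unit count determines the output size:

    import numpy as np
    import keras_core

    cells = [keras_core.layers.SimpleRNNCell(4), keras_core.layers.SimpleRNNCell(5)]
    layer = keras_core.layers.RNN(cells, return_sequences=True)
    outputs = layer(np.random.random((2, 3, 4)).astype("float32"))
    print(outputs.shape)  # (2, 3, 5)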
import contextlib from keras_core.backend.common import global_state @contextlib.contextmanager def keras_option_scope(use_legacy_config=True): use_legacy_config_prev_value = global_state.get_global_attribute( "use_legacy_config", None ) global_state.set_global_attribute("use_legacy_config", use_legacy_config) try: yield finally: global_state.set_global_attribute( "use_legacy_config", use_legacy_config_prev_value )
keras-core/keras_core/legacy/saving/saving_options.py/0
{ "file_path": "keras-core/keras_core/legacy/saving/saving_options.py", "repo_id": "keras-core", "token_count": 199 }
51
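A tiny sketch of the keras_option_scope context manager above; inside the with block the "use_legacy_config" global attribute takes the given value, and the previous value is restored on exit:

    from keras_core.legacy.saving.saving_options import keras_option_scope

    with keras_option_scope(use_legacy_config=True):
        pass  # code here sees the "use_legacy_config" global attribute set to True
    # the prior value of the attribute is restored after the block exits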
import numpy as np from keras_core import testing from keras_core.metrics import hinge_metrics class HingeTest(testing.TestCase): def test_config(self): hinge_obj = hinge_metrics.Hinge(name="hinge", dtype="int32") self.assertEqual(hinge_obj.name, "hinge") self.assertEqual(hinge_obj._dtype, "int32") # TODO: Check save and restore config def test_unweighted(self): hinge_obj = hinge_metrics.Hinge() y_true = np.array([[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]]) y_pred = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]]) hinge_obj.update_state(y_true, y_pred) result = hinge_obj.result() self.assertAllClose(0.506, result, atol=1e-3) def test_weighted(self): hinge_obj = hinge_metrics.Hinge() y_true = np.array([[-1, 1, -1, 1], [-1, -1, 1, 1]]) y_pred = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]]) sample_weight = np.array([1.5, 2.0]) result = hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(0.493, result, atol=1e-3) class SquaredHingeTest(testing.TestCase): def test_config(self): sq_hinge_obj = hinge_metrics.SquaredHinge( name="squared_hinge", dtype="int32" ) self.assertEqual(sq_hinge_obj.name, "squared_hinge") self.assertEqual(sq_hinge_obj._dtype, "int32") # TODO: Check save and restore config def test_unweighted(self): sq_hinge_obj = hinge_metrics.SquaredHinge() y_true = np.array([[0, 1, 0, 1], [0, 0, 1, 1]], dtype="float32") y_pred = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]]) sq_hinge_obj.update_state(y_true, y_pred) result = sq_hinge_obj.result() self.assertAllClose(0.364, result, atol=1e-3) def test_weighted(self): sq_hinge_obj = hinge_metrics.SquaredHinge() y_true = np.array([[-1, 1, -1, 1], [-1, -1, 1, 1]], dtype="float32") y_pred = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]]) sample_weight = np.array([1.5, 2.0]) result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(0.347, result, atol=1e-3) class CategoricalHingeTest(testing.TestCase): def test_config(self): cat_hinge_obj = hinge_metrics.CategoricalHinge( name="cat_hinge", dtype="int32" ) self.assertEqual(cat_hinge_obj.name, "cat_hinge") self.assertEqual(cat_hinge_obj._dtype, "int32") # TODO: Check save and restore config def test_unweighted(self): cat_hinge_obj = hinge_metrics.CategoricalHinge() y_true = np.array( ( (0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1), ), dtype="float32", ) y_pred = np.array( ( (0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1), ), dtype="float32", ) cat_hinge_obj.update_state(y_true, y_pred) result = cat_hinge_obj.result() self.assertAllClose(0.5, result, atol=1e-5) def test_weighted(self): cat_hinge_obj = hinge_metrics.CategoricalHinge() y_true = np.array( ( (0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1), ), dtype="float32", ) y_pred = np.array( ( (0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1), ), dtype="float32", ) sample_weight = np.array((1.0, 1.5, 2.0, 2.5)) result = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight) self.assertAllClose(0.5, result, atol=1e-5)
keras-core/keras_core/metrics/hinge_metrics_test.py/0
{ "file_path": "keras-core/keras_core/metrics/hinge_metrics_test.py", "repo_id": "keras-core", "token_count": 2262 }
52
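A minimal sketch of the unweighted Hinge metric checked by the tests above, assuming keras_core is installed; binary 0/1 labels are converted to -1/1 before the hinge loss is averaged:

    import numpy as np
    from keras_core.metrics import hinge_metrics

    metric = hinge_metrics.Hinge()
    y_true = np.array([[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
    y_pred = np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1.0, 0.5, 0.6]])
    metric.update_state(y_true, y_pred)
    print(metric.result())  # approximately 0.506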
import numpy as np import pytest from absl.testing import parameterized from keras_core import layers from keras_core import models from keras_core import testing from keras_core.models.cloning import clone_model def get_functional_model(shared_layers=False): inputs = layers.Input(shape=(3,)) x = layers.Dense(2)(inputs) if shared_layers: layer = layers.Dense(2, name="shared") x = layer(x) x = layer(x) outputs = layers.Dense(2)(x) model = models.Model(inputs, outputs) return model def get_sequential_model(explicit_input=True): model = models.Sequential() if explicit_input: model.add(layers.Input(shape=(3,))) model.add(layers.Dense(2)) model.add(layers.Dense(2)) return model def get_subclassed_model(): class ExampleModel(models.Model): def __init__(self, **kwargs): super().__init__(**kwargs) self.d1 = layers.Dense(2) self.d2 = layers.Dense(2) def call(self, x): return self.d2(self.d1(x)) return ExampleModel() @pytest.mark.requires_trainable_backend class CloneModelTest(testing.TestCase, parameterized.TestCase): @parameterized.named_parameters( ("functional", get_functional_model), ("sequential", get_sequential_model), ( "deferred_sequential", lambda: get_sequential_model(explicit_input=False), ), ("subclassed", get_subclassed_model), ) def test_cloning_correctness(self, model_fn): ref_input = np.random.random((2, 3)) model = model_fn() new_model = clone_model(model) ref_output = model(ref_input) new_model(ref_input) # Maybe needed to build the model new_model.set_weights(model.get_weights()) output = new_model(ref_input) self.assertAllClose(ref_output, output) @parameterized.named_parameters( ("functional", get_functional_model), ("sequential", get_sequential_model), ) def test_custom_clone_function(self, model_fn): def clone_function(layer): config = layer.get_config() config["name"] = config["name"] + "_custom" return layer.__class__.from_config(config) model = model_fn() new_model = clone_model(model, clone_function=clone_function) for l1, l2 in zip(model.layers, new_model.layers): if not isinstance(l1, layers.InputLayer): self.assertEqual(l2.name, l1.name + "_custom") def test_shared_layers_cloning(self): model = get_functional_model(shared_layers=True) new_model = clone_model(model) self.assertLen(new_model.layers, 4)
keras-core/keras_core/models/cloning_test.py/0
{ "file_path": "keras-core/keras_core/models/cloning_test.py", "repo_id": "keras-core", "token_count": 1174 }
53
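A compact sketch of the clone_model flow covered by the tests above, assuming keras_core is installed; the clone shares the architecture but gets freshly initialized weights, so the weights are copied over before reuse:

    import numpy as np
    from keras_core import layers, models
    from keras_core.models.cloning import clone_model

    inputs = layers.Input(shape=(3,))
    outputs = layers.Dense(2)(layers.Dense(2)(inputs))
    model = models.Model(inputs, outputs)

    new_model = clone_model(model)              # same topology, new weights
    new_model.set_weights(model.get_weights())  # align weights with the original
    print(new_model(np.random.random((2, 3))).shape)  # (2, 2)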
"""Commonly used math operations not included in NumPy.""" from keras_core import backend from keras_core.api_export import keras_core_export from keras_core.backend import KerasTensor from keras_core.backend import any_symbolic_tensors from keras_core.ops.operation import Operation from keras_core.ops.operation_utils import reduce_shape class SegmentSum(Operation): def __init__(self, num_segments=None, sorted=False): super().__init__() self.num_segments = num_segments self.sorted = sorted def compute_output_spec(self, data, segment_ids): num_segments = self.num_segments output_shape = (num_segments,) + tuple(data.shape[1:]) return KerasTensor(shape=output_shape, dtype=data.dtype) def call(self, data, segment_ids): return backend.math.segment_sum( data, segment_ids, num_segments=self.num_segments, sorted=self.sorted, ) @keras_core_export("keras_core.ops.segment_sum") def segment_sum(data, segment_ids, num_segments=None, sorted=False): """Computes the sum of segments in a tensor. Args: data: Input tensor. segment_ids: A 1-D tensor containing segment indices for each element in `data`. num_segments: An integer representing the total number of segments. If not specified, it is inferred from the maximum value in `segment_ids`. sorted: A boolean indicating whether `segment_ids` is sorted. Defaults to`False`. Returns: A tensor containing the sum of segments, where each element represents the sum of the corresponding segment in `data`. Example: >>> data = keras_core.ops.convert_to_tensor([1, 2, 10, 20, 100, 200]) >>> segment_ids = keras_core.ops.convert_to_tensor([0, 0, 1, 1, 2, 2]) >>> num_segments = 3 >>> keras_core.ops.segment_sum(data, segment_ids,num_segments) array([3, 30, 300], dtype=int32) """ if any_symbolic_tensors((data,)): return SegmentSum(num_segments, sorted).symbolic_call(data, segment_ids) return backend.math.segment_sum( data, segment_ids, num_segments=num_segments, sorted=sorted ) class SegmentMax(Operation): def __init__(self, num_segments=None, sorted=False): super().__init__() self.num_segments = num_segments self.sorted = sorted def compute_output_spec(self, data, segment_ids): num_segments = self.num_segments output_shape = (num_segments,) + tuple(data.shape[1:]) return KerasTensor(shape=output_shape, dtype=data.dtype) def call(self, data, segment_ids): return backend.math.segment_max( data, segment_ids, num_segments=self.num_segments, sorted=self.sorted, ) @keras_core_export("keras_core.ops.segment_max") def segment_max(data, segment_ids, num_segments=None, sorted=False): """Computes the max of segments in a tensor. Args: data: Input tensor. segment_ids: A 1-D tensor containing segment indices for each element in `data`. num_segments: An integer representing the total number of segments. If not specified, it is inferred from the maximum value in `segment_ids`. sorted: A boolean indicating whether `segment_ids` is sorted. Defaults to`False`. Returns: A tensor containing the max of segments, where each element represents the max of the corresponding segment in `data`. 
Example: >>> data = keras_core.ops.convert_to_tensor([1, 2, 10, 20, 100, 200]) >>> segment_ids = keras_core.ops.convert_to_tensor([0, 0, 1, 1, 2, 2]) >>> num_segments = 3 >>> keras_core.ops.segment_max(data, segment_ids, num_segments) array([2, 20, 200], dtype=int32) """ if any_symbolic_tensors((data,)): return SegmentMax(num_segments, sorted).symbolic_call(data, segment_ids) return backend.math.segment_max( data, segment_ids, num_segments=num_segments, sorted=sorted ) class TopK(Operation): def __init__(self, k, sorted=False): super().__init__() self.k = k self.sorted = sorted def compute_output_spec(self, x): output_shape = list(x.shape) output_shape[-1] = self.k # Return a tuple (values, indices). return ( KerasTensor(shape=output_shape, dtype=x.dtype), KerasTensor(shape=output_shape, dtype="int32"), ) def call(self, x): return backend.math.top_k(x, self.k, self.sorted) @keras_core_export("keras_core.ops.top_k") def top_k(x, k, sorted=True): """Finds the top-k values and their indices in a tensor. Args: x: Input tensor. k: An integer representing the number of top elements to retrieve. sorted: A boolean indicating whether to sort the output in descending order. Defaults to`True`. Returns: A tuple containing two tensors. The first tensor contains the top-k values, and the second tensor contains the indices of the top-k values in the input tensor. Example: >>> x = keras_core.ops.convert_to_tensor([5, 2, 7, 1, 9, 3]) >>> values, indices = top_k(x, k=3) >>> print(values) array([9 7 5], shape=(3,), dtype=int32) >>> print(indices) array([4 2 0], shape=(3,), dtype=int32) """ if any_symbolic_tensors((x,)): return TopK(k, sorted).symbolic_call(x) return backend.math.top_k(x, k, sorted) class InTopK(Operation): def __init__(self, k): super().__init__() self.k = k def compute_output_spec(self, targets, predictions): return KerasTensor(shape=targets.shape, dtype="bool") def call(self, targets, predictions): return backend.math.in_top_k(targets, predictions, self.k) @keras_core_export("keras_core.ops.in_top_k") def in_top_k(targets, predictions, k): """Checks if the targets are in the top-k predictions. Args: targets: A tensor of true labels. predictions: A tensor of predicted labels. k: An integer representing the number of predictions to consider. Returns: A boolean tensor of the same shape as `targets`, where each element indicates whether the corresponding target is in the top-k predictions. Example: >>> targets = keras_core.ops.convert_to_tensor([2, 5, 3]) >>> predictions = keras_core.ops.convert_to_tensor( ... [[0.1, 0.4, 0.6, 0.9, 0.5], ... [0.1, 0.7, 0.9, 0.8, 0.3], ... [0.1, 0.6, 0.9, 0.9, 0.5]]) >>> in_top_k(targets, predictions, k=3) array([ True False True], shape=(3,), dtype=bool) """ if any_symbolic_tensors((targets, predictions)): return InTopK(k).symbolic_call(targets, predictions) return backend.math.in_top_k(targets, predictions, k) class Logsumexp(Operation): def __init__(self, axis=None, keepdims=False): super().__init__() self.axis = axis self.keepdims = keepdims def compute_output_spec(self, x): output_shape = reduce_shape(x.shape, self.axis, self.keepdims) return KerasTensor(shape=output_shape) def call(self, x): return backend.math.logsumexp(x, axis=self.axis, keepdims=self.keepdims) @keras_core_export("keras_core.ops.logsumexp") def logsumexp(x, axis=None, keepdims=False): """Computes the logarithm of sum of exponentials of elements in a tensor. Args: x: Input tensor. axis: An integer or a tuple of integers specifying the axis/axes along which to compute the sum. 
If `None`, the sum is computed over all elements. Defaults to`None`. keepdims: A boolean indicating whether to keep the dimensions of the input tensor when computing the sum. Defaults to`False`. Returns: A tensor containing the logarithm of the sum of exponentials of elements in `x`. Example: >>> x = keras_core.ops.convert_to_tensor([1., 2., 3.]) >>> logsumexp(x) 3.407606 """ if any_symbolic_tensors((x,)): return Logsumexp(axis, keepdims).symbolic_call(x) return backend.math.logsumexp(x, axis=axis, keepdims=keepdims) class Qr(Operation): def __init__(self, mode="reduced"): super().__init__() if mode not in {"reduced", "complete"}: raise ValueError( "`mode` argument value not supported. " "Expected one of {'reduced', 'complete'}. " f"Received: mode={mode}" ) self.mode = mode def compute_output_spec(self, x): if len(x.shape) < 2: raise ValueError( "Input should have rank >= 2. Received: " f"input.shape = {x.shape}" ) m = x.shape[-2] n = x.shape[-1] if m is None or n is None: raise ValueError( "Input should have its last 2 dimensions " "fully-defined. Received: " f"input.shape = {x.shape}" ) k = min(m, n) base = tuple(x.shape[:-2]) if self.mode == "reduced": return ( KerasTensor(shape=base + (m, k), dtype=x.dtype), KerasTensor(shape=base + (k, n), dtype=x.dtype), ) # 'complete' mode. return ( KerasTensor(shape=base + (m, m), dtype=x.dtype), KerasTensor(shape=base + (m, n), dtype=x.dtype), ) def call(self, x): return backend.math.qr(x, mode=self.mode) @keras_core_export("keras_core.ops.qr") def qr(x, mode="reduced"): """Computes the QR decomposition of a tensor. Args: x: Input tensor. mode: A string specifying the mode of the QR decomposition. - 'reduced': Returns the reduced QR decomposition. (default) - 'complete': Returns the complete QR decomposition. Returns: A tuple containing two tensors. The first tensor represents the orthogonal matrix Q, and the second tensor represents the upper triangular matrix R. Example: >>> x = keras_core.ops.convert_to_tensor([[1., 2.], [3., 4.], [5., 6.]]) >>> q, r = qr(x) >>> print(q) array([[-0.16903079 0.897085] [-0.5070925 0.2760267 ] [-0.8451542 -0.34503305]], shape=(3, 2), dtype=float32) """ if any_symbolic_tensors((x,)): return Qr(mode=mode).symbolic_call(x) return backend.math.qr(x, mode=mode) class ExtractSequences(Operation): def __init__(self, sequence_length, sequence_stride): super().__init__() self.sequence_length = sequence_length self.sequence_stride = sequence_stride def compute_output_spec(self, x): if len(x.shape) < 1: raise ValueError( f"Input should have rank >= 1. " f"Received: input.shape = {x.shape}" ) if x.shape[-1] is not None: num_sequences = ( 1 + (x.shape[-1] - self.sequence_length) // self.sequence_stride ) else: num_sequences = None new_shape = x.shape[:-1] + (num_sequences, self.sequence_length) return KerasTensor(shape=new_shape, dtype=x.dtype) def call(self, x): return backend.math.extract_sequences( x, sequence_length=self.sequence_length, sequence_stride=self.sequence_stride, ) @keras_core_export("keras_core.ops.extract_sequences") def extract_sequences(x, sequence_length, sequence_stride): """Expands the dimension of last axis into sequences of `sequence_length`. Slides a window of size `sequence_length` over the last axis of the input with a stride of `sequence_stride`, replacing the last axis with `[num_sequences, sequence_length]` sequences. If the dimension along the last axis is N, the number of sequences can be computed by: `num_sequences = 1 + (N - sequence_length) // sequence_stride` Args: x: Input tensor. 
sequence_length: An integer representing the sequences length. sequence_stride: An integer representing the sequences hop size. Returns: A tensor of sequences with shape [..., num_sequences, sequence_length]. Example: >>> x = keras_core.ops.convert_to_tensor([1, 2, 3, 4, 5, 6]) >>> extract_sequences(x, 3, 2) array([[1, 2, 3], [3, 4, 5]]) """ if any_symbolic_tensors((x,)): return ExtractSequences(sequence_length, sequence_stride).symbolic_call( x ) return backend.math.extract_sequences(x, sequence_length, sequence_stride) class FFT(Operation): def compute_output_spec(self, x): if not isinstance(x, (tuple, list)) or len(x) != 2: raise ValueError( "Input `x` should be a tuple of two tensors - real and " f"imaginary. Received: x={x}" ) real, imag = x # Both real and imaginary parts should have the same shape. if real.shape != imag.shape: raise ValueError( "Input `x` should be a tuple of two tensors - real and " "imaginary. Both the real and imaginary parts should have the " f"same shape. Received: x[0].shape = {real.shape}, " f"x[1].shape = {imag.shape}" ) # We are calculating 1D FFT. Hence, rank >= 1. if len(real.shape) < 1: raise ValueError( f"Input should have rank >= 1. " f"Received: input.shape = {real.shape}" ) # The axis along which we are calculating FFT should be fully-defined. m = real.shape[-1] if m is None: raise ValueError( f"Input should have its {self.axis}th axis fully-defined. " f"Received: input.shape = {real.shape}" ) return ( KerasTensor(shape=real.shape, dtype=real.dtype), KerasTensor(shape=imag.shape, dtype=imag.dtype), ) def call(self, x): return backend.math.fft(x) @keras_core_export("keras_core.ops.fft") def fft(x): """Computes the Fast Fourier Transform along last axis of input. Args: x: Tuple of the real and imaginary parts of the input tensor. Both tensors in the tuple should be of floating type. Returns: A tuple containing two tensors - the real and imaginary parts of the output tensor. Example: >>> x = ( ... keras_core.ops.convert_to_tensor([1., 2.]), ... keras_core.ops.convert_to_tensor([0., 1.]), ... ) >>> fft(x) (array([ 3., -1.], dtype=float32), array([ 1., -1.], dtype=float32)) """ if any_symbolic_tensors(x): return FFT().symbolic_call(x) return backend.math.fft(x) class FFT2(Operation): def compute_output_spec(self, x): if not isinstance(x, (tuple, list)) or len(x) != 2: raise ValueError( "Input `x` should be a tuple of two tensors - real and " f"imaginary. Received: x={x}" ) real, imag = x # Both real and imaginary parts should have the same shape. if real.shape != imag.shape: raise ValueError( "Input `x` should be a tuple of two tensors - real and " "imaginary. Both the real and imaginary parts should have the " f"same shape. Received: x[0].shape = {real.shape}, " f"x[1].shape = {imag.shape}" ) # We are calculating 2D FFT. Hence, rank >= 2. if len(real.shape) < 2: raise ValueError( f"Input should have rank >= 2. " f"Received: input.shape = {real.shape}" ) # The axes along which we are calculating FFT should be fully-defined. m = real.shape[-1] n = real.shape[-2] if m is None or n is None: raise ValueError( f"Input should have its {self.axes} axes fully-defined. " f"Received: input.shape = {real.shape}" ) return ( KerasTensor(shape=real.shape, dtype=real.dtype), KerasTensor(shape=imag.shape, dtype=imag.dtype), ) def call(self, x): return backend.math.fft2(x) @keras_core_export("keras_core.ops.fft2") def fft2(x): """Computes the 2D Fast Fourier Transform along the last two axes of input. Args: x: Tuple of the real and imaginary parts of the input tensor. 
Both tensors in the tuple should be of floating type. Returns: A tuple containing two tensors - the real and imaginary parts of the output. Example: >>> x = ( ... keras_core.ops.convert_to_tensor([[1., 2.], [2., 1.]]), ... keras_core.ops.convert_to_tensor([[0., 1.], [1., 0.]]), ... ) >>> fft2(x) (array([[ 6., 0.], [ 0., -2.]], dtype=float32), array([[ 2., 0.], [ 0., -2.]], dtype=float32)) """ if any_symbolic_tensors(x): return FFT2().symbolic_call(x) return backend.math.fft2(x) class RFFT(Operation): def __init__(self, fft_length=None): super().__init__() self.fft_length = fft_length def compute_output_spec(self, x): # We are calculating 1D RFFT. Hence, rank >= 1. if len(x.shape) < 1: raise ValueError( f"Input should have rank >= 1. " f"Received: input.shape = {x.shape}" ) if self.fft_length is not None: new_last_dimension = self.fft_length // 2 + 1 else: if x.shape[-1] is not None: new_last_dimension = x.shape[-1] // 2 + 1 else: new_last_dimension = None new_shape = x.shape[:-1] + (new_last_dimension,) return ( KerasTensor(shape=new_shape, dtype=x.dtype), KerasTensor(shape=new_shape, dtype=x.dtype), ) def call(self, x): return backend.math.rfft(x, fft_length=self.fft_length) @keras_core_export("keras_core.ops.rfft") def rfft(x, fft_length=None): """Real-valued Fast Fourier Transform along the last axis of the input. Computes the 1D Discrete Fourier Transform of a real-valued signal over the inner-most dimension of input. Since the Discrete Fourier Transform of a real-valued signal is Hermitian-symmetric, RFFT only returns the `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, followed by the `fft_length / 2` positive-frequency terms. Along the axis RFFT is computed on, if `fft_length` is smaller than the corresponding dimension of the input, the dimension is cropped. If it is larger, the dimension is padded with zeros. Args: x: Input tensor. fft_length: An integer representing the number of the fft length. If not specified, it is inferred from the length of the last axis of `x`. Defaults to `None`. Returns: A tuple containing two tensors - the real and imaginary parts of the output. Examples: >>> x = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> rfft(x) (array([10.0, -2.5, -2.5]), array([0.0, 3.4409548, 0.81229924])) >>> rfft(x, 3) (array([3.0, -1.5]), array([0.0, 0.8660254])) """ if any_symbolic_tensors((x,)): return RFFT(fft_length).symbolic_call(x) return backend.math.rfft(x, fft_length) class IRFFT(Operation): def __init__(self, fft_length=None): super().__init__() self.fft_length = fft_length def compute_output_spec(self, x): if not isinstance(x, (tuple, list)) or len(x) != 2: raise ValueError( "Input `x` should be a tuple of two tensors - real and " f"imaginary. Received: x={x}" ) real, imag = x # Both real and imaginary parts should have the same shape. if real.shape != imag.shape: raise ValueError( "Input `x` should be a tuple of two tensors - real and " "imaginary. Both the real and imaginary parts should have the " f"same shape. Received: x[0].shape = {real.shape}, " f"x[1].shape = {imag.shape}" ) # We are calculating 1D IRFFT. Hence, rank >= 1. if len(real.shape) < 1: raise ValueError( f"Input should have rank >= 1. 
" f"Received: input.shape = {real.shape}" ) if self.fft_length is not None: new_last_dimension = self.fft_length else: if real.shape[-1] is not None: new_last_dimension = 2 * (real.shape[-1] - 1) else: new_last_dimension = None new_shape = real.shape[:-1] + (new_last_dimension,) return KerasTensor(shape=new_shape, dtype=real.dtype) def call(self, x): return backend.math.irfft(x, fft_length=self.fft_length) @keras_core_export("keras_core.ops.irfft") def irfft(x, fft_length=None): """Inverse real-valued Fast Fourier transform along the last axis. Computes the inverse 1D Discrete Fourier Transform of a real-valued signal over the inner-most dimension of input. The inner-most dimension of the input is assumed to be the result of RFFT: the `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If `fft_length` is not provided, it is computed from the size of the inner-most dimension of the input `(fft_length = 2 * (inner - 1))`. If the FFT length used to compute is odd, it should be provided since it cannot be inferred properly. Along the axis IRFFT is computed on, if `fft_length / 2 + 1` is smaller than the corresponding dimension of the input, the dimension is cropped. If it is larger, the dimension is padded with zeros. Args: x: Tuple of the real and imaginary parts of the input tensor. Both tensors in the tuple should be of floating type. fft_length: An integer representing the number of the fft length. If not specified, it is inferred from the length of the last axis of `x`. Defaults to `None`. Returns: A tensor containing the inverse real-valued Fast Fourier Transform along the last axis of `x`. Examples: >>> real = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> imag = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> irfft((real, imag)) array([0.66666667, -0.9106836, 0.24401694]) >>> irfft(rfft(real, 5), 5) array([0.0, 1.0, 2.0, 3.0, 4.0]) """ if any_symbolic_tensors(x): return IRFFT(fft_length).symbolic_call(x) return backend.math.irfft(x, fft_length) class STFT(Operation): def __init__( self, sequence_length, sequence_stride, fft_length, window="hann", center=True, ): super().__init__() self.sequence_length = sequence_length self.sequence_stride = sequence_stride self.fft_length = fft_length self.window = window self.center = center def compute_output_spec(self, x): if x.shape[-1] is not None: padded = 0 if self.center is False else (self.fft_length // 2) * 2 num_sequences = ( 1 + (x.shape[-1] + padded - self.fft_length) // self.sequence_stride ) else: num_sequences = None new_shape = x.shape[:-1] + (num_sequences, self.fft_length // 2 + 1) return ( KerasTensor(shape=new_shape, dtype=x.dtype), KerasTensor(shape=new_shape, dtype=x.dtype), ) def call(self, x): return backend.math.stft( x, sequence_length=self.sequence_length, sequence_stride=self.sequence_stride, fft_length=self.fft_length, window=self.window, center=self.center, ) @keras_core_export("keras_core.ops.stft") def stft( x, sequence_length, sequence_stride, fft_length, window="hann", center=True ): """Short-Time Fourier Transform along the last axis of the input. The STFT computes the Fourier transform of short overlapping windows of the input. This giving frequency components of the signal as they change over time. Args: x: Input tensor. sequence_length: An integer representing the sequence length. sequence_stride: An integer representing the sequence hop size. fft_length: An integer representing the size of the FFT to apply. 
If not specified, uses the smallest power of 2 enclosing `sequence_length`. window: A string, a tensor of the window or `None`. If `window` is a string, available values are `"hann"` and `"hamming"`. If `window` is a tensor, it will be used directly as the window and its length must be `sequence_length`. If `window` is `None`, no windowing is used. Defaults to `"hann"`. center: Whether to pad `x` on both sides so that the t-th sequence is centered at time `t * sequence_stride`. Otherwise, the t-th sequence begins at time `t * sequence_stride`. Defaults to `True`. Returns: A tuple containing two tensors - the real and imaginary parts of the STFT output. Example: >>> x = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> stft(x, 3, 2, 3) (array([[0.75, -0.375], [3.75, -1.875], [5.25, -2.625]]), array([[0.0, 0.64951905], [0.0, 0.64951905], [0.0, -0.64951905]])) """ if any_symbolic_tensors((x,)): return STFT( sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, window=window, center=center, ).symbolic_call(x) return backend.math.stft( x, sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, window=window, center=center, ) class ISTFT(Operation): def __init__( self, sequence_length, sequence_stride, fft_length, length=None, window="hann", center=True, ): super().__init__() self.sequence_length = sequence_length self.sequence_stride = sequence_stride self.fft_length = fft_length self.length = length self.window = window self.center = center def compute_output_spec(self, x): if not isinstance(x, (tuple, list)) or len(x) != 2: raise ValueError( "Input `x` should be a tuple of two tensors - real and " f"imaginary. Received: x={x}" ) real, imag = x # Both real and imaginary parts should have the same shape. if real.shape != imag.shape: raise ValueError( "Input `x` should be a tuple of two tensors - real and " "imaginary. Both the real and imaginary parts should have the " f"same shape. Received: x[0].shape = {real.shape}, " f"x[1].shape = {imag.shape}" ) if len(real.shape) < 2: raise ValueError( f"Input should have rank >= 2. " f"Received: input.shape = {real.shape}" ) if real.shape[-2] is not None: output_size = ( real.shape[-2] - 1 ) * self.sequence_stride + self.fft_length if self.length is not None: output_size = self.length elif self.center: output_size = output_size - (self.fft_length // 2) * 2 else: output_size = None new_shape = real.shape[:-2] + (output_size,) return KerasTensor(shape=new_shape, dtype=real.dtype) def call(self, x): return backend.math.istft( x, sequence_length=self.sequence_length, sequence_stride=self.sequence_stride, fft_length=self.fft_length, length=self.length, window=self.window, center=self.center, ) @keras_core_export("keras_core.ops.istft") def istft( x, sequence_length, sequence_stride, fft_length, length=None, window="hann", center=True, ): """Inverse Short-Time Fourier Transform along the last axis of the input. To reconstruct an original waveform, the parameters should be the same in `stft`. Args: x: Tuple of the real and imaginary parts of the input tensor. Both tensors in the tuple should be of floating type. sequence_length: An integer representing the sequence length. sequence_stride: An integer representing the sequence hop size. fft_length: An integer representing the size of the FFT that produced `stft`. length: An integer representing the output is clipped to exactly length. If not specified, no padding or clipping take place. Defaults to `None`. 
window: A string, a tensor of the window or `None`. If `window` is a string, available values are `"hann"` and `"hamming"`. If `window` is a tensor, it will be used directly as the window and its length must be `sequence_length`. If `window` is `None`, no windowing is used. Defaults to `"hann"`. center: Whether `x` was padded on both sides so that the t-th sequence is centered at time `t * sequence_stride`. Defaults to `True`. Returns: A tensor containing the inverse Short-Time Fourier Transform along the last axis of `x`. Example: >>> x = keras_core.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> istft(stft(x, 1, 1, 1), 1, 1, 1) array([0.0, 1.0, 2.0, 3.0, 4.0]) """ if any_symbolic_tensors(x): return ISTFT( sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, window=window, center=center, ).symbolic_call(x) return backend.math.istft( x, sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, length=length, window=window, center=center, ) class Rsqrt(Operation): """Computes reciprocal of square root of x element-wise. Args: x: input tensor Returns: A tensor with the same type as `x`. Example: >>> data = keras_core.ops.convert_to_tensor([1.0, 10.0, 100.0]) >>> keras_core.ops.rsqrt(data) array([1.0, 0.31622776, 0.1], dtype=float32) """ def call(self, x): x = backend.convert_to_tensor(x) return backend.math.rsqrt(x) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype=x.dtype) @keras_core_export("keras_core.ops.rsqrt") def rsqrt(x): if any_symbolic_tensors((x,)): return Rsqrt().symbolic_call(x) x = backend.convert_to_tensor(x) return backend.math.rsqrt(x)
keras-core/keras_core/ops/math.py/0
{ "file_path": "keras-core/keras_core/ops/math.py", "repo_id": "keras-core", "token_count": 14228 }
54
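To make the FFT helpers above concrete, here is a small illustrative sketch (not taken from the source file); it reuses only the `keras_core.ops` exports defined above, and the sequence parameters are arbitrary choices.

import numpy as np

from keras_core import ops

# Element-wise reciprocal square root, matching the Rsqrt docstring above.
x = ops.convert_to_tensor([1.0, 10.0, 100.0])
inv_sqrt = ops.rsqrt(x)  # approximately [1.0, 0.3162, 0.1]

# STFT followed by ISTFT with identical parameters, as the istft docstring
# recommends for reconstructing a waveform. With a "hann" window and 50%
# overlap, the interior of the signal is recovered up to numerical error.
signal = ops.convert_to_tensor(np.linspace(-1.0, 1.0, 32).astype("float32"))
real, imag = ops.stft(
    signal, sequence_length=8, sequence_stride=4, fft_length=8
)
recovered = ops.istft(
    (real, imag), sequence_length=8, sequence_stride=4, fft_length=8
)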
import numpy as np from keras_core import backend from keras_core import ops from keras_core import testing from keras_core.optimizers.adadelta import Adadelta class AdadeltaTest(testing.TestCase): def test_config(self): optimizer = Adadelta( learning_rate=0.5, rho=0.9, epsilon=1e-5, ) self.run_class_serialization_test(optimizer) def test_single_step(self): optimizer = Adadelta(learning_rate=0.5) grads = ops.array([1.0, 6.0, 7.0, 2.0]) vars = backend.Variable([1.0, 2.0, 3.0, 4.0]) optimizer.apply_gradients(zip([grads], [vars])) self.assertAllClose( vars, [0.9993, 1.9993, 2.9993, 3.9993], rtol=1e-4, atol=1e-4 ) def test_weight_decay(self): grads, var1, var2, var3 = ( ops.zeros(()), backend.Variable(2.0), backend.Variable(2.0, name="exclude"), backend.Variable(2.0), ) optimizer_1 = Adadelta(learning_rate=1.0, weight_decay=0.004) optimizer_1.apply_gradients(zip([grads], [var1])) optimizer_2 = Adadelta(learning_rate=1.0, weight_decay=0.004) optimizer_2.exclude_from_weight_decay(var_names=["exclude"]) optimizer_2.apply_gradients(zip([grads, grads], [var1, var2])) optimizer_3 = Adadelta(learning_rate=1.0, weight_decay=0.004) optimizer_3.exclude_from_weight_decay(var_list=[var3]) optimizer_3.apply_gradients(zip([grads, grads], [var1, var3])) self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6) self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6) self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6) def test_correctness_with_golden(self): optimizer = Adadelta(learning_rate=1.0, rho=0.8, epsilon=1e-6) x = backend.Variable(np.ones([10])) grads = ops.arange(0.1, 1.1, 0.1) first_grads = ops.full((10,), 0.01) golden = np.tile( [[0.9978], [0.9947], [0.9915], [0.9882], [0.9849]], (1, 10) ) optimizer.apply_gradients(zip([first_grads], [x])) for i in range(5): self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4) optimizer.apply_gradients(zip([grads], [x])) def test_clip_norm(self): optimizer = Adadelta(clipnorm=1) grad = [np.array([100.0, 100.0])] clipped_grad = optimizer._clip_gradients(grad) self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2]) def test_clip_value(self): optimizer = Adadelta(clipvalue=1) grad = [np.array([100.0, 100.0])] clipped_grad = optimizer._clip_gradients(grad) self.assertAllClose(clipped_grad[0], [1.0, 1.0])
keras-core/keras_core/optimizers/adadelta_test.py/0
{ "file_path": "keras-core/keras_core/optimizers/adadelta_test.py", "repo_id": "keras-core", "token_count": 1392 }
55
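For readers skimming the tests, the single-step behaviour checked above boils down to the following user-level sketch (illustrative; it reuses only APIs that appear in the test file):

from keras_core import backend
from keras_core import ops
from keras_core.optimizers.adadelta import Adadelta

optimizer = Adadelta(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
var = backend.Variable([1.0, 2.0, 3.0, 4.0])

# One update step; the variable moves to roughly
# [0.9993, 1.9993, 2.9993, 3.9993], as asserted in test_single_step.
optimizer.apply_gradients(zip([grads], [var]))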
from keras_core import backend from keras_core import initializers from keras_core import ops from keras_core.api_export import keras_core_export from keras_core.optimizers import optimizer from keras_core.saving import serialization_lib from keras_core.utils import tracking @keras_core_export( [ "keras_core.optimizers.LossScaleOptimizer", "keras_core.mixed_precision.LossScaleOptimizer", ] ) class LossScaleOptimizer(optimizer.Optimizer): """An optimizer that dynamically scales the loss to prevent underflow. Loss scaling is a technique to prevent numeric underflow in intermediate gradients when float16 is used. To prevent underflow, the loss is multiplied (or "scaled") by a certain factor called the "loss scale", which causes intermediate gradients to be scaled by the loss scale as well. The final gradients are divided (or "unscaled") by the loss scale to bring them back to their original value. `LossScaleOptimizer` wraps another optimizer and applies dynamic loss scaling to it. This loss scale is dynamically updated over time as follows: - On any train step, if a nonfinite gradient is encountered, the loss scale is halved, and the train step is skipped. - If `dynamic_growth_steps` have ocurred since the last time the loss scale was updated, and no nonfinite gradients have occurred, the loss scale is doubled. Args: inner_optimizer: The `keras_core.optimizers.Optimizer` instance to wrap. initial_scale: Float. The initial loss scale. This scale will be updated during training. It is recommended for this to be a very high number, because a loss scale that is too high gets lowered far more quickly than a loss scale that is too low gets raised. dynamic_growth_steps: Int. How often to update the scale upwards. After every `dynamic_growth_steps` steps with finite gradients, the loss scale is doubled. {{base_optimizer_keyword_args}} """ def __init__( self, inner_optimizer, initial_scale=2.0**15, dynamic_growth_steps=2000, **kwargs, ): if not kwargs.pop("dynamic", True): raise ValueError( "LossScaleOptimizer no longer suports `dynamic=False`. " "Instead, simply set `loss_scale_factor` directly on the " "`inner_optimizer`." ) super().__init__(learning_rate=0.0, **kwargs) self.inner_optimizer = inner_optimizer self.initial_scale = initial_scale self.dynamic_growth_steps = dynamic_growth_steps @tracking.no_automatic_dependency_tracking def build(self, var_list): self.step_counter = self.add_variable( shape=(), dtype="int", initializer=initializers.Zeros(), name="step_counter", ) self.dynamic_scale = self.add_variable( shape=(), dtype="float32", initializer=initializers.Constant(self.initial_scale), name="dynamic_scale", ) self.inner_optimizer.build(var_list) self.built = True @property def variables(self): return self._variables + self.inner_optimizer.variables def stateless_apply(self, optimizer_variables, grads, trainable_variables): if not self.built: raise ValueError( f"To call `stateless_apply`, {self.__class__.__name__} " "must be built (i.e. its variables must have been created). " "You can build it via `optimizer.build(trainable_variables)`." 
) def handle_finite_grads(): def upscale(): mapping = list(zip(self.variables, optimizer_variables)) with backend.StatelessScope(state_mapping=mapping) as scope: self.step_counter.assign(0) self.dynamic_scale.assign(self.dynamic_scale * 2.0) return [scope.get_current_value(v) for v in self._variables] def increment(): mapping = list(zip(self.variables, optimizer_variables)) with backend.StatelessScope(state_mapping=mapping) as scope: self.step_counter.assign_add(1) return [scope.get_current_value(v) for v in self._variables] mapping = list(zip(self.variables, optimizer_variables)) with backend.StatelessScope(state_mapping=mapping): # Potentially upscale loss and reset counter. own_variables = ops.cond( ops.equal(self.step_counter, self.dynamic_growth_steps - 1), upscale, increment, ) # Unscale gradients. scale = self.dynamic_scale unscaled_grads = [ g if g is None else ops.divide(g, scale) for g in grads ] ( new_trainable_variables, new_inner_variables, ) = self.inner_optimizer.stateless_apply( self.inner_optimizer.variables, unscaled_grads, trainable_variables, ) new_optimizer_variables = own_variables + new_inner_variables return new_trainable_variables, new_optimizer_variables def handle_non_finite_grads(): mapping = list(zip(self.variables, optimizer_variables)) with backend.StatelessScope(state_mapping=mapping) as scope: self.step_counter.assign(0) self.dynamic_scale.assign(self.dynamic_scale / 2.0) new_optimizer_variables = [] for v in self.variables: new_optimizer_variables.append(scope.get_current_value(v)) return trainable_variables, new_optimizer_variables finite = self.check_finite(grads) return ops.cond(finite, handle_finite_grads, handle_non_finite_grads) def apply(self, grads, trainable_variables=None): # Optionally build optimizer. if not self.built: with backend.name_scope(self.name, caller=self): self.build(trainable_variables) self.built = True def handle_finite_grads(): scale = self.dynamic_scale # Unscale gradients. unscaled_grads = [ g if g is None else ops.divide(g, scale) for g in grads ] self.inner_optimizer.apply( unscaled_grads, trainable_variables=trainable_variables ) def upscale(): self.step_counter.assign(0) self.dynamic_scale.assign(self.dynamic_scale * 2.0) def increment(): self.step_counter.assign_add(1) # Potentially upscale loss and reset counter. ops.cond( ops.equal(self.step_counter, self.dynamic_growth_steps - 1), upscale, increment, ) def handle_non_finite_grads(): # If any inf or nan in grads, downscale loss and reset counter. 
self.step_counter.assign(0) self.dynamic_scale.assign(self.dynamic_scale / 2.0) finite = self.check_finite(grads) ops.cond(finite, handle_finite_grads, handle_non_finite_grads) def check_finite(self, grads): tensor_grads = [g for g in grads if g is not None] finite_grads = [ops.all(ops.isfinite(g)) for g in tensor_grads] return ops.all(ops.convert_to_tensor(finite_grads)) @property def learning_rate(self): return self.inner_optimizer.learning_rate @learning_rate.setter def learning_rate(self, learning_rate): self.inner_optimizer.learning_rate = learning_rate def scale_loss(self, loss): scale = self.dynamic_scale if self.built else self.initial_scale return loss * scale def finalize_variable_values(self, var_list): self.inner_optimizer.finalize_variable_values(var_list) def get_config(self): config = super().get_config() inner_optimizer_config = serialization_lib.serialize_keras_object( self.inner_optimizer ) config.update( { "inner_optimizer": inner_optimizer_config, "initial_scale": self.initial_scale, "dynamic_growth_steps": self.dynamic_growth_steps, } ) del config["learning_rate"] return config @classmethod def from_config(cls, config, custom_objects=None): inner_optimizer = serialization_lib.deserialize_keras_object( config.pop("inner_optimizer"), custom_objects=custom_objects, ) return cls(inner_optimizer, **config) LossScaleOptimizer.__doc__ = LossScaleOptimizer.__doc__.replace( "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args )
keras-core/keras_core/optimizers/loss_scale_optimizer.py/0
{ "file_path": "keras-core/keras_core/optimizers/loss_scale_optimizer.py", "repo_id": "keras-core", "token_count": 4121 }
56
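The `LossScaleOptimizer` docstring above carries no usage example; a minimal sketch of how it is typically wired into a custom training loop follows. This is illustrative only: the SGD inner optimizer and the commented-out gradient computation are assumptions, since gradient taping is backend-specific.

from keras_core import optimizers

inner = optimizers.SGD(learning_rate=0.1)
opt = optimizers.LossScaleOptimizer(inner)

# Inside a custom training step (pseudocode for the gradient part):
#   scaled_loss = opt.scale_loss(loss)        # multiply by the dynamic scale
#   grads = <gradients of scaled_loss w.r.t. the trainable variables>
#   opt.apply(grads, trainable_variables)     # unscales grads, skips the step
#                                             # if any gradient is non-finite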
import random as python_random import numpy as np from keras_core import backend from keras_core.api_export import keras_core_export from keras_core.backend.common import global_state from keras_core.utils import jax_utils @keras_core_export("keras_core.random.SeedGenerator") class SeedGenerator: """Generates variable seeds upon each call to a RNG-using function. In Keras, all RNG-using methods (such as `keras_core.random.normal()`) are stateless, meaning that if you pass an integer seed to them (such as `seed=42`), they will return the same values at each call. In order to get different values at each call, you must use a `SeedGenerator` instead as the seed argument. The `SeedGenerator` object is stateful. Example: ```python seed_gen = keras_core.random.SeedGenerator(seed=42) values = keras_core.random.normal(shape=(2, 3), seed=seed_gen) new_values = keras_core.random.normal(shape=(2, 3), seed=seed_gen) ``` Usage in a layer: ```python class Dropout(keras_core.Layer): def __init__(self, **kwargs): super().__init__(**kwargs) self.seed_generator = keras_core.random.SeedGenerator(1337) def call(self, x, training=False): if training: return keras_core.random.dropout( x, rate=0.5, seed=self.seed_generator ) return x ``` """ def __init__(self, seed=None, **kwargs): custom_backend = kwargs.pop("backend", None) if kwargs: raise ValueError(f"Unrecognized keyword arguments: {kwargs}") if custom_backend is not None: self.backend = custom_backend else: self.backend = backend self._initial_seed = seed if seed is None: seed = make_default_seed() if not isinstance(seed, int): raise ValueError( "Argument `seed` must be an integer. " f"Received: seed={seed}" ) def seed_initializer(*args, **kwargs): dtype = kwargs.get("dtype", None) return self.backend.convert_to_tensor([seed, 0], dtype=dtype) self.state = self.backend.Variable( seed_initializer, shape=(2,), dtype="uint32", trainable=False, name="seed_generator_state", ) def next(self, ordered=True): seed_state = self.state # Use * 1 to create a copy new_seed_value = seed_state.value * 1 if ordered: increment = self.backend.convert_to_tensor( np.array([0, 1]), dtype="uint32" ) self.state.assign(seed_state + increment) else: # This produces a sequence of near-unique numbers # between 0 and 1M self.state.assign((seed_state + 1) * 5387 % 933199) return new_seed_value def global_seed_generator(): if jax_utils.is_in_jax_tracing_scope(): raise ValueError( "When tracing a JAX function, " "you should only use seeded random ops, e.g. " "you should create a `SeedGenerator` instance, attach it " "to your layer/model, and pass the instance as the `seed` " "argument when calling random ops. Unseeded random ops " "would get incorrectly traced by JAX and would become constant " "after tracing." ) gen = global_state.get_global_attribute("global_seed_generator") if gen is None: gen = SeedGenerator() global_state.set_global_attribute("global_seed_generator", gen) return gen def make_default_seed(): return python_random.randint(1, int(1e9)) def draw_seed(seed): from keras_core.backend import convert_to_tensor if isinstance(seed, SeedGenerator): return seed.next() elif isinstance(seed, int): return convert_to_tensor([seed, 0], dtype="uint32") elif seed is None: return global_seed_generator().next(ordered=False) raise ValueError( "Argument `seed` must be either an integer " "or an instance of `SeedGenerator`. " f"Received: seed={seed} (of type {type(seed)})" )
keras-core/keras_core/random/seed_generator.py/0
{ "file_path": "keras-core/keras_core/random/seed_generator.py", "repo_id": "keras-core", "token_count": 1866 }
57
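A quick illustration of the statefulness described in the docstring above (the specific shapes and seeds are arbitrary; only the behaviour is taken from the source):

from keras_core import random

gen = random.SeedGenerator(seed=42)
a = random.normal(shape=(2,), seed=gen)
b = random.normal(shape=(2,), seed=gen)   # differs from `a`: the state advanced

c = random.normal(shape=(2,), seed=1337)
d = random.normal(shape=(2,), seed=1337)  # identical to `c`: integer seeds are stateless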
""" Separation of concerns: DataAdapter: - x, y - sample_weight - class_weight - shuffle - batch_size - steps, as it relates to batch_size for array data EpochIterator: - whether to yield numpy or tf data - steps - most argument validation Trainer: - steps_per_execution - validation_split - validation_data - callbacks - validation_freq - epochs - initial_epoch - any backend-specific concern such as distribution PyDataset: - num_workers - use_multiprocessing - max_queue_size EpochIterator steps: 1. Look at data type and select correct DataHandler 2. Instantiate DataHandler with correct arguments 3. Raise or warn on unused arguments 4. in __iter__, iterate, either for a fixed number of steps or until there is no data """ import types import warnings from keras_core.trainers.data_adapters import array_data_adapter from keras_core.trainers.data_adapters import generator_data_adapter from keras_core.trainers.data_adapters import py_dataset_adapter from keras_core.trainers.data_adapters import tf_dataset_adapter from keras_core.trainers.data_adapters import torch_data_adapter from keras_core.utils.module_utils import tensorflow as tf class EpochIterator: def __init__( self, x, y=None, sample_weight=None, batch_size=None, steps_per_epoch=None, shuffle=False, class_weight=None, steps_per_execution=1, ): self.steps_per_epoch = steps_per_epoch self.steps_per_execution = steps_per_execution if steps_per_epoch: self._current_iterator = None self._insufficient_data = False if array_data_adapter.can_convert_arrays((x, y, sample_weight)): self.data_adapter = array_data_adapter.ArrayDataAdapter( x, y, sample_weight=sample_weight, class_weight=class_weight, shuffle=shuffle, batch_size=batch_size, steps=steps_per_epoch, ) elif tf.available and isinstance(x, tf.data.Dataset): self.data_adapter = tf_dataset_adapter.TFDatasetAdapter( x, class_weight=class_weight ) # Unsupported args: y, sample_weight, shuffle if y is not None: raise_unsupported_arg("y", "the targets", "tf.data.Dataset") if sample_weight is not None: raise_unsupported_arg( "sample_weights", "the sample weights", "tf.data.Dataset" ) # TODO: should we warn or not? # warnings.warn( # "`shuffle=True` was passed, but will be ignored since the " # "data `x` was provided as a tf.data.Dataset. The Dataset is " # "expected to already be shuffled " # "(via `.shuffle(tf.data.AUTOTUNE)`)" # ) elif isinstance(x, py_dataset_adapter.PyDataset): self.data_adapter = py_dataset_adapter.PyDatasetAdapter( x, class_weight=class_weight, shuffle=shuffle ) if y is not None: raise_unsupported_arg("y", "the targets", "PyDataset") if sample_weight is not None: raise_unsupported_arg( "sample_weights", "the sample weights", "PyDataset" ) elif is_torch_dataloader(x): self.data_adapter = torch_data_adapter.TorchDataLoaderAdapter(x) if y is not None: raise_unsupported_arg("y", "the targets", "torch DataLoader") if sample_weight is not None: raise_unsupported_arg( "sample_weights", "the sample weights", "torch DataLoader" ) if class_weight is not None: raise ValueError( "Argument `class_weight` is not supported for torch " f"DataLoader inputs. Received: class_weight={class_weight}" ) # TODO: should we warn or not? # warnings.warn( # "`shuffle=True` was passed, but will be ignored since the " # "data `x` was provided as a torch DataLoader. The DataLoader " # "is expected to already be shuffled." 
# ) elif isinstance(x, types.GeneratorType): self.data_adapter = generator_data_adapter.GeneratorDataAdapter(x) if y is not None: raise_unsupported_arg("y", "the targets", "PyDataset") if sample_weight is not None: raise_unsupported_arg( "sample_weights", "the sample weights", "PyDataset" ) if class_weight is not None: raise ValueError( "Argument `class_weight` is not supported for Python " f"generator inputs. Received: class_weight={class_weight}" ) if shuffle: raise ValueError( "Argument `shuffle` is not supported for Python generator " f"inputs. Received: shuffle={shuffle}" ) else: raise ValueError( f"Unrecognized data type: x={x} (of type {type(x)})" ) self._num_batches = self.data_adapter.num_batches def _get_iterator(self, return_type): if return_type not in ("np", "tf"): raise ValueError( "Argument `return_type` must be one of `{'np', 'tf'}`. " f"Received instead: return_type={return_type}" ) if return_type == "np": iterator = self.data_adapter.get_numpy_iterator() else: iterator = self.data_adapter.get_tf_dataset() return iterator def enumerate_epoch(self, return_type="np"): buffer = [] if self.steps_per_epoch: if not self._current_iterator: self._current_iterator = self._get_iterator(return_type) self._insufficient_data = False for step in range(self.steps_per_epoch): if self._insufficient_data: break if tf.available: errors = (StopIteration, tf.errors.OutOfRangeError) else: errors = (StopIteration,) try: data = next(self._current_iterator) buffer.append(data) if len(buffer) == self.steps_per_execution: yield step - len(buffer) + 1, buffer buffer = [] except errors: warnings.warn( "Your input ran out of data; interrupting epoch. " "Make sure that your dataset or generator can generate " "at least `steps_per_epoch * epochs` batches. " "You may need to use the `.repeat()` " "function when building your dataset.", stacklevel=2, ) self._current_iterator = None self._insufficient_data = True if buffer: yield step - len(buffer) + 1, buffer else: for step, data in enumerate(self._get_iterator(return_type)): buffer.append(data) if len(buffer) == self.steps_per_execution: yield step - len(buffer) + 1, buffer buffer = [] if buffer: yield step - len(buffer) + 1, buffer if not self._num_batches: # Infer the number of batches returned by the data_adater. # Assumed static. self._num_batches = step + 1 self.data_adapter.on_epoch_end() @property def num_batches(self): if self.steps_per_epoch: return self.steps_per_epoch # Either copied from the data_adapter, or # inferred at the end of an iteration. return self._num_batches def raise_unsupported_arg(arg_name, arg_description, input_type): raise ValueError( f"When providing `x` as a {input_type}, `{arg_name}` " f"should not be passed. Instead, {arg_description} should " f"be included as part of the {input_type}." ) def is_torch_dataloader(x): if hasattr(x, "__class__"): for parent in x.__class__.__mro__: if parent.__name__ == "DataLoader" and str( parent.__module__ ).startswith("torch.utils.data"): return True return False
keras-core/keras_core/trainers/epoch_iterator.py/0
{ "file_path": "keras-core/keras_core/trainers/epoch_iterator.py", "repo_id": "keras-core", "token_count": 4387 }
58
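Steps 1-4 listed in the module docstring above can be seen end to end with in-memory arrays. The sketch below drives this internal class directly, which is not a public workflow but mirrors how the trainer uses it; the array shapes are arbitrary.

import numpy as np

from keras_core.trainers.epoch_iterator import EpochIterator

x = np.random.random((40, 4)).astype("float32")
y = np.random.randint(0, 2, size=(40, 1))

# ArrayDataAdapter is selected automatically for NumPy inputs.
iterator = EpochIterator(x=x, y=y, batch_size=8, shuffle=True)
for step, batches in iterator.enumerate_epoch(return_type="np"):
    # `batches` holds up to `steps_per_execution` batches (1 by default).
    pass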
import hashlib import os import pathlib import shutil import tarfile import tempfile import urllib import zipfile from unittest.mock import patch from keras_core.testing import test_case from keras_core.utils import file_utils class PathToStringTest(test_case.TestCase): def test_path_to_string_with_string_path(self): path = "/path/to/file.txt" string_path = file_utils.path_to_string(path) self.assertEqual(string_path, path) def test_path_to_string_with_PathLike_object(self): path = pathlib.Path("/path/to/file.txt") string_path = file_utils.path_to_string(path) self.assertEqual(string_path, str(path)) def test_path_to_string_with_non_string_typed_path_object(self): class NonStringTypedPathObject: def __fspath__(self): return "/path/to/file.txt" path = NonStringTypedPathObject() string_path = file_utils.path_to_string(path) self.assertEqual(string_path, "/path/to/file.txt") def test_path_to_string_with_none_path(self): string_path = file_utils.path_to_string(None) self.assertEqual(string_path, None) class ResolvePathTest(test_case.TestCase): def test_resolve_path_with_absolute_path(self): path = "/path/to/file.txt" resolved_path = file_utils.resolve_path(path) self.assertEqual(resolved_path, os.path.realpath(os.path.abspath(path))) def test_resolve_path_with_relative_path(self): path = "./file.txt" resolved_path = file_utils.resolve_path(path) self.assertEqual(resolved_path, os.path.realpath(os.path.abspath(path))) class IsPathInDirTest(test_case.TestCase): def test_is_path_in_dir_with_absolute_paths(self): base_dir = "/path/to/base_dir" path = "/path/to/base_dir/file.txt" self.assertTrue(file_utils.is_path_in_dir(path, base_dir)) class IsLinkInDirTest(test_case.TestCase): def setUp(self): self._cleanup("test_path/to/base_dir") self._cleanup("./base_dir") def _cleanup(self, base_dir): if os.path.exists(base_dir): shutil.rmtree(base_dir) def test_is_link_in_dir_with_absolute_paths(self): base_dir = "test_path/to/base_dir" link_path = os.path.join(base_dir, "symlink") target_path = os.path.join(base_dir, "file.txt") # Create the base_dir directory if it does not exist. os.makedirs(base_dir, exist_ok=True) # Create the file.txt file. with open(target_path, "w") as f: f.write("Hello, world!") os.symlink(target_path, link_path) # Creating a stat_result-like object with a name attribute info = os.lstat(link_path) info = type( "stat_with_name", (object,), { "name": os.path.basename(link_path), "linkname": os.readlink(link_path), }, ) self.assertTrue(file_utils.is_link_in_dir(info, base_dir)) def test_is_link_in_dir_with_relative_paths(self): base_dir = "./base_dir" link_path = os.path.join(base_dir, "symlink") target_path = os.path.join(base_dir, "file.txt") # Create the base_dir directory if it does not exist. os.makedirs(base_dir, exist_ok=True) # Create the file.txt file. 
with open(target_path, "w") as f: f.write("Hello, world!") os.symlink(target_path, link_path) # Creating a stat_result-like object with a name attribute info = os.lstat(link_path) info = type( "stat_with_name", (object,), { "name": os.path.basename(link_path), "linkname": os.readlink(link_path), }, ) self.assertTrue(file_utils.is_link_in_dir(info, base_dir)) def tearDown(self): self._cleanup("test_path/to/base_dir") self._cleanup("./base_dir") class FilterSafePathsTest(test_case.TestCase): def setUp(self): self.base_dir = os.path.join(os.getcwd(), "temp_dir") os.makedirs(self.base_dir, exist_ok=True) self.tar_path = os.path.join(self.base_dir, "test.tar") def tearDown(self): os.remove(self.tar_path) shutil.rmtree(self.base_dir) def test_member_within_base_dir(self): """Test a member within the base directory.""" with tarfile.open(self.tar_path, "w") as tar: tar.add(__file__, arcname="safe_path.txt") with tarfile.open(self.tar_path, "r") as tar: members = list(file_utils.filter_safe_paths(tar.getmembers())) self.assertEqual(len(members), 1) self.assertEqual(members[0].name, "safe_path.txt") def test_symlink_within_base_dir(self): """Test a symlink pointing within the base directory.""" symlink_path = os.path.join(self.base_dir, "symlink.txt") target_path = os.path.join(self.base_dir, "target.txt") with open(target_path, "w") as f: f.write("target") os.symlink(target_path, symlink_path) with tarfile.open(self.tar_path, "w") as tar: tar.add(symlink_path, arcname="symlink.txt") with tarfile.open(self.tar_path, "r") as tar: members = list(file_utils.filter_safe_paths(tar.getmembers())) self.assertEqual(len(members), 1) self.assertEqual(members[0].name, "symlink.txt") os.remove(symlink_path) os.remove(target_path) def test_invalid_path_warning(self): """Test warning for an invalid path during archive extraction.""" invalid_path = os.path.join(os.getcwd(), "invalid.txt") with open(invalid_path, "w") as f: f.write("invalid") with tarfile.open(self.tar_path, "w") as tar: tar.add( invalid_path, arcname="../../invalid.txt" ) # Path intended to be outside of base dir with tarfile.open(self.tar_path, "r") as tar: with patch("warnings.warn") as mock_warn: _ = list(file_utils.filter_safe_paths(tar.getmembers())) warning_msg = ( "Skipping invalid path during archive extraction: " "'../../invalid.txt'." ) mock_warn.assert_called_with(warning_msg, stacklevel=2) os.remove(invalid_path) def test_symbolic_link_in_base_dir(self): """symbolic link within the base directory is correctly processed.""" symlink_path = os.path.join(self.base_dir, "symlink.txt") target_path = os.path.join(self.base_dir, "target.txt") # Create a target file and then a symbolic link pointing to it. with open(target_path, "w") as f: f.write("target") os.symlink(target_path, symlink_path) # Add the symbolic link to the tar archive. with tarfile.open(self.tar_path, "w") as tar: tar.add(symlink_path, arcname="symlink.txt") with tarfile.open(self.tar_path, "r") as tar: members = list(file_utils.filter_safe_paths(tar.getmembers())) self.assertEqual(len(members), 1) self.assertEqual(members[0].name, "symlink.txt") self.assertTrue( members[0].issym() ) # Explicitly assert it's a symbolic link. os.remove(symlink_path) os.remove(target_path) class ExtractArchiveTest(test_case.TestCase): def setUp(self): """Create temporary directories and files for testing.""" self.temp_dir = tempfile.mkdtemp() self.file_content = "Hello, world!" 
# Create sample files to be archived with open(os.path.join(self.temp_dir, "sample.txt"), "w") as f: f.write(self.file_content) def tearDown(self): """Clean up temporary directories.""" shutil.rmtree(self.temp_dir) def create_tar(self): archive_path = os.path.join(self.temp_dir, "sample.tar") with tarfile.open(archive_path, "w") as archive: archive.add( os.path.join(self.temp_dir, "sample.txt"), arcname="sample.txt" ) return archive_path def create_zip(self): archive_path = os.path.join(self.temp_dir, "sample.zip") with zipfile.ZipFile(archive_path, "w") as archive: archive.write( os.path.join(self.temp_dir, "sample.txt"), arcname="sample.txt" ) return archive_path def test_extract_tar(self): archive_path = self.create_tar() extract_path = os.path.join(self.temp_dir, "extract_tar") result = file_utils.extract_archive(archive_path, extract_path, "tar") self.assertTrue(result) with open(os.path.join(extract_path, "sample.txt"), "r") as f: self.assertEqual(f.read(), self.file_content) def test_extract_zip(self): archive_path = self.create_zip() extract_path = os.path.join(self.temp_dir, "extract_zip") result = file_utils.extract_archive(archive_path, extract_path, "zip") self.assertTrue(result) with open(os.path.join(extract_path, "sample.txt"), "r") as f: self.assertEqual(f.read(), self.file_content) def test_extract_auto(self): # This will test the 'auto' functionality tar_archive_path = self.create_tar() zip_archive_path = self.create_zip() extract_tar_path = os.path.join(self.temp_dir, "extract_auto_tar") extract_zip_path = os.path.join(self.temp_dir, "extract_auto_zip") self.assertTrue( file_utils.extract_archive(tar_archive_path, extract_tar_path) ) self.assertTrue( file_utils.extract_archive(zip_archive_path, extract_zip_path) ) with open(os.path.join(extract_tar_path, "sample.txt"), "r") as f: self.assertEqual(f.read(), self.file_content) with open(os.path.join(extract_zip_path, "sample.txt"), "r") as f: self.assertEqual(f.read(), self.file_content) def test_non_existent_file(self): extract_path = os.path.join(self.temp_dir, "non_existent") with self.assertRaises(FileNotFoundError): file_utils.extract_archive("non_existent.tar", extract_path) def test_archive_format_none(self): archive_path = self.create_tar() extract_path = os.path.join(self.temp_dir, "none_format") result = file_utils.extract_archive(archive_path, extract_path, None) self.assertFalse(result) def test_runtime_error_during_extraction(self): tar_path = self.create_tar() extract_path = os.path.join(self.temp_dir, "runtime_error_extraction") with patch.object( tarfile.TarFile, "extractall", side_effect=RuntimeError ): with self.assertRaises(RuntimeError): file_utils.extract_archive(tar_path, extract_path, "tar") self.assertFalse(os.path.exists(extract_path)) def test_keyboard_interrupt_during_extraction(self): tar_path = self.create_tar() extract_path = os.path.join( self.temp_dir, "keyboard_interrupt_extraction" ) with patch.object( tarfile.TarFile, "extractall", side_effect=KeyboardInterrupt ): with self.assertRaises(KeyboardInterrupt): file_utils.extract_archive(tar_path, extract_path, "tar") self.assertFalse(os.path.exists(extract_path)) class GetFileTest(test_case.TestCase): def setUp(self): """Set up temporary directories and sample files.""" self.temp_dir = self.get_temp_dir() self.file_path = os.path.join(self.temp_dir, "sample_file.txt") with open(self.file_path, "w") as f: f.write("Sample content") def test_valid_tar_extraction(self): """Test valid tar.gz extraction and hash validation.""" dest_dir = 
self.get_temp_dir() orig_dir = self.get_temp_dir() text_file_path, tar_file_path = self._create_tar_file(orig_dir) self._test_file_extraction_and_validation( dest_dir, tar_file_path, "tar.gz" ) def test_valid_zip_extraction(self): """Test valid zip extraction and hash validation.""" dest_dir = self.get_temp_dir() orig_dir = self.get_temp_dir() text_file_path, zip_file_path = self._create_zip_file(orig_dir) self._test_file_extraction_and_validation( dest_dir, zip_file_path, "zip" ) def test_valid_text_file_download(self): """Test valid text file download and hash validation.""" dest_dir = self.get_temp_dir() orig_dir = self.get_temp_dir() text_file_path = os.path.join(orig_dir, "test.txt") with open(text_file_path, "w") as text_file: text_file.write("Float like a butterfly, sting like a bee.") self._test_file_extraction_and_validation( dest_dir, text_file_path, None ) def test_get_file_with_tgz_extension(self): """Test extraction of file with .tar.gz extension.""" dest_dir = self.get_temp_dir() orig_dir = dest_dir text_file_path, tar_file_path = self._create_tar_file(orig_dir) origin = urllib.parse.urljoin( "file://", urllib.request.pathname2url(os.path.abspath(tar_file_path)), ) path = file_utils.get_file( "test.txt.tar.gz", origin, untar=True, cache_subdir=dest_dir ) self.assertTrue(path.endswith(".txt")) self.assertTrue(os.path.exists(path)) def test_get_file_with_integrity_check(self): """Test file download with integrity check.""" orig_dir = self.get_temp_dir() file_path = os.path.join(orig_dir, "test.txt") with open(file_path, "w") as text_file: text_file.write("Float like a butterfly, sting like a bee.") hashval = file_utils.hash_file(file_path) origin = urllib.parse.urljoin( "file://", urllib.request.pathname2url(os.path.abspath(file_path)) ) path = file_utils.get_file("test.txt", origin, file_hash=hashval) self.assertTrue(os.path.exists(path)) def test_get_file_with_failed_integrity_check(self): """Test file download with failed integrity check.""" orig_dir = self.get_temp_dir() file_path = os.path.join(orig_dir, "test.txt") with open(file_path, "w") as text_file: text_file.write("Float like a butterfly, sting like a bee.") hashval = "0" * 64 origin = urllib.parse.urljoin( "file://", urllib.request.pathname2url(os.path.abspath(file_path)) ) with self.assertRaisesRegex( ValueError, "Incomplete or corrupted file.*" ): _ = file_utils.get_file("test.txt", origin, file_hash=hashval) def _create_tar_file(self, directory): """Helper function to create a tar file.""" text_file_path = os.path.join(directory, "test.txt") tar_file_path = os.path.join(directory, "test.tar.gz") with open(text_file_path, "w") as text_file: text_file.write("Float like a butterfly, sting like a bee.") with tarfile.open(tar_file_path, "w:gz") as tar_file: tar_file.add(text_file_path) return text_file_path, tar_file_path def _create_zip_file(self, directory): """Helper function to create a zip file.""" text_file_path = os.path.join(directory, "test.txt") zip_file_path = os.path.join(directory, "test.zip") with open(text_file_path, "w") as text_file: text_file.write("Float like a butterfly, sting like a bee.") with zipfile.ZipFile(zip_file_path, "w") as zip_file: zip_file.write(text_file_path) return text_file_path, zip_file_path def _test_file_extraction_and_validation( self, dest_dir, file_path, archive_type ): """Helper function for file extraction and validation.""" origin = urllib.parse.urljoin( "file://", urllib.request.pathname2url(os.path.abspath(file_path)), ) hashval_sha256 = file_utils.hash_file(file_path) 
hashval_md5 = file_utils.hash_file(file_path, algorithm="md5") if archive_type: extract = True else: extract = False path = file_utils.get_file( "test", origin, md5_hash=hashval_md5, extract=extract, cache_subdir=dest_dir, ) path = file_utils.get_file( "test", origin, file_hash=hashval_sha256, extract=extract, cache_subdir=dest_dir, ) self.assertTrue(os.path.exists(path)) self.assertTrue(file_utils.validate_file(path, hashval_sha256)) self.assertTrue(file_utils.validate_file(path, hashval_md5)) os.remove(path) def test_exists(self): temp_dir = self.get_temp_dir() file_path = os.path.join(temp_dir, "test_exists.txt") with open(file_path, "w") as f: f.write("test") self.assertTrue(file_utils.exists(file_path)) self.assertFalse( file_utils.exists(os.path.join(temp_dir, "non_existent.txt")) ) def test_file_open_read(self): temp_dir = self.get_temp_dir() file_path = os.path.join(temp_dir, "test_file.txt") content = "test content" with open(file_path, "w") as f: f.write(content) with file_utils.File(file_path, "r") as f: self.assertEqual(f.read(), content) def test_file_open_write(self): temp_dir = self.get_temp_dir() file_path = os.path.join(temp_dir, "test_file_write.txt") content = "test write content" with file_utils.File(file_path, "w") as f: f.write(content) with open(file_path, "r") as f: self.assertEqual(f.read(), content) def test_isdir(self): temp_dir = self.get_temp_dir() self.assertTrue(file_utils.isdir(temp_dir)) file_path = os.path.join(temp_dir, "test_isdir.txt") with open(file_path, "w") as f: f.write("test") self.assertFalse(file_utils.isdir(file_path)) def test_join_simple(self): self.assertEqual(file_utils.join("/path", "to", "dir"), "/path/to/dir") def test_join_single_directory(self): self.assertEqual(file_utils.join("/path"), "/path") def test_listdir(self): content = file_utils.listdir(self.temp_dir) self.assertIn("sample_file.txt", content) def test_makedirs_and_rmtree(self): new_dir = os.path.join(self.temp_dir, "new_directory") file_utils.makedirs(new_dir) self.assertTrue(os.path.isdir(new_dir)) file_utils.rmtree(new_dir) self.assertFalse(os.path.exists(new_dir)) def test_copy(self): dest_path = os.path.join(self.temp_dir, "copy_sample_file.txt") file_utils.copy(self.file_path, dest_path) self.assertTrue(os.path.exists(dest_path)) with open(dest_path, "r") as f: content = f.read() self.assertEqual(content, "Sample content") def test_remove_sub_directory(self): parent_dir = os.path.join(self.get_temp_dir(), "parent_directory") child_dir = os.path.join(parent_dir, "child_directory") file_utils.makedirs(child_dir) file_utils.rmtree(parent_dir) self.assertFalse(os.path.exists(parent_dir)) self.assertFalse(os.path.exists(child_dir)) def test_remove_files_inside_directory(self): dir_path = os.path.join(self.get_temp_dir(), "test_directory") file_path = os.path.join(dir_path, "test.txt") file_utils.makedirs(dir_path) with open(file_path, "w") as f: f.write("Test content") file_utils.rmtree(dir_path) self.assertFalse(os.path.exists(dir_path)) self.assertFalse(os.path.exists(file_path)) def test_handle_complex_paths(self): complex_dir = os.path.join(self.get_temp_dir(), "complex dir@#%&!") file_utils.makedirs(complex_dir) file_utils.rmtree(complex_dir) self.assertFalse(os.path.exists(complex_dir)) class HashFileTest(test_case.TestCase): def setUp(self): self.test_content = b"Hello, World!" 
self.temp_file = tempfile.NamedTemporaryFile(delete=False) self.temp_file.write(self.test_content) self.temp_file.close() def tearDown(self): os.remove(self.temp_file.name) def test_hash_file_sha256(self): """Test SHA256 hashing of a file.""" expected_sha256 = ( "dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f" ) calculated_sha256 = file_utils.hash_file( self.temp_file.name, algorithm="sha256" ) self.assertEqual(expected_sha256, calculated_sha256) def test_hash_file_md5(self): """Test MD5 hashing of a file.""" expected_md5 = "65a8e27d8879283831b664bd8b7f0ad4" calculated_md5 = file_utils.hash_file( self.temp_file.name, algorithm="md5" ) self.assertEqual(expected_md5, calculated_md5) class TestValidateFile(test_case.TestCase): def setUp(self): self.tmp_file = tempfile.NamedTemporaryFile(delete=False) self.tmp_file.write(b"Hello, World!") self.tmp_file.close() self.sha256_hash = ( "dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f" ) self.md5_hash = "65a8e27d8879283831b664bd8b7f0ad4" def test_validate_file_sha256(self): """Validate SHA256 hash of a file.""" self.assertTrue( file_utils.validate_file( self.tmp_file.name, self.sha256_hash, "sha256" ) ) def test_validate_file_md5(self): """Validate MD5 hash of a file.""" self.assertTrue( file_utils.validate_file(self.tmp_file.name, self.md5_hash, "md5") ) def test_validate_file_auto_sha256(self): """Auto-detect and validate SHA256 hash.""" self.assertTrue( file_utils.validate_file( self.tmp_file.name, self.sha256_hash, "auto" ) ) def test_validate_file_auto_md5(self): """Auto-detect and validate MD5 hash.""" self.assertTrue( file_utils.validate_file(self.tmp_file.name, self.md5_hash, "auto") ) def test_validate_file_wrong_hash(self): """Test validation with incorrect hash.""" wrong_hash = "deadbeef" * 8 self.assertFalse( file_utils.validate_file(self.tmp_file.name, wrong_hash, "sha256") ) def tearDown(self): os.remove(self.tmp_file.name) class ResolveHasherTest(test_case.TestCase): def test_resolve_hasher_sha256(self): """Test resolving hasher for sha256 algorithm.""" hasher = file_utils.resolve_hasher("sha256") self.assertIsInstance(hasher, type(hashlib.sha256())) def test_resolve_hasher_auto_sha256(self): """Auto-detect and resolve hasher for sha256.""" hasher = file_utils.resolve_hasher("auto", file_hash="a" * 64) self.assertIsInstance(hasher, type(hashlib.sha256())) def test_resolve_hasher_auto_md5(self): """Auto-detect and resolve hasher for md5.""" hasher = file_utils.resolve_hasher("auto", file_hash="a" * 32) self.assertIsInstance(hasher, type(hashlib.md5())) def test_resolve_hasher_default(self): """Resolve hasher with a random algorithm value.""" hasher = file_utils.resolve_hasher("random_value") self.assertIsInstance(hasher, type(hashlib.md5())) class IsRemotePathTest(test_case.TestCase): def test_gcs_remote_path(self): self.assertTrue(file_utils.is_remote_path("/gcs/some/path/to/file.txt")) self.assertTrue(file_utils.is_remote_path("/gcs/another/directory/")) self.assertTrue(file_utils.is_remote_path("gcs://bucket/some/file.txt")) def test_hdfs_remote_path(self): self.assertTrue(file_utils.is_remote_path("hdfs://some/path/on/hdfs")) self.assertTrue(file_utils.is_remote_path("/hdfs/some/local/path")) def test_cns_remote_path(self): self.assertTrue(file_utils.is_remote_path("/cns/some/path")) def test_cfs_remote_path(self): self.assertTrue(file_utils.is_remote_path("/cfs/some/path")) def test_non_remote_paths(self): self.assertFalse(file_utils.is_remote_path("/local/path/to/file.txt")) self.assertFalse( 
file_utils.is_remote_path("C:\\local\\path\\on\\windows\\file.txt") ) self.assertFalse(file_utils.is_remote_path("~/relative/path/")) self.assertFalse(file_utils.is_remote_path("./another/relative/path")) self.assertFalse(file_utils.is_remote_path("/local/path")) self.assertFalse(file_utils.is_remote_path("./relative/path")) self.assertFalse(file_utils.is_remote_path("~/relative/path")) class TestRaiseIfNoGFile(test_case.TestCase): def test_raise_if_no_gfile_raises_correct_message(self): path = "gs://bucket/some/file.txt" expected_error_msg = ( "Handling remote paths requires installing TensorFlow " f".*Received path: {path}" ) with self.assertRaisesRegex(ValueError, expected_error_msg): file_utils._raise_if_no_gfile(path)
keras-core/keras_core/utils/file_utils_test.py/0
{ "file_path": "keras-core/keras_core/utils/file_utils_test.py", "repo_id": "keras-core", "token_count": 11922 }
59
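Outside the test harness, the `get_file` plus hash-validation pattern exercised above looks like this (illustrative; the local `file://` origin stands in for a real download URL):

import os
import tempfile
import urllib.parse
import urllib.request

from keras_core.utils import file_utils

# Create a small local file and expose it through a file:// origin.
src = os.path.join(tempfile.mkdtemp(), "hello.txt")
with open(src, "w") as f:
    f.write("Float like a butterfly, sting like a bee.")
origin = urllib.parse.urljoin(
    "file://", urllib.request.pathname2url(os.path.abspath(src))
)

# Fetch into the Keras cache and verify the sha256 hash on the way in.
fetched = file_utils.get_file(
    "hello.txt", origin, file_hash=file_utils.hash_file(src)
)
assert file_utils.exists(fetched)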
import base64 import marshal from keras_core import testing from keras_core.utils import python_utils class PythonUtilsTest(testing.TestCase): def test_func_dump_and_load(self): def my_function(x, y=1, **kwargs): return x + y serialized = python_utils.func_dump(my_function) deserialized = python_utils.func_load(serialized) self.assertEqual(deserialized(2, y=3), 5) def test_removesuffix(self): x = "model.keras" self.assertEqual(python_utils.removesuffix(x, ".keras"), "model") self.assertEqual(python_utils.removesuffix(x, "model"), x) def test_removeprefix(self): x = "model.keras" self.assertEqual(python_utils.removeprefix(x, "model"), ".keras") self.assertEqual(python_utils.removeprefix(x, ".keras"), x) def test_func_load_defaults_as_tuple(self): # Using tuple as a default argument def dummy_function(x=(1, 2, 3)): pass serialized = python_utils.func_dump(dummy_function) deserialized = python_utils.func_load(serialized) # Ensure that the defaults are still a tuple self.assertIsInstance(deserialized.__defaults__[0], tuple) # Ensure that the tuple default remains unchanged self.assertEqual(deserialized.__defaults__[0], (1, 2, 3)) def test_remove_long_seq_standard_case(self): sequences = [[1], [2, 2], [3, 3, 3], [4, 4, 4, 4]] labels = [1, 2, 3, 4] new_sequences, new_labels = python_utils.remove_long_seq( 3, sequences, labels ) self.assertEqual(new_sequences, [[1], [2, 2]]) self.assertEqual(new_labels, [1, 2]) def test_func_load_with_closure(self): def outer_fn(x): def inner_fn(y): return x + y return inner_fn func_with_closure = outer_fn(10) serialized = python_utils.func_dump(func_with_closure) deserialized = python_utils.func_load(serialized) self.assertEqual(deserialized(5), 15) def test_func_load_closure_conversion(self): def my_function_with_closure(x): return x + y y = 5 serialized = python_utils.func_dump(my_function_with_closure) deserialized = python_utils.func_load(serialized) self.assertEqual(deserialized(5), 10) def test_ensure_value_to_cell(self): value_to_test = "test_value" def dummy_fn(): value_to_test cell_value = dummy_fn.__closure__[0].cell_contents self.assertEqual(value_to_test, cell_value) def test_closure_processing(self): def simple_function(x): return x + 10 serialized = python_utils.func_dump(simple_function) deserialized = python_utils.func_load(serialized) self.assertEqual(deserialized(5), 15) def test_func_load_valid_encoded_code(self): def another_simple_function(x): return x * 2 raw_data = marshal.dumps(another_simple_function.__code__) valid_encoded_code = base64.b64encode(raw_data).decode("utf-8") try: python_utils.func_load(valid_encoded_code) except (UnicodeEncodeError, ValueError): self.fail("Expected no error for valid code, but got an error.") def test_func_load_bad_encoded_code(self): bad_encoded_code = "This isn't valid base64!" with self.assertRaises(AttributeError): python_utils.func_load(bad_encoded_code)
keras-core/keras_core/utils/python_utils_test.py/0
{ "file_path": "keras-core/keras_core/utils/python_utils_test.py", "repo_id": "keras-core", "token_count": 1568 }
60
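The serialization round trip these tests cover reduces to a few lines in user code (illustrative):

from keras_core.utils import python_utils


def add(x, y=1):
    return x + y


# func_dump marshals the code object; func_load rebuilds an equivalent callable.
restored = python_utils.func_load(python_utils.func_dump(add))
assert restored(2, y=3) == 5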
from functools import wraps from keras_core.backend.common.global_state import get_global_attribute from keras_core.backend.common.global_state import set_global_attribute class DotNotTrackScope: def __enter__(self): self.original_value = is_tracking_enabled() set_global_attribute("tracking_on", False) def __exit__(self, *args, **kwargs): set_global_attribute("tracking_on", self.original_value) def is_tracking_enabled(): return get_global_attribute("tracking_on", True) def no_automatic_dependency_tracking(fn): @wraps(fn) def wrapper(*args, **kwargs): with DotNotTrackScope(): return fn(*args, **kwargs) return wrapper class Tracker: """Attribute tracker, used for e.g. Variable tracking. Monitors certain attribute types and put them in appropriate lists in case of a match. Also passively tracks certain mutable collections (dict, list) so that items added to them later still get tracked. This is done by wrapping these collections into an equivalent, tracking-aware object. Usage: ```python def __init__(self): self.tracker = Tracker( # Format: `name: (test_fn, store)` { "variables": (lambda x: isinstance(x, Variable), self._variables), "metrics": (lambda x: isinstance(x, Metric), self._metrics), "layers": (lambda x: isinstance(x, Layer), self._layers), } ) def __setattr__(self, name, value): if hasattr(self, "_tracker"): value = self._tracker.track(value) return super().__setattr__(name, value) ``` """ def __init__(self, config): self.config = config self.stored_ids = {name: set() for name in self.config.keys()} self.locked = False self._lock_violation_msg = None def track(self, attr): if not is_tracking_enabled(): return attr for name, (is_attr_type, _) in self.config.items(): if is_attr_type(attr): if id(attr) not in self.stored_ids[name]: self.add_to_store(name, attr) return attr if isinstance(attr, tuple): wrapped_attr = [] for e in attr: wrapped_attr.append(self.track(e)) # This should cover tuples and nametuples return attr.__class__(wrapped_attr) elif isinstance(attr, list): return TrackedList(attr, self) elif isinstance(attr, dict): # TODO: OrderedDict? return TrackedDict(attr, self) elif isinstance(attr, set): return TrackedSet(attr, self) return attr def lock(self, msg): self.locked = True self._lock_violation_msg = msg def add_to_store(self, store_name, value): if self.locked: raise ValueError(self._lock_violation_msg) self.config[store_name][1].append(value) self.stored_ids[store_name].add(id(value)) class TrackedList(list): # TODO: override item removal methods? def __init__(self, values=None, tracker=None): self.tracker = tracker if tracker and values: values = [tracker.track(v) for v in values] super().__init__(values or []) def append(self, value): if self.tracker: self.tracker.track(value) super().append(value) def insert(self, value): if self.tracker: self.tracker.track(value) super().insert(value) def extend(self, values): if self.tracker: values = [self.tracker.track(v) for v in values] super().extend(values) class TrackedDict(dict): # TODO: override item removal methods? def __init__(self, values=None, tracker=None): self.tracker = tracker if tracker and values: values = {k: tracker.track(v) for k, v in values.items()} super().__init__(values or []) def __setitem__(self, key, value): if self.tracker: self.tracker.track(value) super().__setitem__(key, value) def update(self, mapping): if self.tracker: mapping = {k: self.tracker.track(v) for k, v in mapping.items()} super().update(mapping) class TrackedSet(set): # TODO: override item removal methods? 
def __init__(self, values=None, tracker=None): self.tracker = tracker if tracker and values: values = {tracker.track(v) for v in values} super().__init__(values or []) def add(self, value): if self.tracker: self.tracker.track(value) super().add(value) def update(self, values): if self.tracker: values = [self.tracker.track(v) for v in values] super().update(values)
keras-core/keras_core/utils/tracking.py/0
{ "file_path": "keras-core/keras_core/utils/tracking.py", "repo_id": "keras-core", "token_count": 2167 }
61
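The `Tracker` docstring above shows the in-layer wiring; below is a minimal stand-alone illustration of the same mechanism. The `float` test function and the bare list used as a store are assumptions chosen purely for demonstration.

from keras_core.utils.tracking import Tracker

variables = []
tracker = Tracker(
    # Format: `name: (test_fn, store)`, as in the docstring above.
    {"variables": (lambda x: isinstance(x, float), variables)}
)

# Tracking a list returns a TrackedList; matching elements land in the store,
# and items appended later are tracked as well.
tracked = tracker.track([1.0, 2.0, "not a float"])
tracked.append(3.0)
assert variables == [1.0, 2.0, 3.0]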
import time import warnings from unittest.mock import MagicMock import numpy as np import tensorflow as tf from matplotlib import pyplot as plt import keras_cv from keras_cv import bounding_box from keras_cv.layers import RandomShear from keras_cv.layers.preprocessing.base_image_augmentation_layer import ( BaseImageAugmentationLayer, ) from keras_cv.utils import preprocessing # Copied from: # https://github.com/keras-team/keras-cv/blob/cd12204b1f6df37b15359b6adf222b9ef0f67dc8/keras_cv/layers/preprocessing/random_shear.py#L27 class OldRandomShear(BaseImageAugmentationLayer): """A preprocessing layer which randomly shears images during training. This layer will apply random shearings to each image, filling empty space according to `fill_mode`. By default, random shears are only applied during training. At inference time, the layer does nothing. If you need to apply random shear at inference time, set `training` to True when calling the layer. Input pixel values can be of any range and any data type. Input shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format Output shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format Args: x_factor: A tuple of two floats, a single float or a `keras_cv.FactorSampler`. For each augmented image a value is sampled from the provided range. If a float is passed, the range is interpreted as `(0, x_factor)`. Values represent a percentage of the image to shear over. For example, 0.3 shears pixels up to 30% of the way across the image. All provided values should be positive. If `None` is passed, no shear occurs on the X axis. Defaults to `None`. y_factor: A tuple of two floats, a single float or a `keras_cv.FactorSampler`. For each augmented image a value is sampled from the provided range. If a float is passed, the range is interpreted as `(0, y_factor)`. Values represent a percentage of the image to shear over. For example, 0.3 shears pixels up to 30% of the way across the image. All provided values should be positive. If `None` is passed, no shear occurs on the Y axis. Defaults to `None`. interpolation: interpolation method used in the `ImageProjectiveTransformV3` op. Supported values are `"nearest"` and `"bilinear"`. Defaults to `"bilinear"`. fill_mode: fill_mode in the `ImageProjectiveTransformV3` op. Supported values are `"reflect"`, `"wrap"`, `"constant"`, and `"nearest"`. Defaults to `"reflect"`. fill_value: fill_value in the `ImageProjectiveTransformV3` op. A `Tensor` of type `float32`. The value to be filled when fill_mode is constant". Defaults to `0.0`. bounding_box_format: The format of bounding boxes of input dataset. Refer to https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py for more details on supported bounding box formats. seed: Integer. Used to create a random seed. 
""" def __init__( self, x_factor=None, y_factor=None, interpolation="bilinear", fill_mode="reflect", fill_value=0.0, bounding_box_format=None, seed=None, **kwargs, ): super().__init__(seed=seed, **kwargs) if x_factor is not None: self.x_factor = preprocessing.parse_factor( x_factor, max_value=None, param_name="x_factor", seed=seed ) else: self.x_factor = x_factor if y_factor is not None: self.y_factor = preprocessing.parse_factor( y_factor, max_value=None, param_name="y_factor", seed=seed ) else: self.y_factor = y_factor if x_factor is None and y_factor is None: warnings.warn( "RandomShear received both `x_factor=None` and " "`y_factor=None`. As a result, the layer will perform no " "augmentation." ) self.interpolation = interpolation self.fill_mode = fill_mode self.fill_value = fill_value self.seed = seed self.bounding_box_format = bounding_box_format def get_random_transformation(self, **kwargs): x = self._get_shear_amount(self.x_factor) y = self._get_shear_amount(self.y_factor) return (x, y) def _get_shear_amount(self, constraint): if constraint is None: return None invert = preprocessing.random_inversion(self._random_generator) return invert * constraint() def augment_image(self, image, transformation=None, **kwargs): image = tf.expand_dims(image, axis=0) x, y = transformation if x is not None: transform_x = OldRandomShear._format_transform( [1.0, x, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) image = preprocessing.transform( images=image, transforms=transform_x, interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, ) if y is not None: transform_y = OldRandomShear._format_transform( [1.0, 0.0, 0.0, y, 1.0, 0.0, 0.0, 0.0] ) image = preprocessing.transform( images=image, transforms=transform_y, interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, ) return tf.squeeze(image, axis=0) def augment_label(self, label, transformation=None, **kwargs): return label def augment_bounding_boxes( self, bounding_boxes, transformation, image=None, **kwargs ): if self.bounding_box_format is None: raise ValueError( "`RandomShear()` was called with bounding boxes, " "but no `bounding_box_format` was specified in the " "constructor. Please specify a bounding box format in the " "constructor. i.e. 
`RandomShear(bounding_box_format='xyxy')`" ) bounding_boxes = keras_cv.bounding_box.convert_format( bounding_boxes, source=self.bounding_box_format, target="rel_xyxy", images=image, dtype=self.compute_dtype, ) x, y = transformation extended_boxes = self._convert_to_extended_corners_format( bounding_boxes["boxes"] ) if x is not None: extended_boxes = ( self._apply_horizontal_transformation_to_bounding_box( extended_boxes, x ) ) # apply vertical shear if y is not None: extended_boxes = ( self._apply_vertical_transformation_to_bounding_box( extended_boxes, y ) ) boxes = self._convert_to_four_coordinate(extended_boxes, x, y) bounding_boxes = bounding_boxes.copy() bounding_boxes["boxes"] = boxes bounding_boxes = bounding_box.clip_to_image( bounding_boxes, images=image, bounding_box_format="rel_xyxy" ) bounding_boxes = keras_cv.bounding_box.convert_format( bounding_boxes, source="rel_xyxy", target=self.bounding_box_format, images=image, dtype=self.compute_dtype, ) return bounding_boxes def get_config(self): config = super().get_config() config.update( { "x_factor": self.x_factor, "y_factor": self.y_factor, "interpolation": self.interpolation, "fill_mode": self.fill_mode, "fill_value": self.fill_value, "bounding_box_format": self.bounding_box_format, "seed": self.seed, } ) return config @staticmethod def _format_transform(transform): transform = tf.convert_to_tensor(transform, dtype=tf.float32) return transform[tf.newaxis] @staticmethod def _convert_to_four_coordinate(extended_bboxes, x, y): """convert from extended coordinates to 4 coordinates system""" ( top_left_x, top_left_y, bottom_right_x, bottom_right_y, top_right_x, top_right_y, bottom_left_x, bottom_left_y, ) = tf.split(extended_bboxes, 8, axis=1) # choose x1,x2 when x>0 def positive_case_x(): final_x1 = bottom_left_x final_x2 = top_right_x return final_x1, final_x2 # choose x1,x2 when x<0 def negative_case_x(): final_x1 = top_left_x final_x2 = bottom_right_x return final_x1, final_x2 if x is not None: final_x1, final_x2 = tf.cond( tf.less(x, 0), negative_case_x, positive_case_x ) else: final_x1, final_x2 = top_left_x, bottom_right_x # choose y1,y2 when y > 0 def positive_case_y(): final_y1 = top_right_y final_y2 = bottom_left_y return final_y1, final_y2 # choose y1,y2 when y < 0 def negative_case_y(): final_y1 = top_left_y final_y2 = bottom_right_y return final_y1, final_y2 if y is not None: final_y1, final_y2 = tf.cond( tf.less(y, 0), negative_case_y, positive_case_y ) else: final_y1, final_y2 = top_left_y, bottom_right_y return tf.concat( [final_x1, final_y1, final_x2, final_y2], axis=1, ) @staticmethod def _apply_horizontal_transformation_to_bounding_box( extended_bounding_boxes, x ): # create transformation matrix [1,4] matrix = tf.stack([1.0, -x, 0, 1.0], axis=0) # reshape it to [2,2] matrix = tf.reshape(matrix, (2, 2)) # reshape unnormalized bboxes from [N,8] -> [N*4,2] new_bboxes = tf.reshape(extended_bounding_boxes, (-1, 2)) # [[1,x`],[y`,1]]*[x,y]->[new_x,new_y] transformed_bboxes = tf.reshape( tf.einsum("ij,kj->ki", matrix, new_bboxes), (-1, 8) ) return transformed_bboxes @staticmethod def _apply_vertical_transformation_to_bounding_box( extended_bounding_boxes, y ): # create transformation matrix [1,4] matrix = tf.stack([1.0, 0, -y, 1.0], axis=0) # reshape it to [2,2] matrix = tf.reshape(matrix, (2, 2)) # reshape unnormalized bboxes from [N,8] -> [N*4,2] new_bboxes = tf.reshape(extended_bounding_boxes, (-1, 2)) # [[1,x`],[y`,1]]*[x,y]->[new_x,new_y] transformed_bboxes = tf.reshape( tf.einsum("ij,kj->ki", matrix, new_bboxes), 
(-1, 8) ) return transformed_bboxes @staticmethod def _convert_to_extended_corners_format(boxes): """splits corner boxes top left,bottom right to 4 corners top left, bottom right,top right and bottom left""" x1, y1, x2, y2 = tf.split(boxes, [1, 1, 1, 1], axis=-1) new_boxes = tf.concat( [x1, y1, x2, y2, x2, y1, x1, y2], axis=-1, ) return new_boxes # End copy class RandomShearTest(tf.test.TestCase): def test_consistency_with_old_implementation(self): # Prepare inputs batch_size = 2 images = tf.random.uniform(shape=(batch_size, 64, 64, 3)) shear_x = tf.random.uniform(shape=()) shear_y = tf.random.uniform(shape=()) bounding_boxes = { "boxes": tf.constant( [ [[10.0, 20.0, 40.0, 50.0], [12.0, 22.0, 42.0, 54.0]], [[15.0, 16.0, 17, 18], [12.0, 22.0, 42.0, 54.0]], ], dtype=tf.float32, ), "classes": tf.constant([[0, 0], [0, 0]], dtype=tf.float32), } # Build layers old_layer = OldRandomShear( x_factor=(shear_x, shear_x), y_factor=(shear_y, shear_y), seed=1234, bounding_box_format="xyxy", ) new_layer = RandomShear( x_factor=(shear_x, shear_x), y_factor=(shear_y, shear_y), seed=1234, bounding_box_format="xyxy", ) # Disable random negation to get deterministic factor old_layer.get_random_transformation = MagicMock( return_value=( old_layer.x_factor(), old_layer.y_factor(), ) ) new_layer.get_random_transformation_batch = MagicMock( return_value={ "shear_x": new_layer.x_factor((batch_size, 1)), "shear_y": new_layer.y_factor((batch_size, 1)), } ) # Run inference + compare outputs: old_output = old_layer( {"images": images, "bounding_boxes": bounding_boxes} ) output = new_layer({"images": images, "bounding_boxes": bounding_boxes}) self.assertAllClose(output["images"], old_output["images"]) self.assertAllClose( output["bounding_boxes"]["boxes"].to_tensor(), old_output["bounding_boxes"]["boxes"].to_tensor(), ) self.assertAllClose( output["bounding_boxes"]["classes"], old_output["bounding_boxes"]["classes"], ) if __name__ == "__main__": # Run benchmark (x_train, _), _ = tf.keras.datasets.cifar10.load_data() x_train = x_train.astype(np.float32) num_images = [1000, 2000, 5000, 10000] results = {} aug_candidates = [RandomShear, OldRandomShear] aug_args = {"x_factor": (5, 5), "y_factor": (5, 5)} for aug in aug_candidates: # Eager Mode c = aug.__name__ layer = aug(**aug_args) runtimes = [] print(f"Timing {c}") for n_images in num_images: # warmup layer(x_train[:n_images]) t0 = time.time() r1 = layer(x_train[:n_images]) t1 = time.time() runtimes.append(t1 - t0) print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}") results[c] = runtimes # Graph Mode c = aug.__name__ + " Graph Mode" layer = aug(**aug_args) @tf.function() def apply_aug(inputs): return layer(inputs) runtimes = [] print(f"Timing {c}") for n_images in num_images: # warmup apply_aug(x_train[:n_images]) t0 = time.time() r1 = apply_aug(x_train[:n_images]) t1 = time.time() runtimes.append(t1 - t0) print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}") results[c] = runtimes # Not running with XLA as it does not support ImageProjectiveTransformV3 plt.figure() for key in results: plt.plot(num_images, results[key], label=key) plt.xlabel("Number images") plt.ylabel("Runtime (seconds)") plt.legend() plt.savefig("comparison.png") # So we can actually see more relevant margins del results[aug_candidates[1].__name__] plt.figure() for key in results: plt.plot(num_images, results[key], label=key) plt.xlabel("Number images") plt.ylabel("Runtime (seconds)") plt.legend() plt.savefig("comparison_no_old_eager.png") # Run unit tests tf.test.main()
keras-cv/benchmarks/vectorized_random_shear.py/0
{ "file_path": "keras-cv/benchmarks/vectorized_random_shear.py", "repo_id": "keras-cv", "token_count": 7923 }
62
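The benchmark above drives both shear implementations through images plus a bounding-box dictionary. A minimal usage sketch of the vectorized layer, modelled on the inputs built in `test_consistency_with_old_implementation` (the factor values are illustrative):

```python
import tensorflow as tf
import keras_cv

# Illustrative inputs: two 64x64 RGB images with two "xyxy" boxes each.
images = tf.random.uniform(shape=(2, 64, 64, 3))
bounding_boxes = {
    "boxes": tf.constant(
        [
            [[10.0, 20.0, 40.0, 50.0], [12.0, 22.0, 42.0, 54.0]],
            [[15.0, 16.0, 17.0, 18.0], [12.0, 22.0, 42.0, 54.0]],
        ],
        dtype=tf.float32,
    ),
    "classes": tf.constant([[0, 0], [0, 0]], dtype=tf.float32),
}

layer = keras_cv.layers.RandomShear(
    x_factor=0.3, y_factor=0.3, bounding_box_format="xyxy"
)
outputs = layer({"images": images, "bounding_boxes": bounding_boxes})
print(outputs["images"].shape)  # (2, 64, 64, 3)
```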
# ImageNet Classification Training

TODO(ianjjohnson): Write a README describing this directory.
keras-cv/examples/training/classification/imagenet/README.md/0
{ "file_path": "keras-cv/examples/training/classification/imagenet/README.md", "repo_id": "keras-cv", "token_count": 25 }
63
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def detect_if_tensorflow_uses_keras_3(): # We follow the version of keras that tensorflow is configured to use. try: from tensorflow import keras # Note that only recent versions of keras have a `version()` function. if hasattr(keras, "version") and keras.version().startswith("3."): return True except: raise ValueError( "Unable to import `keras` with `tensorflow`. Please check your " "Keras and Tensorflow version are compatible; Keras 3 requires " "TensorFlow 2.15 or later. See keras.io/getting_started for more " "information on installing Keras." ) # No `keras.version()` means we are on an old version of keras. return False _USE_KERAS_3 = detect_if_tensorflow_uses_keras_3() def keras_3(): """Check if Keras 3 is being used.""" return _USE_KERAS_3 def backend(): """Check the backend framework.""" if not keras_3(): return "tensorflow" import keras return keras.config.backend()
keras-cv/keras_cv/backend/config.py/0
{ "file_path": "keras-cv/keras_cv/backend/config.py", "repo_id": "keras-cv", "token_count": 574 }
64
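`keras_3()` and `backend()` above are plain module-level helpers, so downstream code can branch on them at import time. A small sketch of that pattern (the import path mirrors this file's location):

```python
from keras_cv.backend import config

if config.keras_3():
    # Multi-backend Keras 3: the backend may be TensorFlow, JAX, or Torch.
    print("Keras 3, backend:", config.backend())
else:
    # Legacy tf.keras (Keras 2): the backend is always TensorFlow.
    print("tf.keras, backend:", config.backend())
```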
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest import tensorflow as tf from keras_cv import bounding_box from keras_cv.backend import ops from keras_cv.backend import random from keras_cv.tests.test_case import TestCase class MaskInvalidDetectionsTest(TestCase): def test_correctly_masks_based_on_max_dets(self): bounding_boxes = { "boxes": random.uniform((4, 100, 4)), "num_detections": ops.array([2, 3, 4, 2]), "classes": random.uniform((4, 100)), } result = bounding_box.mask_invalid_detections(bounding_boxes) negative_one_boxes = result["boxes"][:, 5:, :] self.assertAllClose( negative_one_boxes, -np.ones_like(ops.convert_to_numpy(negative_one_boxes)), ) preserved_boxes = result["boxes"][:, :2, :] self.assertAllClose(preserved_boxes, bounding_boxes["boxes"][:, :2, :]) boxes_from_image_3 = result["boxes"][2, :4, :] self.assertAllClose( boxes_from_image_3, bounding_boxes["boxes"][2, :4, :] ) @pytest.mark.tf_keras_only def test_ragged_outputs(self): bounding_boxes = { "boxes": np.stack( [ np.random.uniform(size=(10, 4)), np.random.uniform(size=(10, 4)), ] ), "num_detections": np.array([2, 3]), "classes": np.stack( [np.random.uniform(size=(10,)), np.random.uniform(size=(10,))] ), } result = bounding_box.mask_invalid_detections( bounding_boxes, output_ragged=True ) self.assertTrue(isinstance(result["boxes"], tf.RaggedTensor)) self.assertEqual(result["boxes"][0].shape[0], 2) self.assertEqual(result["boxes"][1].shape[0], 3) @pytest.mark.tf_keras_only def test_correctly_masks_confidence(self): bounding_boxes = { "boxes": np.stack( [ np.random.uniform(size=(10, 4)), np.random.uniform(size=(10, 4)), ] ), "confidence": np.random.uniform(size=(2, 10)), "num_detections": np.array([2, 3]), "classes": np.stack( [np.random.uniform(size=(10,)), np.random.uniform(size=(10,))] ), } result = bounding_box.mask_invalid_detections( bounding_boxes, output_ragged=True ) self.assertTrue(isinstance(result["boxes"], tf.RaggedTensor)) self.assertEqual(result["boxes"][0].shape[0], 2) self.assertEqual(result["boxes"][1].shape[0], 3) self.assertEqual(result["confidence"][0].shape[0], 2) self.assertEqual(result["confidence"][1].shape[0], 3)
keras-cv/keras_cv/bounding_box/mask_invalid_detections_test.py/0
{ "file_path": "keras-cv/keras_cv/bounding_box/mask_invalid_detections_test.py", "repo_id": "keras-cv", "token_count": 1562 }
65
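The tests above exercise `keras_cv.bounding_box.mask_invalid_detections`, which fills every box past an image's `num_detections` with -1 (or trims them when `output_ragged=True`). A short sketch of the default dense path with illustrative random inputs:

```python
import numpy as np
from keras_cv import bounding_box

predictions = {
    "boxes": np.random.uniform(size=(2, 10, 4)).astype("float32"),
    "classes": np.random.uniform(size=(2, 10)).astype("float32"),
    "num_detections": np.array([3, 5]),
}

masked = bounding_box.mask_invalid_detections(predictions)
# Boxes past each image's num_detections are now filled with -1.
print(masked["boxes"][0, 3:])
```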
/* Copyright 2022 The KerasCV Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" using namespace tensorflow; REGISTER_OP("KcvPairwiseIou3D") .Input("boxes_a: float") .Input("boxes_b: float") .Output("iou: float") .SetShapeFn([](tensorflow::shape_inference::InferenceContext* c) { c->set_output( 0, c->MakeShape({c->Dim(c->input(0), 0), c->Dim(c->input(1), 0)})); return tensorflow::Status(); }) .Doc(R"doc( Calculate pairwise IoUs between two set of 3D bboxes. Every bbox is represented as [center_x, center_y, center_z, dim_x, dim_y, dim_z, heading]. boxes_a: A tensor of shape [num_boxes_a, 7] boxes_b: A tensor of shape [num_boxes_b, 7] )doc");
keras-cv/keras_cv/custom_ops/ops/pairwise_iou_op.cc/0
{ "file_path": "keras-cv/keras_cv/custom_ops/ops/pairwise_iou_op.cc", "repo_id": "keras-cv", "token_count": 454 }
66
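On the Python side this kernel is exposed through `keras_cv.ops.iou_3d` (used, for example, by the copy-paste augmentation later in this collection). A hedged sketch, assuming KerasCV was built with its custom ops:

```python
import numpy as np
from keras_cv.ops import iou_3d

# Boxes follow [center_x, center_y, center_z, dim_x, dim_y, dim_z, heading].
boxes_a = np.array([[0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 0.0]], dtype="float32")
boxes_b = np.array(
    [
        [0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 0.0],  # identical box -> IoU ~1.0
        [10.0, 10.0, 10.0, 2.0, 2.0, 2.0, 0.0],  # disjoint box -> IoU 0.0
    ],
    dtype="float32",
)

iou = iou_3d(boxes_a, boxes_b)  # pairwise IoUs, shape [1, 2]
print(iou)
```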
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from keras_cv.tests.test_case import TestCase try: from keras_cv.datasets.waymo import load except ImportError: # Waymo Open Dataset dependency may be missing, in which case we expect # these tests will be skipped based on the TEST_WAYMO_DEPS environment var. pass class WaymoOpenDatasetLoadTest(TestCase): def setUp(self): super().setUp() self.test_data_path = os.path.abspath( os.path.join(os.path.abspath(__file__), os.path.pardir, "test_data") ) self.test_data_file = "wod_one_frame.tfrecord" @pytest.mark.skipif( "TEST_WAYMO_DEPS" not in os.environ or os.environ["TEST_WAYMO_DEPS"] != "true", reason="Requires Waymo Open Dataset package", ) def test_load_from_directory(self): dataset = load(self.test_data_path) # Extract records into a list dataset = [record for record in dataset] self.assertEquals(len(dataset), 1) self.assertNotEqual(dataset[0]["timestamp_micros"], 0) @pytest.mark.skipif( "TEST_WAYMO_DEPS" not in os.environ or os.environ["TEST_WAYMO_DEPS"] != "true", reason="Requires Waymo Open Dataset package", ) def test_load_from_files(self): dataset = load([os.path.join(self.test_data_path, self.test_data_file)]) # Extract records into a list dataset = [record for record in dataset] self.assertEquals(len(dataset), 1) self.assertNotEqual(dataset[0]["timestamp_micros"], 0)
keras-cv/keras_cv/datasets/waymo/load_test.py/0
{ "file_path": "keras-cv/keras_cv/datasets/waymo/load_test.py", "repo_id": "keras-cv", "token_count": 818 }
67
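In practice the loader is pointed at a directory of Waymo Open Dataset TFRecords, or at an explicit list of files, and yields one dict of tensors per frame. A sketch under the same assumption as the tests, namely that the `waymo-open-dataset` dependency is installed; the path is hypothetical:

```python
from keras_cv.datasets.waymo import load

# Either a directory containing *.tfrecord files or an explicit list of paths.
dataset = load("/path/to/waymo_tfrecords")  # hypothetical location

for frame in dataset:
    # Each record is a dict of per-frame tensors.
    print(frame["timestamp_micros"])
    break
```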
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras BN_AXIS = 3 CONV_KERNEL_INITIALIZER = { "class_name": "VarianceScaling", "config": { "scale": 2.0, "mode": "fan_out", "distribution": "truncated_normal", }, } @keras_cv_export("keras_cv.layers.FusedMBConvBlock") class FusedMBConvBlock(keras.layers.Layer): """ Implementation of the FusedMBConv block (Fused Mobile Inverted Residual Bottleneck) from: [EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML](https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html) [EfficientNetV2: Smaller Models and Faster Training](https://arxiv.org/abs/2104.00298v3). FusedMBConv blocks are based on MBConv blocks, and replace the depthwise and 1x1 output convolution blocks with a single 3x3 convolution block, fusing them together - hence the name "FusedMBConv". Alongside MBConv blocks, they can be used in mobile-oriented and efficient architectures, and are present in architectures EfficientNet. FusedMBConv blocks follow a narrow-wide-narrow structure - expanding a 1x1 convolution, performing Squeeze-Excitation and then applying a 3x3 convolution, which is a more efficient operation than conventional wide-narrow-wide structures. As they're frequently used for models to be deployed to edge devices, they're implemented as a layer for ease of use and re-use. 
Args: input_filters: int, the number of input filters output_filters: int, the number of output filters expand_ratio: default 1, the ratio by which input_filters are multiplied to expand the structure in the middle expansion phase kernel_size: default 3, the kernel_size to apply to the expansion phase convolutions strides: default 1, the strides to apply to the expansion phase convolutions se_ratio: default 0.0, The filters used in the Squeeze-Excitation phase, and are chosen as the maximum between 1 and input_filters*se_ratio bn_momentum: default 0.9, the BatchNormalization momentum activation: default "swish", the activation function used between convolution operations survival_probability: float, the optional dropout rate to apply before the output convolution, defaults to 0.8 Returns: A `tf.Tensor` representing a feature map, passed through the FusedMBConv block Example usage: ``` inputs = tf.random.normal(shape=(1, 64, 64, 32), dtype=tf.float32) layer = keras_cv.layers.FusedMBConvBlock( input_filters=32, output_filters=32 ) output = layer(inputs) output.shape # TensorShape([1, 224, 224, 48]) ``` """ # noqa: E501 def __init__( self, input_filters: int, output_filters: int, expand_ratio=1, kernel_size=3, strides=1, se_ratio=0.0, bn_momentum=0.9, activation="swish", survival_probability: float = 0.8, **kwargs ): super().__init__(**kwargs) self.input_filters = input_filters self.output_filters = output_filters self.expand_ratio = expand_ratio self.kernel_size = kernel_size self.strides = strides self.se_ratio = se_ratio self.bn_momentum = bn_momentum self.activation = activation self.survival_probability = survival_probability self.filters = self.input_filters * self.expand_ratio self.filters_se = max(1, int(input_filters * se_ratio)) self.conv1 = keras.layers.Conv2D( filters=self.filters, kernel_size=kernel_size, strides=strides, kernel_initializer=CONV_KERNEL_INITIALIZER, padding="same", data_format="channels_last", use_bias=False, name=self.name + "expand_conv", ) self.bn1 = keras.layers.BatchNormalization( axis=BN_AXIS, momentum=self.bn_momentum, name=self.name + "expand_bn", ) self.act = keras.layers.Activation( self.activation, name=self.name + "expand_activation" ) self.bn2 = keras.layers.BatchNormalization( axis=BN_AXIS, momentum=self.bn_momentum, name=self.name + "bn" ) self.se_conv1 = keras.layers.Conv2D( self.filters_se, 1, padding="same", activation=self.activation, kernel_initializer=CONV_KERNEL_INITIALIZER, name=self.name + "se_reduce", ) self.se_conv2 = keras.layers.Conv2D( self.filters, 1, padding="same", activation="sigmoid", kernel_initializer=CONV_KERNEL_INITIALIZER, name=self.name + "se_expand", ) self.output_conv = keras.layers.Conv2D( filters=self.output_filters, kernel_size=1 if expand_ratio != 1 else kernel_size, strides=1, kernel_initializer=CONV_KERNEL_INITIALIZER, padding="same", data_format="channels_last", use_bias=False, name=self.name + "project_conv", ) self.bn3 = keras.layers.BatchNormalization( axis=BN_AXIS, momentum=self.bn_momentum, name=self.name + "project_bn", ) if self.survival_probability: self.dropout = keras.layers.Dropout( self.survival_probability, noise_shape=(None, 1, 1, 1), name=self.name + "drop", ) def build(self, input_shape): if self.name is None: self.name = keras.backend.get_uid("block0") def call(self, inputs): # Expansion phase if self.expand_ratio != 1: x = self.conv1(inputs) x = self.bn1(x) x = self.act(x) else: x = inputs # Squeeze and excite if 0 < self.se_ratio <= 1: se = keras.layers.GlobalAveragePooling2D( name=self.name + 
"se_squeeze" )(x) if BN_AXIS == 1: se_shape = (self.filters, 1, 1) else: se_shape = (1, 1, self.filters) se = keras.layers.Reshape(se_shape, name=self.name + "se_reshape")( se ) se = self.se_conv1(se) se = self.se_conv2(se) x = keras.layers.multiply([x, se], name=self.name + "se_excite") # Output phase: x = self.output_conv(x) x = self.bn3(x) if self.expand_ratio == 1: x = self.act(x) # Residual: if self.strides == 1 and self.input_filters == self.output_filters: if self.survival_probability: x = self.dropout(x) x = keras.layers.Add(name=self.name + "add")([x, inputs]) return x def get_config(self): config = { "input_filters": self.input_filters, "output_filters": self.output_filters, "expand_ratio": self.expand_ratio, "kernel_size": self.kernel_size, "strides": self.strides, "se_ratio": self.se_ratio, "bn_momentum": self.bn_momentum, "activation": self.activation, "survival_probability": self.survival_probability, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/layers/fusedmbconv.py/0
{ "file_path": "keras-cv/keras_cv/layers/fusedmbconv.py", "repo_id": "keras-cv", "token_count": 3862 }
68
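Note that the docstring example above reports `TensorShape([1, 224, 224, 48])` for a `(1, 64, 64, 32)` input with `input_filters=32, output_filters=32`, which does not follow from those arguments: with `strides=1` and matching filter counts the block is shape-preserving and adds a residual connection. A minimal sketch verifying the actual output shape:

```python
import tensorflow as tf
from keras_cv.layers import FusedMBConvBlock

inputs = tf.random.normal(shape=(1, 64, 64, 32))

# strides=1 with input_filters == output_filters -> residual add, shape kept.
block = FusedMBConvBlock(input_filters=32, output_filters=32, expand_ratio=4)
outputs = block(inputs)
print(outputs.shape)  # (1, 64, 64, 32)
```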
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import tensorflow as tf from keras_cv.layers.object_detection.roi_generator import ROIGenerator from keras_cv.tests.test_case import TestCase @pytest.mark.tf_keras_only class ROIGeneratorTest(TestCase): def test_single_tensor(self): roi_generator = ROIGenerator("xyxy", nms_iou_threshold_train=0.96) rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], ] ) expected_rois = tf.gather(rpn_boxes, [[1, 3, 2]], batch_dims=1) expected_rois = tf.concat([expected_rois, tf.zeros([1, 1, 4])], axis=1) rpn_scores = tf.constant( [ [0.6, 0.9, 0.2, 0.3], ] ) # selecting the 1st, then 3rd, then 2nd as they don't overlap # 0th box overlaps with 1st box expected_roi_scores = tf.gather(rpn_scores, [[1, 3, 2]], batch_dims=1) expected_roi_scores = tf.concat( [expected_roi_scores, tf.zeros([1, 1])], axis=1 ) rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_single_level_single_batch_roi_ignore_box(self): roi_generator = ROIGenerator("xyxy", nms_iou_threshold_train=0.96) rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], ] ) expected_rois = tf.gather(rpn_boxes, [[1, 3, 2]], batch_dims=1) expected_rois = tf.concat([expected_rois, tf.zeros([1, 1, 4])], axis=1) rpn_boxes = {2: rpn_boxes} rpn_scores = tf.constant( [ [0.6, 0.9, 0.2, 0.3], ] ) # selecting the 1st, then 3rd, then 2nd as they don't overlap # 0th box overlaps with 1st box expected_roi_scores = tf.gather(rpn_scores, [[1, 3, 2]], batch_dims=1) expected_roi_scores = tf.concat( [expected_roi_scores, tf.zeros([1, 1])], axis=1 ) rpn_scores = {2: rpn_scores} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_single_level_single_batch_roi_all_box(self): # for iou between 1st and 2nd box is 0.9604, so setting to 0.97 to # such that NMS would treat them as different ROIs roi_generator = ROIGenerator("xyxy", nms_iou_threshold_train=0.97) rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], ] ) expected_rois = tf.gather(rpn_boxes, [[1, 0, 3, 2]], batch_dims=1) rpn_boxes = {2: rpn_boxes} rpn_scores = tf.constant( [ [0.6, 0.9, 0.2, 0.3], ] ) # selecting the 1st, then 0th, then 3rd, then 2nd as they don't overlap expected_roi_scores = tf.gather( rpn_scores, [[1, 0, 3, 2]], batch_dims=1 ) rpn_scores = {2: rpn_scores} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_single_level_propose_rois(self): roi_generator = ROIGenerator("xyxy") rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], [ [2, 2, 4, 4], [3, 3, 6, 6], [3.1, 3.1, 6.1, 6.1], 
[1, 1, 8, 8], ], ] ) expected_rois = tf.gather( rpn_boxes, [[1, 3, 2], [1, 3, 0]], batch_dims=1 ) expected_rois = tf.concat([expected_rois, tf.zeros([2, 1, 4])], axis=1) rpn_boxes = {2: rpn_boxes} rpn_scores = tf.constant([[0.6, 0.9, 0.2, 0.3], [0.1, 0.8, 0.3, 0.5]]) # 1st batch -- selecting the 1st, then 3rd, then 2nd as they don't # overlap # 2nd batch -- selecting the 1st, then 3rd, then 0th as they don't # overlap expected_roi_scores = tf.gather( rpn_scores, [[1, 3, 2], [1, 3, 0]], batch_dims=1 ) expected_roi_scores = tf.concat( [expected_roi_scores, tf.zeros([2, 1])], axis=1 ) rpn_scores = {2: rpn_scores} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_two_level_single_batch_propose_rois_ignore_box(self): roi_generator = ROIGenerator("xyxy") rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], [ [2, 2, 4, 4], [3, 3, 6, 6], [3.1, 3.1, 6.1, 6.1], [1, 1, 8, 8], ], ] ) expected_rois = tf.constant( [ [ [0.1, 0.1, 9.9, 9.9], [3, 3, 6, 6], [1, 1, 8, 8], [2, 2, 8, 8], [5, 5, 10, 10], [2, 2, 4, 4], [0, 0, 0, 0], [0, 0, 0, 0], ] ] ) rpn_boxes = {2: rpn_boxes[0:1], 3: rpn_boxes[1:2]} rpn_scores = tf.constant([[0.6, 0.9, 0.2, 0.3], [0.1, 0.8, 0.3, 0.5]]) # 1st batch -- selecting the 1st, then 3rd, then 2nd as they don't # overlap # 2nd batch -- selecting the 1st, then 3rd, then 0th as they don't # overlap expected_roi_scores = [ [ 0.9, 0.8, 0.5, 0.3, 0.2, 0.1, 0.0, 0.0, ] ] rpn_scores = {2: rpn_scores[0:1], 3: rpn_scores[1:2]} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores) def test_two_level_single_batch_propose_rois_all_box(self): roi_generator = ROIGenerator("xyxy", nms_iou_threshold_train=0.99) rpn_boxes = tf.constant( [ [ [0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8], ], [ [2, 2, 4, 4], [3, 3, 6, 6], [3.1, 3.1, 6.1, 6.1], [1, 1, 8, 8], ], ] ) expected_rois = tf.constant( [ [ [0.1, 0.1, 9.9, 9.9], [3, 3, 6, 6], [0, 0, 10, 10], [1, 1, 8, 8], [2, 2, 8, 8], [3.1, 3.1, 6.1, 6.1], [5, 5, 10, 10], [2, 2, 4, 4], ] ] ) rpn_boxes = {2: rpn_boxes[0:1], 3: rpn_boxes[1:2]} rpn_scores = tf.constant([[0.6, 0.9, 0.2, 0.3], [0.1, 0.8, 0.3, 0.5]]) # 1st batch -- selecting the 1st, then 0th, then 3rd, then 2nd as they # don't overlap # 2nd batch -- selecting the 1st, then 3rd, then 2nd, then 0th as they # don't overlap expected_roi_scores = [ [ 0.9, 0.8, 0.6, 0.5, 0.3, 0.3, 0.2, 0.1, ] ] rpn_scores = {2: rpn_scores[0:1], 3: rpn_scores[1:2]} rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True) self.assertAllClose(expected_rois, rois) self.assertAllClose(expected_roi_scores, roi_scores)
keras-cv/keras_cv/layers/object_detection/roi_generator_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/object_detection/roi_generator_test.py", "repo_id": "keras-cv", "token_count": 5746 }
69
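A minimal usage sketch of the layer under test, reusing the shapes from the first test case; boxes and scores may also be passed as dicts keyed by feature-pyramid level, as the multi-level tests show:

```python
import tensorflow as tf
from keras_cv.layers.object_detection.roi_generator import ROIGenerator

roi_generator = ROIGenerator("xyxy", nms_iou_threshold_train=0.96)

rpn_boxes = tf.constant(
    [[[0, 0, 10, 10], [0.1, 0.1, 9.9, 9.9], [5, 5, 10, 10], [2, 2, 8, 8]]]
)
rpn_scores = tf.constant([[0.6, 0.9, 0.2, 0.3]])

rois, roi_scores = roi_generator(rpn_boxes, rpn_scores, training=True)
print(rois.shape, roi_scores.shape)
```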
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.backend import keras from keras_cv.layers.object_detection_3d.voxelization import DynamicVoxelization from keras_cv.tests.test_case import TestCase class VoxelizationTest(TestCase): def test_voxelization_output_shape_no_z(self): layer = DynamicVoxelization( voxel_size=[0.1, 0.1, 1000], spatial_size=[-20, 20, -20, 20, -20, 20], ) point_xyz = tf.random.uniform( shape=[1, 1000, 3], minval=-5, maxval=5, dtype=tf.float32 ) point_feature = tf.random.uniform( shape=[1, 1000, 4], minval=-10, maxval=10, dtype=tf.float32 ) point_mask = tf.cast( tf.random.uniform( shape=[1, 1000], minval=0, maxval=2, dtype=tf.int32 ), tf.bool, ) output = layer(point_xyz, point_feature, point_mask) # (20 - (-20)) / 0.1 = 400, (20 - (-20) ) / 1000 = 0.4 # the last dimension is replaced with MLP dimension, z dimension is # skipped self.assertEqual(output.shape, (1, 400, 400, 128)) def test_voxelization_output_shape_with_z(self): layer = DynamicVoxelization( voxel_size=[0.1, 0.1, 1], spatial_size=[-20, 20, -20, 20, -15, 15], ) point_xyz = tf.random.uniform( shape=[1, 1000, 3], minval=-5, maxval=5, dtype=tf.float32 ) point_feature = tf.random.uniform( shape=[1, 1000, 4], minval=-10, maxval=10, dtype=tf.float32 ) point_mask = tf.cast( tf.random.uniform( shape=[1, 1000], minval=0, maxval=2, dtype=tf.int32 ), tf.bool, ) output = layer(point_xyz, point_feature, point_mask) # (20 - (-20)) / 0.1 = 400, (20 - (-20) ) / 1000 = 0.4 # (15 - (-15)) / 1 = 30 # the last dimension is replaced with MLP dimension, z dimension is # skipped self.assertEqual(output.shape, (1, 400, 400, 30, 128)) def test_voxelization_numerical(self): layer = DynamicVoxelization( voxel_size=[1.0, 1.0, 10.0], spatial_size=[-5, 5, -5, 5, -2, 2], ) # Make the point net a no-op to allow us to verify the voxelization. layer.point_net_dense = keras.layers.Identity() # TODO(ianstenbit): use Identity here once it supports masking layer.point_net_norm = keras.layers.Lambda(lambda x: x) layer.point_net_activation = keras.layers.Identity() point_xyz = tf.constant( [ [ [-4.9, -4.9, 0.0], [4.4, 4.4, 0.0], ] ] ) point_feature = tf.constant( [ [ [1.0], [2.0], ] ] ) point_mask = tf.constant([True], shape=[1, 2]) output = layer(point_xyz, point_feature, point_mask) # [-4.9, -4.9, 0] will the mapped to the upper leftmost voxel, # the first element is point feature, # the second / third element is -4.9 - (-5) = 0.1 self.assertAllClose(output[0][0][0], [1.0, 0.1, 0.1, 0]) # [4.4, 4.4, 0] will the mapped to the lower rightmost voxel, # the first element is point feature # the second / third element is 4.4 - 4 = 0.4, because the # voxel range is [-5, 4] for 10 voxels. self.assertAllClose(output[0][-1][-1], [2.0, 0.4, 0.4, 0])
keras-cv/keras_cv/layers/object_detection_3d/voxelization_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/object_detection_3d/voxelization_test.py", "repo_id": "keras-cv", "token_count": 2012 }
70
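A usage sketch of the layer under test, mirroring the first test case (the import path is taken from the test, since the layer may not be re-exported under `keras_cv.layers`):

```python
import tensorflow as tf
from keras_cv.layers.object_detection_3d.voxelization import DynamicVoxelization

layer = DynamicVoxelization(
    voxel_size=[0.1, 0.1, 1000],  # z collapsed into a single bin
    spatial_size=[-20, 20, -20, 20, -20, 20],
)

point_xyz = tf.random.uniform([1, 1000, 3], minval=-5, maxval=5)
point_feature = tf.random.uniform([1, 1000, 4], minval=-10, maxval=10)
point_mask = tf.cast(
    tf.random.uniform([1, 1000], minval=0, maxval=2, dtype=tf.int32), tf.bool
)

voxels = layer(point_xyz, point_feature, point_mask)
print(voxels.shape)  # (1, 400, 400, 128): 40m range / 0.1m voxels, 128-dim MLP
```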
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.api_export import keras_cv_export from keras_cv.layers.preprocessing.base_image_augmentation_layer import ( BaseImageAugmentationLayer, ) @keras_cv_export("keras_cv.layers.FourierMix") class FourierMix(BaseImageAugmentationLayer): """FourierMix implements the FMix data augmentation technique. Args: alpha: Float value for beta distribution. Inverse scale parameter for the gamma distribution. This controls the shape of the distribution from which the smoothing values are sampled. Defaults to 0.5, which is a recommended value in the paper. decay_power: A float value representing the decay power, defaults to 3, as recommended in the paper. seed: Integer. Used to create a random seed. References: - [FMix paper](https://arxiv.org/abs/2002.12047). Sample usage: ```python (images, labels), _ = keras.datasets.cifar10.load_data() fourier_mix = keras_cv.layers.preprocessing.FourierMix(0.5) augmented_images, updated_labels = fourier_mix( {'images': images, 'labels': labels} ) # output == {'images': updated_images, 'labels': updated_labels} ``` """ def __init__(self, alpha=0.5, decay_power=3, seed=None, **kwargs): super().__init__(seed=seed, **kwargs) self.alpha = alpha self.decay_power = decay_power self.seed = seed def _sample_from_beta(self, alpha, beta, shape): sample_alpha = tf.random.gamma( shape, alpha=alpha, ) sample_beta = tf.random.gamma( shape, alpha=beta, ) return sample_alpha / (sample_alpha + sample_beta) @staticmethod def _fftfreq(signal_size, sample_spacing=1): """This function returns the sample frequencies of a discrete fourier transform. The result array contains the frequency bin centers starting at 0 using the sample spacing. """ results = tf.concat( [ tf.range((signal_size - 1) / 2 + 1, dtype=tf.int32), tf.range(-(signal_size // 2), 0, dtype=tf.int32), ], 0, ) return results / (signal_size * sample_spacing) def _apply_fftfreq(self, h, w): # Applying the fourier transform across 2 dimensions (height and width). fx = FourierMix._fftfreq(w)[: w // 2 + 1 + w % 2] fy = FourierMix._fftfreq(h) fy = tf.expand_dims(fy, -1) return tf.math.sqrt(fx * fx + fy * fy) def _get_spectrum(self, freqs, decay_power, channel, h, w): # Function to apply a low pass filter by decaying its high frequency # components. scale = tf.ones(1) / tf.cast( tf.math.maximum( freqs, tf.convert_to_tensor([1 / tf.reduce_max([w, h])]) ) ** decay_power, tf.float32, ) param_size = tf.concat( [tf.constant([channel]), tf.shape(freqs), tf.constant([2])], 0 ) param = self._random_generator.normal(param_size) scale = tf.expand_dims(scale, -1)[None, :] return scale * param def _sample_mask_from_transform(self, decay, shape, ch=1): # Sampling low frequency map from fourier transform. 
freqs = self._apply_fftfreq(shape[0], shape[1]) spectrum = self._get_spectrum(freqs, decay, ch, shape[0], shape[1]) spectrum = tf.complex(spectrum[:, 0], spectrum[:, 1]) mask = tf.math.real(tf.signal.irfft2d(spectrum, shape)) mask = mask[:1, : shape[0], : shape[1]] mask = mask - tf.reduce_min(mask) mask = mask / tf.reduce_max(mask) return mask def _binarise_mask(self, mask, lam, in_shape): # Create the final mask from the sampled values. idx = tf.argsort(tf.reshape(mask, [-1]), direction="DESCENDING") mask = tf.reshape(mask, [-1]) num = tf.cast( tf.math.round(lam * tf.cast(tf.size(mask), tf.float32)), tf.int32 ) updates = tf.concat( [ tf.ones((num,), tf.float32), tf.zeros((tf.size(mask) - num,), tf.float32), ], 0, ) mask = tf.scatter_nd( tf.expand_dims(idx, -1), updates, tf.expand_dims(tf.size(mask), -1) ) mask = tf.reshape(mask, in_shape) return mask def _batch_augment(self, inputs): images = inputs.get("images", None) labels = inputs.get("labels", None) segmentation_masks = inputs.get("segmentation_masks", None) if images is None or (labels is None and segmentation_masks is None): raise ValueError( "FourierMix expects inputs in a dictionary with format " '{"images": images, "labels": labels}.' '{"images": images, "segmentation_masks": segmentation_masks}.' f"Got: inputs = {inputs}" ) images, masks, lambda_sample, permutation_order = self._fourier_mix( images ) if labels is not None: labels = self._update_labels( labels, lambda_sample, permutation_order ) inputs["labels"] = labels if segmentation_masks is not None: segmentation_masks = self._update_segmentation_masks( segmentation_masks, masks, permutation_order ) inputs["segmentation_masks"] = segmentation_masks inputs["images"] = images return inputs def _augment(self, inputs): raise ValueError( "FourierMix received a single image to `call`. The layer relies on " "combining multiple examples, and as such will not behave as " "expected. Please call the layer with 2 or more samples." 
) def _fourier_mix(self, images): shape = tf.shape(images) permutation_order = tf.random.shuffle( tf.range(0, shape[0]), seed=self.seed ) lambda_sample = self._sample_from_beta( self.alpha, self.alpha, (shape[0],) ) # generate masks utilizing mapped calls masks = tf.map_fn( lambda x: self._sample_mask_from_transform( self.decay_power, shape[1:-1] ), tf.range(shape[0], dtype=tf.float32), ) # binarise masks utilizing mapped calls masks = tf.map_fn( lambda i: self._binarise_mask( masks[i], lambda_sample[i], shape[1:-1] ), tf.range(shape[0], dtype=tf.int32), fn_output_signature=tf.float32, ) masks = tf.expand_dims(masks, -1) fmix_images = tf.gather(images, permutation_order) images = masks * images + (1.0 - masks) * fmix_images return images, masks, lambda_sample, permutation_order def _update_labels(self, labels, lambda_sample, permutation_order): labels_for_fmix = tf.gather(labels, permutation_order) # for broadcasting batch_size = tf.expand_dims(tf.shape(labels)[0], -1) labels_rank = tf.rank(labels) broadcast_shape = tf.concat( [batch_size, tf.ones(labels_rank - 1, tf.int32)], 0 ) lambda_sample = tf.reshape(lambda_sample, broadcast_shape) labels = ( lambda_sample * labels + (1.0 - lambda_sample) * labels_for_fmix ) return labels def _update_segmentation_masks( self, segmentation_masks, masks, permutation_order ): fmix_segmentation_masks = tf.gather( segmentation_masks, permutation_order ) segmentation_masks = ( masks * segmentation_masks + (1.0 - masks) * fmix_segmentation_masks ) return segmentation_masks def get_config(self): config = { "alpha": self.alpha, "decay_power": self.decay_power, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/layers/preprocessing/fourier_mix.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/fourier_mix.py", "repo_id": "keras-cv", "token_count": 3974 }
71
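Besides the image/label mixing shown in the layer's docstring, `_batch_augment` also accepts segmentation masks and blends them with the same low-frequency binary mask as the images. A hedged sketch of that path with illustrative random data:

```python
import tensorflow as tf
import keras_cv

images = tf.random.uniform((4, 64, 64, 3))
# One-hot segmentation masks with 5 classes; values are illustrative only.
segmentation_masks = tf.one_hot(
    tf.random.uniform((4, 64, 64), maxval=5, dtype=tf.int32), depth=5
)

fourier_mix = keras_cv.layers.FourierMix(alpha=0.5, decay_power=3)
outputs = fourier_mix(
    {"images": images, "segmentation_masks": segmentation_masks}
)
print(outputs["images"].shape, outputs["segmentation_masks"].shape)
```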
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from absl.testing import parameterized from keras_cv import layers from keras_cv.backend import ops from keras_cv.tests.test_case import TestCase class RandAugmentTest(TestCase): def test_zero_rate_pass_through(self): rand_augment = layers.RandAugment( value_range=(0, 255), rate=0.0, ) xs = np.ones((2, 512, 512, 3)) ys = rand_augment(xs) self.assertAllClose(ys, xs) @parameterized.named_parameters( ("0", 0), ("20", 0.2), ("55", 0.55), ("10", 1.0), ) def test_runs_with_magnitude(self, magnitude): rand_augment = layers.RandAugment( value_range=(0, 255), rate=0.5, magnitude=magnitude ) xs = np.ones((2, 512, 512, 3)) ys = rand_augment(xs) self.assertEqual(ys.shape, (2, 512, 512, 3)) @parameterized.named_parameters( ("0_255", 0, 255), ("neg_1_1", -1, 1), ("0_1", 0, 1), ) def test_runs_with_value_range(self, low, high): rand_augment = layers.RandAugment( augmentations_per_image=3, magnitude=0.5, rate=1.0, value_range=(low, high), ) xs = tf.random.uniform((2, 512, 512, 3), low, high, dtype=tf.float32) ys = ops.convert_to_numpy(rand_augment(xs)) self.assertTrue(np.all(np.logical_and(ys >= low, ys <= high))) @parameterized.named_parameters( ("float32", "float32"), ("int32", "int32"), ("uint8", "uint8"), ) def test_runs_with_dtype_input(self, dtype): rand_augment = layers.RandAugment(value_range=(0, 255)) xs = np.ones((2, 512, 512, 3), dtype=dtype) ys = rand_augment(xs) self.assertEqual(ys.shape, (2, 512, 512, 3)) @parameterized.named_parameters( ("0_255", 0, 255), ("neg1_1", -1, 1), ("0_1", 0, 1), ) def test_standard_policy_respects_value_range(self, lower, upper): my_layers = layers.RandAugment.get_standard_policy( value_range=(lower, upper), magnitude=1.0, magnitude_stddev=0.2 ) rand_augment = layers.RandomAugmentationPipeline( layers=my_layers, augmentations_per_image=3 ) xs = tf.random.uniform((2, 512, 512, 3), lower, upper, dtype=tf.float32) ys = ops.convert_to_numpy(rand_augment(xs)) self.assertLessEqual(np.max(ys), upper) self.assertGreaterEqual(np.min(ys), lower) def test_runs_unbatched(self): rand_augment = layers.RandAugment( augmentations_per_image=3, magnitude=0.5, rate=1.0, value_range=(0, 255), ) xs = tf.random.uniform((512, 512, 3), 0, 255, dtype=tf.float32) ys = rand_augment(xs) self.assertEqual(xs.shape, ys.shape) def test_runs_no_geo(self): rand_augment = layers.RandAugment( augmentations_per_image=2, magnitude=0.5, rate=1.0, geometric=False, value_range=(0, 255), ) self.assertFalse( any( [ isinstance(x, layers.RandomTranslation) for x in rand_augment.layers ] ) ) self.assertFalse( any( [isinstance(x, layers.RandomShear) for x in rand_augment.layers] ) )
keras-cv/keras_cv/layers/preprocessing/rand_augment_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/rand_augment_test.py", "repo_id": "keras-cv", "token_count": 1973 }
72
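A typical pipeline-style use of the layer exercised above; the hyperparameters are illustrative:

```python
import tensorflow as tf
import keras_cv

rand_augment = keras_cv.layers.RandAugment(
    value_range=(0, 255),
    augmentations_per_image=3,
    magnitude=0.5,
    rate=0.9,
)

images = tf.random.uniform((8, 224, 224, 3), minval=0, maxval=255)
augmented = rand_augment(images)
print(augmented.shape)  # (8, 224, 224, 3)
```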
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest import tensorflow as tf from keras_cv.layers import preprocessing from keras_cv.tests.test_case import TestCase class RandomColorJitterTest(TestCase): # Test 1: Check input and output shape. It should match. def test_return_shapes(self): batch_input = np.ones((2, 512, 512, 3)) non_square_batch_input = np.ones((2, 1024, 512, 3)) unbatch_input = np.ones((512, 512, 3)) layer = preprocessing.RandomColorJitter( value_range=(0, 255), brightness_factor=0.5, contrast_factor=(0.5, 0.9), saturation_factor=(0.5, 0.9), hue_factor=0.5, ) batch_output = layer(batch_input, training=True) non_square_batch_output = layer(non_square_batch_input, training=True) unbatch_output = layer(unbatch_input, training=True) self.assertEqual(batch_output.shape, (2, 512, 512, 3)) self.assertEqual(non_square_batch_output.shape, (2, 1024, 512, 3)) self.assertEqual(unbatch_output.shape, (512, 512, 3)) # Test 2: Check if the factor ranges are set properly. def test_factor_range(self): layer = preprocessing.RandomColorJitter( value_range=(0, 255), brightness_factor=(-0.2, 0.5), contrast_factor=(0.5, 0.9), saturation_factor=(0.5, 0.9), hue_factor=(0.5, 0.9), ) self.assertEqual(layer.brightness_factor, (-0.2, 0.5)) self.assertEqual(layer.contrast_factor, (0.5, 0.9)) self.assertEqual(layer.saturation_factor, (0.5, 0.9)) self.assertEqual(layer.hue_factor, (0.5, 0.9)) # Test 3: Test if it is OK to run on graph mode. @pytest.mark.tf_only def test_in_tf_function(self): inputs = np.ones((2, 512, 512, 3)) layer = preprocessing.RandomColorJitter( value_range=(0, 255), brightness_factor=0.5, contrast_factor=(0.5, 0.9), saturation_factor=(0.5, 0.9), hue_factor=0.5, ) @tf.function def augment(x): return layer(x, training=True) outputs = augment(inputs) self.assertNotAllClose(inputs, outputs) # Test 4: Check if get_config and from_config work as expected. def test_config(self): layer = preprocessing.RandomColorJitter( value_range=(0, 255), brightness_factor=0.5, contrast_factor=(0.5, 0.9), saturation_factor=(0.5, 0.9), hue_factor=0.5, ) config = layer.get_config() self.assertEqual(config["brightness_factor"], 0.5) self.assertEqual(config["contrast_factor"], (0.5, 0.9)) self.assertEqual(config["saturation_factor"], (0.5, 0.9)) self.assertEqual(config["hue_factor"], 0.5) reconstructed_layer = preprocessing.RandomColorJitter.from_config( config ) self.assertEqual( reconstructed_layer.brightness_factor, layer.brightness_factor ) self.assertEqual( reconstructed_layer.contrast_factor, layer.contrast_factor ) self.assertEqual( reconstructed_layer.saturation_factor, layer.saturation_factor ) self.assertEqual(reconstructed_layer.hue_factor, layer.hue_factor)
keras-cv/keras_cv/layers/preprocessing/random_color_jitter_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_color_jitter_test.py", "repo_id": "keras-cv", "token_count": 1717 }
73
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from keras_cv.layers import preprocessing from keras_cv.tests.test_case import TestCase class RandomJpegQualityTest(TestCase): def test_return_shapes(self): layer = preprocessing.RandomJpegQuality(factor=[0, 100]) # RGB xs = np.ones((2, 512, 512, 3)) xs = layer(xs) self.assertEqual(xs.shape, (2, 512, 512, 3)) # greyscale xs = np.ones((2, 512, 512, 1)) xs = layer(xs) self.assertEqual(xs.shape, (2, 512, 512, 1)) def test_in_single_image(self): layer = preprocessing.RandomJpegQuality(factor=[0, 100]) # RGB xs = tf.cast( np.ones((512, 512, 3)), dtype="float32", ) xs = layer(xs) self.assertEqual(xs.shape, (512, 512, 3)) # greyscale xs = tf.cast( np.ones((512, 512, 1)), dtype="float32", ) xs = layer(xs) self.assertEqual(xs.shape, (512, 512, 1)) def test_non_square_images(self): layer = preprocessing.RandomJpegQuality(factor=[0, 100]) # RGB xs = np.ones((2, 256, 512, 3)) xs = layer(xs) self.assertEqual(xs.shape, (2, 256, 512, 3)) # greyscale xs = np.ones((2, 256, 512, 1)) xs = layer(xs) self.assertEqual(xs.shape, (2, 256, 512, 1))
keras-cv/keras_cv/layers/preprocessing/random_jpeg_quality_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_jpeg_quality_test.py", "repo_id": "keras-cv", "token_count": 855 }
74
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from keras_cv.backend import ops from keras_cv.layers.preprocessing.rescaling import Rescaling from keras_cv.tests.test_case import TestCase class RescalingTest(TestCase): def test_rescaling_correctness_float(self): layer = Rescaling(scale=1.0 / 127.5, offset=-1.0) inputs = tf.random.uniform((2, 4, 5, 3)) outputs = layer(inputs) self.assertAllClose(outputs, inputs * (1.0 / 127.5) - 1) def test_rescaling_correctness_int(self): layer = Rescaling(scale=1.0 / 127.5, offset=-1) inputs = tf.random.uniform((2, 4, 5, 3), 0, 100, dtype="int32") outputs = layer(inputs) outputs = ops.convert_to_numpy(outputs) self.assertEqual(outputs.dtype.name, "float32") self.assertAllClose( outputs, ops.convert_to_numpy(inputs) * (1.0 / 127.5) - 1 ) def test_config_with_custom_name(self): layer = Rescaling(0.5, name="rescaling") config = layer.get_config() layer_1 = Rescaling.from_config(config) self.assertEqual(layer_1.name, layer.name) def test_unbatched_image(self): layer = Rescaling(scale=1.0 / 127.5, offset=-1) inputs = tf.random.uniform((4, 5, 3)) outputs = layer(inputs) self.assertAllClose(outputs, inputs * (1.0 / 127.5) - 1) def test_output_dtypes(self): inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64") layer = Rescaling(0.5) self.assertAllEqual( ops.convert_to_numpy(layer(inputs)).dtype.name, "float32" ) layer = Rescaling(0.5, dtype="uint8") self.assertAllEqual( ops.convert_to_numpy(layer(inputs)).dtype.name, "uint8" )
keras-cv/keras_cv/layers/preprocessing/rescaling_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/rescaling_test.py", "repo_id": "keras-cv", "token_count": 973 }
75
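The scale/offset pair used throughout these tests is the standard mapping from `[0, 255]` pixel values to `[-1, 1]`, i.e. `x * (1 / 127.5) - 1`. A short sketch (import path taken from the test):

```python
import numpy as np
from keras_cv.layers.preprocessing.rescaling import Rescaling

layer = Rescaling(scale=1.0 / 127.5, offset=-1.0)
image = np.array([0.0, 127.5, 255.0], dtype="float32").reshape((1, 1, 3))
print(layer(image))  # pixel values map to approximately [-1., 0., 1.]
```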
# Copyright 2022 Waymo LLC. # # Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501 import tensorflow as tf from keras_cv import point_cloud from keras_cv.api_export import keras_cv_export from keras_cv.backend import random from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES POINTCLOUD_LABEL_INDEX = base_augmentation_layer_3d.POINTCLOUD_LABEL_INDEX @keras_cv_export("keras_cv.layers.FrustumRandomDroppingPoints") class FrustumRandomDroppingPoints( base_augmentation_layer_3d.BaseAugmentationLayer3D ): """A preprocessing layer which randomly drops point within a randomly generated frustum during training. This layer will randomly select a point from the point cloud as the center of a frustum then generate a frustum based on r_distance, theta_width, and phi_width. Points inside the selected frustum are randomly dropped (setting all features to zero) based on drop_rate. The point_clouds tensor shape must be specific and cannot be dynamic. Input shape: point_clouds: 3D (multi frames) float32 Tensor with shape [num of frames, num of points, num of point features]. The first 5 features are [x, y, z, class, range]. bounding_boxes: 3D (multi frames) float32 Tensor with shape [num of frames, num of boxes, num of box features]. The first 7 features are [x, y, z, dx, dy, dz, phi]. Output shape: A dictionary of Tensors with the same shape as input Tensors. Arguments: r_distance: A float scalar sets the starting distance of a frustum. theta_width: A float scalar sets the theta width of a frustum. phi_width: A float scalar sets the phi width of a frustum. drop_rate: A float scalar sets the probability threshold for dropping the points. exclude_classes: An optional int scalar or a list of ints. Points with the specified class(es) will not be dropped. """ def __init__( self, r_distance, theta_width, phi_width, drop_rate=None, exclude_classes=None, **kwargs, ): super().__init__(**kwargs) if not isinstance(exclude_classes, (tuple, list)): exclude_classes = [exclude_classes] if r_distance < 0: raise ValueError( f"r_distance must be >=0, but got r_distance={r_distance}" ) if theta_width < 0: raise ValueError( f"theta_width must be >=0, but got theta_width={theta_width}" ) if phi_width < 0: raise ValueError( f"phi_width must be >=0, but got phi_width={phi_width}" ) drop_rate = drop_rate if drop_rate else 0.0 if drop_rate > 1: raise ValueError( f"drop_rate must be <=1, but got drop_rate={drop_rate}" ) self._r_distance = r_distance self._theta_width = theta_width self._phi_width = phi_width keep_probability = 1 - drop_rate self._keep_probability = keep_probability self._exclude_classes = exclude_classes def get_config(self): return { "r_distance": self._r_distance, "theta_width": self._theta_width, "phi_width": self._phi_width, "drop_rate": 1 - self._keep_probability, "exclude_classes": self._exclude_classes, } def get_random_transformation(self, point_clouds, **kwargs): # Randomly select a point from the first frame as the center of the # frustum. 
valid_points = point_clouds[0, :, POINTCLOUD_LABEL_INDEX] > 0 num_valid_points = tf.math.reduce_sum(tf.cast(valid_points, tf.int32)) randomly_select_point_index = tf.random.uniform( (), minval=0, maxval=num_valid_points, dtype=tf.int32 ) randomly_select_frustum_center = tf.boolean_mask( point_clouds[0], valid_points, axis=0 )[randomly_select_point_index, :POINTCLOUD_LABEL_INDEX] num_frames, num_points, _ = point_clouds.get_shape().as_list() frustum_mask = [] for f in range(num_frames): frustum_mask.append( point_cloud.within_a_frustum( point_clouds[f], randomly_select_frustum_center, self._r_distance, self._theta_width, self._phi_width, )[tf.newaxis, :, tf.newaxis] ) frustum_mask = tf.concat(frustum_mask, axis=0) # Generate mask along point dimension. random_point_mask = ( random.uniform( [1, num_points, 1], minval=0.0, maxval=1, seed=self._random_generator, ) < self._keep_probability ) # Do not drop points outside the frustum mask. random_point_mask = tf.where(~frustum_mask, True, random_point_mask) return {"point_mask": random_point_mask} def augment_point_clouds_bounding_boxes( self, point_clouds, bounding_boxes, transformation, **kwargs ): point_mask = transformation["point_mask"] # Do not drop points that are protected by setting the corresponding # point_mask = 1.0. protected_points = tf.zeros_like(point_clouds[0, :, -1], dtype=tf.bool) for excluded_class in self._exclude_classes: protected_points |= point_clouds[0, :, -1] == excluded_class point_mask = tf.where( protected_points[tf.newaxis, :, tf.newaxis], True, point_mask ) point_clouds = tf.where(point_mask, point_clouds, 0.0) return (point_clouds, bounding_boxes)
keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_dropping_points.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_dropping_points.py", "repo_id": "keras-cv", "token_count": 2680 }
76
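Like the other 3D augmentations, the layer is called with a dictionary of multi-frame point clouds and boxes. A hedged sketch with random tensors in the documented shapes; the dictionary keys are taken from `base_augmentation_layer_3d` rather than hard-coded:

```python
import tensorflow as tf
import keras_cv
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d as base

layer = keras_cv.layers.FrustumRandomDroppingPoints(
    r_distance=0.0, theta_width=1.0, phi_width=1.0, drop_rate=0.5
)

# [num frames, num points, point features]; feature 3 is the class label.
point_clouds = tf.random.uniform([2, 50, 10], minval=1.0, maxval=2.0)
# [num frames, num boxes, box features]; first 7 are [x, y, z, dx, dy, dz, phi].
bounding_boxes = tf.random.uniform([2, 4, 9])

outputs = layer(
    {base.POINT_CLOUDS: point_clouds, base.BOUNDING_BOXES: bounding_boxes}
)
print(outputs[base.POINT_CLOUDS].shape)  # shape unchanged; some points zeroed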
# Copyright 2022 Waymo LLC. # # Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501 import tensorflow as tf from keras_cv.api_export import keras_cv_export from keras_cv.backend import random from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d from keras_cv.ops import iou_3d from keras_cv.point_cloud import is_within_any_box3d POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES OBJECT_POINT_CLOUDS = base_augmentation_layer_3d.OBJECT_POINT_CLOUDS OBJECT_BOUNDING_BOXES = base_augmentation_layer_3d.OBJECT_BOUNDING_BOXES @keras_cv_export("keras_cv.layers.RandomCopyPaste") class RandomCopyPaste(base_augmentation_layer_3d.BaseAugmentationLayer3D): """A preprocessing layer which randomly pastes object point clouds and bounding boxes during training. This layer will randomly paste object point clouds and bounding boxes. OBJECT_POINT_CLOUDS and OBJECT_BOUNDING_BOXES are generated by running group_points_by_bounding_boxes function on additional input frames. We use the first frame to check overlap between existing bounding boxes and pasted bounding boxes. If a to-be-pasted bounding box overlaps with an existing bounding box and object point clouds, we do not paste the additional bounding box. We load 5 times max_paste_bounding_boxes to check overlap. If a to-be-pasted bounding box overlaps with existing background point clouds, we paste the additional bounding box and replace the background point clouds with object point clouds. Input shape: point_clouds: 3D (multi frames) float32 Tensor with shape [num of frames, num of points, num of point features]. The first 5 features are [x, y, z, class, range]. bounding_boxes: 3D (multi frames) float32 Tensor with shape [num of frames, num of boxes, num of box features]. Boxes are expected to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py Output shape: A tuple of two Tensors (point_clouds, bounding_boxes) with the same shape as input Tensors. Arguments: label_index: An optional int scalar sets the target object index. Bounding boxes and corresponding point clouds with box class == label_index will be saved as OBJECT_BOUNDING_BOXES and OBJECT_POINT_CLOUDS. If label index is None, all valid bounding boxes (box class !=0) are used. min_paste_bounding_boxes: A int scalar sets the min number of pasted bounding boxes. max_paste_bounding_boxes: A int scalar sets the max number of pasted bounding boxes. """ def __init__( self, label_index=None, min_paste_bounding_boxes=0, max_paste_bounding_boxes=10, **kwargs ): super().__init__(**kwargs) if label_index and label_index < 0: raise ValueError("label_index must be >=0.") if min_paste_bounding_boxes < 0: raise ValueError("min_paste_bounding_boxes must be >=0.") if max_paste_bounding_boxes < 0: raise ValueError("max_paste_bounding_boxes must be >=0.") if max_paste_bounding_boxes < min_paste_bounding_boxes: raise ValueError( "max_paste_bounding_boxes must be >= min_paste_bounding_boxes." 
) self._label_index = label_index self._min_paste_bounding_boxes = min_paste_bounding_boxes self._max_paste_bounding_boxes = max_paste_bounding_boxes def get_config(self): return { "label_index": self._label_index, "min_paste_bounding_boxes": self._min_paste_bounding_boxes, "max_paste_bounding_boxes": self._max_paste_bounding_boxes, } def get_random_transformation( self, point_clouds, bounding_boxes, object_point_clouds, object_bounding_boxes, **kwargs ): del point_clouds num_paste_bounding_boxes = random.uniform( (), minval=self._min_paste_bounding_boxes, maxval=self._max_paste_bounding_boxes, seed=self._random_generator, ) num_paste_bounding_boxes = tf.cast( num_paste_bounding_boxes, dtype=tf.int32 ) num_existing_bounding_boxes = tf.shape(bounding_boxes)[1] if self._label_index: object_mask = ( object_bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] == self._label_index ) object_point_clouds = tf.boolean_mask( object_point_clouds, object_mask, axis=1 ) object_bounding_boxes = tf.boolean_mask( object_bounding_boxes, object_mask, axis=1 ) shuffle_index = tf.range(tf.shape(object_point_clouds)[1]) shuffle_index = tf.random.shuffle(shuffle_index) object_point_clouds = tf.gather( object_point_clouds, shuffle_index, axis=1 ) object_bounding_boxes = tf.gather( object_bounding_boxes, shuffle_index, axis=1 ) # Load at most 5 times num_paste_bounding_boxes to check overlaps. num_compare_bounding_boxes = tf.math.minimum( num_paste_bounding_boxes * 5, tf.shape(object_point_clouds)[1], ) object_point_clouds = object_point_clouds[ :, :num_compare_bounding_boxes, : ] object_bounding_boxes = object_bounding_boxes[ :, :num_compare_bounding_boxes, : ] # Use the current frame to check overlap between existing bounding boxes # and pasted bounding boxes all_bounding_boxes = tf.concat( [bounding_boxes, object_bounding_boxes], axis=1 )[0, :, :7] iou = iou_3d(all_bounding_boxes, all_bounding_boxes) iou = tf.linalg.band_part(iou, -1, 0) iou_sum = tf.reduce_sum(iou[num_existing_bounding_boxes:], axis=1) # A non overlapping bounding box has a 1.0 IoU with itself. non_overlapping_mask = tf.reshape(iou_sum <= 1, [-1]) object_point_clouds = tf.boolean_mask( object_point_clouds, non_overlapping_mask, axis=1 ) object_bounding_boxes = tf.boolean_mask( object_bounding_boxes, non_overlapping_mask, axis=1 ) object_point_clouds = object_point_clouds[ :, :num_paste_bounding_boxes, : ] object_bounding_boxes = object_bounding_boxes[ :, :num_paste_bounding_boxes, : ] return { OBJECT_POINT_CLOUDS: object_point_clouds, OBJECT_BOUNDING_BOXES: object_bounding_boxes, } def augment_point_clouds_bounding_boxes( self, point_clouds, bounding_boxes, transformation, **kwargs ): additional_object_point_clouds = transformation[OBJECT_POINT_CLOUDS] additional_object_bounding_boxes = transformation[OBJECT_BOUNDING_BOXES] original_point_clouds_shape = point_clouds.get_shape().as_list() original_object_bounding_boxes = bounding_boxes.get_shape().as_list() points_in_paste_bounding_boxes = is_within_any_box3d( point_clouds[..., :3], additional_object_bounding_boxes[..., :7] ) num_frames = point_clouds.get_shape().as_list()[0] point_clouds_list = [] bounding_boxes_list = [] for frame_index in range(num_frames): # Remove background point clouds that are in object_bounding_boxes. 
existing_point_clouds_mask = ~points_in_paste_bounding_boxes[ frame_index, : ] & tf.math.greater(point_clouds[frame_index, :, 3], 0.0) existing_point_clouds = tf.boolean_mask( point_clouds[frame_index], existing_point_clouds_mask, axis=0 ) paste_point_clouds = tf.boolean_mask( additional_object_point_clouds[frame_index], tf.math.greater( additional_object_point_clouds[frame_index, :, :, 3], 0.0 ), axis=0, ) point_clouds_list += [ tf.concat([paste_point_clouds, existing_point_clouds], axis=0) ] existing_bounding_boxes = tf.boolean_mask( bounding_boxes[frame_index], tf.math.greater( bounding_boxes[frame_index, :, CENTER_XYZ_DXDYDZ_PHI.CLASS], 0.0, ), ) paste_bounding_boxes = tf.boolean_mask( additional_object_bounding_boxes[frame_index], tf.math.greater( additional_object_bounding_boxes[ frame_index, :, CENTER_XYZ_DXDYDZ_PHI.CLASS ], 0.0, ), axis=0, ) bounding_boxes_list += [ tf.concat( [paste_bounding_boxes, existing_bounding_boxes], axis=0 ) ] point_clouds = tf.ragged.stack(point_clouds_list) bounding_boxes = tf.ragged.stack(bounding_boxes_list) return ( point_clouds.to_tensor(shape=original_point_clouds_shape), bounding_boxes.to_tensor(shape=original_object_bounding_boxes), ) def _augment(self, inputs): result = inputs point_clouds = inputs.get(POINT_CLOUDS, None) bounding_boxes = inputs.get(BOUNDING_BOXES, None) object_point_clouds = inputs.get(OBJECT_POINT_CLOUDS, None) object_bounding_boxes = inputs.get(OBJECT_BOUNDING_BOXES, None) transformation = self.get_random_transformation( point_clouds=point_clouds, bounding_boxes=bounding_boxes, object_point_clouds=object_point_clouds, object_bounding_boxes=object_bounding_boxes, ) point_clouds, bounding_boxes = self.augment_point_clouds_bounding_boxes( point_clouds, bounding_boxes=bounding_boxes, transformation=transformation, ) result.update( {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes} ) return result def call(self, inputs): # TODO(ianstenbit): Support the model input format. point_clouds = inputs[POINT_CLOUDS] bounding_boxes = inputs[BOUNDING_BOXES] if point_clouds.shape.rank == 3 and bounding_boxes.shape.rank == 3: return self._augment(inputs) elif point_clouds.shape.rank == 4 and bounding_boxes.shape.rank == 4: batch = point_clouds.get_shape().as_list()[0] point_clouds_list = [] bounding_boxes_list = [] for i in range(batch): no_batch_inputs = { POINT_CLOUDS: inputs[POINT_CLOUDS][i], BOUNDING_BOXES: inputs[BOUNDING_BOXES][i], OBJECT_POINT_CLOUDS: inputs[OBJECT_POINT_CLOUDS][i], OBJECT_BOUNDING_BOXES: inputs[OBJECT_BOUNDING_BOXES][i], } no_batch_result = self._augment(no_batch_inputs) point_clouds_list += [ no_batch_result[POINT_CLOUDS][tf.newaxis, ...] ] bounding_boxes_list += [ no_batch_result[BOUNDING_BOXES][tf.newaxis, ...] ] inputs[POINT_CLOUDS] = tf.concat(point_clouds_list, axis=0) inputs[BOUNDING_BOXES] = tf.concat(bounding_boxes_list, axis=0) return inputs else: raise ValueError( "Point clouds augmentation layers are expecting inputs " "point clouds and bounding boxes to be rank 3D (Frame, " "Point, Feature) or 4D (Batch, Frame, Point, Feature) " "tensors. Got shape: {} and {}".format( point_clouds.shape, bounding_boxes.shape ) )
keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_copy_paste.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_copy_paste.py", "repo_id": "keras-cv", "token_count": 5867 }
77
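A minimal usage sketch for the layer above, not taken from the source: the shapes, class ids, and box geometry are made up, and in practice the object crops would come from `keras_cv.layers.GroupPointsByBoundingBoxes` run on additional frames. It assumes the TensorFlow backend (Keras 2) and that the compiled KerasCV custom point-cloud ops are available.

```python
import tensorflow as tf

import keras_cv
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d as base_3d

# Two frames, 1024 points with [x, y, z, class, range] features (all class 1).
xyz = tf.random.uniform([2, 1024, 3], minval=-20.0, maxval=20.0)
point_clouds = tf.concat([xyz, tf.ones([2, 1024, 2])], axis=-1)


def random_boxes(num_boxes):
    # CENTER_XYZ_DXDYDZ_PHI boxes plus a trailing class column (class 1).
    centers = tf.random.uniform([2, num_boxes, 3], minval=-20.0, maxval=20.0)
    dims = tf.ones([2, num_boxes, 3]) * 2.0
    phi = tf.zeros([2, num_boxes, 1])
    classes = tf.ones([2, num_boxes, 1])
    return tf.concat([centers, dims, phi, classes], axis=-1)


bounding_boxes = random_boxes(8)
object_bounding_boxes = random_boxes(4)
# Pre-cropped object points: [frames, num_object_boxes, points_per_box, features].
object_point_clouds = tf.random.uniform([2, 4, 64, 5])

layer = keras_cv.layers.RandomCopyPaste(
    label_index=1, min_paste_bounding_boxes=1, max_paste_bounding_boxes=4
)
outputs = layer(
    {
        base_3d.POINT_CLOUDS: point_clouds,
        base_3d.BOUNDING_BOXES: bounding_boxes,
        base_3d.OBJECT_POINT_CLOUDS: object_point_clouds,
        base_3d.OBJECT_BOUNDING_BOXES: object_bounding_boxes,
    }
)
```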
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from absl.testing import parameterized from tensorflow import keras from keras_cv import layers as cv_layers from keras_cv.backend.config import keras_3 from keras_cv.layers.vit_layers import PatchingAndEmbedding from keras_cv.tests.test_case import TestCase from keras_cv.utils import test_utils class SerializationTest(TestCase): @parameterized.named_parameters( ("AutoContrast", cv_layers.AutoContrast, {"value_range": (0, 255)}), ("ChannelShuffle", cv_layers.ChannelShuffle, {"seed": 1}), ("CutMix", cv_layers.CutMix, {"seed": 1}), ("Equalization", cv_layers.Equalization, {"value_range": (0, 255)}), ("Grayscale", cv_layers.Grayscale, {}), ("GridMask", cv_layers.GridMask, {"seed": 1}), ("MixUp", cv_layers.MixUp, {"seed": 1}), ("Mosaic", cv_layers.Mosaic, {"seed": 1}), ( "RepeatedAugmentation", cv_layers.RepeatedAugmentation, { "augmenters": [ cv_layers.RandAugment(value_range=(0, 1)), cv_layers.RandomFlip(), ] }, ), ( "RandomChannelShift", cv_layers.RandomChannelShift, {"value_range": (0, 255), "factor": 0.5}, ), ( "RandomTranslation", cv_layers.RandomTranslation, {"width_factor": (0, 0.5), "height_factor": 0.5}, ), ( "Posterization", cv_layers.Posterization, {"bits": 3, "value_range": (0, 255)}, ), ( "RandomColorDegeneration", cv_layers.RandomColorDegeneration, {"factor": 0.5, "seed": 1}, ), ( "RandomCutout", cv_layers.RandomCutout, {"height_factor": 0.2, "width_factor": 0.2, "seed": 1}, ), ( "RandomHue", cv_layers.RandomHue, {"factor": 0.5, "value_range": (0, 255), "seed": 1}, ), ( "RandomSaturation", cv_layers.RandomSaturation, {"factor": 0.5, "seed": 1}, ), ( "RandomSharpness", cv_layers.RandomSharpness, {"factor": 0.5, "value_range": (0, 255), "seed": 1}, ), ( "RandomShear", cv_layers.RandomShear, {"x_factor": 0.3, "x_factor": 0.3, "seed": 1}, ), ( "JitteredResize", cv_layers.JitteredResize, { "target_size": (640, 640), "scale_factor": (0.8, 1.25), "bounding_box_format": "xywh", }, ), ("Solarization", cv_layers.Solarization, {"value_range": (0, 255)}), ( "RandAugment", cv_layers.RandAugment, { "value_range": (0, 255), "magnitude": 0.5, "augmentations_per_image": 3, "rate": 0.3, "magnitude_stddev": 0.1, }, ), ( "RandomAugmentationPipeline", cv_layers.RandomAugmentationPipeline, { "layers": [ cv_layers.RandomSaturation(factor=0.5), cv_layers.RandomColorDegeneration(factor=0.5), ], "augmentations_per_image": 1, "rate": 1.0, }, ), ("RandomBrightness", cv_layers.RandomBrightness, {"factor": 0.5}), ( "RandomChoice", cv_layers.RandomChoice, {"layers": [], "seed": 3, "auto_vectorize": False}, ), ( "RandomColorJitter", cv_layers.RandomColorJitter, { "value_range": (0, 255), "brightness_factor": (-0.2, 0.5), "contrast_factor": (0.5, 0.9), "saturation_factor": (0.5, 0.9), "hue_factor": (0.5, 0.9), "seed": 1, }, ), ( "RandomContrast", cv_layers.RandomContrast, {"value_range": (0, 255), "factor": 0.5}, ), ( "RandomCropAndResize", cv_layers.RandomCropAndResize, { "target_size": (224, 224), "crop_area_factor": (0.8, 
1.0), "aspect_ratio_factor": (3 / 4, 4 / 3), }, ), ( "DropBlock2D", cv_layers.DropBlock2D, {"rate": 0.1, "block_size": (7, 7), "seed": 1234}, ), ( "StochasticDepth", cv_layers.StochasticDepth, {"rate": 0.1}, ), ( "SqueezeAndExcite2D", cv_layers.SqueezeAndExcite2D, { "filters": 16, "bottleneck_filters": 4, "squeeze_activation": keras.layers.ReLU(), "excite_activation": keras.activations.relu, }, ), ( "DropPath", cv_layers.DropPath, { "rate": 0.2, }, ), ( "RandomApply", cv_layers.RandomApply, { "rate": 0.5, "layer": None, "seed": 1234, }, ), ( "RandomJpegQuality", cv_layers.RandomJpegQuality, {"factor": (75, 100)}, ), ( "AugMix", cv_layers.AugMix, { "value_range": (0, 255), "severity": 0.3, "num_chains": 3, "chain_depth": -1, "alpha": 1.0, "seed": 1, }, ), ( "RandomRotation", cv_layers.RandomRotation, { "factor": 0.5, }, ), ( "RandomAspectRatio", cv_layers.RandomAspectRatio, { "factor": (0.9, 1.1), "seed": 1233, }, ), ( "SpatialPyramidPooling", cv_layers.SpatialPyramidPooling, { "dilation_rates": [6, 12, 18], "num_channels": 256, "activation": "relu", "dropout": 0.1, }, ), ( "PatchingAndEmbedding", PatchingAndEmbedding, {"project_dim": 128, "patch_size": 16}, ), ( "TransformerEncoder", cv_layers.TransformerEncoder, { "project_dim": 128, "num_heads": 2, "mlp_dim": 128, "mlp_dropout": 0.1, "attention_dropout": 0.1, "activation": "gelu", "layer_norm_epsilon": 1e-06, }, ), ( "FrustumRandomDroppingPoints", cv_layers.FrustumRandomDroppingPoints, { "r_distance": 10.0, "theta_width": 1.0, "phi_width": 2.0, "drop_rate": 0.1, }, ), ( "FrustumRandomPointFeatureNoise", cv_layers.FrustumRandomPointFeatureNoise, { "r_distance": 10.0, "theta_width": 1.0, "phi_width": 2.0, "max_noise_level": 0.1, }, ), ( "GlobalRandomDroppingPoints", cv_layers.GlobalRandomDroppingPoints, {"drop_rate": 0.1}, ), ( "GlobalRandomFlip", cv_layers.GlobalRandomFlip, {}, ), ( "GlobalRandomRotation", cv_layers.GlobalRandomRotation, { "max_rotation_angle_x": 0.5, "max_rotation_angle_y": 0.6, "max_rotation_angle_z": 0.7, }, ), ( "GlobalRandomScaling", cv_layers.GlobalRandomScaling, { "x_factor": (0.2, 1.0), "y_factor": (0.3, 1.1), "z_factor": (0.4, 1.3), "preserve_aspect_ratio": False, }, ), ( "GlobalRandomTranslation", cv_layers.GlobalRandomTranslation, {"x_stddev": 0.2, "y_stddev": 1.0, "z_stddev": 0.0}, ), ( "GroupPointsByBoundingBoxes", cv_layers.GroupPointsByBoundingBoxes, { "label_index": 1, "min_points_per_bounding_boxes": 1, "max_points_per_bounding_boxes": 4, }, ), ( "RandomCopyPaste", cv_layers.RandomCopyPaste, { "label_index": 1, "min_paste_bounding_boxes": 1, "max_paste_bounding_boxes": 10, }, ), ( "RandomDropBox", cv_layers.RandomDropBox, {"label_index": 1, "max_drop_bounding_boxes": 3}, ), ( "SwapBackground", cv_layers.SwapBackground, {}, ), ( "RandomZoom", cv_layers.RandomZoom, {"height_factor": 0.2, "width_factor": 0.5}, ), ( "RandomCrop", cv_layers.RandomCrop, { "height": 100, "width": 200, }, ), ( "MBConvBlock", cv_layers.MBConvBlock, { "input_filters": 16, "output_filters": 16, }, ), ( "FusedMBConvBlock", cv_layers.FusedMBConvBlock, { "input_filters": 16, "output_filters": 16, }, ), ( "Rescaling", cv_layers.Rescaling, { "scale": 1, "offset": 0.5, }, ), ( "MultiClassNonMaxSuppression", cv_layers.MultiClassNonMaxSuppression, { "bounding_box_format": "yxyx", "from_logits": True, }, ), ( "NonMaxSuppression", cv_layers.NonMaxSuppression, { "bounding_box_format": "yxyx", "from_logits": True, }, ), ) def test_layer_serialization(self, layer_cls, init_args): # TODO: Some layers are not yet compatible with Keras 3. 
if keras_3: skip_layers = [ cv_layers.DropBlock2D, cv_layers.FrustumRandomDroppingPoints, cv_layers.FrustumRandomPointFeatureNoise, cv_layers.GlobalRandomDroppingPoints, cv_layers.GlobalRandomFlip, cv_layers.GlobalRandomRotation, cv_layers.GlobalRandomScaling, cv_layers.GlobalRandomTranslation, cv_layers.GroupPointsByBoundingBoxes, cv_layers.RandomCopyPaste, cv_layers.RandomDropBox, cv_layers.SwapBackground, cv_layers.SqueezeAndExcite2D, # TODO: Fails in Keras 3 ] if layer_cls in skip_layers: self.skipTest("Not supported on Keras 3") layer = layer_cls(**init_args) config = layer.get_config() self.assertAllInitParametersAreInConfig(layer_cls, config) model = keras.models.Sequential([layer]) model_config = model.get_config() reconstructed_model = keras.Sequential().from_config(model_config) reconstructed_layer = reconstructed_model.layers[0] self.assertTrue( test_utils.config_equals( layer.get_config(), reconstructed_layer.get_config() ) ) def assertAllInitParametersAreInConfig(self, layer_cls, config): excluded_name = ["args", "kwargs", "*"] parameter_names = { v for v in inspect.signature(layer_cls).parameters.keys() if v not in excluded_name } intersection_with_config = { v for v in config.keys() if v in parameter_names } self.assertSetEqual(parameter_names, intersection_with_config)
keras-cv/keras_cv/layers/serialization_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/serialization_test.py", "repo_id": "keras-cv", "token_count": 7877 }
78
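A stand-alone sketch of the round trip the test above exercises, using one of the listed layers as an example; any other `(layer_cls, init_args)` pair from the parameterization could be substituted.

```python
from tensorflow import keras

import keras_cv

layer = keras_cv.layers.RandomSaturation(factor=0.5, seed=1)

# Layer-level round trip through get_config() / from_config().
config = layer.get_config()
restored_layer = keras_cv.layers.RandomSaturation.from_config(config)

# Model-level round trip, mirroring the test body above.
model = keras.models.Sequential([layer])
restored_model = keras.Sequential.from_config(model.get_config())
assert isinstance(restored_model.layers[0], keras_cv.layers.RandomSaturation)
```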
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops @keras_cv_export("keras_cv.losses.GIoULoss") class GIoULoss(keras.losses.Loss): """Implements the Generalized IoU Loss GIoU loss is a modified IoU loss commonly used for object detection. This loss aims to directly optimize the IoU score between true boxes and predicted boxes. GIoU loss adds a penalty term to the IoU loss that takes in account the area of the smallest box enclosing both the boxes being considered for the iou. The length of the last dimension should be 4 to represent the bounding boxes. Args: bounding_box_format: a case-insensitive string (for example, "xyxy"). Each bounding box is defined by these 4 values.For detailed information on the supported formats, see the [KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/). axis: the axis along which to mean the ious, defaults to -1. References: - [GIoU paper](https://arxiv.org/pdf/1902.09630) - [TFAddons Implementation](https://www.tensorflow.org/addons/api_docs/python/tfa/losses/GIoULoss) Sample Usage: ```python y_true = np.random.uniform(size=(5, 10, 5), low=0, high=10) y_pred = np.random.uniform(size=(5, 10, 4), low=0, high=10) loss = GIoULoss(bounding_box_format = "xyWH") loss(y_true, y_pred).numpy() ``` Usage with the `compile()` API: ```python model.compile(optimizer='adam', loss=keras_cv.losses.GIoULoss()) ``` """ # noqa: E501 def __init__(self, bounding_box_format, axis=-1, **kwargs): super().__init__(**kwargs) self.bounding_box_format = bounding_box_format self.axis = axis def _compute_enclosure(self, boxes1, boxes2): y_min1, x_min1, y_max1, x_max1 = ops.split(boxes1[..., :4], 4, axis=-1) y_min2, x_min2, y_max2, x_max2 = ops.split(boxes2[..., :4], 4, axis=-1) boxes2_rank = len(boxes2.shape) perm = [1, 0] if boxes2_rank == 2 else [0, 2, 1] # [N, M] or [batch_size, N, M] zeros_t = ops.cast(0, boxes1.dtype) enclose_ymin = ops.minimum(y_min1, ops.transpose(y_min2, perm)) enclose_xmin = ops.minimum(x_min1, ops.transpose(x_min2, perm)) enclose_ymax = ops.maximum(y_max1, ops.transpose(y_max2, perm)) enclose_xmax = ops.maximum(x_max1, ops.transpose(x_max2, perm)) enclose_width = ops.maximum(zeros_t, enclose_xmax - enclose_xmin) enclose_height = ops.maximum(zeros_t, enclose_ymax - enclose_ymin) enclose_area = enclose_width * enclose_height return enclose_area def _compute_giou(self, boxes1, boxes2): boxes1_rank = len(boxes1.shape) boxes2_rank = len(boxes2.shape) if boxes1_rank not in [2, 3]: raise ValueError( "compute_iou() expects boxes1 to be batched, or to be " f"unbatched. Received len(boxes1.shape)={boxes1_rank}, " f"len(boxes2.shape)={boxes2_rank}. Expected either " "len(boxes1.shape)=2 AND or len(boxes1.shape)=3." ) if boxes2_rank not in [2, 3]: raise ValueError( "compute_iou() expects boxes2 to be batched, or to be " f"unbatched. 
Received len(boxes1.shape)={boxes1_rank}, " f"len(boxes2.shape)={boxes2_rank}. Expected either " "len(boxes2.shape)=2 AND or len(boxes2.shape)=3." ) target_format = "yxyx" if bounding_box.is_relative(self.bounding_box_format): target_format = bounding_box.as_relative(target_format) boxes1 = bounding_box.convert_format( boxes1, source=self.bounding_box_format, target=target_format ) boxes2 = bounding_box.convert_format( boxes2, source=self.bounding_box_format, target=target_format ) intersect_area = bounding_box.iou._compute_intersection(boxes1, boxes2) boxes1_area = bounding_box.iou._compute_area(boxes1) boxes2_area = bounding_box.iou._compute_area(boxes2) boxes2_area_rank = len(boxes2_area.shape) boxes2_axis = 1 if (boxes2_area_rank == 2) else 0 boxes1_area = ops.expand_dims(boxes1_area, axis=-1) boxes2_area = ops.expand_dims(boxes2_area, axis=boxes2_axis) union_area = boxes1_area + boxes2_area - intersect_area iou = ops.divide(intersect_area, union_area + keras.backend.epsilon()) # giou calculation enclose_area = self._compute_enclosure(boxes1, boxes2) return iou - ops.divide( (enclose_area - union_area), enclose_area + keras.backend.epsilon() ) def call(self, y_true, y_pred, sample_weight=None): if sample_weight is not None: raise ValueError( "GIoULoss does not support sample_weight. Please ensure " f"sample_weight=None. Got sample_weight={sample_weight}" ) y_pred = ops.convert_to_tensor(y_pred) y_true = ops.cast(y_true, y_pred.dtype) if y_pred.shape[-1] != 4: raise ValueError( "GIoULoss expects y_pred.shape[-1] to be 4 to represent the " f"bounding boxes. Received y_pred.shape[-1]={y_pred.shape[-1]}." ) if y_true.shape[-1] != 4: raise ValueError( "GIoULoss expects y_true.shape[-1] to be 4 to represent the " f"bounding boxes. Received y_true.shape[-1]={y_true.shape[-1]}." ) if y_true.shape[-2] != y_pred.shape[-2]: raise ValueError( "GIoULoss expects number of boxes in y_pred to be equal to the " "number of boxes in y_true. Received number of boxes in " f"y_true={y_true.shape[-2]} and number of boxes in " f"y_pred={y_pred.shape[-2]}." ) giou = self._compute_giou(y_true, y_pred) giou = ops.diagonal( giou, ) if self.axis == "no_reduction": warnings.warn( "`axis='no_reduction'` is a temporary API, and the API " "contract will be replaced in the future with a more generic " "solution covering all losses." ) else: giou = ops.mean(giou, axis=self.axis) return 1 - giou def get_config(self): config = super().get_config() config.update( { "bounding_box_format": self.bounding_box_format, "axis": self.axis, } ) return config
keras-cv/keras_cv/losses/giou_loss.py/0
{ "file_path": "keras-cv/keras_cv/losses/giou_loss.py", "repo_id": "keras-cv", "token_count": 3367 }
79
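A hand-worked check of the penalty term above, with made-up boxes in `xyxy` format: `[0, 0, 2, 2]` and `[1, 1, 3, 3]` intersect in a 1x1 square, so IoU = 1/7; the smallest enclosing box `[0, 0, 3, 3]` has area 9, so GIoU = 1/7 - (9 - 7)/9 ≈ -0.079 and the loss is 1 - GIoU ≈ 1.079.

```python
import numpy as np

import keras_cv

y_true = np.array([[[0.0, 0.0, 2.0, 2.0]]], dtype="float32")  # [batch, boxes, 4]
y_pred = np.array([[[1.0, 1.0, 3.0, 3.0]]], dtype="float32")

giou_loss = keras_cv.losses.GIoULoss(bounding_box_format="xyxy")
print(float(giou_loss(y_true, y_pred)))  # ~1.079
```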
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import types import numpy as np import tensorflow as tf import tensorflow.keras as keras from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.backend import ops from keras_cv.metrics import coco class HidePrints: """A basic internal only context manager to hide print statements.""" def __enter__(self): self._original_stdout = sys.stdout sys.stdout = open(os.devnull, "w") def __exit__(self, exc_type, exc_val, exc_tb): sys.stdout.close() sys.stdout = self._original_stdout def _box_concat(boxes): """Concatenates two bounding box batches together.""" result = {} for key in ["boxes", "classes"]: result[key] = tf.concat([b[key] for b in boxes], axis=0) if len(boxes) != 0 and "confidence" in boxes[0]: result["confidence"] = tf.concat( [b["confidence"] for b in boxes], axis=0 ) return result METRIC_NAMES = [ "AP", "AP50", "AP75", "APs", "APm", "APl", "ARmax1", "ARmax10", "ARmax100", "ARs", "ARm", "ARl", ] METRIC_MAPPING = { "AP": "MaP", "AP50": "MaP@[IoU=50]", "AP75": "MaP@[IoU=75]", "APs": "MaP@[area=small]", "APm": "MaP@[area=medium]", "APl": "MaP@[area=large]", "ARmax1": "Recall@[max_detections=1]", "ARmax10": "Recall@[max_detections=10]", "ARmax100": "Recall@[max_detections=100]", "ARs": "Recall@[area=small]", "ARm": "Recall@[area=medium]", "ARl": "Recall@[area=large]", } @keras_cv_export("keras_cv.metrics.BoxCOCOMetrics") class BoxCOCOMetrics(keras.metrics.Metric): """BoxCOCOMetrics computes standard object detection metrics. Args: bounding_box_format: the bounding box format for inputs. evaluate_freq: the number of steps to run before each evaluation. Due to the high computational cost of metric evaluation the final results are only updated once every `evaluate_freq` steps. Higher values will allow for faster training times, while lower numbers allow for higher numerical precision in metric reporting. Usage: `BoxCOCOMetrics()` can be used like any standard metric with any KerasCV object detection model. Inputs to `y_true` must be KerasCV bounding box dictionaries, `{"classes": classes, "boxes": boxes}`, and `y_pred` must follow the same format with an additional `confidence` key. Unfortunately, at the moment `BoxCOCOMetrics()` are not TPU compatible with the `fit()` API. If you wish to evaluate `BoxCOCOMetrics()` for a model trained on TPU, we recommend using the `model.predict()` API and manually updating the metric state with the results. 
Using this metric suite alongside a model is trivial; simply provide it to the `compile()` arguments of the model: ```python images = tf.ones(shape=(1, 512, 512, 3)) labels = { "boxes": [ [ [0, 0, 100, 100], [100, 100, 200, 200], [300, 300, 400, 400], ] ], "classes": [[1, 1, 1]], } model = keras_cv.models.RetinaNet( num_classes=20, bounding_box_format="xywh", ) # Evaluate model model(images) # Train model model.compile( classification_loss='focal', box_loss='smoothl1', optimizer=tf.optimizers.SGD(global_clipnorm=10.0), metrics=[keras_cv.metrics.BoxCOCOMetrics('xywh')] ) model.fit(images, labels) ``` """ def __init__(self, bounding_box_format, evaluate_freq, name=None, **kwargs): if "dtype" not in kwargs: kwargs["dtype"] = "float32" super().__init__(name=name, **kwargs) self.ground_truths = [] self.predictions = [] self.bounding_box_format = bounding_box_format self.evaluate_freq = evaluate_freq self._eval_step_count = 0 self._cached_result = [0] * len(METRIC_NAMES) def __new__(cls, *args, **kwargs): obj = super(keras.metrics.Metric, cls).__new__(cls) # Wrap the update_state function in a py_function and scope it to /cpu:0 obj_update_state = obj.update_state def update_state_on_cpu( y_true_boxes, y_true_classes, y_pred_boxes, y_pred_classes, y_pred_confidence, sample_weight=None, ): y_true = {"boxes": y_true_boxes, "classes": y_true_classes} y_pred = { "boxes": y_pred_boxes, "classes": y_pred_classes, "confidence": y_pred_confidence, } with tf.device("/cpu:0"): return obj_update_state(y_true, y_pred, sample_weight) obj.update_state_on_cpu = update_state_on_cpu def update_state_fn(self, y_true, y_pred, sample_weight=None): y_true_boxes = y_true["boxes"] y_true_classes = y_true["classes"] y_pred_boxes = y_pred["boxes"] y_pred_classes = y_pred["classes"] y_pred_confidence = y_pred["confidence"] eager_inputs = [ y_true_boxes, y_true_classes, y_pred_boxes, y_pred_classes, y_pred_confidence, ] if sample_weight is not None: eager_inputs.append(sample_weight) return tf.py_function( func=self.update_state_on_cpu, inp=eager_inputs, Tout=[] ) obj.update_state = types.MethodType(update_state_fn, obj) # Wrap the result function in a py_function and scope it to /cpu:0 obj_result = obj.result def result_on_host_cpu(force): with tf.device("/cpu:0"): # Without the call to `constant` `tf.py_function` selects the # first index automatically and just returns obj_result()[0] return tf.constant(obj_result(force), obj.dtype) obj.result_on_host_cpu = result_on_host_cpu def result_fn(self, force=False): py_func_result = tf.py_function( self.result_on_host_cpu, inp=[force], Tout=obj.dtype ) result = {} for i, key in enumerate(METRIC_NAMES): result[self.name_prefix() + METRIC_MAPPING[key]] = ( py_func_result[i] ) return result obj.result = types.MethodType(result_fn, obj) return obj def name_prefix(self): if self.name.startswith("box_coco_metrics"): return "" return self.name + "_" def update_state(self, y_true, y_pred, sample_weight=None): self._eval_step_count += 1 if isinstance(y_true["boxes"], tf.RaggedTensor) != isinstance( y_pred["boxes"], tf.RaggedTensor ): # Make sure we have same ragged/dense status for y_true and y_pred y_true = bounding_box.to_dense(y_true) y_pred = bounding_box.to_dense(y_pred) self.ground_truths.append(y_true) self.predictions.append(y_pred) # Compute on first step, so we don't have an inconsistent list of # metrics in our train_step() results. This will just populate the # metrics with `0.0` until we get to `evaluate_freq`. 
if self._eval_step_count % self.evaluate_freq == 0: self._cached_result = self._compute_result() def reset_state(self): self.ground_truths = [] self.predictions = [] self._eval_step_count = 0 self._cached_result = [0] * len(METRIC_NAMES) def result(self, force=False): if force: self._cached_result = self._compute_result() return self._cached_result def _compute_result(self): if len(self.predictions) == 0 or len(self.ground_truths) == 0: return dict([(key, 0) for key in METRIC_NAMES]) with HidePrints(): metrics = compute_pycocotools_metric( _box_concat(self.ground_truths), _box_concat(self.predictions), self.bounding_box_format, ) results = [] for key in METRIC_NAMES: # Workaround for the state where there are 0 boxes in a category. results.append(max(metrics[key], 0.0)) return results def compute_pycocotools_metric(y_true, y_pred, bounding_box_format): y_true = bounding_box.to_dense(y_true) y_pred = bounding_box.to_dense(y_pred) box_pred = y_pred["boxes"] cls_pred = y_pred["classes"] confidence_pred = y_pred["confidence"] gt_boxes = y_true["boxes"] gt_classes = y_true["classes"] box_pred = bounding_box.convert_format( box_pred, source=bounding_box_format, target="yxyx" ) gt_boxes = bounding_box.convert_format( gt_boxes, source=bounding_box_format, target="yxyx" ) total_images = gt_boxes.shape[0] source_ids = np.char.mod("%d", np.linspace(1, total_images, total_images)) ground_truth = {} ground_truth["source_id"] = [source_ids] ground_truth["num_detections"] = [ ops.sum(ops.cast(y_true["classes"] >= 0, "int32"), axis=-1) ] ground_truth["boxes"] = [ops.convert_to_numpy(gt_boxes)] ground_truth["classes"] = [ops.convert_to_numpy(gt_classes)] predictions = {} predictions["source_id"] = [source_ids] predictions["detection_boxes"] = [ops.convert_to_numpy(box_pred)] predictions["detection_classes"] = [ops.convert_to_numpy(cls_pred)] predictions["detection_scores"] = [ops.convert_to_numpy(confidence_pred)] predictions["num_detections"] = [ ops.sum(ops.cast(confidence_pred > 0, "int32"), axis=-1) ] return coco.compute_pycoco_metrics(ground_truth, predictions)
keras-cv/keras_cv/metrics/object_detection/box_coco_metrics.py/0
{ "file_path": "keras-cv/keras_cv/metrics/object_detection/box_coco_metrics.py", "repo_id": "keras-cv", "token_count": 4743 }
80
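A sketch of the manual-update path recommended above for TPU-trained models, with toy boxes for a single image; it assumes the TensorFlow backend and that `pycocotools` is installed. `result(force=True)` runs the evaluation immediately instead of waiting for `evaluate_freq` updates.

```python
import numpy as np

import keras_cv

metric = keras_cv.metrics.BoxCOCOMetrics(
    bounding_box_format="xyxy", evaluate_freq=1
)

y_true = {
    "boxes": np.array([[[0.0, 0.0, 100.0, 100.0]]], dtype="float32"),
    "classes": np.array([[0]], dtype="float32"),
}
y_pred = {
    "boxes": np.array([[[5.0, 5.0, 95.0, 95.0]]], dtype="float32"),
    "classes": np.array([[0]], dtype="float32"),
    "confidence": np.array([[0.9]], dtype="float32"),
}

metric.update_state(y_true, y_pred)
print(metric.result(force=True))  # dict of MaP / Recall entries
```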
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DenseNet backbone model. Reference: - [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993) - [Based on the Original keras.applications DenseNet](https://github.com/keras-team/keras/blob/master/keras/applications/densenet.py) """ # noqa: E501 import copy from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.models import utils from keras_cv.models.backbones.backbone import Backbone from keras_cv.models.backbones.densenet.densenet_backbone_presets import ( backbone_presets, ) from keras_cv.models.backbones.densenet.densenet_backbone_presets import ( backbone_presets_with_weights, ) from keras_cv.utils.python_utils import classproperty BN_AXIS = 3 BN_EPSILON = 1.001e-5 @keras_cv_export("keras_cv.models.DenseNetBackbone") class DenseNetBackbone(Backbone): """Instantiates the DenseNet architecture. Args: stackwise_num_repeats: list of ints, number of repeated convolutional blocks per dense block. include_rescaling: bool, whether to rescale the inputs. If set to `True`, inputs will be passed through a `Rescaling(1/255.0)` layer. input_shape: optional shape tuple, defaults to (None, None, 3). input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`) to use as image input for the model. compression_ratio: float, compression rate at transition layers. growth_rate: int, number of filters added by each dense block. 
Examples: ```python input_data = tf.ones(shape=(8, 224, 224, 3)) # Pretrained backbone model = keras_cv.models.DenseNetBackbone.from_preset("densenet121_imagenet") output = model(input_data) # Randomly initialized backbone with a custom config model = DenseNetBackbone( stackwise_num_repeats=[6, 12, 24, 16], include_rescaling=False, ) output = model(input_data) ``` """ # noqa: E501 def __init__( self, *, stackwise_num_repeats, include_rescaling, input_shape=(None, None, 3), input_tensor=None, compression_ratio=0.5, growth_rate=32, **kwargs, ): inputs = utils.parse_model_inputs(input_shape, input_tensor) x = inputs if include_rescaling: x = keras.layers.Rescaling(1 / 255.0)(x) x = keras.layers.Conv2D( 64, 7, strides=2, use_bias=False, padding="same", name="conv1_conv" )(x) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name="conv1_bn" )(x) x = keras.layers.Activation("relu", name="conv1_relu")(x) x = keras.layers.MaxPooling2D( 3, strides=2, padding="same", name="pool1" )(x) pyramid_level_inputs = {} for stack_index in range(len(stackwise_num_repeats) - 1): index = stack_index + 2 x = apply_dense_block( x, stackwise_num_repeats[stack_index], growth_rate, name=f"conv{index}", ) pyramid_level_inputs[f"P{index}"] = utils.get_tensor_input_name(x) x = apply_transition_block( x, compression_ratio, name=f"pool{index}" ) x = apply_dense_block( x, stackwise_num_repeats[-1], growth_rate, name=f"conv{len(stackwise_num_repeats) + 1}", ) pyramid_level_inputs[f"P{len(stackwise_num_repeats) + 1}"] = ( utils.get_tensor_input_name(x) ) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name="bn" )(x) x = keras.layers.Activation("relu", name="relu")(x) # Create model. super().__init__(inputs=inputs, outputs=x, **kwargs) # All references to `self` below this line self.pyramid_level_inputs = pyramid_level_inputs self.stackwise_num_repeats = stackwise_num_repeats self.include_rescaling = include_rescaling self.input_tensor = input_tensor self.compression_ratio = compression_ratio self.growth_rate = growth_rate def get_config(self): config = super().get_config() config.update( { "stackwise_num_repeats": self.stackwise_num_repeats, "include_rescaling": self.include_rescaling, # Remove batch dimension from `input_shape` "input_shape": self.input_shape[1:], "input_tensor": self.input_tensor, "compression_ratio": self.compression_ratio, "growth_rate": self.growth_rate, } ) return config @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return copy.deepcopy(backbone_presets) @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" # noqa: E501 return copy.deepcopy(backbone_presets_with_weights) def apply_dense_block(x, num_repeats, growth_rate, name=None): """A dense block. Args: x: input tensor. num_repeats: int, number of repeated convolutional blocks. growth_rate: int, number of filters added by each dense block. name: string, block label. """ if name is None: name = f"dense_block_{keras.backend.get_uid('dense_block')}" for i in range(num_repeats): x = apply_conv_block(x, growth_rate, name=f"{name}_block_{i}") return x def apply_transition_block(x, compression_ratio, name=None): """A transition block. Args: x: input tensor. compression_ratio: float, compression rate at transition layers. name: string, block label. 
""" if name is None: name = f"transition_block_{keras.backend.get_uid('transition_block')}" x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=f"{name}_bn" )(x) x = keras.layers.Activation("relu", name=f"{name}_relu")(x) x = keras.layers.Conv2D( int(x.shape[BN_AXIS] * compression_ratio), 1, use_bias=False, name=f"{name}_conv", )(x) x = keras.layers.AveragePooling2D(2, strides=2, name=f"{name}_pool")(x) return x def apply_conv_block(x, growth_rate, name=None): """A building block for a dense block. Args: x: input tensor. growth_rate: int, number of filters added by each dense block. name: string, block label. """ if name is None: name = f"conv_block_{keras.backend.get_uid('conv_block')}" shortcut = x x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=f"{name}_0_bn" )(x) x = keras.layers.Activation("relu", name=f"{name}_0_relu")(x) x = keras.layers.Conv2D( 4 * growth_rate, 1, use_bias=False, name=f"{name}_1_conv" )(x) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=f"{name}_1_bn" )(x) x = keras.layers.Activation("relu", name=f"{name}_1_relu")(x) x = keras.layers.Conv2D( growth_rate, 3, padding="same", use_bias=False, name=f"{name}_2_conv", )(x) x = keras.layers.Concatenate(axis=BN_AXIS, name=f"{name}_concat")( [shortcut, x] ) return x
keras-cv/keras_cv/models/backbones/densenet/densenet_backbone.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_backbone.py", "repo_id": "keras-cv", "token_count": 3671 }
81
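A sketch of consuming the `pyramid_level_inputs` mapping the constructor records, for example to build a multi-level feature extractor for detection or segmentation heads; the input shape and the idea of wrapping it in a `keras.Model` are illustrative assumptions, not from the source.

```python
import numpy as np
from tensorflow import keras

import keras_cv

backbone = keras_cv.models.DenseNetBackbone(
    stackwise_num_repeats=[6, 12, 24, 16],
    include_rescaling=False,
)

# Maps "P2".."P5" to the names of the layers feeding each transition/final block.
levels = backbone.pyramid_level_inputs
extractor = keras.Model(
    inputs=backbone.inputs,
    outputs={key: backbone.get_layer(name).output for key, name in levels.items()},
)

features = extractor(np.ones((1, 224, 224, 3), dtype="float32"))
for key, value in features.items():
    print(key, value.shape)
```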
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for loading pretrained model presets.""" import numpy as np import pytest from keras_cv.backend import ops from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import ( MobileNetV3Backbone, ) from keras_cv.tests.test_case import TestCase @pytest.mark.large class MobileNetV3PresetSmokeTest(TestCase): """ A smoke test for MobileNetV3 presets we run continuously. This only tests the smallest weights we have available. Run with: `pytest keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets_test.py --run_large` """ # noqa: E501 def setUp(self): self.input_batch = np.ones(shape=(8, 224, 224, 3)) def test_backbone_output(self): model = MobileNetV3Backbone.from_preset("mobilenet_v3_small_imagenet") outputs = model(self.input_batch) # The forward pass from a preset should be stable! # This test should catch cases where we unintentionally change our # network code in a way that would invalidate our preset weights. # We should only update these numbers if we are updating a weights # file, or have found a discrepancy with the upstream source. outputs = outputs[0, 0, 0, :5] expected = [0.25, 1.13, -0.26, 0.10, 0.03] # Keep a high tolerance, so we are robust to different hardware. self.assertAllClose( ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01 ) @pytest.mark.extra_large class MobileNetV3PresetFullTest(TestCase): """ Test the full enumeration of our preset. This tests every preset for MobileNetV3 and is only run manually. Run with: `pytest keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets_test.py --run_extra_large` """ # noqa: E501 def test_load_mobilenet_v3(self): input_data = np.ones(shape=(2, 224, 224, 3)) for preset in MobileNetV3Backbone.presets: model = MobileNetV3Backbone.from_preset(preset) model(input_data)
keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets_test.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets_test.py", "repo_id": "keras-cv", "token_count": 960 }
82
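The forward pass exercised by the smoke test above, sketched as stand-alone usage; downloading the preset weights requires network access.

```python
import numpy as np

import keras_cv

backbone = keras_cv.models.MobileNetV3Backbone.from_preset(
    "mobilenet_v3_small_imagenet"
)
features = backbone(np.ones((1, 224, 224, 3), dtype="float32"))
print(features.shape)  # final feature map, e.g. (1, 7, 7, channels)
```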
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CLIP presets."""

clip_presets = {
    "clip-vit-base-patch16": {
        "metadata": {
            "description": (
                "The model uses a ViT-B/16 Transformer architecture as an "
                "image encoder and uses a masked self-attention Transformer as "
                "a text encoder. These encoders are trained to maximize the "
                "similarity of (image, text) pairs via a contrastive loss. The "
                "model uses a patch size of 16 and input images of size (224, "
                "224)"
            ),
            "params": 149620737,
            "official_name": "CLIP",
            "path": "clip",
        },
        "kaggle_handle": "kaggle://keras/clip/keras/clip-vit-base-patch16/2",
    },
    "clip-vit-base-patch32": {
        "metadata": {
            "description": (
                "The model uses a ViT-B/32 Transformer architecture as an "
                "image encoder and uses a masked self-attention Transformer as "
                "a text encoder. These encoders are trained to maximize the "
                "similarity of (image, text) pairs via a contrastive loss. The "
                "model uses a patch size of 32 and input images of size (224, "
                "224)"
            ),
            "params": 151277313,
            "official_name": "CLIP",
            "path": "clip",
        },
        "kaggle_handle": "kaggle://keras/clip/keras/clip-vit-base-patch32/2",
    },
    "clip-vit-large-patch14": {
        "metadata": {
            "description": (
                "The model uses a ViT-L/14 Transformer architecture as an "
                "image encoder and uses a masked self-attention Transformer as "
                "a text encoder. These encoders are trained to maximize the "
                "similarity of (image, text) pairs via a contrastive loss. The "
                "model uses a patch size of 14 and input images of size (224, "
                "224)"
            ),
            "params": 427616513,
            "official_name": "CLIP",
            "path": "clip",
        },
        "kaggle_handle": "kaggle://keras/clip/keras/clip-vit-large-patch14/2",
    },
    "clip-vit-large-patch14-336": {
        "metadata": {
            "description": (
                "The model uses a ViT-L/14 Transformer architecture as an "
                "image encoder and uses a masked self-attention Transformer as "
                "a text encoder. These encoders are trained to maximize the "
                "similarity of (image, text) pairs via a contrastive loss. The "
                "model uses a patch size of 14 and input images of size (336, "
                "336)"
            ),
            "params": 427944193,
            "official_name": "CLIP",
            "path": "clip",
        },
        "kaggle_handle": "kaggle://keras/clip/keras/clip-vit-large-patch14-336/2",  # noqa: E501
    },
}
keras-cv/keras_cv/models/feature_extractor/clip/clip_presets.py/0
{ "file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_presets.py", "repo_id": "keras-cv", "token_count": 1592 }
83
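A sketch of how these entries are typically consumed: the dictionary keys double as `from_preset` handles, and the metadata can be inspected without downloading anything (fetching weights needs network access to Kaggle). The `CLIP.from_preset` call in the comment assumes the CLIP model class is available in this build of KerasCV.

```python
from keras_cv.models.feature_extractor.clip.clip_presets import clip_presets

for name, preset in clip_presets.items():
    print(name, preset["metadata"]["params"], preset["kaggle_handle"])

# e.g. model = keras_cv.models.CLIP.from_preset("clip-vit-base-patch16")
```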
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import math import numpy as np import tensorflow as tf try: import pandas as pd except ImportError: pd = None def unpack_input(data): if type(data) is dict: return data["images"], data["bounding_boxes"] else: return data def _get_tensor_types(): if pd is None: return (tf.Tensor, np.ndarray) else: return (tf.Tensor, np.ndarray, pd.Series, pd.DataFrame) def convert_inputs_to_tf_dataset( x=None, y=None, sample_weight=None, batch_size=None ): if sample_weight is not None: raise ValueError("RetinaNet does not yet support `sample_weight`.") if isinstance(x, tf.data.Dataset): if y is not None or batch_size is not None: raise ValueError( "When `x` is a `tf.data.Dataset`, please do not provide a " f"value for `y` or `batch_size`. Got `y={y}`, " f"`batch_size={batch_size}`." ) return x # batch_size defaults to 32, as it does in fit(). batch_size = batch_size or 32 # Parse inputs inputs = x if y is not None: inputs = (x, y) # Construct tf.data.Dataset dataset = tf.data.Dataset.from_tensor_slices(inputs) if batch_size == "full": dataset = dataset.batch(x.shape[0]) elif batch_size is not None: dataset = dataset.batch(batch_size) return dataset # TODO(lukewood): remove once exported from Keras core. def train_validation_split(arrays, validation_split): """Split arrays into train and validation subsets in deterministic order. The last part of data will become validation data. Args: arrays: Tensors to split. Allowed inputs are arbitrarily nested structures of Tensors and NumPy arrays. validation_split: Float between 0 and 1. The proportion of the dataset to include in the validation split. The rest of the dataset will be included in the training split. Returns: `(train_arrays, validation_arrays)` """ def _can_split(t): tensor_types = _get_tensor_types() return isinstance(t, tensor_types) or t is None flat_arrays = tf.nest.flatten(arrays) unsplitable = [type(t) for t in flat_arrays if not _can_split(t)] if unsplitable: raise ValueError( "`validation_split` is only supported for Tensors or NumPy " "arrays, found following types in the input: {}".format(unsplitable) ) if all(t is None for t in flat_arrays): return arrays, arrays first_non_none = None for t in flat_arrays: if t is not None: first_non_none = t break # Assumes all arrays have the same batch shape or are `None`. batch_dim = int(first_non_none.shape[0]) split_at = int(math.floor(batch_dim * (1.0 - validation_split))) if split_at == 0 or split_at == batch_dim: raise ValueError( "Training data contains {batch_dim} samples, which is not " "sufficient to split it into a validation and training set as " "specified by `validation_split={validation_split}`. 
Either " "provide more data, or a different value for the " "`validation_split` argument.".format( batch_dim=batch_dim, validation_split=validation_split ) ) def _split(t, start, end): if t is None: return t return t[start:end] train_arrays = tf.nest.map_structure( functools.partial(_split, start=0, end=split_at), arrays ) val_arrays = tf.nest.map_structure( functools.partial(_split, start=split_at, end=batch_dim), arrays ) return train_arrays, val_arrays
keras-cv/keras_cv/models/object_detection/__internal__.py/0
{ "file_path": "keras-cv/keras_cv/models/object_detection/__internal__.py", "repo_id": "keras-cv", "token_count": 1721 }
84
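A toy sketch of the two helpers above (they are internal utilities rather than public API, so the import path may change): `train_validation_split` slices the trailing fraction off each array, and `convert_inputs_to_tf_dataset` wraps `(x, y)` into a batched `tf.data.Dataset`.

```python
import numpy as np

from keras_cv.models.object_detection.__internal__ import (
    convert_inputs_to_tf_dataset,
    train_validation_split,
)

x = np.arange(10, dtype="float32").reshape(10, 1)
y = np.arange(10, dtype="float32")

# The last 20% of the samples become the validation split.
(train_x, train_y), (val_x, val_y) = train_validation_split(
    (x, y), validation_split=0.2
)

dataset = convert_inputs_to_tf_dataset(train_x, train_y, batch_size=4)
for batch_x, batch_y in dataset.take(1):
    print(batch_x.shape, batch_y.shape)  # (4, 1) (4,)
```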
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import numpy as np import pytest from absl.testing import parameterized import keras_cv from keras_cv import bounding_box from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.models.backbones.test_backbone_presets import ( test_backbone_presets, ) from keras_cv.models.object_detection.__test_utils__ import ( _create_bounding_box_dataset, ) from keras_cv.models.object_detection.yolo_v8.yolo_v8_detector_presets import ( yolo_v8_detector_presets, ) from keras_cv.tests.test_case import TestCase class YOLOV8DetectorTest(TestCase): @pytest.mark.large # Fit is slow, so mark these large. def test_fit(self): bounding_box_format = "xywh" yolo = keras_cv.models.YOLOV8Detector( num_classes=2, fpn_depth=1, bounding_box_format=bounding_box_format, backbone=keras_cv.models.YOLOV8Backbone.from_preset( "yolo_v8_xs_backbone" ), ) yolo.compile( optimizer="adam", classification_loss="binary_crossentropy", box_loss="ciou", ) xs, ys = _create_bounding_box_dataset(bounding_box_format) yolo.fit(x=xs, y=ys, epochs=1) @pytest.mark.tf_keras_only @pytest.mark.large # Fit is slow, so mark these large. def test_fit_with_ragged_tensors(self): bounding_box_format = "xywh" yolo = keras_cv.models.YOLOV8Detector( num_classes=2, fpn_depth=1, bounding_box_format=bounding_box_format, backbone=keras_cv.models.YOLOV8Backbone.from_preset( "yolo_v8_xs_backbone" ), ) yolo.compile( optimizer="adam", classification_loss="binary_crossentropy", box_loss="ciou", ) xs, ys = _create_bounding_box_dataset(bounding_box_format) ys = bounding_box.to_ragged(ys) yolo.fit(x=xs, y=ys, epochs=1) @pytest.mark.large # Fit is slow, so mark these large. 
def test_fit_with_no_valid_gt_bbox(self): bounding_box_format = "xywh" yolo = keras_cv.models.YOLOV8Detector( num_classes=1, fpn_depth=1, bounding_box_format=bounding_box_format, backbone=keras_cv.models.YOLOV8Backbone.from_preset( "yolo_v8_xs_backbone" ), ) yolo.compile( optimizer="adam", classification_loss="binary_crossentropy", box_loss="ciou", ) xs, ys = _create_bounding_box_dataset(bounding_box_format) # Make all bounding_boxes invalid and filter out them ys["classes"] = -np.ones_like(ys["classes"]) yolo.fit(x=xs, y=ys, epochs=1) def test_trainable_weight_count(self): yolo = keras_cv.models.YOLOV8Detector( num_classes=2, fpn_depth=1, bounding_box_format="xywh", backbone=keras_cv.models.YOLOV8Backbone.from_preset( "yolo_v8_s_backbone" ), ) self.assertEqual(len(yolo.trainable_weights), 195) def test_bad_loss(self): yolo = keras_cv.models.YOLOV8Detector( num_classes=2, fpn_depth=1, bounding_box_format="xywh", backbone=keras_cv.models.YOLOV8Backbone.from_preset( "yolo_v8_xs_backbone" ), ) with self.assertRaisesRegex( ValueError, "Invalid box loss", ): yolo.compile( box_loss="bad_loss", classification_loss="binary_crossentropy" ) with self.assertRaisesRegex( ValueError, "Invalid classification loss", ): yolo.compile(box_loss="ciou", classification_loss="bad_loss") @pytest.mark.large # Saving is slow, so mark these large. def test_saved_model(self): model = keras_cv.models.YOLOV8Detector( num_classes=20, bounding_box_format="xywh", fpn_depth=1, backbone=keras_cv.models.YOLOV8Backbone.from_preset( "yolo_v8_xs_backbone" ), ) xs, _ = _create_bounding_box_dataset("xywh") model_output = model(xs) save_path = os.path.join( self.get_temp_dir(), "yolo_v8_xs_detector.keras" ) model.save(save_path) # TODO: Remove the need to pass the `custom_objects` parameter. restored_model = keras.saving.load_model( save_path, custom_objects={"YOLOV8Detector": keras_cv.models.YOLOV8Detector}, ) # Check we got the real object back. self.assertIsInstance(restored_model, keras_cv.models.YOLOV8Detector) # Check that output matches. restored_output = restored_model(xs) self.assertAllClose( ops.convert_to_numpy(model_output["boxes"]), ops.convert_to_numpy(restored_output["boxes"]), ) self.assertAllClose( ops.convert_to_numpy(model_output["classes"]), ops.convert_to_numpy(restored_output["classes"]), ) # TODO(tirthasheshpatel): Support updating prediction decoder in Keras Core. 
@pytest.mark.tf_keras_only def test_update_prediction_decoder(self): yolo = keras_cv.models.YOLOV8Detector( num_classes=2, fpn_depth=1, bounding_box_format="xywh", backbone=keras_cv.models.YOLOV8Backbone.from_preset( "yolo_v8_s_backbone" ), prediction_decoder=keras_cv.layers.NonMaxSuppression( bounding_box_format="xywh", from_logits=False, confidence_threshold=0.0, iou_threshold=1.0, ), ) image = np.ones((1, 512, 512, 3)) outputs = yolo.predict(image) # We predicted at least 1 box with confidence_threshold 0 self.assertGreater(outputs["boxes"].shape[0], 0) yolo.prediction_decoder = keras_cv.layers.NonMaxSuppression( bounding_box_format="xywh", from_logits=False, confidence_threshold=1.0, iou_threshold=1.0, ) outputs = yolo.predict(image) # We predicted no boxes with confidence threshold 1 self.assertAllEqual(outputs["boxes"], -np.ones_like(outputs["boxes"])) self.assertAllEqual( outputs["confidence"], -np.ones_like(outputs["confidence"]) ) self.assertAllEqual( outputs["classes"], -np.ones_like(outputs["classes"]) ) @pytest.mark.large class YOLOV8DetectorSmokeTest(TestCase): @parameterized.named_parameters( *[(preset, preset) for preset in test_backbone_presets] ) @pytest.mark.extra_large def test_backbone_preset(self, preset): model = keras_cv.models.YOLOV8Detector.from_preset( preset, num_classes=20, bounding_box_format="xywh", ) xs, _ = _create_bounding_box_dataset(bounding_box_format="xywh") output = model(xs) # 64 represents number of parameters in a box # 5376 is the number of anchors for a 512x512 image self.assertEqual(output["boxes"].shape, (xs.shape[0], 5376, 64)) def test_preset_with_forward_pass(self): model = keras_cv.models.YOLOV8Detector.from_preset( "yolo_v8_m_pascalvoc", bounding_box_format="xywh", ) image = np.ones((1, 512, 512, 3)) encoded_predictions = model(image) self.assertAllClose( ops.convert_to_numpy(encoded_predictions["boxes"][0, 0:5, 0]), [-0.8303556, 0.75213313, 1.809204, 1.6576759, 1.4134747], ) self.assertAllClose( ops.convert_to_numpy(encoded_predictions["classes"][0, 0:5, 0]), [ 7.6146556e-08, 8.0103280e-07, 9.7873999e-07, 2.2314548e-06, 2.5051115e-06, ], ) @pytest.mark.extra_large class YOLOV8DetectorPresetFullTest(TestCase): """ Test the full enumeration of our presets. This every presets for YOLOV8Detector and is only run manually. Run with: `pytest keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_test.py --run_extra_large` """ # noqa: E501 def test_load_yolo_v8_detector(self): input_data = np.ones(shape=(2, 224, 224, 3)) for preset in yolo_v8_detector_presets: model = keras_cv.models.YOLOV8Detector.from_preset( preset, bounding_box_format="xywh" ) model(input_data)
keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_test.py/0
{ "file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_test.py", "repo_id": "keras-cv", "token_count": 4662 }
85
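A condensed version of the workflow the tests above exercise; the backbone preset is downloaded on first use, so network access is assumed, and the input image is a dummy tensor.

```python
import numpy as np

import keras_cv

model = keras_cv.models.YOLOV8Detector(
    num_classes=2,
    fpn_depth=1,
    bounding_box_format="xywh",
    backbone=keras_cv.models.YOLOV8Backbone.from_preset("yolo_v8_xs_backbone"),
)
model.compile(
    optimizer="adam",
    classification_loss="binary_crossentropy",
    box_loss="ciou",
)

# predict() returns decoded boxes via the default NonMaxSuppression decoder.
predictions = model.predict(np.ones((1, 512, 512, 3), dtype="float32"))
print(predictions["boxes"].shape, predictions["classes"].shape)
```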
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import numpy as np import pytest import tensorflow as tf from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.backend.config import keras_3 from keras_cv.models import MiTBackbone from keras_cv.models import SegFormer from keras_cv.tests.test_case import TestCase class SegFormerTest(TestCase): def test_segformer_construction(self): backbone = MiTBackbone.from_preset("mit_b0", input_shape=[512, 512, 3]) model = SegFormer(backbone=backbone, num_classes=1) model.compile( optimizer="adam", loss=keras.losses.BinaryCrossentropy(), metrics=["accuracy"], ) def test_segformer_preset_construction(self): model = SegFormer.from_preset( "segformer_b0", num_classes=1, input_shape=[512, 512, 3] ) model.compile( optimizer="adam", loss=keras.losses.BinaryCrossentropy(), metrics=["accuracy"], ) def test_segformer_preset_error(self): with self.assertRaises(TypeError): _ = SegFormer.from_preset("segformer_b0") @pytest.mark.large def test_segformer_call(self): backbone = MiTBackbone.from_preset("mit_b0") mit_model = SegFormer(backbone=backbone, num_classes=1) images = np.random.uniform(size=(2, 224, 224, 3)) mit_output = mit_model(images) mit_pred = mit_model.predict(images) seg_model = SegFormer.from_preset("segformer_b0", num_classes=1) seg_output = seg_model(images) seg_pred = seg_model.predict(images) self.assertAllClose(mit_output, seg_output) self.assertAllClose(mit_pred, seg_pred) @pytest.mark.large def test_weights_change(self): target_size = [512, 512, 2] images = tf.ones(shape=[1] + [512, 512, 3]) labels = tf.zeros(shape=[1] + target_size) ds = tf.data.Dataset.from_tensor_slices((images, labels)) ds = ds.repeat(2) ds = ds.batch(2) backbone = MiTBackbone.from_preset("mit_b0", input_shape=[512, 512, 3]) model = SegFormer(backbone=backbone, num_classes=2) model.compile( optimizer="adam", loss=keras.losses.BinaryCrossentropy(), metrics=["accuracy"], ) original_weights = model.get_weights() model.fit(ds, epochs=1) updated_weights = model.get_weights() for w1, w2 in zip(original_weights, updated_weights): self.assertNotAllEqual(w1, w2) self.assertFalse(ops.any(ops.isnan(w2))) @pytest.mark.large # Saving is slow, so mark these large. def test_saved_model(self): target_size = [512, 512, 3] backbone = MiTBackbone.from_preset("mit_b0", input_shape=[512, 512, 3]) model = SegFormer(backbone=backbone, num_classes=1) input_batch = np.ones(shape=[2] + target_size) model_output = model(input_batch) save_path = os.path.join(self.get_temp_dir(), "model.keras") if keras_3(): model.save(save_path) else: model.save(save_path, save_format="keras_v3") restored_model = keras.models.load_model(save_path) # Check we got the real object back. self.assertIsInstance(restored_model, SegFormer) # Check that output matches. restored_output = restored_model(input_batch) self.assertAllClose(model_output, restored_output) @pytest.mark.large # Saving is slow, so mark these large. 
def test_preset_saved_model(self): target_size = [224, 224, 3] model = SegFormer.from_preset("segformer_b0", num_classes=1) input_batch = np.ones(shape=[2] + target_size) model_output = model(input_batch) save_path = os.path.join(self.get_temp_dir(), "model.keras") if keras_3(): model.save(save_path) else: model.save(save_path, save_format="keras_v3") restored_model = keras.models.load_model(save_path) # Check we got the real object back. self.assertIsInstance(restored_model, SegFormer) # Check that output matches. restored_output = restored_model(input_batch) self.assertAllClose(model_output, restored_output)
keras-cv/keras_cv/models/segmentation/segformer/segformer_test.py/0
{ "file_path": "keras-cv/keras_cv/models/segmentation/segformer/segformer_test.py", "repo_id": "keras-cv", "token_count": 2103 }
86
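The same construction pattern as the tests above, sketched as stand-alone inference; `num_classes=19` is an arbitrary choice here, and the preset weights are downloaded on first use.

```python
import numpy as np

import keras_cv

model = keras_cv.models.SegFormer.from_preset(
    "segformer_b0", num_classes=19, input_shape=[512, 512, 3]
)
masks = model.predict(np.ones((1, 512, 512, 3), dtype="float32"))
print(masks.shape)  # expected (1, 512, 512, 19)
```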
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.models.stable_diffusion.attention_block import ( # noqa: E501 AttentionBlock, ) from keras_cv.models.stable_diffusion.padded_conv2d import PaddedConv2D from keras_cv.models.stable_diffusion.resnet_block import ResnetBlock @keras_cv_export("keras_cv.models.stable_diffusion.Decoder") class Decoder(keras.Sequential): def __init__(self, img_height, img_width, name=None, download_weights=True): super().__init__( [ keras.layers.Input((img_height // 8, img_width // 8, 4)), keras.layers.Rescaling(1.0 / 0.18215), PaddedConv2D(4, 1), PaddedConv2D(512, 3, padding=1), ResnetBlock(512), AttentionBlock(512), ResnetBlock(512), ResnetBlock(512), ResnetBlock(512), ResnetBlock(512), keras.layers.UpSampling2D(2), PaddedConv2D(512, 3, padding=1), ResnetBlock(512), ResnetBlock(512), ResnetBlock(512), keras.layers.UpSampling2D(2), PaddedConv2D(512, 3, padding=1), ResnetBlock(256), ResnetBlock(256), ResnetBlock(256), keras.layers.UpSampling2D(2), PaddedConv2D(256, 3, padding=1), ResnetBlock(128), ResnetBlock(128), ResnetBlock(128), keras.layers.GroupNormalization(epsilon=1e-5), keras.layers.Activation("swish"), PaddedConv2D(3, 3, padding=1), ], name=name, ) if download_weights: decoder_weights_fpath = keras.utils.get_file( origin="https://huggingface.co/fchollet/stable-diffusion/resolve/main/kcv_decoder.h5", # noqa: E501 file_hash="ad350a65cc8bc4a80c8103367e039a3329b4231c2469a1093869a345f55b1962", # noqa: E501 ) self.load_weights(decoder_weights_fpath)
keras-cv/keras_cv/models/stable_diffusion/decoder.py/0
{ "file_path": "keras-cv/keras_cv/models/stable_diffusion/decoder.py", "repo_id": "keras-cv", "token_count": 1336 }
87
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from keras_cv.utils.resource_loader import LazySO custom_ops = LazySO("custom_ops/_keras_cv_custom_ops.so") # TODO(tanzhenyu): remove assumption of non overlapping boxes def within_box3d_index(points, boxes): """Assign point to the box index that it belongs to. If no box contains the point, it will be assigned -1. This v2 function assumes that bounding boxes DO NOT overlap with each other. Args: points: [..., num_points, 3] float32 Tensor for 3d points in xyz format. boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx, dy, dz, phi]. Returns: integer Tensor of shape [..., num_points] indicating which box index each point belongs to. """ points = tf.convert_to_tensor(points) boxes = tf.convert_to_tensor(boxes) if points.shape.rank == 2 and boxes.shape.rank == 2: return custom_ops.ops.kcv_within_box(points, boxes) elif points.shape.rank == 3 and boxes.shape.rank == 3: num_samples = points.get_shape().as_list()[0] results = [] for i in range(num_samples): results.append( custom_ops.ops.kcv_within_box(points[i], boxes[i])[ tf.newaxis, ... ] ) return tf.concat(results, axis=0) else: raise ValueError( "is_within_box3d_v2 are expecting inputs point clouds and bounding " "boxes to be rank 2D (Point, Feature) or 3D (Frame, Point, Feature)" " tensors. Got shape: {} and {}".format(points.shape, boxes.shape) ) def group_points_by_boxes(points, boxes): """Checks if 3d points are within 3d bounding boxes. Currently only xyz format is supported. This function assumes that bounding boxes DO NOT overlap with each other. Args: points: [..., num_points, 3] float32 Tensor for 3d points in xyz format. boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx, dy, dz, phi]. Returns: boolean Ragged Tensor of shape [..., num_boxes, ragged_points] for each box, all the point indices that belong to the box. 
""" num_boxes = boxes.shape[-2] or tf.shape(boxes)[-2] # [..., num_points] box_indices = within_box3d_index(points, boxes) num_points = points.shape[-2] or tf.shape(points)[-2] point_indices = tf.range(num_points, dtype=tf.int32) def group_per_sample(box_index): point_mask = tf.math.greater_equal(box_index, 0) valid_point_indices = tf.boolean_mask(point_indices, point_mask) valid_box_index = tf.boolean_mask(box_index, point_mask) res = tf.ragged.stack_dynamic_partitions( valid_point_indices, valid_box_index, num_partitions=num_boxes ) return res boxes_rank = len(boxes.shape) if boxes_rank == 2: return group_per_sample(box_indices) elif boxes_rank == 3: num_samples = boxes.get_shape().as_list()[0] res_list = [] for i in range(num_samples): res_list.append(group_per_sample(box_indices[i])) return tf.ragged.stack(res_list) else: raise ValueError( f"Does not support box rank > 3, got boxes shape {boxes.shape}" ) # TODO(lengzhaoqi/tanzhenyu): compare the performance with v1 def is_within_any_box3d_v2(points, boxes, keepdims=False): """Checks if 3d points are within 3d bounding boxes. Currently only xyz format is supported. Args: points: [..., num_points, 3] float32 Tensor for 3d points in xyz format. boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx, dy, dz, phi]. keepdims: boolean. If true, retains reduced dimensions with length 1. Returns: boolean Tensor of shape [..., num_points] indicating whether the point belongs to the box. """ res = tf.greater_equal(within_box3d_index(points, boxes), 0) if keepdims: res = res[..., tf.newaxis] return res def is_within_any_box3d_v3(points, boxes, keepdims=False): """Checks if 3d points are within 3d bounding boxes. Currently only xyz format is supported. Args: points: [..., num_points, 3] float32 Tensor for 3d points in xyz format. boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx, dy, dz, phi]. keepdims: boolean. If true, retains reduced dimensions with length 1. Returns: boolean Tensor of shape [..., num_points] indicating whether the point belongs to the box. """ res = custom_ops.ops.kcv_within_any_box(points, boxes) if keepdims: res = res[..., tf.newaxis] return res def get_rank(tensor): return tensor.shape.ndims or tf.rank(tensor) def wrap_angle_radians(angle_radians, min_val=-np.pi, max_val=np.pi): """Wrap the value of `angles_radians` to the range [min_val, max_val].""" max_min_diff = max_val - min_val return min_val + tf.math.floormod(angle_radians + max_val, max_min_diff) def _get_3d_rotation_matrix(yaw, roll, pitch): """Creates 3x3 rotation matrix from yaw, roll, pitch (angles in radians). Note: Yaw -> Z, Roll -> X, Pitch -> Y Args: yaw: float tensor representing a yaw angle in radians. roll: float tensor representing a roll angle in radians. pitch: float tensor representing a pitch angle in radians. Returns: A [3, 3] tensor corresponding to a rotation matrix. """ def _UnitX(angle): return tf.reshape( [ 1.0, 0.0, 0.0, 0.0, tf.cos(angle), -tf.sin(angle), 0.0, tf.sin(angle), tf.cos(angle), ], shape=[3, 3], ) def _UnitY(angle): return tf.reshape( [ tf.cos(angle), 0.0, tf.sin(angle), 0.0, 1.0, 0.0, -tf.sin(angle), 0.0, tf.cos(angle), ], shape=[3, 3], ) def _UnitZ(angle): return tf.reshape( [ tf.cos(angle), -tf.sin(angle), 0.0, tf.sin(angle), tf.cos(angle), 0.0, 0.0, 0.0, 1.0, ], shape=[3, 3], ) return tf.matmul(tf.matmul(_UnitZ(yaw), _UnitX(roll)), _UnitY(pitch)) def _center_xyzWHD_to_corner_xyz(boxes): """convert from center format to corner format. 
Args: boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx, dy, dz, phi]. Returns: corners: [..., num_boxes, 8, 3] float32 Tensor for 3d corners in [x, y, z]. """ # relative corners w.r.t to origin point # this will return all corners in top-down counter clockwise instead of # only left top and bottom right. rel_corners = tf.constant( [ [0.5, 0.5, 0.5], # top [-0.5, 0.5, 0.5], # top [-0.5, -0.5, 0.5], # top [0.5, -0.5, 0.5], # top [0.5, 0.5, -0.5], # bottom [-0.5, 0.5, -0.5], # bottom [-0.5, -0.5, -0.5], # bottom [0.5, -0.5, -0.5], # bottom ] ) centers = boxes[..., :3] dimensions = boxes[..., 3:6] phi_world = boxes[..., 6] leading_shapes = _get_shape(boxes) cos = tf.cos(phi_world) sin = tf.sin(phi_world) zero = tf.zeros_like(cos) one = tf.ones_like(cos) rotations = tf.reshape( tf.stack([cos, -sin, zero, sin, cos, zero, zero, zero, one], axis=-1), leading_shapes[:-1] + [3, 3], ) # apply the delta to convert from centers to relative corners format rel_corners = tf.einsum("...ni,ji->...nji", dimensions, rel_corners) # apply rotation matrix on relative corners rel_corners = tf.einsum("...nij,...nkj->...nki", rotations, rel_corners) # translate back to absolute corners format corners = rel_corners + tf.reshape(centers, leading_shapes[:-1] + [1, 3]) return corners def _get_shape(tensor): tensor = tf.convert_to_tensor(tensor) dynamic_shape = tf.shape(tensor) if tensor.shape.ndims is None: return dynamic_shape static_shape = tensor.shape.as_list() shapes = [ static_shape[x] if static_shape[x] is not None else dynamic_shape[x] for x in range(tensor.shape.ndims) ] return shapes def _is_on_lefthand_side(points, v1, v2): """Checks if points lay on a vector direction or to its left. Args: point: float Tensor of [num_points, 2] of points to check v1: float Tensor of [num_points, 2] of starting point of the vector v2: float Tensor of [num_points, 2] of ending point of the vector Returns: a boolean Tensor of [num_points] indicate whether each point is on the left of the vector or on the vector direction. """ # Prepare for broadcast: All point operations are on the right, # and all v1/v2 operations are on the left. This is faster than left/right # under the assumption that we have more points than vertices. points_x = points[..., tf.newaxis, :, 0] points_y = points[..., tf.newaxis, :, 1] v1_x = v1[..., 0, tf.newaxis] v2_x = v2[..., 0, tf.newaxis] v1_y = v1[..., 1, tf.newaxis] v2_y = v2[..., 1, tf.newaxis] d1 = (points_y - v1_y) * (v2_x - v1_x) d2 = (points_x - v1_x) * (v2_y - v1_y) return d1 >= d2 def _box_area(boxes): """Compute the area of 2-d boxes. Vertices must be ordered counter-clockwise. This function can technically handle any kind of convex polygons. Args: boxes: a float Tensor of [..., 4, 2] of boxes. The last coordinates are the four corners of the box and (x, y). The corners must be given in counter-clockwise order. """ boxes_roll = tf.roll(boxes, shift=1, axis=-2) det = ( tf.reduce_sum( boxes[..., 0] * boxes_roll[..., 1] - boxes[..., 1] * boxes_roll[..., 0], axis=-1, keepdims=True, ) / 2.0 ) return tf.abs(det) def is_within_box2d(points, boxes): """Checks if 3d points are within 2d bounding boxes. Currently only xy format is supported. This function returns true if points are strictly inside the box or on edge. Args: points: [num_points, 2] float32 Tensor for 2d points in xy format. boxes: [num_boxes, 4, 2] float32 Tensor for 2d boxes in xy format, counter clockwise. 
Returns: boolean Tensor of shape [num_points, num_boxes] """ v1, v2, v3, v4 = ( boxes[..., 0, :], boxes[..., 1, :], boxes[..., 2, :], boxes[..., 3, :], ) is_inside = tf.math.logical_and( tf.math.logical_and( _is_on_lefthand_side(points, v1, v2), _is_on_lefthand_side(points, v2, v3), ), tf.math.logical_and( _is_on_lefthand_side(points, v3, v4), _is_on_lefthand_side(points, v4, v1), ), ) valid_area = tf.greater(_box_area(boxes), 0) is_inside = tf.math.logical_and(is_inside, valid_area) # swap the last two dimensions is_inside = tf.einsum("...ij->...ji", tf.cast(is_inside, tf.int32)) return tf.cast(is_inside, tf.bool) def is_within_any_box3d(points, boxes, keepdims=False): """Checks if 3d points are within any 3d bounding boxes. Currently only xyz format is supported. Args: points: [..., num_points, 3] float32 Tensor for 3d points in xyz format. boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx, dy, dz, phi]. keepdims: boolean. If true, retains reduced dimensions with length 1. Returns: boolean Tensor of shape [..., num_points] indicating whether the point belongs to the box. """ res = is_within_box3d(points, boxes) return tf.reduce_any(res, axis=-1, keepdims=keepdims) def is_within_box3d(points, boxes): """Checks if 3d points are within 3d bounding boxes. Currently only xyz format is supported. Args: points: [..., num_points, 3] float32 Tensor for 3d points in xyz format. boxes: [..., num_boxes, 7] float32 Tensor for 3d boxes in [x, y, z, dx, dy, dz, phi]. Returns: boolean Tensor of shape [..., num_points, num_boxes] indicating whether the point belongs to the box. """ # step 1 -- determine if points are within xy range # convert from center format to corner format boxes_corner = _center_xyzWHD_to_corner_xyz(boxes) # project to 2d boxes by only taking x, y on top plane boxes_2d = boxes_corner[..., 0:4, 0:2] # project to 2d points by only taking x, y points_2d = points[..., :2] # check whether points are within 2d boxes, [..., num_points, num_boxes] is_inside_2d = is_within_box2d(points_2d, boxes_2d) # step 2 -- determine if points are within z range [_, _, z, _, _, dz, _] = tf.split(boxes, 7, axis=-1) z = z[..., 0] dz = dz[..., 0] bottom = z - dz / 2.0 # [..., 1, num_boxes] bottom = bottom[..., tf.newaxis, :] top = z + dz / 2.0 top = top[..., tf.newaxis, :] # [..., num_points, 1] points_z = points[..., 2:] # [..., num_points, num_boxes] is_inside_z = tf.math.logical_and( tf.less_equal(points_z, top), tf.greater_equal(points_z, bottom) ) return tf.math.logical_and(is_inside_z, is_inside_2d) def coordinate_transform(points, pose): """ Translate 'points' to coordinates according to 'pose' vector. pose should contain 6 floating point values: translate_x, translate_y, translate_z: The translation to apply. yaw, roll, pitch: The rotation angles in radians. Args: points: Float shape [..., 3]: Points to transform to new coordinates. pose: Float shape [6]: [translate_x, translate_y, translate_z, yaw, roll, pitch]. The pose in the frame that 'points' comes from, and the definition of the rotation and translation angles to apply to points. Returns: 'points' transformed to the coordinates defined by 'pose'. """ translate_x = pose[0] translate_y = pose[1] translate_z = pose[2] # Translate the points so the origin is the pose's center. translation = tf.reshape([translate_x, translate_y, translate_z], shape=[3]) translated_points = points + translation # Compose the rotations along the three axes. # # Note: Yaw->Z, Roll->X, Pitch->Y. 
yaw, roll, pitch = pose[3], pose[4], pose[5] rotation_matrix = _get_3d_rotation_matrix(yaw, roll, pitch) # Finally, rotate the points about the pose's origin according to the # rotation matrix. rotated_points = tf.einsum( "...i,...ij->...j", translated_points, rotation_matrix ) return rotated_points def spherical_coordinate_transform(points): """Converts points from xyz coordinates to spherical coordinates. https://en.wikipedia.org/wiki/Spherical_coordinate_system#Coordinate_system_conversions for definitions of the transformations. Args: points: A floating point tensor with shape [..., 3], where the inner 3 dimensions correspond to xyz coordinates. Returns: A floating point tensor with the same shape [..., 3], where the inner dimensions correspond to (dist, theta, phi), where phi corresponds to azimuth/yaw (rotation around z), and theta corresponds to pitch/inclination (rotation around y). """ dist = tf.sqrt(tf.reduce_sum(tf.square(points), axis=-1)) theta = tf.acos(points[..., 2] / tf.maximum(dist, 1e-7)) # Note: tf.atan2 takes in (y, x). phi = tf.atan2(points[..., 1], points[..., 0]) return tf.stack([dist, theta, phi], axis=-1) def within_a_frustum(points, center, r_distance, theta_width, phi_width): """Check if 3d points are within a 3d frustum. https://en.wikipedia.org/wiki/Spherical_coordinate_system for definitions of r, theta, and phi. https://en.wikipedia.org/wiki/Viewing_frustum for definition of a viewing frustum. Here, we use a conical shaped frustum (https://mathworld.wolfram.com/ConicalFrustum.html). Currently, only xyz format is supported. Args: points: [num_points, 3] float32 Tensor for 3d points in xyz format. center: [3, ] float32 Tensor for the frustum center in xyz format. r_distance: A float scalar sets the starting distance of a frustum. theta_width: A float scalar sets the theta width of a frustum. phi_width: A float scalar sets the phi width of a frustum. Returns: boolean Tensor of shape [num_points] indicating whether points are within the frustum. """ r, theta, phi = tf.unstack( spherical_coordinate_transform(points[:, :3]), axis=-1 ) _, center_theta, center_phi = tf.unstack( spherical_coordinate_transform(center[tf.newaxis, :]), axis=-1 ) theta_half_width = theta_width / 2.0 phi_half_width = phi_width / 2.0 # Points within theta and phi width and # further than r distance are selected. in_theta_width = (theta < (center_theta + theta_half_width)) & ( theta > (center_theta - theta_half_width) ) in_phi_width = (phi < (center_phi + phi_half_width)) & ( phi > (center_phi - phi_half_width) ) in_r_distance = r > r_distance return in_theta_width & in_phi_width & in_r_distance
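# --- Illustrative usage sketch (a hedged example appended for documentation
# --- purposes; it is not part of the original module). Only the pure
# --- TensorFlow helpers are exercised; `within_box3d_index` and
# --- `group_points_by_boxes` additionally require the compiled custom op
# --- shared library.
if __name__ == "__main__":
    # Two query points and a single unit cube centered at the origin with
    # zero heading, in [x, y, z, dx, dy, dz, phi] format.
    demo_points = tf.constant([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]])
    demo_boxes = tf.constant([[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0]])

    # [num_points, num_boxes] membership matrix -> [[True], [False]].
    print(is_within_box3d(demo_points, demo_boxes).numpy())
    # Reduction over boxes -> [True, False].
    print(is_within_any_box3d(demo_points, demo_boxes).numpy())
    # Angles are wrapped back into [-pi, pi).
    print(wrap_angle_radians(tf.constant(np.pi + 0.1)).numpy())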
keras-cv/keras_cv/point_cloud/point_cloud.py/0
{ "file_path": "keras-cv/keras_cv/point_cloud/point_cloud.py", "repo_id": "keras-cv", "token_count": 7871 }
88
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def normalize_tuple(value, n, name, allow_zero=False):
    """Transforms non-negative/positive integer/integers into an integer tuple.

    Args:
        value: The value to validate and convert. Could be an int, or any
            iterable of ints.
        n: The size of the tuple to be returned.
        name: The name of the argument being validated, e.g. "strides" or
            "kernel_size". This is only used to format error messages.
        allow_zero: Defaults to False. If False, a ValueError is raised when
            zero is received.

    Returns:
        A tuple of n integers.

    Raises:
        ValueError: If something other than an int (or an iterable of ints), or
            a negative value, is passed.
    """
    error_msg = (
        f"The `{name}` argument must be a tuple of {n} "
        f"integers. Received: {value}"
    )

    if isinstance(value, int):
        value_tuple = (value,) * n
    else:
        try:
            value_tuple = tuple(value)
        except TypeError:
            raise ValueError(error_msg)
        if len(value_tuple) != n:
            raise ValueError(error_msg)
        for single_value in value_tuple:
            try:
                int(single_value)
            except (ValueError, TypeError):
                error_msg += (
                    f", including element {single_value} of "
                    f"type {type(single_value)}"
                )
                raise ValueError(error_msg)

    if allow_zero:
        unqualified_values = {v for v in value_tuple if v < 0}
        req_msg = ">= 0"
    else:
        unqualified_values = {v for v in value_tuple if v <= 0}
        req_msg = "> 0"

    if unqualified_values:
        error_msg += (
            f", including {unqualified_values}"
            f" that do not satisfy the requirement `{req_msg}`."
        )
        raise ValueError(error_msg)

    return value_tuple
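# --- Illustrative usage sketch (a hedged example appended for documentation
# --- purposes; it is not part of the original module), showing the
# --- normalization and validation behaviour of `normalize_tuple`.
if __name__ == "__main__":
    print(normalize_tuple(3, 2, "kernel_size"))        # (3, 3)
    print(normalize_tuple((1, 2), 2, "strides"))        # (1, 2)
    print(normalize_tuple([4, 4, 4], 3, "pool_size"))   # (4, 4, 4)
    try:
        normalize_tuple(0, 2, "strides")                 # zero is rejected...
    except ValueError as e:
        print(e)
    print(normalize_tuple(0, 2, "dilation_rate", allow_zero=True))  # ...unless allowed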
keras-cv/keras_cv/utils/conv_utils.py/0
{ "file_path": "keras-cv/keras_cv/utils/conv_utils.py", "repo_id": "keras-cv", "token_count": 1010 }
89
#!/bin/bash rm -rf keras_cv.egg-info/ rm -rf keras_cv/**/__pycache__ rm -rf keras_cv/__pycache__ rm -rf build/
keras-cv/shell/clean.sh/0
{ "file_path": "keras-cv/shell/clean.sh", "repo_id": "keras-cv", "token_count": 52 }
90
# About Keras layers

All Keras layers have a number of methods in common:

- `layer.get_weights()`: returns the weights of the layer as a list of Numpy arrays.
- `layer.set_weights(weights)`: sets the weights of the layer from a list of Numpy arrays (with the same shapes as the weights returned by `get_weights`).
- `layer.get_config()`: returns a dictionary containing the configuration of the layer. The layer can be reinstantiated from its own config as follows:

```python
layer = Dense(32)
config = layer.get_config()
reconstructed_layer = Dense.from_config(config)
```

or:

```python
from keras import layers

config = layer.get_config()
layer = layers.deserialize({'class_name': layer.__class__.__name__,
                            'config': config})
```

If a layer has a single node (i.e. if it is not a shared layer), you can get its input tensor, output tensor, input shape and output shape:

- `layer.input`
- `layer.output`
- `layer.input_shape`
- `layer.output_shape`

If the layer has multiple nodes (see [the concept of layer node and shared layers](/getting-started/functional-api-guide/#the-concept-of-layer-node)), you can use the following methods:

- `layer.get_input_at(node_index)`
- `layer.get_output_at(node_index)`
- `layer.get_input_shape_at(node_index)`
- `layer.get_output_shape_at(node_index)`
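The weight accessors above are easiest to see on a layer that has already been built. The following is a small, hedged sketch (the layer size and shapes are arbitrary and not taken from the original page) showing a `get_weights`/`set_weights` round trip:

```python
import numpy as np
from keras.layers import Dense
from keras.models import Sequential

model = Sequential([Dense(4, input_shape=(8,))])
layer = model.layers[0]

weights = layer.get_weights()   # [kernel of shape (8, 4), bias of shape (4,)]
print([w.shape for w in weights])

# `set_weights` expects arrays with exactly the same shapes.
layer.set_weights([np.zeros((8, 4)), np.zeros((4,))])
```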
keras-docs-ja/sources/layers/about-keras-layers.md/0
{ "file_path": "keras-docs-ja/sources/layers/about-keras-layers.md", "repo_id": "keras-docs-ja", "token_count": 688 }
91
# ModelクラスAPI functional APIでは,テンソルの入出力が与えられると,`Model`を以下のようにインスタンス化できます. ```python from keras.models import Model from keras.layers import Input, Dense a = Input(shape=(32,)) b = Dense(32)(a) model = Model(inputs=a, outputs=b) ``` このモデルは,`a`を入力として`b`を計算する際に必要となるあらゆる層を含むことになります. また,マルチ入力またはマルチ出力のモデルの場合は,リストを使うこともできます. ```python model = Model(inputs=[a1, a2], outputs=[b1, b2, b3]) ``` `Model`の詳しい解説は,[Keras functional API](/getting-started/functional-api-guide)をご覧ください. ## メソッド ### compile ```python compile(optimizer, loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None) ``` 学習のためのモデルを設定します. __引数__ - __optimizer__: 文字列(optimizer名)またはoptimizerのオブジェクト.詳細は[optimizers](/optimizers)を参照してください. - __loss__: 文字列(目的関数名)または目的関数.詳細は[losses](/losses)を参照してください.モデルが複数の出力を持つ場合は,オブジェクトの辞書かリストを渡すことで,各出力に異なる損失を用いることができます.モデルによって最小化される損失値は全ての個々の損失の合計になります. - __metrics__: 訓練時とテスト時にモデルにより評価される評価関数のリスト.一般的には`metrics=['accuracy']`を使うことになります.マルチ出力モデルの各出力のための各評価関数を指定するために,`metrics={'output_a': 'accuracy'}`のような辞書を渡すこともできます. - __loss_weights__: 異なるモデルの出力における損失寄与度に重み付けをするためのスカラ係数(Pythonの浮動小数点数)を表すオプションのリスト,または辞書.モデルによって最小化される損失値は,`loss_weights`係数で重み付けされた個々の損失の*加重合計*です.リストの場合,モデルの出力と1:1対応している必要があります.テンソルの場合,出力の名前(文字列)がスカラー係数に対応している必要があります. - __sample_weight_mode__: タイムステップ毎にサンプルを重み付け(2次元の重み)する場合は,この値を`"temporal"`に設定してください.`None`はデフォルト値で,サンプル毎の重み(1次元の重み)です.モデルに複数の出力がある場合,モードとして辞書かリストを渡すことで,各出力に異なる`sample_weight_mode`を使うことができます. - __weighted_metrics__: 訓練やテストの際にsample_weightまたはclass_weightにより評価と重み付けされるメトリクスのリスト. - __target_tensors__: Kerasはデフォルトでモデルのターゲットためのプレースホルダを作成します.これは訓練中にターゲットデータが入力されるものです.代わりの自分のターゲットテンソルを利用したい場合(訓練時にKerasはこれらのターゲットに対して外部のNumpyデータを必要としません)は,それらを`target_tensors`引数で指定することができます.これは単一のテンソル(単一出力モデルの場合),テンソルのリスト,または出力名をターゲットのテンソルにマッピングした辞書になります. - __**kwargs__: バックエンドにTheano/CNTKを用いる時は,これら引数は`K.function`に渡されます.Tensorflowバックエンドの場合は`tf.Session.run`に渡されます. __Raises__ - __ValueError__: `optimizer`,`loss`,`metrics`,または`sample_weight_mode`に対して無効な引数が与えられた場合. ---- ### fit ```python fit(x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False) ``` 固定回数(データセットの反復)の試行でモデルを学習させます. __引数__ - __x__: モデルが単一の入力を持つ場合は訓練データのNumpy配列,もしくはモデルが複数の入力を持つ場合はNumpy配列のリスト.モデル内のあらゆる入力に名前を当てられている場合,入力の名前とNumpy配列をマップした辞書を渡すことも可能です.フレームワーク固有のテンソル(例えばTensorFlowデータテンソル)からフィードする場合は`x`を`None`にすることもできます. - __y__: モデルが単一の入力を持つ場合は教師(targets)データのNumpy配列,もしくはモデルが複数の出力を持つ場合はNumpy配列のリスト.モデル内のあらゆる出力が名前を当てられている場合,出力の名前とNumpy配列をマップした辞書を渡すことも可能です.フレームワーク固有のテンソル(例えばTensorFlowデータテンソル)からフィードする場合は`y`を`None`にすることもできます. - __batch_size__: 整数または`None`.勾配更新毎のサンプル数を示す整数.指定しなければ`batch_size`はデフォルトで32になります. - __epochs__: 整数.訓練データ配列の反復回数を示す整数.エポックは,提供される`x`および`y`データ全体の反復です. `initial_epoch`と組み合わせると,`epochs`は"最終エポック"として理解されることに注意してください.このモデルは`epochs`で与えられた反復回数だの訓練をするわけではなく,単に`epochs`という指標に試行が達するまで訓練します. - __verbose__: 整数.0,1,2のいずれか.進行状況の表示モード.0 = 表示なし,1 = プログレスバー,2 = 各試行毎に一行の出力. - __callbacks__: - __callbacks__: 訓練時に呼ばれる`keras.callbacks.Callback`インスタンスのリスト.詳細は[callbacks](/callbacks)を参照してください. - __validation_split__: 0から1の間の浮動小数点数.バリデーションデータとして使われる訓練データの割合.モデルはこの割合の訓練データを区別し,それらでは学習を行わず,各試行の終わりにこのデータにおける損失とモデル評価関数を評価します.このバリデーションデータは,シャッフルを行う前に,与えられた`x`と`y`のデータの後ろからサンプリングされます. 
- __validation_data__: 各試行の最後に損失とモデル評価関数を評価するために用いられる`(x_val, y_val)`のタプル,または`(val_x, val_y, val_sample_weights)`のタプル.モデルはこのデータで学習を行いません.`validation_data`は`validation_split`を上書きします. - __shuffle__: 真理値(訓練データを各試行の前にシャッフルするかどうか)または文字列('batch'の場合).'batch'はHDF5データの限界を扱うための特別なオプションです.バッチサイズのチャンクでシャッフルします.`steps_per_epoch`が`None`でない場合には効果がありません. - __class_weight__: クラスのインデックスと重み(浮動小数点数)をマップするオプションの辞書で,訓練時に各クラスのサンプルに関するモデルの損失に適用します.これは過小評価されたクラスのサンプルに「より注意を向ける」ようモデルに指示するために有用です. - __sample_weight__: オプションのNumpy配列で訓練サンプルの重みです.(訓練時のみ)損失関数への重み付けに用いられます.(重みとサンプルが1:1対応するように)入力サンプルと同じ長さの1次元Numpy配列を渡すこともできますし,時系列データの場合には,`(samples, sequence_length)`の形式の2次元配列を渡すことができ,各サンプルの各タイムステップに異なる重みを割り当てられます.この場合,`compile()`内で,`sample_weight_mode="temporal"`と指定するようにします. - __initial_epoch__: 整数.訓練を開始するエポック(前回の学習を再開するのに便利です). - __steps_per_epoch__: 整数または`None`.終了した1エポックを宣言して次のエポックを始めるまでのステップ数の合計(サンプルのバッチ).TensorFlowのデータテンソルのような入力テンソルを使用して訓練する場合,デフォルトの`None`はデータセットのサンプル数をバッチサイズで割ったものに等しくなります.それが決定できない場合は1になります. - __validation_steps__: `steps_per_epoch`を指定している場合のみ関係します.停止する前にバリデーションするステップの総数(サンプルのバッチ). - __validation_freq__: `validation_data`が指定されている場合のみ関係します. 型は整数,リスト,タプル,集合のいずれかです. 整数として与えられた場合, 評価が行われるまでの間に何エポックの学習を行うかを意味します. 例えば `validation_freq=2` の時, 評価は2エポック毎に行われます. リスト,タプル,集合として与えられた場合, 何エポック目の後に評価が行われるのかを意味します. 例えば `validation_freq=[1, 2, 10]` の時, 評価は1エポック目, 2エポック目, 10エポック目の後に行われます. - __max_queue_size__: 整数.ジェネレータのキューのための最大サイズ. 指定しなければ`max_queue_size`はデフォルトで10になります. - __workers__: 整数.ジェネレータ,もしくは`keras.utils.Sequence`が入力として与えられた場合のみ使用されます. スレッドベースのプロセス使用時の最大プロセス数.指定しなければ`workers`はデフォルトで1になります.もし0ならジェネレータはメインスレッドで実行されます. - __use_multiprocessing__: 真理値.`True`ならスレッドベースのプロセスを使います.指定しなければ`use_multiprocessing`はデフォルトでFalseになります.実装がmultiprocessingに依存しているため,子プロセスに簡単に渡すことができないものとしてPickableでない引数をジェネレータに渡すべきではないことに注意してください. - __**kwargs__: バックエンドの互換性のために使用されます. __戻り値__ `History` オブジェクト.`History.history`属性は 実行に成功したエポックにおける訓練の損失値と評価関数値の記録と,(適用可能ならば)検証における損失値と評価関数値も記録しています. __Raises__ - __RuntimeError__: モデルがコンパイルされていない場合. - __ValueError__: 与えられた入力データとモデルが期待するものとが異なる場合. ---- ### evaluate ```python evaluate(x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False) ``` テストモードにおいて,モデルの損失値と評価値を返します. その計算はバッチ処理で行われます. __引数__ - __x__: モデルが単一の入力を持つ場合は訓練データのNumpy配列,もしくはモデルが複数の入力を持つ場合はNumpy配列のリスト.モデル内のあらゆる入力に名前を当てられている場合,入力の名前とNumpy配列をマップした辞書を渡すことも可能です.フレームワーク固有のテンソル(例えばTensorFlowデータテンソル)からフィードする場合は`x`を`None`にすることもできます. - __y__: モデルが単一の入力を持つ場合は教師(targets)データのNumpy配列,もしくはモデルが複数の出力を持つ場合はNumpy配列のリスト.モデル内のあらゆる出力が名前を当てられている場合,出力の名前とNumpy配列をマップした辞書を渡すことも可能です.フレームワーク固有のテンソル(例えばTensorFlowデータテンソル)からフィードする場合は`y`を`None`にすることもできます. - __batch_size__: 整数または`None`.勾配更新毎のサンプル数を示す整数.指定しなければ`batch_size`はデフォルトで32になります. - __verbose__: 0または1.進行状況の表示モード.0 = 表示なし,1 = プログレスバー. - __sample_weight__: オプションのNumpy配列で訓練サンプルの重みです.(訓練時のみ)損失関数への重み付けに用いられます.(重みとサンプルが1:1対応するように)入力サンプルと同じ長さの1次元Numpy配列を渡すこともできますし,時系列データの場合には,`(samples, sequence_length)`の形式の2次元配列を渡すことができ,各サンプルの各タイムステップに異なる重みを割り当てられます.この場合,`compile()`内で,`sample_weight_mode="temporal"`と指定するようにします. - __steps__: 整数または`None`.評価ラウンド終了を宣言するまでの総ステップ数(サンプルのバッチ).デフォルト値の`None`ならば無視されます. - __callbacks__: - __callbacks__: 評価時に呼ばれる`keras.callbacks.Callback`インスタンスのリスト. 詳細は[callbacks](/callbacks)を参照してください. - __max_queue_size__: 整数.ジェネレータのキューのための最大サイズ. 指定しなければ`max_queue_size`はデフォルトで10になります. - __workers__: 整数.ジェネレータ,もしくは`keras.utils.Sequence`が入力として与えられた場合のみ使用されます. 
スレッドベースのプロセス使用時の最大プロセス数.指定しなければ`workers`はデフォルトで1になります.もし0ならジェネレータはメインスレッドで実行されます. - __use_multiprocessing__: 真理値.`True`ならスレッドベースのプロセスを使います.指定しなければ`use_multiprocessing`はデフォルトでFalseになります.実装がmultiprocessingに依存しているため,子プロセスに簡単に渡すことができないものとしてPickableでない引数をジェネレータに渡すべきではないことに注意してください. __戻り値__ テストの損失を表すスカラ値(モデルが単一の出力を持ち,かつ評価関数がない場合),またはスカラ値のリスト(モデルが複数の出力や評価関数を持つ場合).`model.metrics_names`属性はスカラ出力の表示ラベルを提示します. ---- ### predict ```python predict(x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False) ``` 入力サンプルに対する予測の出力を生成します. その計算はバッチ処理で行われます. __引数__ - __x__: Numpy配列の入力データ(もしくはモデルが複数の出力を持つ場合はNumpy配列のリスト). - __batch_size__: 整数値.指定しなければデフォルトで32になります. - __verbose__: 進行状況の表示モードで,0または1. - __steps__: 予測ラウンド終了を宣言するまでの総ステップ数(サンプルのバッチ).デフォルト値の`None`ならば無視されます. - __callbacks__: - __callbacks__: 予測時に呼ばれる`keras.callbacks.Callback`インスタンスのリスト. 詳細は[callbacks](/callbacks)を参照してください. - __max_queue_size__: 整数.ジェネレータのキューのための最大サイズ. 指定しなければ`max_queue_size`はデフォルトで10になります. - __workers__: 整数.ジェネレータ,もしくは`keras.utils.Sequence`が入力として与えられた場合のみ使用されます. スレッドベースのプロセス使用時の最大プロセス数.指定しなければ`workers`はデフォルトで1になります.もし0ならジェネレータはメインスレッドで実行されます. - __use_multiprocessing__: 真理値.`True`ならスレッドベースのプロセスを使います.指定しなければ`use_multiprocessing`はデフォルトでFalseになります.実装がmultiprocessingに依存しているため,子プロセスに簡単に渡すことができないものとしてPickableでない引数をジェネレータに渡すべきではないことに注意してください. __戻り値__ 予測結果のNumpy配列. __Raises__ - __ValueError__: 与えられた入力データとモデルが期待するものが異なる場合,またはステートフルなモデルがバッチサイズの倍数でないサンプル数を受け取った場合. ---- ### train_on_batch ```python train_on_batch(x, y, sample_weight=None, class_weight=None, reset_metrics=True) ``` 単一バッチデータにつき一度の勾配更新を行います. __引数__ - __x__: モデルが単一の入力を持つ場合は訓練データのNumpy配列,もしくはモデルが複数の入力を持つ場合はNumpy配列のリスト.モデル内のあらゆる入力に名前を当てられている場合,入力の名前とNumpy配列をマップした辞書を渡すことも可能です. - __y__: モデルが単一の入力を持つ場合は教師(targets)データのNumpy配列,もしくはモデルが複数の出力を持つ場合はNumpy配列のリスト.モデル内のあらゆる出力が名前を当てられている場合,出力の名前とNumpy配列をマップした辞書を渡すことも可能です. - __sample_weight__: オプションのNumpy配列で訓練サンプルの重みです.(訓練時のみ)損失関数への重み付けに用いられます.(重みとサンプルが1:1対応するように)入力サンプルと同じ長さの1次元Numpy配列を渡すこともできますし,時系列データの場合には,`(samples, sequence_length)`の形式の2次元配列を渡すことができ,各サンプルの各タイムステップに異なる重みを割り当てられます.この場合,`compile()`内で,`sample_weight_mode="temporal"`と指定するようにします. - __class_weight__: クラスのインデックスと重み(浮動小数点数)をマップするオプションの辞書で,訓練時に各クラスのサンプルに関するモデルの損失に適用します.これは過小評価されたクラスのサンプルに「より注意を向ける」ようモデルに指示するために有用です. - __reset_metrics__: `True`の場合, 与えられたバッチに対する評価値のみが返されます. `False`の場合, 評価値はバッチをまたいで計算されます. __戻り値__ 学習の損失を表すスカラ値(モデルが単一の出力を持ち,かつ評価関数がない場合),またはスカラ値のリスト(モデルが複数の出力や評価関数を持つ場合).`model.metrics_names`属性はスカラ出力の表示ラベルを提示します. ---- ### test_on_batch ```python test_on_batch(x, y, sample_weight=None, reset_metrics=True) ``` サンプルの単一バッチでモデルをテストします. __引数__ - __x__: テストデータのNumpy配列,もしくはモデルが複数の入力を持つ場合はNumpy配列のリスト. モデル内のあらゆる入力が名前を当てられている場合,入力の名前とNumpy配列をマップした辞書を渡すことも可能です. - __y__: 教師データのNumpy配列,もしくはモデルが複数の出力を持つ場合はNumpy配列のリスト. モデル内のあらゆる出力が名前を当てられている場合,出力の名前とNumpy配列をマップした辞書を渡すことも可能です. - __sample_weight__: オプションのNumpy配列で訓練サンプルの重みです.(訓練時のみ)損失関数への重み付けに用いられます.(重みとサンプルが1:1対応するように)入力サンプルと同じ長さの1次元Numpy配列を渡すこともできますし,時系列データの場合には,`(samples, sequence_length)`の形式の2次元配列を渡すことができ,各サンプルの各タイムステップに異なる重みを割り当てられます.この場合,`compile()`内で,`sample_weight_mode="temporal"`と指定するようにします. - __reset_metrics__: `True`の場合, 与えられたバッチに対する評価値のみが返されます. `False`の場合, 評価値はバッチをまたいで計算されます. __戻り値__ テストの損失を表すスカラ値(モデルが単一の出力を持ち,かつ評価関数がない場合),またはスカラ値のリスト(モデルが複数の出力や評価関数を持つ場合).`model.metrics_names`属性はスカラ出力の表示ラベルを提示します. ---- ### predict_on_batch ```python predict_on_batch(x) ``` サンプルの単一バッチに関する予測を返します. __引数__ - __x__: 入力データ,Numpy配列. __戻り値__ 予測値を格納したNumpy配列. 
---- ### fit_generator ```python fit_generator(generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0) ``` Pythonジェネレータ(または`Sequence`のインスタンス)によりバッチ毎に生成されたデータでモデルを訓練します. 本ジェネレータは効率性のためモデルに並列して実行されます.例えば,モデルをGPUで学習させながらCPU上で画像のリアルタイムデータ拡張を行うことができるようになります. `use_multiprocessing=True`のときに,`keras.utils.Sequence`を使うことで順序とエポックごとに全入力を1度だけ使用することを保証します. __引数__ - __generator__: ジェネレータかマルチプロセッシング時にデータの重複を防ぐための`Sequence`(`keras.utils.Sequence`)オブジェクトのインスタンス.本ジェネレータの出力は,以下のいずれかです. - `(inputs, targets)`のタプル. - `(inputs, targets, sample_weights)`のタプル.このタプル(単一出力のジェネレータ)は単一のバッチを作ります.つまり,このタプルにある全ての配列は全て同じ長さ(バッチサイズと等しい)でなければなりません.バッチによってサイズが異なる場合もあります.例えば,データセットのサイズがバッチサイズで割り切れない場合,一般的にエポックの最後のバッチはそれ以外よりも小さくなります.このジェネレータはデータが無限にループすることを期待します.`steps_per_epoch`数のサンプルがモデルに与えられると1度の試行が終了します. - __steps_per_epoch__: ある一つのエポックが終了し,次のエポックが始まる前に`generator`から使用する総ステップ数(サンプルのバッチ数).もし,データサイズをバッチサイズで割った時,通常ユニークなサンプル数に等しくなります.`Sequence`のオプション:指定されていない場合は,`len(generator)`をステップ数として使用します. - __epochs__: 整数.モデルを訓練させるエポック数. エポックは与えられたデータ全体の反復で,`steps_per_epoch`で定義されます. `initial_epoch`と組み合わせると,`epochs`は「最終エポック」として理解されることに注意してください.このモデルは`epochs`で与えられた反復回数の訓練をするわけではなく,単に`epochs`という指標に試行が達するまで訓練します. - __verbose__: 整数.0,1,2のいずれか.進行状況の表示モード.0 = 表示なし,1 = プログレスバー,2 = 各試行毎に一行の出力. - __callbacks__: - __callbacks__: 訓練時に呼ばれる`keras.callbacks.Callback`インスタンスのリスト.詳細は[callbacks](/callbacks)を参照してください. - __validation_data__: これは以下のいずれかです. - バリデーションデータ用のジェネレータ. - (inputs, targets)のタプル. - (inputs, targets, sample_weights)のタプル.各エポックの最後に損失関数やモデルの評価関数の評価に用いられます.このデータは学習には使われません. - __validation_steps__: `validation_data`がジェネレータの場合にのみ関係します.終了する前に`generator`から使用する総ステップ数(サンプルのバッチ数).`Sequence`のオプション:指定されていない場合は,`len(validation_data)`をステップ数として使用します. - __class_weight__: クラスインデックスと各クラスの重みをマップする辞書です. (訓練のときだけ)損失関数の重み付けに使われます. 過小評価されたクラスのサンプルに「より注意を向ける」場合に有用です. - __max_queue_size__: 整数.ジェネレータのキューのための最大サイズ. 指定しなければ`max_queue_size`はデフォルトで10になります. - __workers__: 整数.スレッドベースのプロセス使用時の最大プロセス数.指定しなければ`workers`はデフォルトで1になります.もし0ならジェネレータはメインスレッドで実行されます. - __use_multiprocessing__: 真理値.`True`ならスレッドベースのプロセスを使います.指定しなければ`use_multiprocessing`はデフォルトでFalseになります.実装がmultiprocessingに依存しているため,子プロセスに簡単に渡すことができないものとしてPickableでない引数をジェネレータに渡すべきではないことに注意してください. - __shuffle__: 真理値.各試行の初めにバッチの順番をシャッフルするかどうか.`Sequence`(`keras.utils.Sequence`)の時のみ使用されます. - __initial_epoch__: 整数.学習を開始するエポック(前回の学習を再開するのに便利です). __戻り値__ `History`オブジェクト.`History.history` 属性は 実行に成功したエポックにおける訓練の損失値と評価関数値の記録と,(適用可能ならば)検証における損失値と評価関数値も記録しています. __Raises__ - __ValueError__: ジェネレータが無効なフォーマットのデータを使用した場合. __例__ ```python def generate_arrays_from_file(path): while True: with open(path) as f: for line in f: # create numpy arrays of input data # and labels, from each line in the file x1, x2, y = process_line(line) yield ({'input_1': x1, 'input_2': x2}, {'output': y}) model.fit_generator(generate_arrays_from_file('/my_file.txt'), steps_per_epoch=10000, epochs=10) ``` ---- ### evaluate_generator ```python evaluate_generator(generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0) ``` データジェネレータでモデルを評価します. ジェネレータは`test_on_batch`で受け取られたのと同じ種類のデータを返します. __引数__: - __generator__: ジェネレータは(inputs, targets)タプルもしくは(inputs, targets, sample_weights)タプルかマルチプロセッシング時にデータの重複を防ぐためのSequence (keras.utils.Sequence) オブジェクトのインスタンスを使用します. - __steps__: 終了する前に`generator`から使用する総ステップ数(サンプルのバッチ数).`Sequence`のオプション:指定されていない場合は,`len(generator)`をステップ数として使用します. 
- __max_queue_size__: ジェネレータのキューのための最大サイズ. - __workers__: 整数.スレッドベースのプロセス使用時の最大プロセス数.指定しなければ`workers`はデフォルトで1になります.もし0ならジェネレータはメインスレッドで実行されます. - __use_multiprocessing__: `True`ならスレッドベースのプロセスを使います.実装がmultiprocessingに依存しているため,子プロセスに簡単に渡すことができないものとしてPickableでない引数をジェネレータに渡すべきではないことに注意してください. - __verbose__: 進行状況の表示モードで,0または1. __戻り値__ テストの損失を表すスカラ値(モデルが単一の出力を持ち,かつ評価関数がない場合),またはスカラ値のリスト(モデルが複数の出力や評価関数を持つ場合).`model.metrics_names`属性はスカラ出力の表示ラベルを提示します. __Raises__ - __ValueError__: ジェネレータが無効なフォーマットのデータを使用した場合. ---- ### predict_generator ```python predict_generator(generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0) ``` データジェネレータから得た入力サンプルに対する予測を生成します. ジェネレータは`predict_on_batch`が受け取るデータと同じ種類のデータを返します. __引数__ - __generator__: 入力サンプルのバッチかマルチプロセッシング時にデータの重複を防ぐためのSequence (keras.utils.Sequence) オブジェクトのインスタンスを生成するジェネレータ. - __steps__: 終了する前に`generator`から使用する総ステップ数(サンプルのバッチ数).`Sequence`のオプション:指定されていない場合は,`len(generator)`をステップ数として使用します. - __max_queue_size__: ジェネレータのキューの最大サイズ. - __workers__: 整数.スレッドベースのプロセス使用時の最大プロセス数.指定しなければ`workers`はデフォルトで1になります.もし0ならジェネレータはメインスレッドで実行されます. - __use_multiprocessing__: `True`ならスレッドベースのプロセスを使います.実装がmultiprocessingに依存しているため,子プロセスに簡単に渡すことができないものとしてPickableでない引数をジェネレータに渡すべきではないことに注意してください. - __verbose__: 進行状況の表示モードで,0または1. __戻り値__ 予測値のNumpy配列. __Raises__ - __ValueError__: ジェネレータが無効なフォーマットのデータを使用した場合. ---- ### get_layer ```python get_layer(name=None, index=None) ``` (ユニークな)名前,またはインデックスに基づきレイヤーを探します. `name`と`index`の両方が与えられた場合,`index`が優先されます. インデックスはボトムアップの幅優先探索の順番に基づきます. __引数__ - __name__: レイヤーの名前を表す文字列. - __index__: レイヤーのインデックスを表す整数. __戻り値__ レイヤーのインスタンス. __Raises__ - __ValueError__: 無効なレイヤーの名前,またはインデックスの場合.
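A brief, hedged illustration (added here for reference; it is not part of the original page) of the batch-level API described above, `train_on_batch` and `test_on_batch`. The model, shapes and data are arbitrary placeholders:

```python
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(32,))
outputs = Dense(1, activation='sigmoid')(inputs)
model = Model(inputs, outputs)
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# One gradient update per call to `train_on_batch`.
for step in range(10):
    x_batch = np.random.random((16, 32))
    y_batch = np.random.randint(2, size=(16, 1))
    loss, acc = model.train_on_batch(x_batch, y_batch)

# Evaluate a single held-out batch without updating the weights.
print(model.test_on_batch(np.random.random((16, 32)),
                          np.random.randint(2, size=(16, 1))))
```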
keras-docs-ja/sources/models/model.md/0
{ "file_path": "keras-docs-ja/sources/models/model.md", "repo_id": "keras-docs-ja", "token_count": 15709 }
92
# 케라스 함수형 API 시작하기 케라스 함수형 API는 다중 출력 모델<sub>multi-output model</sub>, 유향 비순환 그래프<sub>directed acyclic graphs</sub>, 혹은 층<sub>layer</sub>을 공유하는 모델과 같이 복잡한 모델을 정의하는 최적의 방법입니다. 이 가이드는 독자가 이미 `Sequential` 모델에 대한 배경 지식이 있다고 가정합니다. 간단한 예시로 시작합시다. ----- ## 첫 번째 예시: 완전 연결 신경망<sub>densely-connected network</sub> 완전 연결 신경망은 `Sequential` 모델로 만드는 것이 더 적합하지만, 간단한 예시를 위해 케라스 함수형 API로 구현해 보겠습니다. - 층 인스턴스는 텐서를 호출할 수 있고, 텐서를 반환합니다. - 입력<sub>input</sub> 텐서와 출력<sub>output</sub> 텐서을 통해 `Model`을 정의할 수 있습니다. - 이러한 모델은 케라스 `Sequential` 모델과 동일한 방식으로 학습됩니다. ```python from keras.layers import Input, Dense from keras.models import Model # 텐서를 반환합니다. inputs = Input(shape=(784,)) # 층 인스턴스는 텐서를 호출할 수 있는 객체이며, 텐서를 반환합니다. output_1 = Dense(64, activation='relu')(inputs) output_2 = Dense(64, activation='relu')(output_1) predictions = Dense(10, activation='softmax')(output_2) # 입력 층과 3개의 완전 연결 층을 # 포함하는 모델을 만듭니다. model = Model(inputs=inputs, outputs=predictions) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels) # 학습을 시작합니다. ``` ----- ## 모든 모델은 층 인스턴스처럼 호출할 수 있습니다. 함수형 API를 사용하면 학습된 모델을 재사용하기 편리합니다. 어느 모델이건 텐서를 호출하여 층 인스턴스처럼 사용할 수 있습니다. 모델을 호출하면 모델의 *구조*만 재사용하는 것이 아니라 가중치<sub>weights</sub>까지 재사용하게 됩니다. ```python x = Input(shape=(784,)) # 위에서 정의한 10-way softmax를 반환합니다. y = model(x) ``` 예를 들어, 시퀀스 데이터를 처리할 수 있는 모델을 빠르게 만들 수 있습니다. 코드 한 줄로 이미지 분류 모델을 비디오 분류 모델로 바꿀 수 있습니다. ```python from keras.layers import TimeDistributed # 입력 텐서: 20개의 시간 단계를 갖는 시퀀스. # 각 시간 단계는 784차원의 벡터입니다. input_sequences = Input(shape=(20, 784)) # 입력 시퀀스의 모든 시간 단계에 위에서 만든 `model`을 적용합니다. # `model`의 출력이 10-way softmax였으므로, # 아래에 주어진 층 인스턴스의 출력값은 10차원 벡터 20개로 이루어진 시퀀스입니다. processed_sequences = TimeDistributed(model)(input_sequences) ``` ----- ## 다중 입력<sub>multi-input</sub>과 다중 출력 모델 함수형 API는 다중 입력과 출력을 가지는 모델에 적합합니다. 함수형 API를 사용하면 대량의 복잡한 데이터 스트림을 간편하게 관리할 수 있습니다. 다음의 경우를 생각해봅시다. 트위터에서 특정 뉴스 헤드라인이 얼마나 많은 리트윗과 좋아요를 받을지 예측하려고 합니다. 모델에서 가장 중요한 입력값은 단어의 시퀀스로 표현된 헤드라인 자체입니다. 하지만 모델의 성능을 높이기 위해 헤드라인이 게시된 시간 등과 같은 데이터를 추가로 입력 받도록 합시다. 모델 학습에는 두 개의 손실 함수<sub>loss function</sub>가 사용됩니다. 층이 많은 모델에서는 주요 손실 함수를 모델의 초기 단계에 사용하는 것이 적절한 정규화<sub>regularization</sub> 방법입니다. 모델은 아래와 같이 구성되어있습니다. <img src="https://s3.amazonaws.com/keras.io/img/multi-input-multi-output-graph.png" alt="multi-input-multi-output-graph" style="width: 400px;"/> 함수형 API로 모델을 구현해 봅시다. 주요 입력으로 헤드라인이 전달됩니다. 헤드라인은 하나의 `int`가 단어 하나를 인코딩하는 `int` 시퀀스 형태입니다. `int`는 1에서 10,000사이의 값을 갖습니다(10,000 단어의 어휘목록). 하나의 `int` 시퀀스는 100개의 단어로 이루어져 있습니다. ```python from keras.layers import Input, Embedding, LSTM, Dense from keras.models import Model import numpy as np np.random.seed(0) # 시드값을 설정해서 항상 동일한 결과가 나오도록 합니다. # 주요 입력인 헤드라인: 1에서 10000 사이 값을 갖는 `int` 100개로 이루어진 시퀀스. # "name" 인자를 통해 층 인스턴스의 이름을 지정할 수 있습니다. main_input = Input(shape=(100,), dtype='int32', name='main_input') # `Embedding` 층은 입력 시퀀스를 # 512차원 완전 연결 벡터들의 시퀀스로 인코딩합니다. x = Embedding(output_dim=512, input_dim=10000, input_length=100)(main_input) # LSTM은 벡터 시퀀스를 전체 시퀀스에 대한 # 정보를 포함하는 한 개의 벡터로 변환합니다. lstm_out = LSTM(32)(x) ``` 보조 손실 함수를 통해 모델에서 주요 손실 함수의 손실<sub>main loss</sub>이 크더라도 `LSTM` 및 `Embedding` 층을 원활하게 학습하도록 합니다. ```python auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_out) ``` 이제, 보조 입력 데이터를 LSTM 출력에 이어붙여<sub>concatenate</sub> 모델에 전달합니다. 
```python auxiliary_input = Input(shape=(5,), name='aux_input') x = keras.layers.concatenate([lstm_out, auxiliary_input]) # 깊은 완전 연결 신경망을 맨 위에 쌓습니다 x = Dense(64, activation='relu')(x) x = Dense(64, activation='relu')(x) x = Dense(64, activation='relu')(x) # 마지막으로 주요 로지스틱 회귀 층을 추가합니다 main_output = Dense(1, activation='sigmoid', name='main_output')(x) ``` 다음은 두 개의 입력과 두 개의 출력을 갖는 모델을 정의합니다. ```python model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output, auxiliary_output]) ``` 보조 손실의 가중치를 0.2로 설정하고 모델을 컴파일합니다. 리스트나 딕셔너리를 사용해서 각각의 출력에 서로 다른 `loss_weights` 혹은 `loss`를 지정할 수 있습니다. 여기서는 `loss` 인자<sub>argument</sub>가 한 개의 손실 함수이므로 모든 출력에 동일한 손실 함수를 사용합니다. ```python model.compile(optimizer='rmsprop', loss='binary_crossentropy', loss_weights=[1., 0.2]) ``` 입력 배열과 타겟 배열의 리스트를 전달하여 모델을 학습시킬 수 있습니다. ```python headline_data = np.round(np.abs(np.random.rand(12, 100) * 100)) additional_data = np.random.randn(12, 5) headline_labels = np.random.randn(12, 1) additional_labels = np.random.randn(12, 1) model.fit([headline_data, additional_data], [headline_labels, additional_labels], epochs=50, batch_size=32) ``` 입력 층 인스턴스와 출력 층 인스턴스에 이름을 지정했으므로(각 층 인스턴스의 `name` 인자를 통해 이름을 지정했습니다), 다음과 같은 방식으로도 모델을 컴파일 할 수 있습니다. ```python model.compile(optimizer='rmsprop', loss={'main_output': 'binary_crossentropy', 'aux_output': 'binary_crossentropy'}, loss_weights={'main_output': 1., 'aux_output': 0.2}) # 모델을 다음과 같은 방식으로도 학습시킬 수 있습니다. model.fit({'main_input': headline_data, 'aux_input': additional_data}, {'main_output': headline_labels, 'aux_output': additional_labels}, epochs=50, batch_size=32) ``` 다음 두 가지 방법을 통해 예측을 할 수 있습니다. ```python model.predict({'main_input': headline_data, 'aux_input': additional_data}) ``` ```python pred = model.predict([headline_data, additional_data]) ``` ----- ## 공유 층<sub>shared layers</sub> 공유 층을 사용하는 모델은 함수형 API가 유용한 경우 중 하나입니다. 공유 층에 대해서 알아봅시다. 트윗 데이터셋을 생각해봅시다. 서로 다른 두 개의 트윗을 동일한 사람이 작성했는지 구별하는 모델을 만들고자 합니다(예를 들어, 트윗의 유사성을 기준으로 사용자를 비교할 수 있습니다). 이 문제를 해결하는 한 가지 방법은 두 개의 트윗을 각각 벡터로 인코딩하고 두 벡터를 연결한 후 로지스틱 회귀를 수행하는 모델을 만드는 것입니다. 이 모델은 서로 다른 두 트윗을 같은 사람이 작성할 확률을 출력합니다. 그런 다음 모델은 긍정적인 트윗 쌍과 부정적인 트윗 쌍을 통해 학습됩니다. 문제가 대칭적이므로 긍정적인 트윗 쌍을 인코딩하는 메커니즘과 같은 방법으로 부정적인 트윗 쌍을 인코딩해야 합니다. 가중치 등 모든 값이 재사용됩니다. 이 예시에서는 공유된 LSTM 층을 사용해 트윗을 인코딩 합니다. 함수형 API로 모델을 만들어 봅시다. 트윗에 대한 입력으로 `(280, 256)` 형태의 이진 행렬(256 차원의 벡터 280개로 이루어진 시퀀스)을 받습니다(여기서 256 차원 벡터의 각 차원은 가장 빈번하게 사용되는 256개의 문자의 유무를 인코딩합니다). ```python import keras from keras.layers import Input, LSTM, Dense from keras.models import Model tweet_a = Input(shape=(280, 256)) tweet_b = Input(shape=(280, 256)) ``` 여러 종류의 입력에 동일한 층을 사용하려면, 층을 한 번 인스턴스화하고 필요한 만큼 층 인스턴스를 호출하면 됩니다. ```python # 이 층 인스턴스는 행렬을 입력 받아 # 크기가 64인 벡터를 반환합니다. shared_lstm = LSTM(64) # 동일한 층 인스턴스를 # 여러 번 재사용하는 경우, layer의 # 가중치 또한 재사용됩니다. # (그렇기에 이는 *동일한* 레이어입니다) encoded_a = shared_lstm(tweet_a) encoded_b = shared_lstm(tweet_b) # 이제 두 벡터를 이어붙입니다. merged_vector = keras.layers.concatenate([encoded_a, encoded_b], axis=-1) # 로지스틱 회귀를 추가합니다. predictions = Dense(1, activation='sigmoid')(merged_vector) # 트윗을 입력받아 예측하는 # 학습 가능한 모델을 정의합니다. model = Model(inputs=[tweet_a, tweet_b], outputs=predictions) model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) model.fit([data_a, data_b], labels, epochs=10) ``` 여기서 잠깐, 공유 층 인스턴스의 출력 또는 출력 형태<sub>shape</sub>에 대해 살펴봅시다. ----- ## "노드"의 개념 어떤 입력에 대해서 층 인스턴스를 호출하면, 새로운 텐서(층 인스턴스의 출력)가 생성됩니다. 또한 입력 텐서와 출력 텐서를 연결하는 "노드"가 층 인스턴스에 추가됩니다. 동일한 층 인스턴스를 여러 번 호출하면, 층 인스턴스는 0, 1, 2…와 같은 인덱스가 달린 여러 개의 노드를 갖게 됩니다. 
케라스의 이전 버전에서는 `layer.get_output()`와 `layer.output_shape`를 통해 층 인스턴스의 출력 텐서와 출력 형태를 얻을 수 있었습니다. 여전히 이 방법을 사용할 수 있지만 층 인스턴스가 여러 개의 입력과 연결된 경우에는 어떻게 할 수 있을까요(단, `get_output()`은 `output`이라는 속성으로 대체되었습니다)? 층 인스턴스가 하나의 입력에만 연결되어 있으면 `.output`은 층 인스턴스의 단일 출력을 반환합니다. ```python a = Input(shape=(280, 256)) lstm = LSTM(32) encoded_a = lstm(a) assert lstm.output == encoded_a ``` 층 인스턴스가 여러 개의 입력과 연결된 경우에는 오류가 발생합니다. ```python a = Input(shape=(280, 256)) b = Input(shape=(280, 256)) lstm = LSTM(32) encoded_a = lstm(a) encoded_b = lstm(b) lstm.output ``` ``` >> AttributeError: Layer lstm_1 has multiple inbound nodes, hence the notion of "layer output" is ill-defined. Use `get_output_at(node_index)` instead. ``` 다음 코드는 제대로 작동합니다. ```python assert lstm.get_output_at(0) == encoded_a assert lstm.get_output_at(1) == encoded_b ``` 간단하죠? `input_shape`와 `output_shape`의 경우도 마찬가지입니다. 층 인스턴스가 하나의 노드를 갖거나 모든 노드가 동일한 입력/출력 형태를 가지는 경우에는 `layer output`/`input shape`가 자동으로 결정됩니다. `layer.output_shape`/`layer.input_shape`는 하나의 값을 갖습니다. 하지만 `Conv2D` 층을 `(32, 32, 3)` 형태의 입력에 대해서 호출한 다음 `(64, 64, 3)` 형태의 입력에 대해서도 호출하면, 층 인스턴스는 여러 개의 입력/출력 형태를 가지게 됩니다. 이 경우에는 노드의 인덱스를 지정해야 오류가 발생하지 않습니다. ```python a = Input(shape=(32, 32, 3)) b = Input(shape=(64, 64, 3)) conv = Conv2D(16, (3, 3), padding='same') conved_a = conv(a) # 현재는 입력이 하나이므로 아래의 코드에서 오류가 발생하지 않습니다. assert conv.input_shape == (None, 32, 32, 3) conved_b = conv(b) # 이제 `.input_shape`은 오류가 발생하지만, 다음의 코드에서는 오류가 발생하지 않습니다. assert conv.get_input_shape_at(0) == (None, 32, 32, 3) assert conv.get_input_shape_at(1) == (None, 64, 64, 3) ``` ----- ## 추가 예시 몇 가지 예시를 더 살펴봅시다. ### Inception 모듈 Inception 구조에 대해서 더 알고 싶다면, [Going Deeper with Convolutions](http://arxiv.org/abs/1409.4842)를 참고하십시오. ```python from keras.layers import Conv2D, MaxPooling2D, Input input_img = Input(shape=(256, 256, 3)) tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img) tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1) tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img) tower_2 = Conv2D(64, (5, 5), padding='same', activation='relu')(tower_2) tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img) tower_3 = Conv2D(64, (1, 1), padding='same', activation='relu')(tower_3) output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=1) ``` ### 합성곱 층<sub>convolution layer</sub>에 대한 Residual connection Residual network에 대해 더 알고 싶다면, [Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385)를 참고하십시오. ```python from keras.layers import Conv2D, Input # 3개의 채널을 가진 256x256 이미지에 대한 입력 텐서 x = Input(shape=(256, 256, 3)) # 입력 채널과 같은 3개의 출력 채널을 가지는 3x3 합성곱 y = Conv2D(3, (3, 3), padding='same')(x) # x + y를 반환합니다 z = keras.layers.add([x, y]) ``` ### 공유 시각 모델 이 모델은 동일한 이미지 처리 모듈을 두 개의 입력에 적용하여, 두 MNIST 숫자가 같은 숫자인지 판단합니다. ```python from keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten from keras.models import Model # 우선, 시각 모듈을 정의합니다. digit_input = Input(shape=(27, 27, 1)) x = Conv2D(64, (3, 3))(digit_input) x = Conv2D(64, (3, 3))(x) x = MaxPooling2D((2, 2))(x) out = Flatten()(x) vision_model = Model(digit_input, out) # 숫자 분류 모델을 정의합니다. digit_a = Input(shape=(27, 27, 1)) digit_b = Input(shape=(27, 27, 1)) # 시각 모델은 가중치 등을 포함한 모든 것을 공유합니다. 
out_a = vision_model(digit_a) out_b = vision_model(digit_b) concatenated = keras.layers.concatenate([out_a, out_b]) out = Dense(1, activation='sigmoid')(concatenated) classification_model = Model([digit_a, digit_b], out) ``` ### 이미지에 대한 질문에 답변하는 모델 이 모델은 이미지에 대한 자연어 질문에 대해 한 단어짜리 정답을 제시합니다. 이 모델은 이미지를 벡터로 인코딩하여 이어붙이고 정답이 될 수 있는 단어 목록에 로지스틱 회귀를 학습합니다. ```python from keras.layers import Conv2D, MaxPooling2D, Flatten from keras.layers import Input, LSTM, Embedding, Dense from keras.models import Model, Sequential # 우선 Sequential 모델을 사용해서 이미지 모델을 정의합니다. # 다음 모델은 이미지를 벡터로 인코딩합니다. vision_model = Sequential() vision_model.add(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(224, 224, 3))) vision_model.add(Conv2D(64, (3, 3), activation='relu')) vision_model.add(MaxPooling2D((2, 2))) vision_model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) vision_model.add(Conv2D(128, (3, 3), activation='relu')) vision_model.add(MaxPooling2D((2, 2))) vision_model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) vision_model.add(Conv2D(256, (3, 3), activation='relu')) vision_model.add(Conv2D(256, (3, 3), activation='relu')) vision_model.add(MaxPooling2D((2, 2))) vision_model.add(Flatten()) # 이미지 모델의 출력 텐서를 얻습니다. image_input = Input(shape=(224, 224, 3)) encoded_image = vision_model(image_input) # 다음은 질문을 벡터로 인코딩할 언어 모델을 정의합니다 # 질문은 최대 100 단어입니다. # 각 단어에 1에서 9999까지의 `int` 인덱스를 부여합니다. question_input = Input(shape=(100,), dtype='int32') embedded_question = Embedding(input_dim=10000, output_dim=256, input_length=100)(question_input) encoded_question = LSTM(256)(embedded_question) # 질문 벡터와 이미지 벡터를 이어붙입니다. merged = keras.layers.concatenate([encoded_question, encoded_image]) # 그 위에 답이 될 수 있는 1000개의 단어에 대해 로지스틱 회귀를 학습시킵니다. output = Dense(1000, activation='softmax')(merged) # 다음은 최종 모델입니다. vqa_model = Model(inputs=[image_input, question_input], outputs=output) # 다음 단계에서는 실제 데이터를 이용해 모델을 학습시켜봅시다. ``` ### 비디오에 대한 질문에 답변하는 모델 이미지에 대한 모델을 간단하게 비디오에 대한 모델로 바꿔봅시다. 적절한 학습을 통해서 모델에 짧은 비디오(예: 100 프레임으로 된 사람의 움직임 영상)를 보여주고 비디오에 대한 질문을 할 수 있습니다(예: "남자 아이는 무슨 스포츠를 하고 있니?" -> "축구"). ```python from keras.layers import TimeDistributed video_input = Input(shape=(100, 224, 224, 3)) # 기존에 학습된 vision_model로 비디오를 인코딩합니다(가중치를 재사용합니다.). encoded_frame_sequence = TimeDistributed(vision_model)(video_input) # 출력은 벡터의 시퀀스가 됩니다. encoded_video = LSTM(256)(encoded_frame_sequence) # 출력은 벡터입니다. # 다음은 이전 모델의 가중치를 재사용하여 만든 질문 인코더의 모델 표현입니다. question_encoder = Model(inputs=question_input, outputs=encoded_question) # 이 모델을 이용해서 질문을 인코딩해 봅시다. video_question_input = Input(shape=(100,), dtype='int32') encoded_video_question = question_encoder(video_question_input) # 다음은 비디오 문답 모델입니다. merged = keras.layers.concatenate([encoded_video, encoded_video_question]) output = Dense(1000, activation='softmax')(merged) video_qa_model = Model(inputs=[video_input, video_question_input], outputs=output) ```
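A hedged follow-up sketch (added for reference; it is not part of the original guide): the visual question answering model defined earlier could be trained on randomly generated placeholder data along the following lines. The shapes follow the model definitions above; everything else is illustrative.

```python
import numpy as np

num_samples = 8
images = np.random.random((num_samples, 224, 224, 3))
questions = np.random.randint(1, 10000, size=(num_samples, 100))
answers = keras.utils.to_categorical(
    np.random.randint(1000, size=(num_samples, 1)), num_classes=1000)

vqa_model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
vqa_model.fit([images, questions], answers, epochs=1, batch_size=4)
```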
keras-docs-ko/sources/getting-started/functional-api-guide.md/0
{ "file_path": "keras-docs-ko/sources/getting-started/functional-api-guide.md", "repo_id": "keras-docs-ko", "token_count": 12948 }
93
# Writing your own Keras layers

If you only need a simple custom operation that does not learn any weights, it is best to use a `layers.core.Lambda` layer. However, if your operation needs trainable weights, you have to implement your own layer.

To create a custom layer in **Keras 2.0** or later, you need to implement the three methods below (if you are using an older version, you will need to upgrade):

- `build(input_shape)`: this is where you define the weights the layer will use. At the end of this method you must call `super([Layer], self).build()`, which sets `self.built = True`.
- `call(x)`: this is where the layer's computation is defined. Unless you are writing a layer that supports masking, you only need the first argument, which receives the input tensor.
- `compute_output_shape(input_shape)`: if the layer's computation modifies the shape of its input, you should specify the transformed shape here. This allows the next layer to automatically infer the shape of its input.

```python
from keras import backend as K
from keras.layers import Layer

class MyLayer(Layer):

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Define the trainable weights used by this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end!

    def call(self, x):
        return K.dot(x, self.kernel)  # The computation this layer actually performs.

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
```

You can also define layers that take multiple tensors as input and return multiple tensors as output. To do so, `build(input_shape)`, `call(x)` and `compute_output_shape(input_shape)` should assume that their inputs and outputs are lists. Here is the same operation as above, adapted to list inputs and outputs:

```python
from keras import backend as K
from keras.layers import Layer

class MyLayer(Layer):

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Raise an error if input_shape is not a list.
        assert isinstance(input_shape, list)
        # Define the trainable weights used by this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[0][1], self.output_dim),  # the shape uses the first tensor of the input list
                                      initializer='uniform',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end!

    def call(self, x):
        # Raise an error if the input is not a list.
        assert isinstance(x, list)
        a, b = x  # Assume the input is a list of two tensors and assign them to a and b.
        return [K.dot(a, self.kernel) + b, K.mean(b, axis=-1)]  # Compute with both tensors and return a list.

    def compute_output_shape(self, input_shape):
        # Raise an error if input_shape is not a list.
        assert isinstance(input_shape, list)
        shape_a, shape_b = input_shape
        return [(shape_a[0], self.output_dim), shape_b[:-1]]  # Return the output shapes as a list.
```

Every layer available in Keras serves as an example of how to implement almost anything. Check out the source code of the corresponding layer!
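As a small, hedged illustration (not part of the original page; the sizes and data below are arbitrary), the single-input `MyLayer` defined above can be used like any built-in layer once it is implemented:

```python
import numpy as np
from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(16,))
outputs = MyLayer(8)(inputs)   # build() and then call() are invoked here
model = Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='mse')

x = np.random.random((32, 16))
y = np.random.random((32, 8))
model.fit(x, y, epochs=1, batch_size=8)
```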
keras-docs-ko/sources/layers/writing-your-own-keras-layers.md/0
{ "file_path": "keras-docs-ko/sources/layers/writing-your-own-keras-layers.md", "repo_id": "keras-docs-ko", "token_count": 2981 }
94
## 约束项的使用 `constraints` 模块的函数允许在优化期间对网络参数设置约束(例如非负性)。 约束是以层为对象进行的。具体的 API 因层而异,但 `Dense`,`Conv1D`,`Conv2D` 和 `Conv3D` 这些层具有统一的 API。 约束层开放 2 个关键字参数: - `kernel_constraint` 用于主权重矩阵。 - `bias_constraint` 用于偏置。 ```python from keras.constraints import max_norm model.add(Dense(64, kernel_constraint=max_norm(2.))) ``` ## 预定义的约束 <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/constraints.py#L22)</span> ### MaxNorm ```python keras.constraints.MaxNorm(max_value=2, axis=0) ``` MaxNorm 最大范数权值约束。 映射到每个隐藏单元的权值的约束,使其具有小于或等于期望值的范数。 __参数__ - __max_value__: 输入权值的最大范数。 - __axis__: 整数,需要计算权值范数的轴。 例如,在 `Dense` 层中权值矩阵的尺寸为 `(input_dim, output_dim)`, 设置 `axis` 为 `0` 以约束每个长度为 `(input_dim,)` 的权值向量。 在 `Conv2D` 层(`data_format="channels_last"`)中,权值张量的尺寸为 `(rows, cols, input_depth, output_depth)`,设置 `axis` 为 `[0, 1, 2]` 以越是每个尺寸为 `(rows, cols, input_depth)` 的滤波器张量的权值。 __参考文献__ - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/constraints.py#L61)</span> ### NonNeg ```python keras.constraints.NonNeg() ``` 权重非负的约束。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/constraints.py#L69)</span> ### UnitNorm ```python keras.constraints.UnitNorm(axis=0) ``` 映射到每个隐藏单元的权值的约束,使其具有单位范数。 __参数__ - __axis__: 整数,需要计算权值范数的轴。 例如,在 `Dense` 层中权值矩阵的尺寸为 `(input_dim, output_dim)`, 设置 `axis` 为 `0` 以约束每个长度为 `(input_dim,)` 的权值向量。 在 `Conv2D` 层(`data_format="channels_last"`)中,权值张量的尺寸为 `(rows, cols, input_depth, output_depth)`,设置 `axis` 为 `[0, 1, 2]` 以越是每个尺寸为 `(rows, cols, input_depth)` 的滤波器张量的权值。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/constraints.py#L98)</span> ### MinMaxNorm ```python keras.constraints.MinMaxNorm(min_value=0.0, max_value=1.0, rate=1.0, axis=0) ``` MinMaxNorm 最小/最大范数权值约束。 映射到每个隐藏单元的权值的约束,使其范数在上下界之间。 __参数__ - __min_value__: 输入权值的最小范数。 - __max_value__: 输入权值的最大范数。 - __rate__: 强制执行约束的比例:权值将被重新调整为 `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`。 实际上,这意味着 rate = 1.0 代表严格执行约束,而 rate <1.0 意味着权值 将在每一步重新调整以缓慢移动到所需间隔内的值。 - __axis__: 整数,需要计算权值范数的轴。 例如,在 `Dense` 层中权值矩阵的尺寸为 `(input_dim, output_dim)`, 设置 `axis` 为 `0` 以约束每个长度为 `(input_dim,)` 的权值向量。 在 `Conv2D` 层(`data_format="channels_last"`)中,权值张量的尺寸为 `(rows, cols, input_depth, output_depth)`,设置 `axis` 为 `[0, 1, 2]` 以越是每个尺寸为 `(rows, cols, input_depth)` 的滤波器张量的权值。 ---
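A brief, hedged illustration (added for reference; it is not part of the original page) of the `axis` argument described above, for both a `Dense` kernel and a `Conv2D` kernel with `data_format="channels_last"`:

```python
from keras.constraints import MaxNorm, NonNeg
from keras.layers import Conv2D, Dense

# Constrain each weight vector of length (input_dim,) in the Dense kernel,
# and keep the bias non-negative.
dense = Dense(64,
              kernel_constraint=MaxNorm(max_value=2, axis=0),
              bias_constraint=NonNeg())

# Constrain each (rows, cols, input_depth) filter of the Conv2D kernel.
conv = Conv2D(32, (3, 3),
              kernel_constraint=MaxNorm(max_value=2, axis=[0, 1, 2]))
```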
keras-docs-zh/sources/constraints.md/0
{ "file_path": "keras-docs-zh/sources/constraints.md", "repo_id": "keras-docs-zh", "token_count": 2214 }
95
# This example demonstrates the use of Convolution1D for text classification.

Gets to 0.89 test accuracy after 2 epochs.
</br>
90 seconds per epoch on an Intel i5 2.4 GHz CPU.
</br>
10 seconds per epoch on a Tesla K40 GPU.

```python
from __future__ import print_function

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb

# Set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2

print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = Sequential()

# We start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions.
model.add(Embedding(max_features,
                    embedding_dims,
                    input_length=maxlen))
model.add(Dropout(0.2))

# We add a Convolution1D, which will learn word-group filters
# of size kernel_size:
model.add(Conv1D(filters,
                 kernel_size,
                 padding='valid',
                 activation='relu',
                 strides=1))
# We use max pooling:
model.add(GlobalMaxPooling1D())

# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))

# We project onto a single-unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))
```
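A hedged follow-up (not part of the original example): once training has finished, the same padded test split can be used for a final evaluation and for inference.

```python
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test accuracy:', acc)

# Predicted probability that the first five test reviews are positive.
print(model.predict(x_test[:5]))
```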
keras-docs-zh/sources/examples/imdb_cnn.md/0
{ "file_path": "keras-docs-zh/sources/examples/imdb_cnn.md", "repo_id": "keras-docs-zh", "token_count": 1036 }
96
# 如何使用 sklearn 包装器的示例 在 MNIST 上构建简单的 CNN 模型,并使用 sklearn 的 GridSearchCV 查找最佳模型 ```python from __future__ import print_function import keras from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.wrappers.scikit_learn import KerasClassifier from keras import backend as K from sklearn.model_selection import GridSearchCV num_classes = 10 # 输入图像尺寸 img_rows, img_cols = 28, 28 # 加载训练数据并进行基本数据归一化 (x_train, y_train), (x_test, y_test) = mnist.load_data() if K.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 # 将类向量转换为二进制类矩阵 y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) def make_model(dense_layer_sizes, filters, kernel_size, pool_size): '''创建由 2 个卷积层和紧随其后的密集层组成的模型 dense_layer_sizes: 网络层大小列表。 此列表每一层都有一个数字。 filters: 每个卷积层中的卷积滤波器数量 kernel_size: 卷积核大小 pool_size: 最大共享池的大小 ''' model = Sequential() model.add(Conv2D(filters, kernel_size, padding='valid', input_shape=input_shape)) model.add(Activation('relu')) model.add(Conv2D(filters, kernel_size)) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=pool_size)) model.add(Dropout(0.25)) model.add(Flatten()) for layer_size in dense_layer_sizes: model.add(Dense(layer_size)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) return model dense_size_candidates = [[32], [64], [32, 32], [64, 64]] my_classifier = KerasClassifier(make_model, batch_size=32) validator = GridSearchCV(my_classifier, param_grid={'dense_layer_sizes': dense_size_candidates, # epochs 可用于调整,即使不是模型构建函数的参数 'epochs': [3, 6], 'filters': [8], 'kernel_size': [3], 'pool_size': [2]}, scoring='neg_log_loss', n_jobs=1) validator.fit(x_train, y_train) print('The parameters of the best model are: ') print(validator.best_params_) # validator.best_estimator_ 返回 sklearn-wrapped 版本的最佳模型 # validator.best_estimator_.model 返回非包装的 keras 模型 best_model = validator.best_estimator_.model metric_names = best_model.metrics_names metric_values = best_model.evaluate(x_test, y_test) for metric, value in zip(metric_names, metric_values): print(metric, ': ', value) ```
keras-docs-zh/sources/examples/mnist_sklearn_wrapper.md/0
{ "file_path": "keras-docs-zh/sources/examples/mnist_sklearn_wrapper.md", "repo_id": "keras-docs-zh", "token_count": 1836 }
97
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L69)</span> ### MaxPooling1D ```python keras.layers.MaxPooling1D(pool_size=2, strides=None, padding='valid', data_format='channels_last') ``` 对于时序数据的最大池化。 __参数__ - __pool_size__: 整数,最大池化的窗口大小。 - __strides__: 整数,或者是 `None`。作为缩小比例的因数。 例如,2 会使得输入张量缩小一半。 如果是 `None`,那么默认值是 `pool_size`。 - __padding__: `"valid"` 或者 `"same"` (区分大小写)。 - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 对应输入尺寸为 `(batch, steps, features)`, `channels_first` 对应输入尺寸为 `(batch, features, steps)`。 __输入尺寸__ - 如果 `data_format='channels_last'`, 输入为 3D 张量,尺寸为: `(batch_size, steps, features)` - 如果`data_format='channels_first'`, 输入为 3D 张量,尺寸为: `(batch_size, features, steps)` __输出尺寸__ - 如果 `data_format='channels_last'`, 输出为 3D 张量,尺寸为: `(batch_size, downsampled_steps, features)` - 如果 `data_format='channels_first'`, 输出为 3D 张量,尺寸为: `(batch_size, features, downsampled_steps)` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L217)</span> ### MaxPooling2D ```python keras.layers.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None) ``` 对于空间数据的最大池化。 __参数__ - __pool_size__: 整数,或者 2 个整数表示的元组, 沿(垂直,水平)方向缩小比例的因数。 (2,2)会把输入张量的两个维度都缩小一半。 如果只使用一个整数,那么两个维度都会使用同样的窗口长度。 - __strides__: 整数,2 个整数表示的元组,或者是 `None`。 表示步长值。 如果是 `None`,那么默认值是 `pool_size`。 - __padding__: `"valid"` 或者 `"same"` (区分大小写)。 - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 代表尺寸是 `(batch, height, width, channels)` 的输入张量, 而 `channels_first` 代表尺寸是 `(batch, channels, height, width)` 的输入张量。 默认值根据 Keras 配置文件 `~/.keras/keras.json` 中的 `image_data_format` 值来设置。 如果还没有设置过,那么默认值就是 "channels_last"。 __输入尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, rows, cols, channels)` 的 4D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, rows, cols)` 的 4D 张量 __输出尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, pooled_rows, pooled_cols, channels)` 的 4D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, pooled_rows, pooled_cols)` 的 4D 张量 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L386)</span> ### MaxPooling3D ```python keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None) ``` 对于 3D(空间,或时空间)数据的最大池化。 __参数__ - __pool_size__: 3 个整数表示的元组,缩小(dim1,dim2,dim3)比例的因数。 (2, 2, 2) 会把 3D 输入张量的每个维度缩小一半。 - __strides__: 3 个整数表示的元组,或者是 `None`。步长值。 - __padding__: `"valid"` 或者 `"same"`(区分大小写)。 - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 代表尺寸是 `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` 的输入张量, 而 `channels_first` 代表尺寸是 `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)` 的输入张量。 默认值根据 Keras 配置文件 `~/.keras/keras.json` 中的 `image_data_format` 值来设置。 如果还没有设置过,那么默认值就是 "channels_last"。 __输入尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` 的 5D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` 的 5D 张量 __输出尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` 的 5D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)` 的 5D 张量 ---- <span 
style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L117)</span> ### AveragePooling1D ```python keras.layers.AveragePooling1D(pool_size=2, strides=None, padding='valid', data_format='channels_last') ``` 对于时序数据的平均池化。 __参数__ - __pool_size__: 整数,平均池化的窗口大小。 - __strides__: 整数,或者是 `None `。作为缩小比例的因数。 例如,2 会使得输入张量缩小一半。 如果是 `None`,那么默认值是 `pool_size`。 - __padding__: `"valid"` 或者 `"same"` (区分大小写)。 - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 对应输入尺寸为 `(batch, steps, features)`, `channels_first` 对应输入尺寸为 `(batch, features, steps)`。 __输入尺寸__ - 如果 `data_format='channels_last'`, 输入为 3D 张量,尺寸为: `(batch_size, steps, features)` - 如果`data_format='channels_first'`, 输入为 3D 张量,尺寸为: `(batch_size, features, steps)` __输出尺寸__ - 如果 `data_format='channels_last'`, 输出为 3D 张量,尺寸为: `(batch_size, downsampled_steps, features)` - 如果 `data_format='channels_first'`, 输出为 3D 张量,尺寸为: `(batch_size, features, downsampled_steps)` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L272)</span> ### AveragePooling2D ```python keras.layers.AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None) ``` 对于空间数据的平均池化。 __参数__ - __pool_size__: 整数,或者 2 个整数表示的元组, 沿(垂直,水平)方向缩小比例的因数。 (2,2)会把输入张量的两个维度都缩小一半。 如果只使用一个整数,那么两个维度都会使用同样的窗口长度。 - __strides__: 整数,2 个整数表示的元组,或者是 `None`。 表示步长值。 如果是 `None`,那么默认值是 `pool_size`。 - __padding__: `"valid"` 或者 `"same"` (区分大小写)。 - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 代表尺寸是 `(batch, height, width, channels)` 的输入张量, 而 `channels_first` 代表尺寸是 `(batch, channels, height, width)` 的输入张量。 默认值根据 Keras 配置文件 `~/.keras/keras.json` 中的 `image_data_format` 值来设置。 如果还没有设置过,那么默认值就是 "channels_last"。 __输入尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, rows, cols, channels)` 的 4D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, rows, cols)` 的 4D 张量 __输出尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, pooled_rows, pooled_cols, channels)` 的 4D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, pooled_rows, pooled_cols)` 的 4D 张量 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L436)</span> ### AveragePooling3D ```python keras.layers.AveragePooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None) ``` 对于 3D (空间,或者时空间)数据的平均池化。 __参数__ - __pool_size__: 3 个整数表示的元组,缩小(dim1,dim2,dim3)比例的因数。 (2, 2, 2) 会把 3D 输入张量的每个维度缩小一半。 - __strides__: 3 个整数表示的元组,或者是 `None`。步长值。 - __padding__: `"valid"` 或者 `"same"`(区分大小写)。 - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 代表尺寸是 `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` 的输入张量, 而 `channels_first` 代表尺寸是 `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)` 的输入张量。 默认值根据 Keras 配置文件 `~/.keras/keras.json` 中的 `image_data_format` 值来设置。 如果还没有设置过,那么默认值就是 "channels_last"。 __输入尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` 的 5D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` 的 5D 张量 __输出尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` 的 5D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)` 的 5D 张量 ---- <span 
style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L557)</span> ### GlobalMaxPooling1D ```python keras.layers.GlobalMaxPooling1D(data_format='channels_last') ``` 对于时序数据的全局最大池化。 __参数__ - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 对应输入尺寸为 `(batch, steps, features)`, `channels_first` 对应输入尺寸为 `(batch, features, steps)`。 __输入尺寸__ 尺寸是 `(batch_size, steps, features)` 的 3D 张量。 __输出尺寸__ 尺寸是 `(batch_size, features)` 的 2D 张量。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L455)</span> ### GlobalAveragePooling1D ```python keras.layers.GlobalAveragePooling1D() ``` 对于时序数据的全局平均池化。 __输入尺寸__ - 如果 `data_format='channels_last'`, 输入为 3D 张量,尺寸为: `(batch_size, steps, features)` - 如果`data_format='channels_first'`, 输入为 3D 张量,尺寸为: `(batch_size, features, steps)` __输出尺寸__ 尺寸是 `(batch_size, features)` 的 2D 张量。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L647)</span> ### GlobalMaxPooling2D ```python keras.layers.GlobalMaxPooling2D(data_format=None) ``` 对于空域数据的全局最大池化。 __参数__ - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 代表尺寸是 `(batch, height, width, channels)` 的输入张量, 而 `channels_first` 代表尺寸是 `(batch, channels, height, width)` 的输入张量。 默认值根据 Keras 配置文件 `~/.keras/keras.json` 中的 `image_data_format` 值来设置。 如果还没有设置过,那么默认值就是 "channels_last"。 __输入尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, rows, cols, channels)` 的 4D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, rows, cols)` 的 4D 张量 __输出尺寸__ 尺寸是 `(batch_size, channels)` 的 2D 张量 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L510)</span> ### GlobalAveragePooling2D ```python keras.layers.GlobalAveragePooling2D(data_format=None) ``` 对于空域数据的全局平均池化。 __参数__ - __data_format__: 一个字符串,`channels_last` (默认值)或者 `channels_first`。 输入张量中的维度顺序。 `channels_last` 代表尺寸是 `(batch, height, width, channels)` 的输入张量,而 `channels_first` 代表尺寸是 `(batch, channels, height, width)` 的输入张量。 默认值根据 Keras 配置文件 `~/.keras/keras.json` 中的 `image_data_format` 值来设置。 如果还没有设置过,那么默认值就是 "channels_last"。 __输入尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, rows, cols, channels)` 的 4D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, rows, cols)` 的 4D 张量 __输出尺寸__ 尺寸是 `(batch_size, channels)` 的 2D 张量 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L742)</span> ### GlobalMaxPooling3D ```python keras.layers.GlobalMaxPooling3D(data_format=None) ``` 对于 3D 数据的全局最大池化。 __参数__ - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 代表尺寸是 `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` 的输入张量, 而 `channels_first` 代表尺寸是 `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)` 的输入张量。 默认值根据 Keras 配置文件 `~/.keras/keras.json` 中的 `image_data_format` 值来设置。 如果还没有设置过,那么默认值就是 "channels_last"。 __输入尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` 的 5D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` 的 5D 张量 __输出尺寸__ 尺寸是 `(batch_size, channels)` 的 2D 张量 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L707)</span> ### 
GlobalAveragePooling3D ```python keras.layers.GlobalAveragePooling3D(data_format=None) ``` 对于 3D 数据的全局平均池化。 __参数__ - __data_format__: 字符串,`channels_last` (默认)或 `channels_first` 之一。 表示输入各维度的顺序。 `channels_last` 代表尺寸是 `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` 的输入张量, 而 `channels_first` 代表尺寸是 `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)` 的输入张量。 默认值根据 Keras 配置文件 `~/.keras/keras.json` 中的 `image_data_format` 值来设置。 如果还没有设置过,那么默认值就是 "channels_last"。 __输入尺寸__ - 如果 `data_format='channels_last'`: 尺寸是 `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` 的 5D 张量 - 如果 `data_format='channels_first'`: 尺寸是 `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` 的 5D 张量 __输出尺寸__ 尺寸是 `(batch_size, channels)` 的 2D 张量
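As a hedged usage illustration (not part of the original reference text), a minimal snippet showing the output shapes produced by two of the pooling layers documented above:

```python
from keras.models import Sequential
from keras.layers import MaxPooling1D, GlobalAveragePooling2D

# 3D input (batch, steps, features): MaxPooling1D halves the temporal dimension
temporal_model = Sequential([MaxPooling1D(pool_size=2, input_shape=(100, 8))])
print(temporal_model.output_shape)  # (None, 50, 8)

# 4D input (batch, rows, cols, channels): global pooling collapses the spatial dimensions
spatial_model = Sequential([GlobalAveragePooling2D(input_shape=(32, 32, 16))])
print(spatial_model.output_shape)  # (None, 16)
```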
keras-docs-zh/sources/layers/pooling.md/0
{ "file_path": "keras-docs-zh/sources/layers/pooling.md", "repo_id": "keras-docs-zh", "token_count": 8356 }
98
## Model visualization

The `keras.utils.vis_utils` module provides utility functions to plot a Keras model (using `graphviz`).

The following example will plot a model graph and save it to a file:

```python
from keras.utils import plot_model
plot_model(model, to_file='model.png')
```

`plot_model` takes four optional arguments:

- `show_shapes` (defaults to False) controls whether output shapes are shown in the graph.
- `show_layer_names` (defaults to True) controls whether layer names are shown in the graph.
- `expand_nested` (defaults to False) controls whether to expand nested models into clusters in the graph.
- `dpi` (defaults to 96) controls the image dpi.

You can also directly obtain the `pydot.Graph` object and render it yourself, for example to display it in an ipython notebook:

```python
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(model_to_dot(model).create(prog='dot', format='svg'))
```

----

## Training history visualization

The `fit()` method on a Keras `Model` returns a `History` object. The `History.history` attribute is a dictionary recording the training loss and metric values at successive epochs, as well as the validation loss and metric values (if applicable). Here is a simple example using `matplotlib` to generate loss and accuracy plots for the training and validation sets:

```python
import matplotlib.pyplot as plt

history = model.fit(x, y, validation_split=0.25, epochs=50, batch_size=16, verbose=1)

# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
```
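Returning to the model-plotting utility at the top of this page, a small hedged sketch (assuming the same `model` object) that passes all four optional `plot_model` arguments explicitly:

```python
from keras.utils import plot_model

# Render the graph with output shapes and layer names, at the default resolution
plot_model(model,
           to_file='model_detailed.png',
           show_shapes=True,
           show_layer_names=True,
           expand_nested=False,
           dpi=96)
```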
keras-docs-zh/sources/visualization.md/0
{ "file_path": "keras-docs-zh/sources/visualization.md", "repo_id": "keras-docs-zh", "token_count": 1017 }
99
# Audio Classification with Hugging Face Transformers

**Author:** Sreyan Ghosh<br>
**Date created:** 2022/07/01<br>
**Last modified:** 2022/08/27<br>
**Description:** Training Wav2Vec 2.0 using Hugging Face Transformers for Audio Classification.

<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/audio/ipynb/wav2vec2_audiocls.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/audio/wav2vec2_audiocls.py)

---
## Introduction

Identification of speech commands, also known as *keyword spotting* (KWS), is important from an engineering perspective for a wide range of applications, from indexing audio databases and searching for keywords, to running speech models locally on microcontrollers. Currently, many human-computer interfaces (HCI) like Google Assistant, Microsoft Cortana, Amazon Alexa, Apple Siri and others rely on keyword spotting. There is a significant amount of research in the field by all major companies, notably Google and Baidu.

In the past decade, deep learning has led to significant performance gains on this task. Though low-level audio features extracted from raw audio, like MFCC or mel-filterbanks, have been used for decades, the design of these low-level features is [flawed by biases](https://arxiv.org/abs/2101.08596). Moreover, deep learning models trained on these low-level features can easily overfit to noise or signals irrelevant to the task. This makes it essential for any system to learn speech representations that make high-level information (such as acoustic and linguistic content, including phonemes, words, semantic meaning, tone, and speaker characteristics) available from the speech signal to solve the downstream task. [Wav2Vec 2.0](https://arxiv.org/abs/2006.11477), which solves a self-supervised contrastive learning task to learn high-level speech representations, provides a great alternative to traditional low-level features for training deep learning models for KWS.

In this notebook, we train the Wav2Vec 2.0 (base) model, built on the Hugging Face Transformers library, in an end-to-end fashion on the keyword spotting task and achieve state-of-the-art results on the Google Speech Commands Dataset.

---
## Setup

### Installing the requirements

```python
pip install git+https://github.com/huggingface/transformers.git
pip install datasets
pip install huggingface-hub
pip install joblib
pip install librosa
```

### Importing the necessary libraries

```python
import random
import logging

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Only log error messages
tf.get_logger().setLevel(logging.ERROR)
# Set random seed
tf.keras.utils.set_random_seed(42)
```

### Define certain variables

```python
# Maximum duration of the input audio file we feed to our Wav2Vec 2.0 model.
MAX_DURATION = 1
# Sampling rate is the number of samples of audio recorded every second
SAMPLING_RATE = 16000
BATCH_SIZE = 32  # Batch-size for training and evaluating our model.
NUM_CLASSES = 10  # Number of classes our dataset will have (10 in our case).
HIDDEN_DIM = 768  # Dimension of our model output (768 in case of Wav2Vec 2.0 - Base).
MAX_SEQ_LENGTH = MAX_DURATION * SAMPLING_RATE  # Maximum length of the input audio file.
# Wav2Vec 2.0 results in an output frequency with a stride of about 20ms. MAX_FRAMES = 49 MAX_EPOCHS = 2 # Maximum number of training epochs. MODEL_CHECKPOINT = "facebook/wav2vec2-base" # Name of pretrained model from Hugging Face Model Hub ``` --- ## Load the Google Speech Commands Dataset We now download the [Google Speech Commands V1 Dataset](https://arxiv.org/abs/1804.03209), a popular benchmark for training and evaluating deep learning models built for solving the KWS task. The dataset consists of a total of 60,973 audio files, each of 1 second duration, divided into ten classes of keywords ("Yes", "No", "Up", "Down", "Left", "Right", "On", "Off", "Stop", and "Go"), a class for silence, and an unknown class to include the false positive. We load the dataset from [Hugging Face Datasets](https://github.com/huggingface/datasets). This can be easily done with the `load_dataset` function. ```python from datasets import load_dataset speech_commands_v1 = load_dataset("superb", "ks") ``` The dataset has the following fields: - **file**: the path to the raw .wav file of the audio - **audio**: the audio file sampled at 16kHz - **label**: label ID of the audio utterance ```python print(speech_commands_v1) ``` <div class="k-default-codeblock"> ``` DatasetDict({ train: Dataset({ features: ['file', 'audio', 'label'], num_rows: 51094 }) validation: Dataset({ features: ['file', 'audio', 'label'], num_rows: 6798 }) test: Dataset({ features: ['file', 'audio', 'label'], num_rows: 3081 }) }) ``` </div> --- ## Data Pre-processing For the sake of demonstrating the workflow, in this notebook we only take small stratified balanced splits (50%) of the train as our training and test sets. We can easily split the dataset using the `train_test_split` method which expects the split size and the name of the column relative to which you want to stratify. Post splitting the dataset, we remove the `unknown` and `silence` classes and only focus on the ten main classes. The `filter` method does that easily for you. Next we sample our train and test splits to a multiple of the `BATCH_SIZE` to facilitate smooth training and inference. You can achieve that using the `select` method which expects the indices of the samples you want to keep. Rest all are discarded. ```python speech_commands_v1 = speech_commands_v1["train"].train_test_split( train_size=0.5, test_size=0.5, stratify_by_column="label" ) speech_commands_v1 = speech_commands_v1.filter( lambda x: x["label"] != ( speech_commands_v1["train"].features["label"].names.index("_unknown_") and speech_commands_v1["train"].features["label"].names.index("_silence_") ) ) speech_commands_v1["train"] = speech_commands_v1["train"].select( [i for i in range((len(speech_commands_v1["train"]) // BATCH_SIZE) * BATCH_SIZE)] ) speech_commands_v1["test"] = speech_commands_v1["test"].select( [i for i in range((len(speech_commands_v1["test"]) // BATCH_SIZE) * BATCH_SIZE)] ) print(speech_commands_v1) ``` <div class="k-default-codeblock"> ``` DatasetDict({ train: Dataset({ features: ['file', 'audio', 'label'], num_rows: 896 }) test: Dataset({ features: ['file', 'audio', 'label'], num_rows: 896 }) }) ``` </div> Additionally, you can check the actual labels corresponding to each label ID. 
```python labels = speech_commands_v1["train"].features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label print(id2label) ``` <div class="k-default-codeblock"> ``` {'0': 'yes', '1': 'no', '2': 'up', '3': 'down', '4': 'left', '5': 'right', '6': 'on', '7': 'off', '8': 'stop', '9': 'go', '10': '_silence_', '11': '_unknown_'} ``` </div> Before we can feed the audio utterance samples to our model, we need to pre-process them. This is done by a Hugging Face Transformers "Feature Extractor" which will (as the name indicates) re-sample your inputs to the sampling rate the model expects (in-case they exist with a different sampling rate), as well as generate the other inputs that model requires. To do all of this, we instantiate our `Feature Extractor` with the `AutoFeatureExtractor.from_pretrained`, which will ensure: We get a `Feature Extractor` that corresponds to the model architecture we want to use. We download the config that was used when pretraining this specific checkpoint. This will be cached so that it's not downloaded again the next time we run the cell. The `from_pretrained()` method expects the name of a model from the Hugging Face Hub. This is exactly similar to `MODEL_CHECKPOINT` and we just pass that. We write a simple function that helps us in the pre-processing that is compatible with Hugging Face Datasets. To summarize, our pre-processing function should: - Call the audio column to load and if necessary resample the audio file. - Check the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. You can find this information on the Wav2Vec 2.0 model card. - Set a maximum input length so longer inputs are batched without being truncated. ```python from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained( MODEL_CHECKPOINT, return_attention_mask=True ) def preprocess_function(examples): audio_arrays = [x["array"] for x in examples["audio"]] inputs = feature_extractor( audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=MAX_SEQ_LENGTH, truncation=True, padding=True, ) return inputs # This line with pre-process our speech_commands_v1 dataset. We also remove the "audio" # and "file" columns as they will be of no use to us while training. processed_speech_commands_v1 = speech_commands_v1.map( preprocess_function, remove_columns=["audio", "file"], batched=True ) # Load the whole dataset splits as a dict of numpy arrays train = processed_speech_commands_v1["train"].shuffle(seed=42).with_format("numpy")[:] test = processed_speech_commands_v1["test"].shuffle(seed=42).with_format("numpy")[:] ``` --- ## Defining the Wav2Vec 2.0 with Classification-Head We now define our model. To be precise, we define a Wav2Vec 2.0 model and add a Classification-Head on top to output a probability distribution of all classes for each input audio sample. Since the model might get complex we first define the Wav2Vec 2.0 model with Classification-Head as a Keras layer and then build the model using that. We instantiate our main Wav2Vec 2.0 model using the `TFWav2Vec2Model` class. This will instantiate a model which will output 768 or 1024 dimensional embeddings according to the config you choose (BASE or LARGE). The `from_pretrained()` additionally helps you load pre-trained weights from the Hugging Face Model Hub. 
It will download the pre-trained weights together with the config corresponding to the name of the model you have mentioned when calling the method. For our task, we choose the BASE variant of the model that has just been pre-trained, since we fine-tune over it. ```python from transformers import TFWav2Vec2Model def mean_pool(hidden_states, feature_lengths): attenion_mask = tf.sequence_mask( feature_lengths, maxlen=MAX_FRAMES, dtype=tf.dtypes.int64 ) padding_mask = tf.cast( tf.reverse(tf.cumsum(tf.reverse(attenion_mask, [-1]), -1), [-1]), dtype=tf.dtypes.bool, ) hidden_states = tf.where( tf.broadcast_to( tf.expand_dims(~padding_mask, -1), (BATCH_SIZE, MAX_FRAMES, HIDDEN_DIM) ), 0.0, hidden_states, ) pooled_state = tf.math.reduce_sum(hidden_states, axis=1) / tf.reshape( tf.math.reduce_sum(tf.cast(padding_mask, dtype=tf.dtypes.float32), axis=1), [-1, 1], ) return pooled_state class TFWav2Vec2ForAudioClassification(layers.Layer): """Combines the encoder and decoder into an end-to-end model for training.""" def __init__(self, model_checkpoint, num_classes): super().__init__() # Instantiate the Wav2Vec 2.0 model without the Classification-Head self.wav2vec2 = TFWav2Vec2Model.from_pretrained( model_checkpoint, apply_spec_augment=False, from_pt=True ) self.pooling = layers.GlobalAveragePooling1D() # Drop-out layer before the final Classification-Head self.intermediate_layer_dropout = layers.Dropout(0.5) # Classification-Head self.final_layer = layers.Dense(num_classes, activation="softmax") def call(self, inputs): # We take only the first output in the returned dictionary corresponding to the # output of the last layer of Wav2vec 2.0 hidden_states = self.wav2vec2(inputs["input_values"])[0] # If attention mask does exist then mean-pool only un-masked output frames if tf.is_tensor(inputs["attention_mask"]): # Get the length of each audio input by summing up the attention_mask # (attention_mask = (BATCH_SIZE x MAX_SEQ_LENGTH) ∈ {1,0}) audio_lengths = tf.cumsum(inputs["attention_mask"], -1)[:, -1] # Get the number of Wav2Vec 2.0 output frames for each corresponding audio input # length feature_lengths = self.wav2vec2.wav2vec2._get_feat_extract_output_lengths( audio_lengths ) pooled_state = mean_pool(hidden_states, feature_lengths) # If attention mask does not exist then mean-pool only all output frames else: pooled_state = self.pooling(hidden_states) intermediate_state = self.intermediate_layer_dropout(pooled_state) final_state = self.final_layer(intermediate_state) return final_state ``` --- ## Building and Compiling the model We now build and compile our model. We use the `SparseCategoricalCrossentropy` to train our model since it is a classification task. Following much of literature we evaluate our model on the `accuracy` metric. 
```python def build_model(): # Model's input inputs = { "input_values": tf.keras.Input(shape=(MAX_SEQ_LENGTH,), dtype="float32"), "attention_mask": tf.keras.Input(shape=(MAX_SEQ_LENGTH,), dtype="int32"), } # Instantiate the Wav2Vec 2.0 model with Classification-Head using the desired # pre-trained checkpoint wav2vec2_model = TFWav2Vec2ForAudioClassification(MODEL_CHECKPOINT, NUM_CLASSES)( inputs ) # Model model = tf.keras.Model(inputs, wav2vec2_model) # Loss loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) # Optimizer optimizer = keras.optimizers.Adam(learning_rate=1e-5) # Compile and return model.compile(loss=loss, optimizer=optimizer, metrics=["accuracy"]) return model model = build_model() ``` --- ## Training the model Before we start training our model, we divide the inputs into its dependent and independent variables. ```python # Remove targets from training dictionaries train_x = {x: y for x, y in train.items() if x != "label"} test_x = {x: y for x, y in test.items() if x != "label"} ``` And now we can finally start training our model. ```python model.fit( train_x, train["label"], validation_data=(test_x, test["label"]), batch_size=BATCH_SIZE, epochs=MAX_EPOCHS, ) ``` <div class="k-default-codeblock"> ``` Epoch 1/2 28/28 [==============================] - 25s 338ms/step - loss: 2.3122 - accuracy: 0.1205 - val_loss: 2.2023 - val_accuracy: 0.2176 Epoch 2/2 28/28 [==============================] - 5s 189ms/step - loss: 2.0533 - accuracy: 0.2868 - val_loss: 1.8177 - val_accuracy: 0.5089 <keras.callbacks.History at 0x7fcee542dc50> ``` </div> Great! Now that we have trained our model, we predict the classes for audio samples in the test set using the `model.predict()` method! We see the model predictions are not that great as it has been trained on a very small number of samples for just 1 epoch. For best results, we recommend training on the complete dataset for at least 5 epochs! ```python preds = model.predict(test_x) ``` <div class="k-default-codeblock"> ``` 28/28 [==============================] - 4s 44ms/step ``` </div> Now we try to infer the model we trained on a randomly sampled audio file. We hear the audio file and then also see how well our model was able to predict! ```python import IPython.display as ipd rand_int = random.randint(0, len(test_x)) ipd.Audio(data=np.asarray(test_x["input_values"][rand_int]), autoplay=True, rate=16000) print("Original Label is ", id2label[str(test["label"][rand_int])]) print("Predicted Label is ", id2label[str(np.argmax((preds[rand_int])))]) ``` <div class="k-default-codeblock"> ``` Original Label is up Predicted Label is on ``` </div> Now you can push this model to Hugging Face Model Hub and also share it with all your friends, family, favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"`, for instance: ```python model.push_to_hub("wav2vec2-ks", organization="keras-io") tokenizer.push_to_hub("wav2vec2-ks", organization="keras-io") ``` And after you push your model this is how you can load it in the future! ```python from transformers import TFWav2Vec2Model model = TFWav2Vec2Model.from_pretrained("your-username/my-awesome-model", from_pt=True) ```
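Circling back to the predictions computed earlier, a small hedged addition (not part of the original example): the `preds` array can be turned into an overall test-set accuracy in a couple of lines.

```python
import numpy as np

# preds has shape (num_test_samples, NUM_CLASSES); pick the most likely class per clip
predicted_ids = np.argmax(preds, axis=-1)
test_accuracy = np.mean(predicted_ids == test["label"])
print(f"Test accuracy computed from the predictions: {test_accuracy:.3f}")
```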
keras-io/examples/audio/md/wav2vec2_audiocls.md/0
{ "file_path": "keras-io/examples/audio/md/wav2vec2_audiocls.md", "repo_id": "keras-io", "token_count": 5769 }
100
""" Title: Data-efficient GANs with Adaptive Discriminator Augmentation Author: [András Béres](https://www.linkedin.com/in/andras-beres-789190210) Date created: 2021/10/28 Last modified: 2021/10/28 Description: Generating images from limited data using the Caltech Birds dataset. Accelerator: GPU """ """ ## Introduction ### GANs [Generative Adversarial Networks (GANs)](https://arxiv.org/abs/1406.2661) are a popular class of generative deep learning models, commonly used for image generation. They consist of a pair of dueling neural networks, called the discriminator and the generator. The discriminator's task is to distinguish real images from generated (fake) ones, while the generator network tries to fool the discriminator by generating more and more realistic images. If the generator is however too easy or too hard to fool, it might fail to provide useful learning signal for the generator, therefore training GANs is usually considered a difficult task. ### Data augmentation for GANS Data augmentation, a popular technique in deep learning, is the process of randomly applying semantics-preserving transformations to the input data to generate multiple realistic versions of it, thereby effectively multiplying the amount of training data available. The simplest example is left-right flipping an image, which preserves its contents while generating a second unique training sample. Data augmentation is commonly used in supervised learning to prevent overfitting and enhance generalization. The authors of [StyleGAN2-ADA](https://arxiv.org/abs/2006.06676) show that discriminator overfitting can be an issue in GANs, especially when only low amounts of training data is available. They propose Adaptive Discriminator Augmentation to mitigate this issue. Applying data augmentation to GANs however is not straightforward. Since the generator is updated using the discriminator's gradients, if the generated images are augmented, the augmentation pipeline has to be differentiable and also has to be GPU-compatible for computational efficiency. Luckily, the [Keras image augmentation layers](https://keras.io/api/layers/preprocessing_layers/image_augmentation/) fulfill both these requirements, and are therefore very well suited for this task. ### Invertible data augmentation A possible difficulty when using data augmentation in generative models is the issue of ["leaky augmentations" (section 2.2)](https://arxiv.org/abs/2006.06676), namely when the model generates images that are already augmented. This would mean that it was not able to separate the augmentation from the underlying data distribution, which can be caused by using non-invertible data transformations. For example, if either 0, 90, 180 or 270 degree rotations are performed with equal probability, the original orientation of the images is impossible to infer, and this information is destroyed. A simple trick to make data augmentations invertible is to only apply them with some probability. That way the original version of the images will be more common, and the data distribution can be inferred. By properly choosing this probability, one can effectively regularize the discriminator without making the augmentations leaky. 
""" """ ## Setup """ import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_datasets as tfds from tensorflow import keras from tensorflow.keras import layers """ ## Hyperparameterers """ # data num_epochs = 10 # train for 400 epochs for good results image_size = 64 # resolution of Kernel Inception Distance measurement, see related section kid_image_size = 75 padding = 0.25 dataset_name = "caltech_birds2011" # adaptive discriminator augmentation max_translation = 0.125 max_rotation = 0.125 max_zoom = 0.25 target_accuracy = 0.85 integration_steps = 1000 # architecture noise_size = 64 depth = 4 width = 128 leaky_relu_slope = 0.2 dropout_rate = 0.4 # optimization batch_size = 128 learning_rate = 2e-4 beta_1 = 0.5 # not using the default value of 0.9 is important ema = 0.99 """ ## Data pipeline In this example, we will use the [Caltech Birds (2011)](https://www.tensorflow.org/datasets/catalog/caltech_birds2011) dataset for generating images of birds, which is a diverse natural dataset containing less then 6000 images for training. When working with such low amounts of data, one has to take extra care to retain as high data quality as possible. In this example, we use the provided bounding boxes of the birds to cut them out with square crops while preserving their aspect ratios when possible. """ def round_to_int(float_value): return tf.cast(tf.math.round(float_value), dtype=tf.int32) def preprocess_image(data): # unnormalize bounding box coordinates height = tf.cast(tf.shape(data["image"])[0], dtype=tf.float32) width = tf.cast(tf.shape(data["image"])[1], dtype=tf.float32) bounding_box = data["bbox"] * tf.stack([height, width, height, width]) # calculate center and length of longer side, add padding target_center_y = 0.5 * (bounding_box[0] + bounding_box[2]) target_center_x = 0.5 * (bounding_box[1] + bounding_box[3]) target_size = tf.maximum( (1.0 + padding) * (bounding_box[2] - bounding_box[0]), (1.0 + padding) * (bounding_box[3] - bounding_box[1]), ) # modify crop size to fit into image target_height = tf.reduce_min( [target_size, 2.0 * target_center_y, 2.0 * (height - target_center_y)] ) target_width = tf.reduce_min( [target_size, 2.0 * target_center_x, 2.0 * (width - target_center_x)] ) # crop image image = tf.image.crop_to_bounding_box( data["image"], offset_height=round_to_int(target_center_y - 0.5 * target_height), offset_width=round_to_int(target_center_x - 0.5 * target_width), target_height=round_to_int(target_height), target_width=round_to_int(target_width), ) # resize and clip # for image downsampling, area interpolation is the preferred method image = tf.image.resize( image, size=[image_size, image_size], method=tf.image.ResizeMethod.AREA ) return tf.clip_by_value(image / 255.0, 0.0, 1.0) def prepare_dataset(split): # the validation dataset is shuffled as well, because data order matters # for the KID calculation return ( tfds.load(dataset_name, split=split, shuffle_files=True) .map(preprocess_image, num_parallel_calls=tf.data.AUTOTUNE) .cache() .shuffle(10 * batch_size) .batch(batch_size, drop_remainder=True) .prefetch(buffer_size=tf.data.AUTOTUNE) ) train_dataset = prepare_dataset("train") val_dataset = prepare_dataset("test") """ After preprocessing the training images look like the following: ![birds dataset](https://i.imgur.com/Ru5HgBM.png) """ """ ## Kernel inception distance [Kernel Inception Distance (KID)](https://arxiv.org/abs/1801.01401) was proposed as a replacement for the popular [Frechet Inception Distance (FID)](https://arxiv.org/abs/1706.08500) 
metric for measuring image generation quality. Both metrics measure the difference in the generated and training distributions in the representation space of an [InceptionV3](https://keras.io/api/applications/inceptionv3/) network pretrained on [ImageNet](https://www.tensorflow.org/datasets/catalog/imagenet2012). According to the paper, KID was proposed because FID has no unbiased estimator, its expected value is higher when it is measured on fewer images. KID is more suitable for small datasets because its expected value does not depend on the number of samples it is measured on. In my experience it is also computationally lighter, numerically more stable, and simpler to implement because it can be estimated in a per-batch manner. In this example, the images are evaluated at the minimal possible resolution of the Inception network (75x75 instead of 299x299), and the metric is only measured on the validation set for computational efficiency. """ class KID(keras.metrics.Metric): def __init__(self, name="kid", **kwargs): super().__init__(name=name, **kwargs) # KID is estimated per batch and is averaged across batches self.kid_tracker = keras.metrics.Mean() # a pretrained InceptionV3 is used without its classification layer # transform the pixel values to the 0-255 range, then use the same # preprocessing as during pretraining self.encoder = keras.Sequential( [ layers.InputLayer(input_shape=(image_size, image_size, 3)), layers.Rescaling(255.0), layers.Resizing(height=kid_image_size, width=kid_image_size), layers.Lambda(keras.applications.inception_v3.preprocess_input), keras.applications.InceptionV3( include_top=False, input_shape=(kid_image_size, kid_image_size, 3), weights="imagenet", ), layers.GlobalAveragePooling2D(), ], name="inception_encoder", ) def polynomial_kernel(self, features_1, features_2): feature_dimensions = tf.cast(tf.shape(features_1)[1], dtype=tf.float32) return (features_1 @ tf.transpose(features_2) / feature_dimensions + 1.0) ** 3.0 def update_state(self, real_images, generated_images, sample_weight=None): real_features = self.encoder(real_images, training=False) generated_features = self.encoder(generated_images, training=False) # compute polynomial kernels using the two sets of features kernel_real = self.polynomial_kernel(real_features, real_features) kernel_generated = self.polynomial_kernel( generated_features, generated_features ) kernel_cross = self.polynomial_kernel(real_features, generated_features) # estimate the squared maximum mean discrepancy using the average kernel values batch_size = tf.shape(real_features)[0] batch_size_f = tf.cast(batch_size, dtype=tf.float32) mean_kernel_real = tf.reduce_sum(kernel_real * (1.0 - tf.eye(batch_size))) / ( batch_size_f * (batch_size_f - 1.0) ) mean_kernel_generated = tf.reduce_sum( kernel_generated * (1.0 - tf.eye(batch_size)) ) / (batch_size_f * (batch_size_f - 1.0)) mean_kernel_cross = tf.reduce_mean(kernel_cross) kid = mean_kernel_real + mean_kernel_generated - 2.0 * mean_kernel_cross # update the average KID estimate self.kid_tracker.update_state(kid) def result(self): return self.kid_tracker.result() def reset_state(self): self.kid_tracker.reset_state() """ ## Adaptive discriminator augmentation The authors of [StyleGAN2-ADA](https://arxiv.org/abs/2006.06676) propose to change the augmentation probability adaptively during training. 
Though it is explained differently in the paper, they use [integral control](https://en.wikipedia.org/wiki/PID_controller#Integral) on the augmentation probability to keep the discriminator's accuracy on real images close to a target value. Note, that their controlled variable is actually the average sign of the discriminator logits (r_t in the paper), which corresponds to 2 * accuracy - 1. This method requires two hyperparameters: 1. `target_accuracy`: the target value for the discriminator's accuracy on real images. I recommend selecting its value from the 80-90% range. 2. [`integration_steps`](https://en.wikipedia.org/wiki/PID_controller#Mathematical_form): the number of update steps required for an accuracy error of 100% to transform into an augmentation probability increase of 100%. To give an intuition, this defines how slowly the augmentation probability is changed. I recommend setting this to a relatively high value (1000 in this case) so that the augmentation strength is only adjusted slowly. The main motivation for this procedure is that the optimal value of the target accuracy is similar across different dataset sizes (see [figure 4 and 5 in the paper](https://arxiv.org/abs/2006.06676)), so it does not have to be re-tuned, because the process automatically applies stronger data augmentation when it is needed. """ # "hard sigmoid", useful for binary accuracy calculation from logits def step(values): # negative values -> 0.0, positive values -> 1.0 return 0.5 * (1.0 + tf.sign(values)) # augments images with a probability that is dynamically updated during training class AdaptiveAugmenter(keras.Model): def __init__(self): super().__init__() # stores the current probability of an image being augmented self.probability = tf.Variable(0.0) # the corresponding augmentation names from the paper are shown above each layer # the authors show (see figure 4), that the blitting and geometric augmentations # are the most helpful in the low-data regime self.augmenter = keras.Sequential( [ layers.InputLayer(input_shape=(image_size, image_size, 3)), # blitting/x-flip: layers.RandomFlip("horizontal"), # blitting/integer translation: layers.RandomTranslation( height_factor=max_translation, width_factor=max_translation, interpolation="nearest", ), # geometric/rotation: layers.RandomRotation(factor=max_rotation), # geometric/isotropic and anisotropic scaling: layers.RandomZoom( height_factor=(-max_zoom, 0.0), width_factor=(-max_zoom, 0.0) ), ], name="adaptive_augmenter", ) def call(self, images, training): if training: augmented_images = self.augmenter(images, training) # during training either the original or the augmented images are selected # based on self.probability augmentation_values = tf.random.uniform( shape=(batch_size, 1, 1, 1), minval=0.0, maxval=1.0 ) augmentation_bools = tf.math.less(augmentation_values, self.probability) images = tf.where(augmentation_bools, augmented_images, images) return images def update(self, real_logits): current_accuracy = tf.reduce_mean(step(real_logits)) # the augmentation probability is updated based on the discriminator's # accuracy on real images accuracy_error = current_accuracy - target_accuracy self.probability.assign( tf.clip_by_value( self.probability + accuracy_error / integration_steps, 0.0, 1.0 ) ) """ ## Network architecture Here we specify the architecture of the two networks: * generator: maps a random vector to an image, which should be as realistic as possible * discriminator: maps an image to a scalar score, which should be high for real and 
low for generated images GANs tend to be sensitive to the network architecture, I implemented a DCGAN architecture in this example, because it is relatively stable during training while being simple to implement. We use a constant number of filters throughout the network, use a sigmoid instead of tanh in the last layer of the generator, and use default initialization instead of random normal as further simplifications. As a good practice, we disable the learnable scale parameter in the batch normalization layers, because on one hand the following relu + convolutional layers make it redundant (as noted in the [documentation](https://keras.io/api/layers/normalization_layers/batch_normalization/)). But also because it should be disabled based on theory when using [spectral normalization (section 4.1)](https://arxiv.org/abs/1802.05957), which is not used here, but is common in GANs. We also disable the bias in the fully connected and convolutional layers, because the following batch normalization makes it redundant. """ # DCGAN generator def get_generator(): noise_input = keras.Input(shape=(noise_size,)) x = layers.Dense(4 * 4 * width, use_bias=False)(noise_input) x = layers.BatchNormalization(scale=False)(x) x = layers.ReLU()(x) x = layers.Reshape(target_shape=(4, 4, width))(x) for _ in range(depth - 1): x = layers.Conv2DTranspose( width, kernel_size=4, strides=2, padding="same", use_bias=False, )(x) x = layers.BatchNormalization(scale=False)(x) x = layers.ReLU()(x) image_output = layers.Conv2DTranspose( 3, kernel_size=4, strides=2, padding="same", activation="sigmoid", )(x) return keras.Model(noise_input, image_output, name="generator") # DCGAN discriminator def get_discriminator(): image_input = keras.Input(shape=(image_size, image_size, 3)) x = image_input for _ in range(depth): x = layers.Conv2D( width, kernel_size=4, strides=2, padding="same", use_bias=False, )(x) x = layers.BatchNormalization(scale=False)(x) x = layers.LeakyReLU(alpha=leaky_relu_slope)(x) x = layers.Flatten()(x) x = layers.Dropout(dropout_rate)(x) output_score = layers.Dense(1)(x) return keras.Model(image_input, output_score, name="discriminator") """ ## GAN model """ class GAN_ADA(keras.Model): def __init__(self): super().__init__() self.augmenter = AdaptiveAugmenter() self.generator = get_generator() self.ema_generator = keras.models.clone_model(self.generator) self.discriminator = get_discriminator() self.generator.summary() self.discriminator.summary() def compile(self, generator_optimizer, discriminator_optimizer, **kwargs): super().compile(**kwargs) # separate optimizers for the two networks self.generator_optimizer = generator_optimizer self.discriminator_optimizer = discriminator_optimizer self.generator_loss_tracker = keras.metrics.Mean(name="g_loss") self.discriminator_loss_tracker = keras.metrics.Mean(name="d_loss") self.real_accuracy = keras.metrics.BinaryAccuracy(name="real_acc") self.generated_accuracy = keras.metrics.BinaryAccuracy(name="gen_acc") self.augmentation_probability_tracker = keras.metrics.Mean(name="aug_p") self.kid = KID() @property def metrics(self): return [ self.generator_loss_tracker, self.discriminator_loss_tracker, self.real_accuracy, self.generated_accuracy, self.augmentation_probability_tracker, self.kid, ] def generate(self, batch_size, training): latent_samples = tf.random.normal(shape=(batch_size, noise_size)) # use ema_generator during inference if training: generated_images = self.generator(latent_samples, training) else: generated_images = self.ema_generator(latent_samples, 
training) return generated_images def adversarial_loss(self, real_logits, generated_logits): # this is usually called the non-saturating GAN loss real_labels = tf.ones(shape=(batch_size, 1)) generated_labels = tf.zeros(shape=(batch_size, 1)) # the generator tries to produce images that the discriminator considers as real generator_loss = keras.losses.binary_crossentropy( real_labels, generated_logits, from_logits=True ) # the discriminator tries to determine if images are real or generated discriminator_loss = keras.losses.binary_crossentropy( tf.concat([real_labels, generated_labels], axis=0), tf.concat([real_logits, generated_logits], axis=0), from_logits=True, ) return tf.reduce_mean(generator_loss), tf.reduce_mean(discriminator_loss) def train_step(self, real_images): real_images = self.augmenter(real_images, training=True) # use persistent gradient tape because gradients will be calculated twice with tf.GradientTape(persistent=True) as tape: generated_images = self.generate(batch_size, training=True) # gradient is calculated through the image augmentation generated_images = self.augmenter(generated_images, training=True) # separate forward passes for the real and generated images, meaning # that batch normalization is applied separately real_logits = self.discriminator(real_images, training=True) generated_logits = self.discriminator(generated_images, training=True) generator_loss, discriminator_loss = self.adversarial_loss( real_logits, generated_logits ) # calculate gradients and update weights generator_gradients = tape.gradient( generator_loss, self.generator.trainable_weights ) discriminator_gradients = tape.gradient( discriminator_loss, self.discriminator.trainable_weights ) self.generator_optimizer.apply_gradients( zip(generator_gradients, self.generator.trainable_weights) ) self.discriminator_optimizer.apply_gradients( zip(discriminator_gradients, self.discriminator.trainable_weights) ) # update the augmentation probability based on the discriminator's performance self.augmenter.update(real_logits) self.generator_loss_tracker.update_state(generator_loss) self.discriminator_loss_tracker.update_state(discriminator_loss) self.real_accuracy.update_state(1.0, step(real_logits)) self.generated_accuracy.update_state(0.0, step(generated_logits)) self.augmentation_probability_tracker.update_state(self.augmenter.probability) # track the exponential moving average of the generator's weights to decrease # variance in the generation quality for weight, ema_weight in zip( self.generator.weights, self.ema_generator.weights ): ema_weight.assign(ema * ema_weight + (1 - ema) * weight) # KID is not measured during the training phase for computational efficiency return {m.name: m.result() for m in self.metrics[:-1]} def test_step(self, real_images): generated_images = self.generate(batch_size, training=False) self.kid.update_state(real_images, generated_images) # only KID is measured during the evaluation phase for computational efficiency return {self.kid.name: self.kid.result()} def plot_images(self, epoch=None, logs=None, num_rows=3, num_cols=6, interval=5): # plot random generated images for visual evaluation of generation quality if epoch is None or (epoch + 1) % interval == 0: num_images = num_rows * num_cols generated_images = self.generate(num_images, training=False) plt.figure(figsize=(num_cols * 2.0, num_rows * 2.0)) for row in range(num_rows): for col in range(num_cols): index = row * num_cols + col plt.subplot(num_rows, num_cols, index + 1) plt.imshow(generated_images[index]) 
plt.axis("off") plt.tight_layout() plt.show() plt.close() """ ## Training One can should see from the metrics during training, that if the real accuracy (discriminator's accuracy on real images) is below the target accuracy, the augmentation probability is increased, and vice versa. In my experience, during a healthy GAN training, the discriminator accuracy should stay in the 80-95% range. Below that, the discriminator is too weak, above that it is too strong. Note that we track the exponential moving average of the generator's weights, and use that for image generation and KID evaluation. """ # create and compile the model model = GAN_ADA() model.compile( generator_optimizer=keras.optimizers.Adam(learning_rate, beta_1), discriminator_optimizer=keras.optimizers.Adam(learning_rate, beta_1), ) # save the best model based on the validation KID metric checkpoint_path = "gan_model" checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_path, save_weights_only=True, monitor="val_kid", mode="min", save_best_only=True, ) # run training and plot generated images periodically model.fit( train_dataset, epochs=num_epochs, validation_data=val_dataset, callbacks=[ keras.callbacks.LambdaCallback(on_epoch_end=model.plot_images), checkpoint_callback, ], ) """ ## Inference """ # load the best model and generate images model.load_weights(checkpoint_path) model.plot_images() """ ## Results By running the training for 400 epochs (which takes 2-3 hours in a Colab notebook), one can get high quality image generations using this code example. The evolution of a random batch of images over a 400 epoch training (ema=0.999 for animation smoothness): ![birds evolution gif](https://i.imgur.com/ecGuCcz.gif) Latent-space interpolation between a batch of selected images: ![birds interpolation gif](https://i.imgur.com/nGvzlsC.gif) I also recommend trying out training on other datasets, such as [CelebA](https://www.tensorflow.org/datasets/catalog/celeb_a) for example. In my experience good results can be achieved without changing any hyperparameters (though discriminator augmentation might not be necessary). """ """ ## GAN tips and tricks My goal with this example was to find a good tradeoff between ease of implementation and generation quality for GANs. During preparation, I have run numerous ablations using [this repository](https://github.com/beresandras/gan-flavours-keras). In this section I list the lessons learned and my recommendations in my subjective order of importance. I recommend checking out the [DCGAN paper](https://arxiv.org/abs/1511.06434), this [NeurIPS talk](https://www.youtube.com/watch?v=myGAju4L7O8), and this [large scale GAN study](https://arxiv.org/abs/1711.10337) for others' takes on this subject. ### Architectural tips * **resolution**: Training GANs at higher resolutions tends to get more difficult, I recommend experimenting at 32x32 or 64x64 resolutions initially. * **initialization**: If you see strong colorful patterns early on in the training, the initialization might be the issue. Set the kernel_initializer parameters of layers to [random normal](https://keras.io/api/layers/initializers/#randomnormal-class), and decrease the standard deviation (recommended value: 0.02, following DCGAN) until the issue disappears. * **upsampling**: There are two main methods for upsampling in the generator. 
[Transposed convolution](https://keras.io/api/layers/convolution_layers/convolution2d_transpose/) is faster, but can lead to [checkerboard artifacts](https://distill.pub/2016/deconv-checkerboard/), which can be reduced by using a kernel size that is divisible with the stride (recommended kernel size is 4 for a stride of 2). [Upsampling](https://keras.io/api/layers/reshaping_layers/up_sampling2d/) + [standard convolution](https://keras.io/api/layers/convolution_layers/convolution2d/) can have slightly lower quality, but checkerboard artifacts are not an issue. I recommend using nearest-neighbor interpolation over bilinear for it. * **batch normalization in discriminator**: Sometimes has a high impact, I recommend trying out both ways. * **[spectral normalization](https://www.tensorflow.org/addons/api_docs/python/tfa/layers/SpectralNormalization)**: A popular technique for training GANs, can help with stability. I recommend disabling batch normalization's learnable scale parameters along with it. * **[residual connections](https://keras.io/guides/functional_api/#a-toy-resnet-model)**: While residual discriminators behave similarly, residual generators are more difficult to train in my experience. They are however necessary for training large and deep architectures. I recommend starting with non-residual architectures. * **dropout**: Using dropout before the last layer of the discriminator improves generation quality in my experience. Recommended dropout rate is below 0.5. * **[leaky ReLU](https://keras.io/api/layers/activation_layers/leaky_relu/)**: Use leaky ReLU activations in the discriminator to make its gradients less sparse. Recommended slope/alpha is 0.2 following DCGAN. ### Algorithmic tips * **loss functions**: Numerous losses have been proposed over the years for training GANs, promising improved performance and stability. I have implemented 5 of them in [this repository](https://github.com/beresandras/gan-flavours-keras), and my experience is in line with [this GAN study](https://arxiv.org/abs/1711.10337): no loss seems to consistently outperform the default non-saturating GAN loss. I recommend using that as a default. * **Adam's beta_1 parameter**: The beta_1 parameter in Adam can be interpreted as the momentum of mean gradient estimation. Using 0.5 or even 0.0 instead of the default 0.9 value was proposed in DCGAN and is important. This example would not work using its default value. * **separate batch normalization for generated and real images**: The forward pass of the discriminator should be separate for the generated and real images. Doing otherwise can lead to artifacts (45 degree stripes in my case) and decreased performance. * **exponential moving average of generator's weights**: This helps to reduce the variance of the KID measurement, and helps in averaging out the rapid color palette changes during training. * **[different learning rate for generator and discriminator](https://arxiv.org/abs/1706.08500)**: If one has the resources, it can help to tune the learning rates of the two networks separately. A similar idea is to update either network's (usually the discriminator's) weights multiple times for each of the other network's updates. I recommend using the same learning rate of 2e-4 (Adam), following DCGAN for both networks, and only updating both of them once as a default. 
* **label noise**: [One-sided label smoothing](https://arxiv.org/abs/1606.03498) (using less than 1.0 for real labels), or adding noise to the labels, can regularize the discriminator so that it does not become overconfident; however, in my case neither improved performance. * **adaptive data augmentation**: Since it adds another dynamic component to the training process, disable it as a default, and only enable it when the other components already work well. """  """ ## Related works  Other GAN-related Keras code examples:  * [DCGAN + CelebA](https://keras.io/examples/generative/dcgan_overriding_train_step/) * [WGAN + FashionMNIST](https://keras.io/examples/generative/wgan_gp/) * [WGAN + Molecules](https://keras.io/examples/generative/wgan-graphs/) * [ConditionalGAN + MNIST](https://keras.io/examples/generative/conditional_gan/) * [CycleGAN + Horse2Zebra](https://keras.io/examples/generative/cyclegan/) * [StyleGAN](https://keras.io/examples/generative/stylegan/)  Modern GAN architecture lines:  * [SAGAN](https://arxiv.org/abs/1805.08318), [BigGAN](https://arxiv.org/abs/1809.11096) * [ProgressiveGAN](https://arxiv.org/abs/1710.10196), [StyleGAN](https://arxiv.org/abs/1812.04948), [StyleGAN2](https://arxiv.org/abs/1912.04958), [StyleGAN2-ADA](https://arxiv.org/abs/2006.06676), [AliasFreeGAN](https://arxiv.org/abs/2106.12423)  Concurrent papers on discriminator data augmentation: [1](https://arxiv.org/abs/2006.02595), [2](https://arxiv.org/abs/2006.05338), [3](https://arxiv.org/abs/2006.10738)  Recent literature overview on GANs: [talk](https://www.youtube.com/watch?v=3ktD752xq5k) """
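"""
The short sketch below is not part of the original training script: it merely illustrates,
under the assumptions stated here, the two generator upsampling options compared in the
architectural tips above. The helper names (`transposed_conv_block`, `upsample_conv_block`)
and the chosen kernel sizes are illustrative, not values used elsewhere in this example.
"""

from tensorflow import keras
from tensorflow.keras import layers


def transposed_conv_block(width):
    # Transposed convolution: a kernel size divisible by the stride (4 vs. 2)
    # helps to reduce checkerboard artifacts.
    return layers.Conv2DTranspose(
        width, kernel_size=4, strides=2, padding="same", use_bias=False
    )


def upsample_conv_block(width):
    # Nearest-neighbor upsampling followed by a standard convolution:
    # possibly slightly lower quality, but no checkerboard artifacts.
    return keras.Sequential(
        [
            layers.UpSampling2D(size=2, interpolation="nearest"),
            layers.Conv2D(width, kernel_size=3, padding="same", use_bias=False),
        ]
    )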
keras-io/examples/generative/gan_ada.py/0
{ "file_path": "keras-io/examples/generative/gan_ada.py", "repo_id": "keras-io", "token_count": 11149 }
101
<jupyter_start><jupyter_text>WGAN-GP with R-GCN for the generation of small molecular graphs **Author:** [akensert](https://github.com/akensert) **Date created:** 2021/06/30 **Last modified:** 2021/06/30 **Description:** Complete implementation of WGAN-GP with R-GCN to generate novel molecules. Introduction In this tutorial, we implement a generative model for graphs and use it to generate novel molecules. Motivation: The [development of new drugs](https://en.wikipedia.org/wiki/Drug_development) (molecules) can be extremely time-consuming and costly. The use of deep learning models can alleviate the search for good candidate drugs, by predicting properties of known molecules (e.g., solubility, toxicity, affinity to target protein, etc.). As the number of possible molecules is astronomical, the space in which we search for/explore molecules is just a fraction of the entire space. Therefore, it's arguably desirable to implement generative models that can learn to generate novel molecules (which would otherwise have never been explored). References (implementation) The implementation in this tutorial is based on/inspired by the [MolGAN paper](https://arxiv.org/abs/1805.11973) and DeepChem's [Basic MolGAN](https://deepchem.readthedocs.io/en/latest/api_reference/models.htmlbasicmolganmodel). Further reading (generative models) Recent implementations of generative models for molecular graphs also include [Mol-CycleGAN](https://jcheminf.biomedcentral.com/articles/10.1186/s13321-019-0404-1), [GraphVAE](https://arxiv.org/abs/1802.03480) and [JT-VAE](https://arxiv.org/abs/1802.04364). For more information on generative adversarial networks, see [GAN](https://arxiv.org/abs/1406.2661), [WGAN](https://arxiv.org/abs/1701.07875) and [WGAN-GP](https://arxiv.org/abs/1704.00028). Setup Install RDKit [RDKit](https://www.rdkit.org/) is a collection of cheminformatics and machine-learning software written in C++ and Python. In this tutorial, RDKit is used to conveniently and efficiently transform [SMILES](https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system) to molecule objects, and then from those obtain sets of atoms and bonds. SMILES expresses the structure of a given molecule in the form of an ASCII string. The SMILES string is a compact encoding which, for smaller molecules, is relatively human-readable. Encoding molecules as a string both alleviates and facilitates database and/or web searching of a given molecule. RDKit uses algorithms to accurately transform a given SMILES to a molecule object, which can then be used to compute a great number of molecular properties/features. Notice that RDKit is commonly installed via [Conda](https://www.rdkit.org/docs/Install.html). However, thanks to [rdkit_platform_wheels](https://github.com/kuelumbus/rdkit_platform_wheels), rdkit can now (for the sake of this tutorial) be installed easily via pip, as follows: ```pip -q install rdkit-pypi``` And to allow easy visualization of molecule objects, Pillow needs to be installed: ```pip -q install Pillow``` Import packages<jupyter_code>from rdkit import Chem, RDLogger from rdkit.Chem.Draw import IPythonConsole, MolsToGridImage import numpy as np import tensorflow as tf from tensorflow import keras  RDLogger.DisableLog("rdApp.*")<jupyter_output><empty_output><jupyter_text>Dataset The dataset used in this tutorial is a [quantum mechanics dataset](http://quantum-machine.org/datasets/) (QM9), obtained from [MoleculeNet](http://moleculenet.ai/datasets-1). 
Although many feature and label columnscome with the dataset, we'll only focus on the[SMILES](https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system)column. The QM9 dataset is a good first dataset to work with for generatinggraphs, as the maximum number of heavy (non-hydrogen) atoms found in a molecule is only nine.<jupyter_code>csv_path = tf.keras.utils.get_file( "qm9.csv", "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/qm9.csv" ) data = [] with open(csv_path, "r") as f: for line in f.readlines()[1:]: data.append(line.split(",")[1]) # Let's look at a molecule of the dataset smiles = data[1000] print("SMILES:", smiles) molecule = Chem.MolFromSmiles(smiles) print("Num heavy atoms:", molecule.GetNumHeavyAtoms()) molecule<jupyter_output><empty_output><jupyter_text>Define helper functionsThese helper functions will help convert SMILES to graphs and graphs to molecule objects.**Representing a molecular graph**. Molecules can naturally be expressed as undirectedgraphs `G = (V, E)`, where `V` is a set of vertices (atoms), and `E` a set of edges(bonds). As for this implementation, each graph (molecule) will be represented as anadjacency tensor `A`, which encodes existence/non-existence of atom-pairs with theirone-hot encoded bond types stretching an extra dimension, and a feature tensor `H`, whichfor each atom, one-hot encodes its atom type. Notice, as hydrogen atoms can be inferred byRDKit, hydrogen atoms are excluded from `A` and `H` for easier modeling.<jupyter_code>atom_mapping = { "C": 0, 0: "C", "N": 1, 1: "N", "O": 2, 2: "O", "F": 3, 3: "F", } bond_mapping = { "SINGLE": 0, 0: Chem.BondType.SINGLE, "DOUBLE": 1, 1: Chem.BondType.DOUBLE, "TRIPLE": 2, 2: Chem.BondType.TRIPLE, "AROMATIC": 3, 3: Chem.BondType.AROMATIC, } NUM_ATOMS = 9 # Maximum number of atoms ATOM_DIM = 4 + 1 # Number of atom types BOND_DIM = 4 + 1 # Number of bond types LATENT_DIM = 64 # Size of the latent space def smiles_to_graph(smiles): # Converts SMILES to molecule object molecule = Chem.MolFromSmiles(smiles) # Initialize adjacency and feature tensor adjacency = np.zeros((BOND_DIM, NUM_ATOMS, NUM_ATOMS), "float32") features = np.zeros((NUM_ATOMS, ATOM_DIM), "float32") # loop over each atom in molecule for atom in molecule.GetAtoms(): i = atom.GetIdx() atom_type = atom_mapping[atom.GetSymbol()] features[i] = np.eye(ATOM_DIM)[atom_type] # loop over one-hop neighbors for neighbor in atom.GetNeighbors(): j = neighbor.GetIdx() bond = molecule.GetBondBetweenAtoms(i, j) bond_type_idx = bond_mapping[bond.GetBondType().name] adjacency[bond_type_idx, [i, j], [j, i]] = 1 # Where no bond, add 1 to last channel (indicating "non-bond") # Notice: channels-first adjacency[-1, np.sum(adjacency, axis=0) == 0] = 1 # Where no atom, add 1 to last column (indicating "non-atom") features[np.where(np.sum(features, axis=1) == 0)[0], -1] = 1 return adjacency, features def graph_to_molecule(graph): # Unpack graph adjacency, features = graph # RWMol is a molecule object intended to be edited molecule = Chem.RWMol() # Remove "no atoms" & atoms with no bonds keep_idx = np.where( (np.argmax(features, axis=1) != ATOM_DIM - 1) & (np.sum(adjacency[:-1], axis=(0, 1)) != 0) )[0] features = features[keep_idx] adjacency = adjacency[:, keep_idx, :][:, :, keep_idx] # Add atoms to molecule for atom_type_idx in np.argmax(features, axis=1): atom = Chem.Atom(atom_mapping[atom_type_idx]) _ = molecule.AddAtom(atom) # Add bonds between atoms in molecule; based on the upper triangles # of the [symmetric] adjacency tensor (bonds_ij, 
atoms_i, atoms_j) = np.where(np.triu(adjacency) == 1)     for (bond_ij, atom_i, atom_j) in zip(bonds_ij, atoms_i, atoms_j):         if atom_i == atom_j or bond_ij == BOND_DIM - 1:             continue         bond_type = bond_mapping[bond_ij]         molecule.AddBond(int(atom_i), int(atom_j), bond_type)      # Sanitize the molecule; for more information on sanitization, see     # https://www.rdkit.org/docs/RDKit_Book.html#molecular-sanitization     flag = Chem.SanitizeMol(molecule, catchErrors=True)     # Let's be strict. If sanitization fails, return None     if flag != Chem.SanitizeFlags.SANITIZE_NONE:         return None      return molecule  # Test helper functions graph_to_molecule(smiles_to_graph(smiles))<jupyter_output><empty_output><jupyter_text>Generate training set To save training time, we'll only use a tenth of the QM9 dataset.<jupyter_code>adjacency_tensor, feature_tensor = [], [] for smiles in data[::10]:     adjacency, features = smiles_to_graph(smiles)     adjacency_tensor.append(adjacency)     feature_tensor.append(features)  adjacency_tensor = np.array(adjacency_tensor) feature_tensor = np.array(feature_tensor)  print("adjacency_tensor.shape =", adjacency_tensor.shape) print("feature_tensor.shape =", feature_tensor.shape)<jupyter_output><empty_output><jupyter_text>Model The idea is to implement a generator network and a discriminator network via WGAN-GP, that will result in a generator network that can generate small novel molecules (small graphs). The generator network needs to be able to map (for each example in the batch) a vector `z` to a 3-D adjacency tensor (`A`) and 2-D feature tensor (`H`). For this, `z` will first be passed through a fully-connected network, for which the output will be further passed through two separate fully-connected networks. Each of these two fully-connected networks will then output (for each example in the batch) a tanh-activated vector followed by a reshape and softmax to match that of a multi-dimensional adjacency/feature tensor. As the discriminator network will receive as input a graph (`A`, `H`) from either the generator or from the training set, we'll need to implement graph convolutional layers, which allow us to operate on graphs. This means that the input to the discriminator network will first pass through graph convolutional layers, then an average-pooling layer, and finally a few fully-connected layers. The final output should be a scalar (for each example in the batch) which indicates the "realness" of the associated input (in this case a "fake" or "real" molecule). 
Graph generator<jupyter_code>def GraphGenerator(     dense_units,     dropout_rate,     latent_dim,     adjacency_shape,     feature_shape, ):     z = keras.layers.Input(shape=(LATENT_DIM,))     # Propagate through one or more densely connected layers     x = z     for units in dense_units:         x = keras.layers.Dense(units, activation="tanh")(x)         x = keras.layers.Dropout(dropout_rate)(x)      # Map outputs of previous layer (x) to [continuous] adjacency tensors (x_adjacency)     x_adjacency = keras.layers.Dense(tf.math.reduce_prod(adjacency_shape))(x)     x_adjacency = keras.layers.Reshape(adjacency_shape)(x_adjacency)     # Symmetrify tensors in the last two dimensions     x_adjacency = (x_adjacency + tf.transpose(x_adjacency, (0, 1, 3, 2))) / 2     x_adjacency = keras.layers.Softmax(axis=1)(x_adjacency)      # Map outputs of previous layer (x) to [continuous] feature tensors (x_features)     x_features = keras.layers.Dense(tf.math.reduce_prod(feature_shape))(x)     x_features = keras.layers.Reshape(feature_shape)(x_features)     x_features = keras.layers.Softmax(axis=2)(x_features)      return keras.Model(inputs=z, outputs=[x_adjacency, x_features], name="Generator")   generator = GraphGenerator(     dense_units=[128, 256, 512],     dropout_rate=0.2,     latent_dim=LATENT_DIM,     adjacency_shape=(BOND_DIM, NUM_ATOMS, NUM_ATOMS),     feature_shape=(NUM_ATOMS, ATOM_DIM), ) generator.summary()<jupyter_output><empty_output><jupyter_text>Graph discriminator **Graph convolutional layer**. The [relational graph convolutional layers](https://arxiv.org/abs/1703.06103) implement non-linearly transformed neighborhood aggregations. We can define these layers as follows: `H^{l+1} = σ(D^{-1} @ A @ H^{l} @ W^{l})`, where `σ` denotes the non-linear transformation (commonly a ReLU activation), `A` the adjacency tensor, `H^{l}` the feature tensor at the `l:th` layer, `D^{-1}` the inverse diagonal degree tensor of `A`, and `W^{l}` the trainable weight tensor at the `l:th` layer. Specifically, for each bond type (relation), the degree tensor expresses, in the diagonal, the number of bonds attached to each atom. Notice that in this tutorial `D^{-1}` is omitted, for two reasons: (1) it's not obvious how to apply this normalization on the continuous adjacency tensors (generated by the generator), and (2) the performance of the WGAN without normalization seems to be just fine. 
Furthermore, in contrast to the[original paper](https://arxiv.org/abs/1703.06103), no self-loop is defined, as we don'twant to train the generator to predict "self-bonding".<jupyter_code>class RelationalGraphConvLayer(keras.layers.Layer): def __init__( self, units=128, activation="relu", use_bias=False, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, **kwargs ): super().__init__(**kwargs) self.units = units self.activation = keras.activations.get(activation) self.use_bias = use_bias self.kernel_initializer = keras.initializers.get(kernel_initializer) self.bias_initializer = keras.initializers.get(bias_initializer) self.kernel_regularizer = keras.regularizers.get(kernel_regularizer) self.bias_regularizer = keras.regularizers.get(bias_regularizer) def build(self, input_shape): bond_dim = input_shape[0][1] atom_dim = input_shape[1][2] self.kernel = self.add_weight( shape=(bond_dim, atom_dim, self.units), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, trainable=True, name="W", dtype=tf.float32, ) if self.use_bias: self.bias = self.add_weight( shape=(bond_dim, 1, self.units), initializer=self.bias_initializer, regularizer=self.bias_regularizer, trainable=True, name="b", dtype=tf.float32, ) self.built = True def call(self, inputs, training=False): adjacency, features = inputs # Aggregate information from neighbors x = tf.matmul(adjacency, features[:, None, :, :]) # Apply linear transformation x = tf.matmul(x, self.kernel) if self.use_bias: x += self.bias # Reduce bond types dim x_reduced = tf.reduce_sum(x, axis=1) # Apply non-linear transformation return self.activation(x_reduced) def GraphDiscriminator( gconv_units, dense_units, dropout_rate, adjacency_shape, feature_shape ): adjacency = keras.layers.Input(shape=adjacency_shape) features = keras.layers.Input(shape=feature_shape) # Propagate through one or more graph convolutional layers features_transformed = features for units in gconv_units: features_transformed = RelationalGraphConvLayer(units)( [adjacency, features_transformed] ) # Reduce 2-D representation of molecule to 1-D x = keras.layers.GlobalAveragePooling1D()(features_transformed) # Propagate through one or more densely connected layers for units in dense_units: x = keras.layers.Dense(units, activation="relu")(x) x = keras.layers.Dropout(dropout_rate)(x) # For each molecule, output a single scalar value expressing the # "realness" of the inputted molecule x_out = keras.layers.Dense(1, dtype="float32")(x) return keras.Model(inputs=[adjacency, features], outputs=x_out) discriminator = GraphDiscriminator( gconv_units=[128, 128, 128, 128], dense_units=[512, 512], dropout_rate=0.2, adjacency_shape=(BOND_DIM, NUM_ATOMS, NUM_ATOMS), feature_shape=(NUM_ATOMS, ATOM_DIM), ) discriminator.summary()<jupyter_output><empty_output><jupyter_text>WGAN-GP<jupyter_code>class GraphWGAN(keras.Model): def __init__( self, generator, discriminator, discriminator_steps=1, generator_steps=1, gp_weight=10, **kwargs ): super().__init__(**kwargs) self.generator = generator self.discriminator = discriminator self.discriminator_steps = discriminator_steps self.generator_steps = generator_steps self.gp_weight = gp_weight self.latent_dim = self.generator.input_shape[-1] def compile(self, optimizer_generator, optimizer_discriminator, **kwargs): super().compile(**kwargs) self.optimizer_generator = optimizer_generator self.optimizer_discriminator = optimizer_discriminator self.metric_generator = 
keras.metrics.Mean(name="loss_gen") self.metric_discriminator = keras.metrics.Mean(name="loss_dis") def train_step(self, inputs): if isinstance(inputs[0], tuple): inputs = inputs[0] graph_real = inputs self.batch_size = tf.shape(inputs[0])[0] # Train the discriminator for one or more steps for _ in range(self.discriminator_steps): z = tf.random.normal((self.batch_size, self.latent_dim)) with tf.GradientTape() as tape: graph_generated = self.generator(z, training=True) loss = self._loss_discriminator(graph_real, graph_generated) grads = tape.gradient(loss, self.discriminator.trainable_weights) self.optimizer_discriminator.apply_gradients( zip(grads, self.discriminator.trainable_weights) ) self.metric_discriminator.update_state(loss) # Train the generator for one or more steps for _ in range(self.generator_steps): z = tf.random.normal((self.batch_size, self.latent_dim)) with tf.GradientTape() as tape: graph_generated = self.generator(z, training=True) loss = self._loss_generator(graph_generated) grads = tape.gradient(loss, self.generator.trainable_weights) self.optimizer_generator.apply_gradients( zip(grads, self.generator.trainable_weights) ) self.metric_generator.update_state(loss) return {m.name: m.result() for m in self.metrics} def _loss_discriminator(self, graph_real, graph_generated): logits_real = self.discriminator(graph_real, training=True) logits_generated = self.discriminator(graph_generated, training=True) loss = tf.reduce_mean(logits_generated) - tf.reduce_mean(logits_real) loss_gp = self._gradient_penalty(graph_real, graph_generated) return loss + loss_gp * self.gp_weight def _loss_generator(self, graph_generated): logits_generated = self.discriminator(graph_generated, training=True) return -tf.reduce_mean(logits_generated) def _gradient_penalty(self, graph_real, graph_generated): # Unpack graphs adjacency_real, features_real = graph_real adjacency_generated, features_generated = graph_generated # Generate interpolated graphs (adjacency_interp and features_interp) alpha = tf.random.uniform([self.batch_size]) alpha = tf.reshape(alpha, (self.batch_size, 1, 1, 1)) adjacency_interp = (adjacency_real * alpha) + (1 - alpha) * adjacency_generated alpha = tf.reshape(alpha, (self.batch_size, 1, 1)) features_interp = (features_real * alpha) + (1 - alpha) * features_generated # Compute the logits of interpolated graphs with tf.GradientTape() as tape: tape.watch(adjacency_interp) tape.watch(features_interp) logits = self.discriminator( [adjacency_interp, features_interp], training=True ) # Compute the gradients with respect to the interpolated graphs grads = tape.gradient(logits, [adjacency_interp, features_interp]) # Compute the gradient penalty grads_adjacency_penalty = (1 - tf.norm(grads[0], axis=1)) ** 2 grads_features_penalty = (1 - tf.norm(grads[1], axis=2)) ** 2 return tf.reduce_mean( tf.reduce_mean(grads_adjacency_penalty, axis=(-2, -1)) + tf.reduce_mean(grads_features_penalty, axis=(-1)) )<jupyter_output><empty_output><jupyter_text>Train the modelTo save time (if run on a CPU), we'll only train the model for 10 epochs.<jupyter_code>wgan = GraphWGAN(generator, discriminator, discriminator_steps=1) wgan.compile( optimizer_generator=keras.optimizers.Adam(5e-4), optimizer_discriminator=keras.optimizers.Adam(5e-4), ) wgan.fit([adjacency_tensor, feature_tensor], epochs=10, batch_size=16)<jupyter_output><empty_output><jupyter_text>Sample novel molecules with the generator<jupyter_code>def sample(generator, batch_size): z = tf.random.normal((batch_size, LATENT_DIM)) graph = 
generator.predict(z) # obtain one-hot encoded adjacency tensor adjacency = tf.argmax(graph[0], axis=1) adjacency = tf.one_hot(adjacency, depth=BOND_DIM, axis=1) # Remove potential self-loops from adjacency adjacency = tf.linalg.set_diag(adjacency, tf.zeros(tf.shape(adjacency)[:-1])) # obtain one-hot encoded feature tensor features = tf.argmax(graph[1], axis=2) features = tf.one_hot(features, depth=ATOM_DIM, axis=2) return [ graph_to_molecule([adjacency[i].numpy(), features[i].numpy()]) for i in range(batch_size) ] molecules = sample(wgan.generator, batch_size=48) MolsToGridImage( [m for m in molecules if m is not None][:25], molsPerRow=5, subImgSize=(150, 150) )<jupyter_output><empty_output>
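<jupyter_text>Optional sanity check (not part of the original tutorial): a rough and commonly used quality measure for generated molecules is validity, i.e. the fraction of sampled graphs that RDKit can sanitize into molecule objects. The sketch below only relies on the `sample` function and `wgan.generator` defined above; the `batch_size` of 1000 is an arbitrary choice.<jupyter_code># Sample a larger batch and count how many graphs survive sanitization
generated = sample(wgan.generator, batch_size=1000)
num_valid = sum(m is not None for m in generated)
print("Validity: {:.1%}".format(num_valid / len(generated)))<jupyter_output><empty_output>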
keras-io/examples/generative/ipynb/wgan-graphs.ipynb/0
{ "file_path": "keras-io/examples/generative/ipynb/wgan-graphs.ipynb", "repo_id": "keras-io", "token_count": 8264 }
102
# Character-level text generation with LSTM **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2015/06/15<br> **Last modified:** 2020/04/30<br> **Description:** Generate text from Nietzsche's writings with a character-level LSTM. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/lstm_character_level_text_generation.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/lstm_character_level_text_generation.py) --- ## Introduction This example demonstrates how to use a LSTM model to generate text character-by-character. At least 20 epochs are required before the generated text starts sounding locally coherent. It is recommended to run this script on GPU, as recurrent networks are quite computationally intensive. If you try this script on new data, make sure your corpus has at least ~100k characters. ~1M is better. --- ## Setup ```python import keras from keras import layers import numpy as np import random import io ``` --- ## Prepare the data ```python path = keras.utils.get_file( "nietzsche.txt", origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt", ) with io.open(path, encoding="utf-8") as f: text = f.read().lower() text = text.replace("\n", " ") # We remove newlines chars for nicer display print("Corpus length:", len(text)) chars = sorted(list(set(text))) print("Total chars:", len(chars)) char_indices = dict((c, i) for i, c in enumerate(chars)) indices_char = dict((i, c) for i, c in enumerate(chars)) # cut the text in semi-redundant sequences of maxlen characters maxlen = 40 step = 3 sentences = [] next_chars = [] for i in range(0, len(text) - maxlen, step): sentences.append(text[i : i + maxlen]) next_chars.append(text[i + maxlen]) print("Number of sequences:", len(sentences)) x = np.zeros((len(sentences), maxlen, len(chars)), dtype="bool") y = np.zeros((len(sentences), len(chars)), dtype="bool") for i, sentence in enumerate(sentences): for t, char in enumerate(sentence): x[i, t, char_indices[char]] = 1 y[i, char_indices[next_chars[i]]] = 1 ``` <div class="k-default-codeblock"> ``` Corpus length: 600893 Total chars: 56 Number of sequences: 200285 ``` </div> --- ## Build the model: a single LSTM layer ```python model = keras.Sequential( [ keras.Input(shape=(maxlen, len(chars))), layers.LSTM(128), layers.Dense(len(chars), activation="softmax"), ] ) optimizer = keras.optimizers.RMSprop(learning_rate=0.01) model.compile(loss="categorical_crossentropy", optimizer=optimizer) ``` --- ## Prepare the text sampling function ```python def sample(preds, temperature=1.0): # helper function to sample an index from a probability array preds = np.asarray(preds).astype("float64") preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) ``` --- ## Train the model ```python epochs = 40 batch_size = 128 for epoch in range(epochs): model.fit(x, y, batch_size=batch_size, epochs=1) print() print("Generating text after epoch: %d" % epoch) start_index = random.randint(0, len(text) - maxlen - 1) for diversity in [0.2, 0.5, 1.0, 1.2]: print("...Diversity:", diversity) generated = "" sentence = text[start_index : start_index + maxlen] print('...Generating with seed: "' + sentence + 
'"') for i in range(400): x_pred = np.zeros((1, maxlen, len(chars))) for t, char in enumerate(sentence): x_pred[0, t, char_indices[char]] = 1.0 preds = model.predict(x_pred, verbose=0)[0] next_index = sample(preds, diversity) next_char = indices_char[next_index] sentence = sentence[1:] + next_char generated += next_char print("...Generated: ", generated) print("-") ``` <div class="k-default-codeblock"> ``` 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 13s 6ms/step - loss: 2.2850 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 0 ...Diversity: 0.2 ...Generating with seed: " fixing, disposing, and shaping, reaches" ...Generated: the strought and the preatice the the the preserses of the truth of the will the the will the crustic present and the will the such a struent and the the cause the the conselution of the such a stronged the strenting the the the comman the conselution of the such a preserst the to the presersed the crustic presents and a made the such a prearity the the presertance the such the deprestion the wil - ...Diversity: 0.5 ...Generating with seed: " fixing, disposing, and shaping, reaches" ...Generated: and which this decrestic him precession the consentined the a the heartiom the densice take can the eart of the comman of the freedingce the saculy the of the prestice the sperial its the artion of the in the true the beliefter of have the in by the supprestially the strenter the freeding the can the cour the nature with the art of the is the conselvest and who of the everything the his sour of t - ...Diversity: 1.0 ...Generating with seed: " fixing, disposing, and shaping, reaches" ...Generated: es must dassing should as the upofing of eamanceicing conductnest ald of wonly lead and ub[ an it wellarvess of masters heave that them and everyther contle oneschednioss blens astiunts firmlus in that glean ar to conlice that is bowadjs by remain impoully hustingques it 2 otherewit fulureatity, self-stinctionce precerenccencenays may 'f neyr tike the would pertic soleititss too- mainfderna- - ...Diversity: 1.2 ...Generating with seed: " fixing, disposing, and shaping, reaches" ...Generated: --the st coutity, what cout madvard; - his nauwe, theeals, antause timely chut"s, their cogklusts, meesing aspreesslyph: in woll the fachicmst, a nature otherfanience that wno=--in weakithmel masully conscance, he in the rem;rhti! 
there the wart woulditainally riseed to the knew but the menapatepate aisthings so toamand,y of has pructure in mawe,, grang tye cruratiom of the cortruguale, chirope ge - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 7s 4ms/step - loss: 1.6243 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 1 ...Diversity: 0.2 ...Generating with seed: "ies to which i belong?--but not to mysel" ...Generated: f its and and and another and in the experiences which all the conscience of the such a conscience and a thing of the sciented that the simply of the preservers that the superhations of the scientions and account of the the seems to the moral conscience of the scientions of the species of the scientions and an entime of the which all the a such a soulter and in the self-result and all the speciall - ...Diversity: 0.5 ...Generating with seed: "ies to which i belong?--but not to mysel" ...Generated: f for a man something of his man which is another and has be the the man be such another honest and which all that it is other in which all the himself of the would this concertaly in the thus decredicises of the a conscience of the consciences and man and dissenses of the highest and belief of the a thing a the will the conscience. the decerated the concertation of his very one many religio - ...Diversity: 1.0 ...Generating with seed: "ies to which i belong?--but not to mysel" ...Generated: ly hoppealit, or imptaicters to wan trardeness an oppoited fance, as the man" step-bsy-oneself form of his religion that the own an accosts the want that he the "consequent accidence justaverage bands one," which a such for this is roble, resitu in which as does not none, and highly in the "thy not be contramjy of a valsed about foreges. whicerera rapays. he which look be appearing to new imagness - ...Diversity: 1.2 ...Generating with seed: "ies to which i belong?--but not to mysel" ...Generated: f, jetyessphers; in the pposition whi; plajoy one civane. for a hert--saens. always that alsoedness resuritionly) stimcting? :wil "sympons are doistity: mull. we whahe: it the lad not oldming, even auniboan eke for equasly a clunged twreaks unfunghatd of themover ebse, for hi, only been about in stackady their other, that it miste all that mesies of x cin i mudy be wenew. 
"_wann lines; sick-dy, l - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.4987 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 2 ...Diversity: 0.2 ...Generating with seed: "s and mysteries peculiar to the fresh, s" ...Generated: o the soul of the soul of the sense of the sense of the sense of the sense of the commance of the sense of the sense of the soul of the soul of the sense of the soul of the sense of the soul of the soul of the soul of the soul of the possessed and also in order to all the problem of the soul of the extent is a the sense of the soul of the sense of the sense of the soul of the sense of the sense of - ...Diversity: 0.5 ...Generating with seed: "s and mysteries peculiar to the fresh, s" ...Generated: ee we extent and most of commance of the sense of the most fact of the extents by the exrentined and community of all the explet and its forthour a honted at life of each of the sees of the consequences of commance the most in such some same world and religions in the self-community more of the worther longer to the exte the delight the sense that certainly and complet such an inself the the comma - ...Diversity: 1.0 ...Generating with seed: "s and mysteries peculiar to the fresh, s" ...Generated: uthe is different is worther and same. metaphysical commence. 14 =morathe of its tixuned gox ccumptances, and actions prajed. deen at all nesposart of slight to lack_" is the our philosopher most whanethis which onted ackatoest love reverfuques does alsolars, and the suprer and own purple" for the hant exists it us at excepted, bad sepencates"--ogeroment edremets. 5lid aud the bise love; it - ...Diversity: 1.2 ...Generating with seed: "s and mysteries peculiar to the fresh, s" ...Generated: pe'sequati"nnd unferdice ards ark hertainsly as" enoughe laws and so uprosile of cullited herrely posyed who patule to make sel no take head berowan letedn eistracted pils always whated knowledge--wandsrious of may. by which. whowed crite inneeth hotere, amalts in nature, for the whate de he h4s nkeep often are to dimagical fact the qulitianttrep. 
yous "be leer natimious, _on that anything mereleg - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.4367 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 3 ...Diversity: 0.2 ...Generating with seed: "nd sinfulness (as, for instance, is stil" ...Generated: l man of the sense of the sense of the sense and substanter to the compresent for the substant the sense of the moral the sense of the sense of the sense of the sense of the sense of the sense of the sense of the sense of the sense of the sense of the sense and as the sense of the sense of the sense of the sense of the sense of the senses to the sense of the sense of the morality and the sensation - ...Diversity: 0.5 ...Generating with seed: "nd sinfulness (as, for instance, is stil" ...Generated: l has standing them that a some only to be man the origin of think of the souls and and we are man as a standard at the soul in a morality, and hoodent were the sense of the sight and spectards satisfeces and almost as i among the especial the great spirits of this desirate of the perhaps to a more the whole say the imposition of a stand to whom we are in the great recover to deed the things of th - ...Diversity: 1.0 ...Generating with seed: "nd sinfulness (as, for instance, is stil" ...Generated: l loods in evenymeness--nor heneringence to have conditionance to turness behold great, us wornt ableme--it is accorditation (amble is music, which moral even which greates and him, themence it may which we greats to his comphewly value a presentlysess orled baching only every oarseloursed. its composp in at the to-didless cannot levers of the morals to . musicable applack sympathy to life of thei - ...Diversity: 1.2 ...Generating with seed: "nd sinfulness (as, for instance, is stil" ...Generated: l-perressions; to oricate sned men of vaice idear, "flows invaulery to anmied flather, mankind_ as his ecivable to their clusianer on littid combletection sublian? comelaciesm's instincts. few mever yy!" and rurgived hiadores to promese amen affellfused; sesble ?for truth, and course and into life.n quite exprement of rulaces, which recognce to ordctationa! 
oralness,--must be lot an let ardel worn - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.3964 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 4 ...Diversity: 0.2 ...Generating with seed: "ere, warlike, wisely silent, reserved, a" ...Generated: nd all the sense of the sense of the sense of the sense of the world to the consequences in the most in the sense of the spectious of the science of the sense of the superficial to the prosis, and without the sense of present to the sense of the present to the prosis of the specially them is the sense of the most all the consequences of the sense of the sense of the intellectual them is the good i - ...Diversity: 0.5 ...Generating with seed: "ere, warlike, wisely silent, reserved, a" ...Generated: nd above in all to be religions of the preachance of the world as the interthon them as it conduct as to the relation, to all the hally, who is to character of a them and in the most breat in the sense of the obvious every something being them and as in the greatest to may always soul in the false will superficial for the marture there is in the problem of seemates and power also the believer and - ...Diversity: 1.0 ...Generating with seed: "ere, warlike, wisely silent, reserved, a" ...Generated: id, trativally based to peoplested and music lives in forget for the case him, ever much, in reliantic all this often abyrudical loules one or enegst and doubt in the perslation and youn of procoction (and ulconceal that he quysion and sflead matterion for interlogied, of its himself ore a inedi to faithto. yew can approsses were by the own. stot all in faveratility, pervery grated ililess, under - ...Diversity: 1.2 ...Generating with seed: "ere, warlike, wisely silent, reserved, a" ...Generated: will to science visifuet a fiones their leit. there known amoutrous outer in ra: there is ines, baint simply that it to thun been they be futary is breaks: thinn willing applaorate alsovelory, for reed--is rappetions cannotion degrees lage to abo come far yautitual e;ylageos constramionation in religionqme--it is as all forth, a "morally rences that is to smutits man.=--popaity him condition: a f - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.3667 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 5 ...Diversity: 0.2 ...Generating with seed: "o know this! the clumsiness of the germa" ...Generated: n and and and all the strange the consequently and the sense of the strange the spirit of the experience of the prospicial to a strange the spirit of the spirit of the soul of the life and and as a soul of the intention of the an an and and the intention of the conscience of the strange the spirituality and all the strange that is the priesting and and for the strange the spectarility and and and - ...Diversity: 0.5 ...Generating with seed: "o know this! the clumsiness of the germa" ...Generated: n are the tritude in the most still and as in the world and impulse and as the sense of the free one as a madain and about the possible to all the life and the right had not in the best proud and and in the strange the still in a manificting to the intentive of morals and as it is a sense of causity and book and person is an ancient, and and caved to a malicy of which we still to his religion of t - ...Diversity: 1.0 ...Generating with seed: "o know this! 
the clumsiness of the germa" ...Generated: n in the impirial is give increasons individe perconsimation not who noborted withichorth," in ougration, so a love of consequent and erioar friends thanedo syfulu early, we may be that, of "late, and extragriations and possesting-philour tone on let a fact of nature of nespited mendoms,, sudmeced by soughful, now fold, conditioned muniance of the ut conscioused the merit, in which say so one to - ...Diversity: 1.2 ...Generating with seed: "o know this! the clumsiness of the germa" ...Generated: na; obalityty and hord to resention nor cools indeed-shapp?y--for a onjouf, ?he pain", with regarding of woman to these- for they greitskantirishiansmi. fie's tair inilas to of the oboride nangumey age of mame ", be pettest even it this is mestain have nobort unlog[ming, and the dogawicarily ints ceased, ho, -elaplany i exacces, the whon is alwow them, calls. et ! er handy, whi flials, is his - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.3525 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 6 ...Diversity: 0.2 ...Generating with seed: " that it is well to be so. every profoun" ...Generated: d, the preservation of the same still to the extent the standard to the considerable to the strange the present and interpretation of the considerable the spiritudes the standard to the state of the profounce to the considerable the presented to the strange the great sense and does not delight and perhaps all the superiod and definite called the spectary and interpretation of the spirit and does n - ...Diversity: 0.5 ...Generating with seed: " that it is well to be so. every profoun" ...Generated: d and death, in the state as the seporate and even in the case to the place of all power of the contempting is hers superioding the strange the habing and such as a prise in all the means in the considerance of the strange and most present, in the pleasure of the intear that the standard of the like the with the soul, when they still the pliculity and even the belief and conscience of the belief i - ...Diversity: 1.0 ...Generating with seed: " that it is well to be so. every profoun" ...Generated: d, and i seeks to "destitity that want for the ovolughteszon almost a present, and act of perhaps man in the virtues the sume doun ideas, act always the inaricary ribal cartosity even to the will men would canter finally, appearance, the highest nonier as his asople-century even here. thitie a created nature. 16 , one from the still defect and palious--or tkan the solitation everith su - ...Diversity: 1.2 ...Generating with seed: " that it is well to be so. every profoun" ...Generated: cews; that instince as, eyxiwatolation or to discess out of mask versimsudver as the grantor forturations these--having areing temborzed agdosh in huron adcinturing, a is are crisivalis clore-world now a spechance. the stall at liss whole! the chors, upon what , the tworming immorality of contualion the the hither. the cef truitk taox? 
this out pninced that crancivire, "c,onssisfulity.--a st - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.3342 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 7 ...Diversity: 0.2 ...Generating with seed: "he psychologist have his ears open throu" ...Generated: gh the species of the sense of the senses and the thing of the subtlety and the soul the species to the soul of the species the subtlety and the state of the sense of the subtlety and the species the senses and and the religious and in the subtlety the sense of the state of the state of the state of the subtlety and the senses the subtlety that the rest that the proposition of the senses and the s - ...Diversity: 0.5 ...Generating with seed: "he psychologist have his ears open throu" ...Generated: gh a substain themselves to be the contradiction of the sense, and in a treatures and into the unimation of wind the thing that the subtles and for their highest and also the more of the soul the position of this world of the last belongs to the greatest in the interpreted to something of power can not be understand as a riling the same soul of the extendent and the offered to every subtle that in - ...Diversity: 1.0 ...Generating with seed: "he psychologist have his ears open throu" ...Generated: gh spiocramentss to semulate wise in a guite thas wish to the ta; that that which best, permotres and like hopons the religion for a rende ndar-in any begart is lot for that the might principle such ougureally wherever should from otherwaed, as ergrimage it feeling every best the dreamly gut that the fartly artists tow science cound the one's extenting been conspicious and directness, not his very - ...Diversity: 1.2 ...Generating with seed: "he psychologist have his ears open throu" ...Generated: gh its with other i does not confemind and blobable take a them, learand prevail thiss!" in the fagce pleased subtlege. see" higher value? thin is about butn upon to pescling ilitement will knows as called secres like pvath of the fighis: we do noware dild. superverce is rawhtes and reverenc by something gruth is that there is fundation he often wherever men and of the, once as it nature. 125 butu - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 7s 4ms/step - loss: 1.3245 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 8 ...Diversity: 0.2 ...Generating with seed: "ich religion satisfies and which science" ...Generated: , the strange that the respect and interpretents and the strange the strange the strange the commonal sense of the world of the most delicate the strange the spectarion the strange the soul of the strange the strange the strange the strange the struct of the moral the moral the saints of the strange the strange the strange the more of the most possibility of the strange the strange the fact the st - ...Diversity: 0.5 ...Generating with seed: "ich religion satisfies and which science" ...Generated: of the self-dome in the struct of the unregarded and of the religion of the original or and interpretents to the standarding and still, and in the habits to pleasure of the saints of the strange the good and hold of the most truth of a still the way of the present the conception of the great compulsion of the state of the soul of a hatred with the religion of a conception and man still of the mor - ...Diversity: 1.0 ...Generating with seed: "ich religion satisfies and which science" ...Generated: of the heists want for phenmenamenessfally a cautf oflicality-ofies above. 
into the possibilities to our behine. this of opposite. in all epelem(y; which has a presumbtive sensual, shut, gation experient and floney--as respect at least one altoor doubt, the religios of renwers: grateful could more imply that it is god in a stranges. the uneerline conventent a man must love upiear: who sael a the - ...Diversity: 1.2 ...Generating with seed: "ich religion satisfies and which science" ...Generated: d fabll to kild with moraliqying--that your appride-sideal. into rather one the ofte ple. the syst sudmou thinkabl s'straths vette," thing as it is unchill offiest clean hourt in the reacheral his. hers:--they varned the plaists, myrees in order, to dick?all by nature. to his holdien, pwrised--the aspearality at is judger; is calles--faith as veakiness, to folly bet playingly the conceish. by grea - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.3093 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 9 ...Diversity: 0.2 ...Generating with seed: "lly threatening--namely, to acquire one " ...Generated: as the subtile and as the man and and also a suffering and the whole the matter of the same strong as the moral sacretation of the moral fact that the subtile and soul of the strong in the moral sense and soul of the more only and as the moral fact that the strong the present that is the present and an action of the moral sacreage and the contrary of the same as the man and all the extent that is - ...Diversity: 0.5 ...Generating with seed: "lly threatening--namely, to acquire one " ...Generated: same precesses and all unconditional common about moralists of the more that it be desire all the aofering of every morality in all the strong the only and that the nemitating origin, and what all the are more the way of the most entire and danger and historical that the same every sensation, as the new or only and something of his even should for a man of the are the man of the compartician and n - ...Diversity: 1.0 ...Generating with seed: "lly threatening--namely, to acquire one " ...Generated: for a life usequent young aman and must be stile, that they whyst masty, a species properhas life, perhaps need dangered to praise power must learns and dange or opinion and a tronge. one grease"" for whoever temperabilf oprible, indied these only will revalules no ennmus morality to inked a gesting this spoals charactercon upon establistous scientific alarrimable way for ours own all the signific - ...Diversity: 1.2 ...Generating with seed: "lly threatening--namely, to acquire one " ...Generated: is a "almost sun mere is charmped beakally,". but even utsiph. now delucious differentagely beaces himself, the fremists to you are emotably stoth this morbius. 
craristorous andju," or the motive until relare; of very that what is to prays--but this it or fathous submild', of trainfulive influence, he a fact -mist facult to allow mothm i was as threled, urwhy seew atcensibk for asthis flessic and - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2984 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 10 ...Diversity: 0.2 ...Generating with seed: "ong approximate equals in power, as thuc" ...Generated: h as the problems of the such a strength of the saints of the sense of the problem of the sublime of the possible of the possible of the contradiction of the ordinary for the superior of the power of the preservation of the problems of the problems of the contradiction of the same possible of the sense of the preservation of the problems of the problems of the consist of the sublime on the transle - ...Diversity: 0.5 ...Generating with seed: "ong approximate equals in power, as thuc" ...Generated: h gets the most decails it is a sublemest of the moral in the sense the doginar to the same notion of the saint and preserve the searous the powers which is a man with whole is a stand to the person of the latter here moralish which the word which is the doge a so the manserness of the possible of the a phelogic as a say the flush have the thing of the interpretence of the philosophy of the interp - ...Diversity: 1.0 ...Generating with seed: "ong approximate equals in power, as thuc" ...Generated: h a personal oexs, embabliew. glact, a contently day, it propoded, should "part as learning of the equally hard, there are not without its preseragion "to first more suneles of life. one edeticiate been concerned. euroves, a master which have artifuse--here awly "princedy, it lust hithertopatihingly countedly dradencated pusshing caning of a stand. they have not a struct of perceive willayly surna - ...Diversity: 1.2 ...Generating with seed: "ong approximate equals in power, as thuc" ...Generated: h does noiked the serious false plahe entiment raissipres magbariaticy. ave desersive. it between everyprikn onequity. for with friend, it betished dreamful civilizations. their wrect upon etjoriiu;.-crifciplateques. hil erto ingoes beers delight. 
from which, in man spitating a ; therhoneity.y that it cise1.whish his exthe mas, will that obedien without ity briak of our age have to cambek of co - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2849 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 11 ...Diversity: 0.2 ...Generating with seed: " night-owls of work even in full day, ye" ...Generated: been and a stand with the same time in the contradictate the condition of man in the world the most possibly the most pleasure to the whole conscience of the standard to a prove the most and sense and really the most prevail in the former to superficial to the world in the contradicts the condition of the condition of the sense of the sour of the same distress to the most interpretation of the po - ...Diversity: 0.5 ...Generating with seed: " night-owls of work even in full day, ye" ...Generated: have not had and the rest of a make nothing to an experience to this mistake and every one may for the whole and expression of which is a "stire"--and in the habits to an are and in the more possible and some turus own are not as the sain one would for the decession of the strength of the sour entire delight and condition and condition and his distrust, in the most compare development of a to ass - ...Diversity: 1.0 ...Generating with seed: " night-owls of work even in full day, ye" ...Generated: ast--doveration are companding good eyes to its dolingest europe called motive eyes--his nearly--we orless that suspision rare and could fruedy, not about madavem no more account they more owing)y illigory new man of this humca-leng make fear yet it is it -h. hard my ordinary), he whatever is yet "two habits, and the master in his reguivonism with, would sa, like the men would alwaind; there - ...Diversity: 1.2 ...Generating with seed: " night-owls of work even in full day, ye" ...Generated: owned i-crrjeng and dones, syntaghtom, man. 
it tho german just among sehiable, know"of ofterness and alfadged, and false with-musical profound losters wherewer', a hist the charage in law to mought to protgerative of "lovuded" to prises by a beneverseening his gards witkes that attach harmane in a senses fathonick platny right kind and merit secreey-- true, that plunvine--with the virtie erro- - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2824 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 12 ...Diversity: 0.2 ...Generating with seed: "the will of a multiplicity for a simplic" ...Generated: id to be the conception of the fact that it is a profound the same distrust of the consist of the most and all the most interpret to be the most interpretain that the stricted to in the senses and religious and and as a religious and distrust of the problem of the senses the most and the present the same the most and the condition of the concerning the stricted to the consequent of the problem of - ...Diversity: 0.5 ...Generating with seed: "the will of a multiplicity for a simplic" ...Generated: itation of the interpretain in the intention of any this in the valuation of the highter, the conditional soul itself and the transistice of their music of distraite, one corces and endisposition, and it that all the homere he has all relaph in the outwory the discovered to be bead, as in this deceive that the opporting of the action that the problem of such as the extent to power, that it is the - ...Diversity: 1.0 ...Generating with seed: "the will of a multiplicity for a simplic" ...Generated: ided promeny, whoneye anoxier of morality, is called brings he mochonted and incimmusts. among metaphess for wisks itself(an man, the life. explained, theredec indispoler prose might a virtegane, the barbar cases ?therability--as foolest young! if he likes of flesinges instigitical? is the nead, simplained, who have discoveration.--we soondfol, small spectar. that sacrificed--is quite in consequen - ...Diversity: 1.2 ...Generating with seed: "the will of a multiplicity for a simplic" ...Generated: ids and truthowingering from the world, to call: that his ?haus? to resu, drem only relateming. such europe in essyes doo, but eyesckedfreed many com"tiked: from was relapinl wish this immicicul inmoecogdes, when flomoch only what is usy avendpdmed, bollors, andwy, in great, out of the menjiptch" is to llise appearantation--it -things out of customumeces. 
it obldoube, and the after wisely leasing - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2774 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 13 ...Diversity: 0.2 ...Generating with seed: "stomary to depreciate these little token" ...Generated: s of the most for the sense of the sense of the sense of the sense of the sense of the soul of the self-desires of the sense of the sense of the sensation of the sense of the sensation of the sense of the sense of the stricuble and desires in the sensation of the sensual concerned the sensation of the sensation of the sense of the sensual problem of the sensual for the sensation of the sense of th - ...Diversity: 0.5 ...Generating with seed: "stomary to depreciate these little token" ...Generated: man, which sense, a man in the sense of the future of the possible of the basply to the will, on the most explain of purpose of liver as hitherto strict, the light of the distance that the strength of the superstitie of the present the suprement dream deception of the present in the superstitious of dangerous speciments of think that a strength, the specitities (for instance, that it is a soul of - ...Diversity: 1.0 ...Generating with seed: "stomary to depreciate these little token" ...Generated: ce perjosation, borneen him refined the subject of gurstumet only as only for; an any indreatingly and blools not only man of self-formefully sillant for fear yew dathers, immorek--he wishou to course the people, a manier may be manifest toing to know pest nhe, wish tf the helping only; the stake us a tain cursess. the cal how to the whole , perness, being the most other case, which is beathous an - ...Diversity: 1.2 ...Generating with seed: "stomary to depreciate these little token" ...Generated: d oppossowance which the dendot e als hew cannechoring cishes and communeva net prekittoors, wieln fasped. upon the comprehens-reass! it has, "sub jude"he-whole as insidges, lagens and other historical but it inferentally whese wages, must has has injuribity septles, with his isists, and a; for.iergateny, which beark is things--but every5ne their class of re"tri.""--who hat?--pentlathering is the - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2700 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 14 ...Diversity: 0.2 ...Generating with seed: "ealing, stiffer aye in head and kne" ...Generated: w the consequence of the moral wholl the strong the subjection of the problem of the sense of the sense of the sublime constitution of the subtles and more of the fact that it is a species of the contempt of the soul of the consequence of the truths of the problem of the superior of the subjection of the subjection of the higher that the sense of the sense of the subjection of the seem of the mora - ...Diversity: 0.5 ...Generating with seed: "ealing, stiffer aye in head and kne" ...Generated: w his consequence and still a seer are the man of the fact that a sense in the consequence of the most passe of the may have to have not an actions. there are so the ascent had been so finding and so the moral spirit becomes lookings of the victions of the feeling of the probably man is the deligious saint that is always recognized may in a world, there is not easily have their "wished to understa - ...Diversity: 1.0 ...Generating with seed: "ealing, stiffer aye in head and kne" ...Generated: cierness of retine. 
and cancsents lost the worgh for this wound in personal the name and imptined untking there were disturculed may sfollowity--but been sublean and former and cay things it play fundament the evilce dange of it of maturiturance and say; the fils, at the charg. it was it fortureing more fundaments. a pleasure would see whowell disestility of adaptic5 which more, truth an an charar - ...Diversity: 1.2 ...Generating with seed: "ealing, stiffer aye in head and kne" ...Generated: tard to fiees, nature mogeth of this fion, unles can nanteburul grown, discernints into ideal verces men in this pribolor, in nachus--which harm: we would mell redicaäsing "at thygeer pointure very expxagn, which stands, comes i to too iddeed, of impuljeful: to tough percedtem-! not the trimoting teacher will underetoduce--nor justice, beaged, these hund..nech:with my justice, and lovering, and no - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2690 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 15 ...Diversity: 0.2 ...Generating with seed: " l'homme normal, que l'homme est le plus" ...Generated: the probably the master of the fact that a superstition of the superstition of the probably the considerate of the master of the stronger that is a philosophers as a present the considerate and a present that a strict that the sense of the possibility and a philosophers of the strictes and as the stronger that a man also the present that is a serve a sense of the state of the probably the state o - ...Diversity: 0.5 ...Generating with seed: " l'homme normal, que l'homme est le plus" ...Generated: so distinctions of such the probably the as a state of easily to the lobe and however, as the one everything and disposite the palpordical single and instraint and sufficient, and say and show there is a speaks and suffers to the strange of the things for the contempths that the master of the life in the same sought to which it is the say and above such as a serious that the special is a supers o - ...Diversity: 1.0 ...Generating with seed: " l'homme normal, que l'homme est le plus" ...Generated: that is a rehesses pleasure--and of german wished the human, and its sugher. inclimin trahtforing him the rudrination he gains it: he will dangerorness, when a motion when for crre-mann, as was human; afticre, which rathes . erto there is alsoker to affaid talked. that man gryon young first means of the maistaring that may just from merely feel: be purisable dabled to echon of estance - ...Diversity: 1.2 ...Generating with seed: " l'homme normal, que l'homme est le plus" ...Generated: trimatism to what is chancerence, sitcented.--what not are sociations afone, women hid. the spign cruish, liken toingelen whild he these c. there hhulf master, do love is mucking indilf-merk suffers of old as if here in faptor, it condiving, it was seposed to thought the possifically rea-usuar,. every, cevented, did".--in this latter purhos, do not seng dracted doftyon) is. but anxignes: men - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2607 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 16 ...Diversity: 0.2 ...Generating with seed: "n. 
in all pessimistic religions the act" ...Generated: ion of the senting and all the senting and believes and soul of the religious sentiments of the sentines and such a senting and soul of the same with so that it without the special and soul of the sentiments of the sublime and suffering and all the world and such a senting and such a consciousness of the most senting is always for the same thing of the self-consciousness, and all the special into - ...Diversity: 0.5 ...Generating with seed: "n. in all pessimistic religions the act" ...Generated: ion of the world and and almost through the world and action and same is consciousness and desire the motives there are have for the repultion of as the morality is the more one all its work of the self-consciousness, must be the will for ascendence and morality such respect sentiment and personalist that profoundly heart with the fact that it is always never the world and satter the morality in t - ...Diversity: 1.0 ...Generating with seed: "n. in all pessimistic religions the act" ...Generated: ion as weakness itself out of all means, touch occasion: what, phery which. in the smuch a head and extantian hicher the modern history well anouhs like in made mind would develooy non what plainest that it who deep begin is from early, as redicate and.--religios--tembus of all the world. how foragred towards as has been believed," whatives. 1) =consequence of consequences and due at which i "e - ...Diversity: 1.2 ...Generating with seed: "n. in all pessimistic religions the act" ...Generated: ually seess, thay a volition it were purpeted toider--which where are educatic! for vireness for mave: true dgrain means do than as philes away i mean creaturentise, the look no "just the people--"free work outs)--in symfon cauth of its mirr, a thy, if werouns, comprehendhing "intellect--a thought; in his. the grade itself, medless, or good acco-tate) arus with all, arrangetes, in science art, - - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 7s 4ms/step - loss: 1.2522 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 17 ...Diversity: 0.2 ...Generating with seed: ", therefore, bad. "pain self prepared" d" ...Generated: istrustful to the sense of the same distinction of the self-consequences the state of the state of the same time to the subjection of the same time to the sense of the state of the same time to the same conscience of the sense of the state of the same time to the still of the same will to the state of the same time to a religious states of the same time to the state of the same time to the state o - ...Diversity: 0.5 ...Generating with seed: ", therefore, bad. "pain self prepared" d" ...Generated: o so the these the late of any stoble for the traditional from the sacrificimplity and the christian distrustful will it so seems these preservations to the superstition of a state and even there are the preserved by the state and possibility and service of and apprination, there is not all the attained many imperately in the art upon morality and the contempt to the work of an end are at light of - ...Diversity: 1.0 ...Generating with seed: ", therefore, bad. "pain self prepared" d" ...Generated: efed the limudite is even in shorn as late independen its. 
there is stated, is regards, a suffering encon easier apprietity, painful strange ofter hal the engow and ampbofician number, no viols have it simplicity--that is followics, and of breated, and symbjating out of beings hork logiounianess or conditionades are foreshions, however, agaized and regreess and good conceptions of what approud, an - ...Diversity: 1.2 ...Generating with seed: ", therefore, bad. "pain self prepared" d" ...Generated: reams. it had f so being needs, even who to-came ont evill even we comageage, but forcest) mesely pbary not be no pe'emperness.:ward to once as luth understowet, satisfied to sat, s glesses. with should, who can the point, sensicions, how man more friendldficual san to his better itself pade that in women), in there has purishood. n'al"--we disturts must what relepbing uprom, for privintal super - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2554 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 18 ...Diversity: 0.2 ...Generating with seed: "nd cannot adequately appreciate the art " ...Generated: of the powerful the ories of the same deceive and a subject of the same as the strength of the same as the subject of the powerful the artist of the strength of the same as the sense of the strength of the sense and the comprehend to the ascetic of the same disinceing and altorical and his contemporations of the sense of the strength of the strength of the same disposition of the fact that is a so - ...Diversity: 0.5 ...Generating with seed: "nd cannot adequately appreciate the art " ...Generated: and most a suffering and live of the resolutions, with should not may be soul, and the most definite as the speaks of love even the self-sacretic indicate to him always be loved out of the same respect of the same desire in order to could can as "the evil" on the same goals mankind of the restly respect is not in the actual valueing for the same discovers, and what is a present more comprehends to - ...Diversity: 1.0 ...Generating with seed: "nd cannot adequately appreciate the art " ...Generated: uncellieive new comerually, and not--and vary immoins of future pass mindfoley--that it find strength of sidering in[les, and yours diselcind hapseation--almost as it stormation for refinalityly to have bopeased alone among the way and most knowing only help and preliwly, love, sunceing, suder, to compulsion, which would name bhon among their aristics upon so seem of morality, when the stronger, s - ...Diversity: 1.2 ...Generating with seed: "nd cannot adequately appreciate the art " ...Generated: upon hann.wing, who we higher, susfiting what who have not sply alitial, case, but howes0s and hkard that attenturers.] hould even in the motreatened contrary--or mo, or therabousl" or movent mysent, almost as elimim with more tasterians, ineto sele appref4ation. other's glooly.=--but another in acts of was action, sufferem! even e. 
hihich] : then, not in this perhaps for science is inabse: on, - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2498 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 19 ...Diversity: 0.2 ...Generating with seed: "rom above, grows up within us gradually " ...Generated: the order to the subject of the sense of the subject of the sense of the sense of the sense of the sense of the sense of the sense of the sense of the conception of the most spectary--and is the sense of the conscience of the strength of the consciousness of the spectary the strength of the sense of the sense of the subject and incitive of the sense of the present to the spectary and the conceptio - ...Diversity: 0.5 ...Generating with seed: "rom above, grows up within us gradually " ...Generated: to the sention of an last a distrage base of conneption, and exped that a surpriate the things and pleasure to example, he every philosoph and the artist in the school is the surprisses his feels, and in his feelings, and is read but the order the connentation of sciences the extended conspitent man is the old hitherto be the present according to the spectary the a strength of the things is crames - ...Diversity: 1.0 ...Generating with seed: "rom above, grows up within us gradually " ...Generated: from which he will streng, to the rict into the sprame and motley only in thre like to reclet sques still, the measural : is dread beyond to far of possibility of the scientific ecolosmunifusenism! those almost as istujened by it could jagafal to actuotable conscience. je =have not easier of the certain which is ausueded fgon exercis, hera, and didden poince would not deivence, fine trike - ...Diversity: 1.2 ...Generating with seed: "rom above, grows up within us gradually " ...Generated: hbrishen is "just," it is a strivizen excessive axoforan and juud, gratituded, a portionscrarous boaves: permanly in reforeng. 
a ressed in appearens--necessmsion.=--suetrm-a midd made them withom inetye of sholl, not the very occulve 2natious impgretedy the devold, libenss of viciation; there is this wordedly, and perhe inquising) insidusel so obliblisingl.--that explessap ettented civilization - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2368 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 20 ...Diversity: 0.2 ...Generating with seed: " 80 =senility and death.=--apart from" ...Generated: the same time to do a religious of the strength of the strength of the sense of the strength of the sense of the soul of the more of the strength of the sense of the sense of the standard the state of the sense of the sense of the distrain of the sense of the standard the most word of the same rest of the problem of the sense of the sense of the strength of the strength of the sense of the proble - ...Diversity: 0.5 ...Generating with seed: " 80 =senility and death.=--apart from" ...Generated: the content of the day and the word bether he is always be greater the particul of the greatest intention of the art of the old and most precement of the destiny of the expense of the sense of the constitute his form of a result of the problem of the enciple of the sense of the striced to him of the sense of anything in the sense like and reverence evolution of the sacrificence of the scientific - ...Diversity: 1.0 ...Generating with seed: " 80 =senility and death.=--apart from" ...Generated: a littain certain wea, to expo the curious and the contradicage as so men is commandinary motive headers and nothing which of moral life than upon a gain purit and benuneny honest-dreshists--alwaytes! here to contrary, nothing one dong cast to please of univing his own same move in which one mask! in the trage--except ontle thee4-sor themselve cim, loft, in which no such excivay: whole reason wha - ...Diversity: 1.2 ...Generating with seed: " 80 =senility and death.=--apart from" ...Generated: hunateve enjoy combmen", from the struggeness, a still for sy.f, the patrmparage it for moralsing for wand, the man e liytest with this vert or toon ontanherowanclance of humanism and comire aspect; there any appear appear throte otherwise flatter meansly, in givent of peritary and be than ounchmunion who hame by today--faiz preceilzar mothes rule and woman is it to give we can the utolo, of the - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2450 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 21 ...Diversity: 0.2 ...Generating with seed: "fact that man is the animal not yet prop" ...Generated: er present to the contrary of the sense of the thing the most of the problem of the subject of the sense of the subject of the world of the substance of the subject of the present to the headity of the a substance of the sense of the contrary, the consequently and also a substance of the higher the contrary, which is the contrary. the more of the subject of the "modern firsts." 
129 =it belief i - ...Diversity: 0.5 ...Generating with seed: "fact that man is the animal not yet prop" ...Generated: udation in the whole cannot be the more christian the standard of the case the former who believed to prowless of the same present and in the backs of the contrary, a man with the ever the probably make prose in an advantage as the tractive of the sensation of the extent the more that they extent that love of wish present to be us the basis of the at the world of man who knowless have to say the b - ...Diversity: 1.0 ...Generating with seed: "fact that man is the animal not yet prop" ...Generated: er. nangece and lose about its main and sinfulness: hence its delfmer must be furuhules; he has deep actually not a man earth dangled. this present- and make prejudiptionated is taboon. thus extent to the assertmoting mourne by the youth as a complete oe! one is metaphysical, because this per, of guisest are pwass: first of reverence them artistifed, and would is not not something for spinises the - ...Diversity: 1.2 ...Generating with seed: "fact that man is the animal not yet prop" ...Generated: hinion. 1[2 in the magniing of woman is bunkngable mean. 1 1 e! neoue and past, sense; with one's ""prasse"--it gregadle, in every christian wernowlyses wil: besided toraks--appionsed god its europotica 'itself; we without alitable were essents. -mothe substance toway houds!" rather to go at brain, who only and its peri dightrutly stoss--that nikel times, almost uplers to exaghested then read - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2377 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 22 ...Diversity: 0.2 ...Generating with seed: "ind satisfaction. the real philosophers," ...Generated: and the thing of the strength of the standard of the strong the conscience of the contradictouge and order to be such a subject of the problem of the sense of the strength of the most also the sensation of the subtles and experiences and super-adures of the most believed in the strength of the strength of the things of the strong and subject of the same discipline and most believed and subjecting - ...Diversity: 0.5 ...Generating with seed: "ind satisfaction. the real philosophers," ...Generated: and the structing of the standard the spirit to a teres of finer for the subtles of a strong and action and people, the world in the philosopher of "the subtles of one than the world, the strength of purpose a strength of the fact that they are not they was the extends the sense of being interpretenting the point, and artistic the greatest form in every standards of an experience, and fine instin - ...Diversity: 1.0 ...Generating with seed: "ind satisfaction. the real philosophers," ...Generated: but to peoplafe encosm attempt to have deepest cincely personal lignuness book of iestly or fortensuicably, as is incort, and "good" in sort, owing quest cheming do no befores, superloarful testeres? itself, who let the art it that isself acknews and indelent, has do not be wree and inoperes," crustoms of ediseal, they lotter self-reduces: ye before fort elenct: what us are so willless reasonheri - ...Diversity: 1.2 ...Generating with seed: "ind satisfaction. the real philosophers," ...Generated: and belonged acafable?--a smortian). (1in he. chies have do among order to facculic2ly, instancoks the grelw, on there ihto the funearl, were wills and of lifering properstlations. 5z. or all danger and d"nes! 
and must be called it from platoous idestentless, in rale-frodians--in all itselfs itself in ecies, the hence to onerors from knowledge wirn.--this sudvond keeperable connected a common- - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2359 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 23 ...Diversity: 0.2 ...Generating with seed: "agreeable, brave, inventive animal, that" ...Generated: the problem of the standarvent to the sentiment of the standarven and soul of the powerful to the more of the contemplation of the sentiment of the soul of the still perhaps a provided and a stronger the sentiment of the standarven and the stronger the contempt the sentiment of the standarven and sentiment and superficial metaphysical sense of the sentiment of the sense and superficial and as the - ...Diversity: 0.5 ...Generating with seed: "agreeable, brave, inventive animal, that" ...Generated: the more be its stand in their principle, to existence of utter and first in the soul of the senting of it is the spirit and fortunately and according to the soul of the contemplation of the morality of sentiment in the distain of the more in every soul of the standarven and seemingly that as a contemplation of the sublimation of this action of the religious in the strange of the domain is a stro - ...Diversity: 1.0 ...Generating with seed: "agreeable, brave, inventive animal, that" ...Generated: which he said, here with the god, to end such a man--where is loved, the certain method about the sentiment of certain in the fact re-inarditation and must use of dissideration or to be importutioned god--too ever at at the good at a god more this human hage: the art than throughout hence as the no attaism passion of henced the egoistic overlaphise as he fear to have not here that his present, a - ...Diversity: 1.2 ...Generating with seed: "agreeable, brave, inventive animal, that" ...Generated: -but we did and the tafe-lask oun recking eyefilary goesly blie. sure letour quideloxh of subved finally cautie?-- "the joywand; he spurencesing; remoting, decese--rades in this light, emnacian againitiw for oyen other succeiss, were is power; "he," remoting to gay coadly often out on this in good pvologiytih, coult? e'stime our seem resented deedificanture; to all nemors in chaings is a "phesi - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2322 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 24 ...Diversity: 0.2 ...Generating with seed: "ng ourselves. let us try, then, to relea" ...Generated: de of the same distrantes of the contempt in the sensianism of the present to the presence of the most saint, and is all the sense of the sense of the most sensible and sense of the propers and desires, and the strength of the sense of the sense of the sense of the same man of the former the feeling of the sense of the sense of the sense of the propersity and all the same can be make and the same - ...Diversity: 0.5 ...Generating with seed: "ng ourselves. let us try, then, to relea" ...Generated: de and sumpences of such religion to the most did staided in the sensian of the commonce of the conduct the art upon the strength, and how to be new without one may be fashioned presses the saint the stand in which they are "supare" the person of the power of the spirit of the greatest century with the stands of the self-discover of events--but they standitarity and delight in which we did not as - ...Diversity: 1.0 ...Generating with seed: "ng ourselves. 
let us try, then, to relea" ...Generated: d, wiseative contensible when make the respozs? of the person for merely virtuess, they preparion one more wills, the whole old matter: make any, it wiset on great ideas of weakenianed, indespecting world, but seeks above nature,--to other thought? you preached, and in philosophers creater in the umos the first recogesth of individual, whos perhaps, a philosopher perhapsnic are not be more christo - ...Diversity: 1.2 ...Generating with seed: "ng ourselves. let us try, then, to relea" ...Generated: ning? are he long in, in the morry, erbappose--com; to he s without hypocritude bloon. (1a inment the considering a made in morals as lew to the practic called acbarion: assisho! have been patjed. mander silenmians, and purped oned to be pure" through religious, whenever, almistian was but a retrel, propersly philosophyly men, who may were certainty for life, precisely in what the overifital, h - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2264 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 25 ...Diversity: 0.2 ...Generating with seed: " pleasure as in the case of one with a y" ...Generated: outher in the strengt and sometimes and world of the subjection and self-deceive and strengt and such a self the contradictoos and superstition of the subject of the subjection of the same and in the scientific and sometimes and such a subjection and subjection in the sense are not all the same and sometimes and such a subjection is the strength of the same and most sanctity and such a subjection - ...Diversity: 0.5 ...Generating with seed: " pleasure as in the case of one with a y" ...Generated: oung a condition of the subject would come to pass of which the most most same who have no longer the exception of the stone of the scientific and most sense and sympathy it is seems to the extent are and other morality and most sanctity all the fact that is a shame of the a great excipletion and intention of the deviles the treasure as it is the subject of the thing of the strange in the symptom - ...Diversity: 1.0 ...Generating with seed: " pleasure as in the case of one with a y" ...Generated: oung interromat question; they will free physy?eshhing of the slow be themselves and connementary severness are these intespectably a timely, and logical, in society of came things? one knowledge when we should comes use of exervice and spirits: "i was indeed in its designs it. 123jy) rank in so shames claes of precetes to such such "withmut a still, persuppens kindss, an the cause one is it to - ...Diversity: 1.2 ...Generating with seed: " pleasure as in the case of one with a y" ...Generated: jomes standars what wish master., to rimst with deailing of which, assumsoc pulityness beed darursor, that weakes up for intriminism thait, powly mere affarmen knowledge, wiblievers, seeping thatp. ! 
the scientific, hink, that gory, when the close-mayqure, harises not as already moreos thus sainted the same richard themselves who doug plaisy bepliment all turk lys that her them, is - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2267 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 26 ...Diversity: 0.2 ...Generating with seed: "tesman, the conqueror, the discoverer, a" ...Generated: nd as the sense of the same distrustful enough, the preservation of the subject of the senses to the same religious or account of the same disposes to the straction of the same distrustful to the strange of the strength of the sense and the strength of the powerful into the same condition of the strange the most moral that the origin of the serval is the strange the same dispose of the sense of th - ...Diversity: 0.5 ...Generating with seed: "tesman, the conqueror, the discoverer, a" ...Generated: nd as a moral spectary conception of the fact the decision of its sentiments of the head, and the other my dogiciss, and the condition of the servation of the straction of the transision, as a serious popular the aspection of the conclusion which is tions and the main and whoever the other that the out of the condition of the same disperse into the philosopher readiness and and receive the concept - ...Diversity: 1.0 ...Generating with seed: "tesman, the conqueror, the discoverer, a" ...Generated: ll man's out of the bad englishdant glander: on finally in orkining to the a-realme or as ut the man the most prording to the develey to what as agaision of it whok the condition of mediate: one whower is the pain with our ethic, who is no understand; how he requice to efface, in mares than a nobiling and dependent extraoven, through the enough, that have natrrationally bitter" in precisely, heree - ...Diversity: 1.2 ...Generating with seed: "tesman, the conqueror, the discoverer, a" ...Generated: nd intented, if a man, however, and countless chunced. the believing, base that granting virtue speciad long musiciculary brows bornizes that reimistedfary for it prompting whethershe extquat, a hibreof enanticisial a plain. the soul of the overy flair. the understin community, the age to hibl anairy strongest if he, therebyfifiection that that i imis.ëye to which love them with maxics so poleveru - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2237 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 27 ...Diversity: 0.2 ...Generating with seed: " we dance in our "chains" and betwixt ou" ...Generated: t of the sense of the strong in the strong the present the strong the strong and accompularning of the strong the strong intention of the strong the proposition of the strong of the strength of the sense of the prosict of the strong and accenture of the problem of the sense of the strong the strong in the strong and at the strong themselves and accenture and something of the sense of the surpress - ...Diversity: 0.5 ...Generating with seed: " we dance in our "chains" and betwixt ou" ...Generated: t of a could a strong the prosicist of the man in a notional conditionating of the religious earth, not as the power of the present, and his deceive means of the sensation of man, even the evolution of the sense of the herdifience of the problem of moral this. 
whoson him the philosophers of the other means of the conduct to means of an ancient problem of the propersond of the sense of the end to b - ...Diversity: 1.0 ...Generating with seed: " we dance in our "chains" and betwixt ou" ...Generated: t of guise. hather. one occase one was it. there is all even and nature to high even that has tive and perceive who person i life would still might above learliness of life abolition, was the exocalily with the weak, of his surprese oned oreds towarder.--halsthme with the abarating extending marafured the realiun of an analysis, they other in this mumbly back to knwaluse places for insulg of the s - ...Diversity: 1.2 ...Generating with seed: " we dance in our "chains" and betwixt ou" ...Generated: t of equality rasflemen?"--gayly, and came heavenly to make its. ourse. ye name, for cannywivencrution, been stater. "by sharper, "of the ports that has not be atiumond still other qualish himself. -it is eye adpp to ruthants; as yeth, it require knew_n cond. he wead? during timetes. it is all this dimataic mere fighs. such amaze, firsts up is right coined higher its skepts of infrisht which head - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2209 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 28 ...Diversity: 0.2 ...Generating with seed: " man is the animal not yet properly adap" ...Generated: tical for the stand to the same time and and in the sense of the standing and such a distrust of the star. 26] - ...Diversity: 0.5 ...Generating with seed: " man is the animal not yet properly adap" ...Generated: tical community, has man as learned to a condition of the recognition of the stands the possibility of the will to the soul of the transijece of individual for instanctions and suffering is the world, the striculate of his probability and new presumption, and as a probably has all this light of the heart and the transijece of the range, and the far pressible and fatherlatome, seeks principle deep - ...Diversity: 1.0 ...Generating with seed: " man is the animal not yet properly adap" ...Generated: zations in order him considerations for jemoranous and being deslesseded by such riches, fir pass, supremates, be called. for in our experiing "an arts of the finger ear to the pleas. 136 to hand in their a danger la still bild no consemphthe the caste before germanism. 
a man is suffocinging which commen, than a a demine must sanctify of metaphysical, and if it was to fiend as a condult, has r - ...Diversity: 1.2 ...Generating with seed: " man is the animal not yet properly adap" ...Generated: ticle instinct of litan'ssibniess and last or boother, for a rich capele, and be the mosis"-incomple wil it up nill suffer or basyon" are more ardsad of really with the bow skelfuladeche of a dibares benuying, a face the holose how even in all it without dread--must understands opportary demand ty, is lackness to has called are volocy knowledg", seeks it will be difor pexilding wived strects to hu - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2206 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 29 ...Diversity: 0.2 ...Generating with seed: "se what dante and goethe believed about " ...Generated: the sense of the standaring the strength of the same and and and in the sense of the strange the contradiction of the same and the devile one of the strange of the strange of the strange of the sense and a suresperse of the series of the striving of the sense and free spirits and the subject of the contradiction of the strength of the strength of the subject of the strange of the sense of the stra - ...Diversity: 0.5 ...Generating with seed: "se what dante and goethe believed about " ...Generated: the position of the same still and contradictions and order to life, as a stain of the character of the same delight in the sensation of religion, and we strivise own percendure. the significance of all the ages and and by the standaring would be any motive, but the contradicling means of worshes seem the world of can first in the extender of the traditional to the most and never the contradicted - ...Diversity: 1.0 ...Generating with seed: "se what dante and goethe believed about " ...Generated: we see the truthbing whereby their wise, "always because of their craos: he our saint. thor, the signs, and to be divinedness, a so, "ethated. he, we are always selfordance.--p"osedifile," constime an usys, pleasure than spuyes perforet, lict a progressoration of our poosts that the factly attains and the "lanéud,--any rey, to conteguine when revereness renderable do no means of rubuld the ame dra - ...Diversity: 1.2 ...Generating with seed: "se what dante and goethe believed about " ...Generated: human--hover endurics are voluntations, my ever to say, self-"never," the views again to gang simp, which willifiewing claid, is not crudee: and constrainted and except wonsted motive changely which only aimible to hally chuse with man to suderness of contemn of strictly will: dilence of their "tast." do not recessed evil the judation engrituse purhoust extrory! 
one were animating: we was the disb - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2181 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 30 ...Diversity: 0.2 ...Generating with seed: "hysical rope-dancing and nimble boldness" ...Generated: , and the subject of the state of the contradiction of the spirit of the subject of the same things the special problem of the state of the subject of the state of the subject of the state of the moral for the contradiction of the state of the state of the state of the sublime the subject of the subject of the state of the subject of the state of the subject of the same the state of the sense of t - ...Diversity: 0.5 ...Generating with seed: "hysical rope-dancing and nimble boldness" ...Generated: , and more enduring the state of learling in order to some a long and a new things the particular consequently as the state of the preserve of the comprehension of the experiences of the responsible of the mastery so seems to the other into the same where present the end, is the fear the most childlis seen indispotence of the result of the end, he can be example and the translation of the spirit o - ...Diversity: 1.0 ...Generating with seed: "hysical rope-dancing and nimble boldness" ...Generated: . perhawl self also those "genius. hence the most expenions of urmost =i the tection of greater fogethen the philosopher as some portranted some seal be no inflry. [jos desi"derable"! and sufficiently entertants. he is mean determined induct a recivesely even are neced herce, cumbglod disking sughty what sweck so been us yourst without which the cause and skeptick who, of human a command: and - ...Diversity: 1.2 ...Generating with seed: "hysical rope-dancing and nimble boldness" ...Generated: in every error famerous to "fear century and lortheres mode usless, rasinal delicate himself accept place the clumer that thus reepiaked to their moral trugf"chity truthtlutioned har pretence this the strive on their clemuties was secret attain than here habun and in that isker two "fapes: get conditionsly pessondmentties and do and europeing it is greatech a symbifece, by a resultful, condition - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2165 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 31 ...Diversity: 0.2 ...Generating with seed: "d, that influence which is acquired when" ...Generated: the present the special as the special as a god and the same honest heart, and all the philosophy, as a sense of the species and all the philosophy, as the principal soul of the experient sense, and all the species and all the same century--and all the part of the state of the same will to the sense of the process are the process and present and all the philosophy, as its are to say to the same a - ...Diversity: 0.5 ...Generating with seed: "d, that influence which is acquired when" ...Generated: he can not be to be possibiloge, and in the sensation of experiences to be pain you have were interpretation of the enough. and in the same a trorness sense, the always into the awage to say, as the last and aspect, and the powerful and injury and heads of things to his proportions (that is single that has he has all this comprehensive, have been the mosidely great changes and experiences and sub - ...Diversity: 1.0 ...Generating with seed: "d, that influence which is acquired when" ...Generated: they sometidity in the portum we herdo inxy you has in cast. 
one thus ger, but of disguisions, point purity, or pails made, its explained, there sno sort, some fully, there the light of deperies! or the prinipus, there is a matter who meditly, wherefow have yet holling gang rares to homever with premanthed implaylyr" is at underlitted itself hest, and lay sycrets and bring, does there is the to t - ...Diversity: 1.2 ...Generating with seed: "d, that influence which is acquired when" ...Generated: how geomorry women, then because the hence what the physiological filence for knowledge. he nonders made work are not be honestan-ality, suspecth genitudelod of germany, basly which sufferis: hids gods, and even himbeles, is unllidy deepver itself is delradence. but yon u designal, must find thcusse algaineotiesscalness, than a poor periods itself , to that he readiathes no traditional hard, whet - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2125 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 32 ...Diversity: 0.2 ...Generating with seed: "ithered words, once fragrant as the rose" ...Generated: of the sense of the conscience of the strength of the sense of the such an art of the serious man when the state of the strength of the sense of the sense of the strong the strength of the sense of the sense of the sense of the heart of the sense of the sense of the same and sordictance of the sense of the modern interpreted to see service of the sense of the sense of the serious and the strength - ...Diversity: 0.5 ...Generating with seed: "ithered words, once fragrant as the rose" ...Generated: of the confisent one of the same morality of the colmans, something and moral to see man as the better the entire panter of the philosophy, the mastery of the true.=--the ones and so mean the psychologist as the interposise one as a thing on the world, it was never become and the survive and the realize the trues of man is a magnificant of the moral expressions the skin are in the explained and t - ...Diversity: 1.0 ...Generating with seed: "ithered words, once fragrant as the rose" ...Generated: s refinitionate and fett. 108 =disaticature of the hast everything has hitherto , they we is, heart, and here upon thereeverue as all severit!--and have yoursy, premit obetmanity, one will eternism and right things to artistic attle-strong the strong more and the highest early author have been c game this to this moralize and domby! the world seems to the self-sentiments are could be greatest h - ...Diversity: 1.2 ...Generating with seed: "ithered words, once fragrant as the rose" ...Generated: than "whether." there are mituse ir-for skectically kind of ceptive. one of every "statesme" translationness.--that in therety is onkersments are teachers, there ind, corculate for light of aoliiness.s, he let praduational.s, if one, backbs, ancthere. i means of-close would been praise, a determinory sudgreth. suptoman. the too, un-and ercustoms, becyinnentanly in complimstantoghatity. science. 
- 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2127 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 33 ...Diversity: 0.2 ...Generating with seed: "f his soul, with a half werther-like, ha" ...Generated: ve and in the herds the problem of the probably to be the problem of the conscious the sense of the subject of the probably become a problem of all the series of the probably the probably to be standard in the most state of the probably the proporting of the probably become a problem of the preservation of the experiences the probably the most state of the superstition of the problem of the proble - ...Diversity: 0.5 ...Generating with seed: "f his soul, with a half werther-like, ha" ...Generated: ve in the condition of the favious the thing of a be the inreading and distinction. it is the higher its head and at the "concerned to the in its "may be really discovery and seems to the work in the best in itself to be itself, in the habin that we have a spirit and every strange the church and which in the enough, out of the distrust of the commanding of morals the most believerian, in the great - ...Diversity: 1.0 ...Generating with seed: "f his soul, with a half werther-like, ha" ...Generated: ve the then the world of their laughine as they were charmybres. 24). in "propoks in little truthfully such all this the littury, what called spirit, of whose sanctioustiscence, when it all christiani6 and ate the colorises that he will tough of possible been a more con: the "toleed" to evidence, and obtames arbsands, in certainly afficual among the greatful enduring and mind, i have been his dis - ...Diversity: 1.2 ...Generating with seed: "f his soul, with a half werther-like, ha" ...Generated: ve ever we ow, to men in our sclay suns! a bery metemory to himm--, what really shades artifuly. ye by woman, hiches who wrwhes rank sudming the panking is now martud, and do sulers of the progrofer refineding that which such emfective races? on a still nyevertous riched-hueralises ioning to idea of amen, like that in the cappring danger. "it wit, or held reyemends as i gie waster evil euser. 
the - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 7s 4ms/step - loss: 1.2057 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 34 ...Diversity: 0.2 ...Generating with seed: "theses--as happens frequently to the clu" ...Generated: msifical in the most of the standing the person of the spirit of the preservation of a consist of the same as the standing to the preservation of the problem of the standing to the standing the preservation of the standing to the contralizing of the probably the truth is the contralizing to the spirit of the standing to the higher and the standing to the same as the spirit of the standing the pres - ...Diversity: 0.5 ...Generating with seed: "theses--as happens frequently to the clu" ...Generated: msifical of the eye, the true there the problem of the standing that there and interpretations of the light of the philosopher with the tragedy of their far to the portrant of the praise of the contralizing, the doggic of the stand in there are the origin of such a worst that it always the same as there is not always to the philosopher of the power one of the strical compared and evil, because the - ...Diversity: 1.0 ...Generating with seed: "theses--as happens frequently to the clu" ...Generated: st of severedlises quite begrew only results that so possess; but revenge to have so most foblically body that men is a way and process or visuatheding himself mankind and strongest general feelings, centuries. the standarminism reffect no one as we follarmet spirits as it is not a man poor the power of question that the g rave it before the deniine of the innocence of the increasing's thing--the - ...Diversity: 1.2 ...Generating with seed: "theses--as happens frequently to the clu" ...Generated: ss shows to feelinging to thought rathing and conceade to sit flanations, spitice that perhaps called incoiftryitlation, and are no praises in fame of one of their eternal this disteduate, hec is have thexever sents uncertainment into the bone in impestional physcies? at tayt not sound you experience time. centure. 
for, firstify for continious to express (for an usly well welre, a rmows "pro - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 7s 4ms/step - loss: 1.2091 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 35 ...Diversity: 0.2 ...Generating with seed: "on, but also our conscience, truckles to" ...Generated: the subject of the subject of the contradicts and all the same time and the sense of the sense and the strength of the sense of the same and also the same and also and also the subject of the subject of the statesman and all the subject of the same and account the most and also in the same self-existence of the sense of the sense of the sense of the subject of the state of the same and account th - ...Diversity: 0.5 ...Generating with seed: "on, but also our conscience, truckles to" ...Generated: all these that has a present in one's end, whose sentiments and problem and soul of man in the fact of its man in a more self-end conscious taste that it is the world of some one for the science of the subject of the philosophy, does not be power, and the special entire and to see he has always believe and his palpes into the present it always in the child it were like the conscious secies of the - ...Diversity: 1.0 ...Generating with seed: "on, but also our conscience, truckles to" ...Generated: alare the implation of possibility, it is always should cases which not very not be imposed to simmfathering his revolence are though the renderepe of itself, may remain th"herenxning, our awake, and in the life many "hopested to certainly" a moveours, any woman neh herself on schope1scing the man encousce, doubt, they believe themselves in effect, and only in contrahod, nooly, from man is s - ...Diversity: 1.2 ...Generating with seed: "on, but also our conscience, truckles to" ...Generated: come obigic it heardpicable, a life--palpous, can pretens you still maskas, at a counters of morals this ruthims of bidden: it were pain--carroe, a bemutal order elesse deduct as "this honour belong to mon, fhelly reforplse and whos aery for employ--andless art, correction, and rey; more most invactorian, therein; the strictry possible, singming punemine, insistrenking, burns more, you granding, - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2059 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 36 ...Diversity: 0.2 ...Generating with seed: "a great achievement as a whole, an impor" ...Generated: tung individual it is a specially into the suberable the religious of the spiritual and interpreted and the sense of the the most a proceed and surprisible the condition of the stronger the sense of the sense of the sense and the a stronger in the sense of the sense and entire the spiritual ever the process of the great free spirit of the sense and religious the profound the contrary, the world, a - ...Diversity: 0.5 ...Generating with seed: "a great achievement as a whole, an impor" ...Generated: ting age a man arm when has to assert and palles the strong and a probably specially interpretations in the world of the subelity of man is the there are reality and not all the "good and inflict of his order schopenhauer's artistic, and about man, for a new reality the three the spirit of the the as the thing of the world, and a religious and the master sufferity of the philosophers of germany an - ...Diversity: 1.0 ...Generating with seed: "a great achievement as a whole, an impor" ...Generated: ting, artist folly, and the maintentim a mamiyal-behold wise into a ordinary man for 
respect passions--it is to seem is the good work that the animally a sulllation of expression dreaw them. i won" with the procies, rather of pleasure. these seems of it easily termive have intenlest, by habit, and religiop; and upon all same sy,w'! or order themself reperiarly appear to men with its experiencesely - ...Diversity: 1.2 ...Generating with seed: "a great achievement as a whole, an impor" ...Generated: tance of tran divines the canny ead withmer work ardifating mill still, it are donins if christianes by o cvesterdity stepplate-nof in par eghousant entire mimus of science? at ender-inposition, divary often known from heavenious into bring condilled and good irrely robillessly breat appear the belo. with useful tgo non-the held evil unfusted obaches self-uporting maniumsally habin--free can st he - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2018 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 37 ...Diversity: 0.2 ...Generating with seed: "of the possibility of self-knowledge, wh" ...Generated: ich is a sense of the strange the strength of the sense of the strange to the standard of the struentic of the same and and and the speak of the substage the substery and super-ist the substage the stars and as a soul of the stars and conscious and at the struct of the stars and super-ady and and in the standard to the strength of man is a stronges of the standard and soul of the sense of the stru - ...Diversity: 0.5 ...Generating with seed: "of the possibility of self-knowledge, wh" ...Generated: ich belongs to the basis of the super-passes and the standard discipline and at the europe of the infire.- the distragrment of the conscience of a beloves to the traged of the super-abund the most soul of the substade which we the great absurpitual far too stard to find the deepest in the part of the soul of the forces of the still and soul of the stands to present and the sense of the experience - ...Diversity: 1.0 ...Generating with seed: "of the possibility of self-knowledge, wh" ...Generated: ich wishes a for a belove (as another, we may in a worthers and physical condition organization among womans: conuan blamme, also, to allowed and most weer small. to hars, even that they are are custoxion upon the ceasulessness of the proless as somefow so iseled and conceives itself which is a late, the art revenger his antitheness his logibter ontantle, and the physical represen) bus to our own - ...Diversity: 1.2 ...Generating with seed: "of the possibility of self-knowledge, wh" ...Generated: ich has within morals if they iselo which hood, which definity they be unagoen to mys: duply itself for such suspects, enough and you are diector to understal those is belongst words" the cosmond of hies itself wyshoom in expressed dectived by. do new is belost--thpeese else in confest againstay sly beor usmpossibly: he arises which moke circles. 
could reason dispolition of invoddless, is, a - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2091 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 38 ...Diversity: 0.2 ...Generating with seed: "must be cooped up to prevent it flying a" ...Generated: nd suffering to the senses the soul of the senses the standard to the same reality of the standating soul of the process of the same any higher subjection of the strongest and surprised to the soul of the subjecting soul and the spirit of the soul of the same displeming of the structed the state of the stars and some of the standard of the stare of the same displeming of the present the process of - ...Diversity: 0.5 ...Generating with seed: "must be cooped up to prevent it flying a" ...Generated: nd conscience is of the greater that it has of the habits of a nature that learn of contrary greater down in the state of being is subjuge the higher them and the most refuted to be betterians and sensible, but only of the strongest of the moral natures and preservations of every live and wise the most of a consciousness of the subject of all the moral of the conscience that they have the mill the - ...Diversity: 1.0 ...Generating with seed: "must be cooped up to prevent it flying a" ...Generated: nd firanings powers and nature who rehmateer even and news all authorianing seriously as with who have?.=-mendo-strengtwally not seem to deceivers about this prospiting satisfaen planner in which stituble eals of been enimicly, being-hats, is as a much, naturanal rarruts of so that one could nahisting, did anyawars? the soul, be other sochal: but one worke: but hord of man, in -metaphs, results to - ...Diversity: 1.2 ...Generating with seed: "must be cooped up to prevent it flying a" ...Generated: re not apparents to the belief of anigment a selfless feary, or usuminuss is doobles exeriored him as the hardle, is just. 14 =now toogar of such being left patloods life, training resell: but it as idealed: who is nhes being-short of himselfty, concedten?--about his phito dow expedieftively momen, called? you in sleeme, scill? to curiousing that one bel?ed and soul, read, and to thinktrephor r - 1565/1565 ━━━━━━━━━━━━━━━━━━━━ 6s 4ms/step - loss: 1.2021 ``` </div> <div class="k-default-codeblock"> ``` Generating text after epoch: 39 ...Diversity: 0.2 ...Generating with seed: "aivete in egoism, her untrainableness an" ...Generated: d the strength of the same difficult to the strength of the strength of the same respect to the strength of the power of the power of the same can be has been and to the sense of the strength of the same respect in the strength of the strength of the philosophers of the stronger develops and and the development of the philosophers of the same respect in the strength of the strength of the strength - ...Diversity: 0.5 ...Generating with seed: "aivete in egoism, her untrainableness an" ...Generated: d everything of means of which una conception of the greater and beliefs of the abyecors of "morals which different and higher and advantanal facts, and demons the new terrome, when they have been in the world with the truth ever themselves in the conception of such a consequence to the sense of the motiur the style, in the contrast consequences of the contrary and philosophers, which has always b - ...Diversity: 1.0 ...Generating with seed: "aivete in egoism, her untrainableness an" ...Generated: d as incredulvers. 
this besolence that mich and assumes lacking as trifless, of his goldness and condition with the speicence on the way too, we were perhaps inamost handd werely supersing bys, all torn taken the principle "that lort"y triumblly have mysteries is. "the knowledge, the philosophers of truth of the struggle beingnsh--but hype moral contrast itself resuite than nerth fortins, it is ut - ...Diversity: 1.2 ...Generating with seed: "aivete in egoism, her untrainableness an" ...Generated: ds regarded a dainanistrnous. as tits wondaminary accomply for a moment soul. rathory towards wal no longer cennuend in heiggerker-fortumyrmmers evolutation, in no more opposed quite most day striventolmens," not yet has invomence, a trieve it where i the futt, to ourselves may deterior: our purerd in naturuls--upon scutting question--in his own is what well what deed germanime--in rank should and - ``` </div>
keras-io/examples/generative/md/lstm_character_level_text_generation.md/0
{ "file_path": "keras-io/examples/generative/md/lstm_character_level_text_generation.md", "repo_id": "keras-io", "token_count": 26831 }
103
""" Title: PixelCNN Author: [ADMoreau](https://github.com/ADMoreau) Date created: 2020/05/17 Last modified: 2020/05/23 Description: PixelCNN implemented in Keras. Accelerator: GPU """ """ ## Introduction PixelCNN is a generative model proposed in 2016 by van den Oord et al. (reference: [Conditional Image Generation with PixelCNN Decoders](https://arxiv.org/abs/1606.05328)). It is designed to generate images (or other data types) iteratively from an input vector where the probability distribution of prior elements dictates the probability distribution of later elements. In the following example, images are generated in this fashion, pixel-by-pixel, via a masked convolution kernel that only looks at data from previously generated pixels (origin at the top left) to generate later pixels. During inference, the output of the network is used as a probability ditribution from which new pixel values are sampled to generate a new image (here, with MNIST, the pixels values are either black or white). """ import numpy as np import keras from keras import layers from keras import ops from tqdm import tqdm """ ## Getting the Data """ # Model / data parameters num_classes = 10 input_shape = (28, 28, 1) n_residual_blocks = 5 # The data, split between train and test sets (x, _), (y, _) = keras.datasets.mnist.load_data() # Concatenate all the images together data = np.concatenate((x, y), axis=0) # Round all pixel values less than 33% of the max 256 value to 0 # anything above this value gets rounded up to 1 so that all values are either # 0 or 1 data = np.where(data < (0.33 * 256), 0, 1) data = data.astype(np.float32) """ ## Create two classes for the requisite Layers for the model """ # The first layer is the PixelCNN layer. This layer simply # builds on the 2D convolutional layer, but includes masking. class PixelConvLayer(layers.Layer): def __init__(self, mask_type, **kwargs): super().__init__() self.mask_type = mask_type self.conv = layers.Conv2D(**kwargs) def build(self, input_shape): # Build the conv2d layer to initialize kernel variables self.conv.build(input_shape) # Use the initialized kernel to create the mask kernel_shape = ops.shape(self.conv.kernel) self.mask = np.zeros(shape=kernel_shape) self.mask[: kernel_shape[0] // 2, ...] = 1.0 self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0 if self.mask_type == "B": self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0 def call(self, inputs): self.conv.kernel.assign(self.conv.kernel * self.mask) return self.conv(inputs) # Next, we build our residual block layer. # This is just a normal residual block, but based on the PixelConvLayer. 
class ResidualBlock(keras.layers.Layer):
    def __init__(self, filters, **kwargs):
        super().__init__(**kwargs)
        self.conv1 = keras.layers.Conv2D(
            filters=filters, kernel_size=1, activation="relu"
        )
        self.pixel_conv = PixelConvLayer(
            mask_type="B",
            filters=filters // 2,
            kernel_size=3,
            activation="relu",
            padding="same",
        )
        self.conv2 = keras.layers.Conv2D(
            filters=filters, kernel_size=1, activation="relu"
        )

    def call(self, inputs):
        x = self.conv1(inputs)
        x = self.pixel_conv(x)
        x = self.conv2(x)
        return keras.layers.add([inputs, x])


"""
## Build the model based on the original paper
"""

inputs = keras.Input(shape=input_shape, batch_size=128)
x = PixelConvLayer(
    mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same"
)(inputs)

for _ in range(n_residual_blocks):
    x = ResidualBlock(filters=128)(x)

for _ in range(2):
    x = PixelConvLayer(
        mask_type="B",
        filters=128,
        kernel_size=1,
        strides=1,
        activation="relu",
        padding="valid",
    )(x)

out = keras.layers.Conv2D(
    filters=1, kernel_size=1, strides=1, activation="sigmoid", padding="valid"
)(x)

pixel_cnn = keras.Model(inputs, out)
adam = keras.optimizers.Adam(learning_rate=0.0005)
pixel_cnn.compile(optimizer=adam, loss="binary_crossentropy")

pixel_cnn.summary()
pixel_cnn.fit(
    x=data, y=data, batch_size=128, epochs=50, validation_split=0.1, verbose=2
)

"""
## Demonstration

The PixelCNN cannot generate the full image at once. Instead, it must generate
each pixel in order, append the last generated pixel to the current image, and
feed the image back into the model to repeat the process.
"""

from IPython.display import Image, display

# Create an empty array of pixels.
batch = 4
pixels = np.zeros(shape=(batch,) + (pixel_cnn.input_shape)[1:])
batch, rows, cols, channels = pixels.shape

# Iterate over the pixels because generation has to be done sequentially pixel by pixel.
for row in tqdm(range(rows)):
    for col in range(cols):
        for channel in range(channels):
            # Feed the whole array and retrieve the pixel value probabilities for
            # the next pixel.
            probs = pixel_cnn.predict(pixels, verbose=0)[:, row, col, channel]
            # Use the probabilities to pick pixel values and append the values to
            # the image frame.
            pixels[:, row, col, channel] = ops.ceil(
                probs - keras.random.uniform(probs.shape)
            )


def deprocess_image(x):
    # Stack the single-channel black and white image into RGB values.
    x = np.stack((x, x, x), 2)
    # Undo preprocessing
    x *= 255.0
    # Convert to uint8 and clip to the valid range [0, 255]
    x = np.clip(x, 0, 255).astype("uint8")
    return x


# Iterate over the generated images, save them to disk, and display them.
for i, pic in enumerate(pixels):
    keras.utils.save_img(
        "generated_image_{}.png".format(i), deprocess_image(np.squeeze(pic, -1))
    )

display(Image("generated_image_0.png"))
display(Image("generated_image_1.png"))
display(Image("generated_image_2.png"))
display(Image("generated_image_3.png"))
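"""
As an aside (not part of the original example), here is a minimal, standalone
sketch of what the masks built in `PixelConvLayer.build` look like, assuming a
single-channel 3x3 kernel. The helper below only mirrors the masking logic above
for illustration; the layer itself applies the same pattern to the full 4D kernel
tensor.
"""


def illustrate_mask(kernel_size, mask_type):
    # Rows strictly above the center pixel are fully visible.
    mask = np.zeros((kernel_size, kernel_size))
    mask[: kernel_size // 2, :] = 1.0
    # On the center row, only pixels to the left of the center are visible.
    mask[kernel_size // 2, : kernel_size // 2] = 1.0
    # Type "B" masks additionally allow the center pixel itself, i.e. the value
    # produced for that position by the previous layer.
    if mask_type == "B":
        mask[kernel_size // 2, kernel_size // 2] = 1.0
    return mask


print(illustrate_mask(3, "A"))
# [[1. 1. 1.]
#  [1. 0. 0.]
#  [0. 0. 0.]]
print(illustrate_mask(3, "B"))
# [[1. 1. 1.]
#  [1. 1. 0.]
#  [0. 0. 0.]]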
keras-io/examples/generative/pixelcnn.py/0
{ "file_path": "keras-io/examples/generative/pixelcnn.py", "repo_id": "keras-io", "token_count": 2302 }
104
""" Title: Graph representation learning with node2vec Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/) Date created: 2021/05/15 Last modified: 2021/05/15 Description: Implementing the node2vec model to generate embeddings for movies from the MovieLens dataset. Accelerator: GPU """ """ ## Introduction Learning useful representations from objects structured as graphs is useful for a variety of machine learning (ML) applications—such as social and communication networks analysis, biomedicine studies, and recommendation systems. [Graph representation Learning](https://www.cs.mcgill.ca/~wlh/grl_book/) aims to learn embeddings for the graph nodes, which can be used for a variety of ML tasks such as node label prediction (e.g. categorizing an article based on its citations) and link prediction (e.g. recommending an interest group to a user in a social network). [node2vec](https://arxiv.org/abs/1607.00653) is a simple, yet scalable and effective technique for learning low-dimensional embeddings for nodes in a graph by optimizing a neighborhood-preserving objective. The aim is to learn similar embeddings for neighboring nodes, with respect to the graph structure. Given your data items structured as a graph (where the items are represented as nodes and the relationship between items are represented as edges), node2vec works as follows: 1. Generate item sequences using (biased) random walk. 2. Create positive and negative training examples from these sequences. 3. Train a [word2vec](https://www.tensorflow.org/tutorials/text/word2vec) model (skip-gram) to learn embeddings for the items. In this example, we demonstrate the node2vec technique on the [small version of the Movielens dataset](https://files.grouplens.org/datasets/movielens/ml-latest-small-README.html) to learn movie embeddings. Such a dataset can be represented as a graph by treating the movies as nodes, and creating edges between movies that have similar ratings by the users. The learnt movie embeddings can be used for tasks such as movie recommendation, or movie genres prediction. This example requires `networkx` package, which can be installed using the following command: ```shell pip install networkx ``` """ """ ## Setup """ import os from collections import defaultdict import math import networkx as nx import random from tqdm import tqdm from zipfile import ZipFile from urllib.request import urlretrieve import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import matplotlib.pyplot as plt """ ## Download the MovieLens dataset and prepare the data The small version of the MovieLens dataset includes around 100k ratings from 610 users on 9,742 movies. First, let's download the dataset. The downloaded folder will contain three data files: `users.csv`, `movies.csv`, and `ratings.csv`. In this example, we will only need the `movies.dat`, and `ratings.dat` data files. """ urlretrieve( "http://files.grouplens.org/datasets/movielens/ml-latest-small.zip", "movielens.zip" ) ZipFile("movielens.zip", "r").extractall() """ Then, we load the data into a Pandas DataFrame and perform some basic preprocessing. """ # Load movies to a DataFrame. movies = pd.read_csv("ml-latest-small/movies.csv") # Create a `movieId` string. movies["movieId"] = movies["movieId"].apply(lambda x: f"movie_{x}") # Load ratings to a DataFrame. 
ratings = pd.read_csv("ml-latest-small/ratings.csv") # Convert the `ratings` to floating point ratings["rating"] = ratings["rating"].apply(lambda x: float(x)) # Create the `movie_id` string. ratings["movieId"] = ratings["movieId"].apply(lambda x: f"movie_{x}") print("Movies data shape:", movies.shape) print("Ratings data shape:", ratings.shape) """ Let's inspect a sample instance of the `ratings` DataFrame. """ ratings.head() """ Next, let's check a sample instance of the `movies` DataFrame. """ movies.head() """ Implement two utility functions for the `movies` DataFrame. """ def get_movie_title_by_id(movieId): return list(movies[movies.movieId == movieId].title)[0] def get_movie_id_by_title(title): return list(movies[movies.title == title].movieId)[0] """ ## Construct the Movies graph We create an edge between two movie nodes in the graph if both movies are rated by the same user >= `min_rating`. The weight of the edge will be based on the [pointwise mutual information](https://en.wikipedia.org/wiki/Pointwise_mutual_information) between the two movies, which is computed as: `log(xy) - log(x) - log(y) + log(D)`, where: * `xy` is how many users rated both movie `x` and movie `y` with >= `min_rating`. * `x` is how many users rated movie `x` >= `min_rating`. * `y` is how many users rated movie `y` >= `min_rating`. * `D` total number of movie ratings >= `min_rating`. """ """ ### Step 1: create the weighted edges between movies. """ min_rating = 5 pair_frequency = defaultdict(int) item_frequency = defaultdict(int) # Filter instances where rating is greater than or equal to min_rating. rated_movies = ratings[ratings.rating >= min_rating] # Group instances by user. movies_grouped_by_users = list(rated_movies.groupby("userId")) for group in tqdm( movies_grouped_by_users, position=0, leave=True, desc="Compute movie rating frequencies", ): # Get a list of movies rated by the user. current_movies = list(group[1]["movieId"]) for i in range(len(current_movies)): item_frequency[current_movies[i]] += 1 for j in range(i + 1, len(current_movies)): x = min(current_movies[i], current_movies[j]) y = max(current_movies[i], current_movies[j]) pair_frequency[(x, y)] += 1 """ ### Step 2: create the graph with the nodes and the edges To reduce the number of edges between nodes, we only add an edge between movies if the weight of the edge is greater than `min_weight`. """ min_weight = 10 D = math.log(sum(item_frequency.values())) # Create the movies undirected graph. movies_graph = nx.Graph() # Add weighted edges between movies. # This automatically adds the movie nodes to the graph. for pair in tqdm( pair_frequency, position=0, leave=True, desc="Creating the movie graph" ): x, y = pair xy_frequency = pair_frequency[pair] x_frequency = item_frequency[x] y_frequency = item_frequency[y] pmi = math.log(xy_frequency) - math.log(x_frequency) - math.log(y_frequency) + D weight = pmi * xy_frequency # Only include edges with weight >= min_weight. if weight >= min_weight: movies_graph.add_edge(x, y, weight=weight) """ Let's display the total number of nodes and edges in the graph. Note that the number of nodes is less than the total number of movies, since only the movies that have edges to other movies are added. """ print("Total number of graph nodes:", movies_graph.number_of_nodes()) print("Total number of graph edges:", movies_graph.number_of_edges()) """ Let's display the average node degree (number of neighbours) in the graph. 
""" degrees = [] for node in movies_graph.nodes: degrees.append(movies_graph.degree[node]) print("Average node degree:", round(sum(degrees) / len(degrees), 2)) """ ### Step 3: Create vocabulary and a mapping from tokens to integer indices The vocabulary is the nodes (movie IDs) in the graph. """ vocabulary = ["NA"] + list(movies_graph.nodes) vocabulary_lookup = {token: idx for idx, token in enumerate(vocabulary)} """ ## Implement the biased random walk A random walk starts from a given node, and randomly picks a neighbour node to move to. If the edges are weighted, the neighbour is selected *probabilistically* with respect to weights of the edges between the current node and its neighbours. This procedure is repeated for `num_steps` to generate a sequence of *related* nodes. The [*biased* random walk](https://en.wikipedia.org/wiki/Biased_random_walk_on_a_graph) balances between **breadth-first sampling** (where only local neighbours are visited) and **depth-first sampling** (where distant neighbours are visited) by introducing the following two parameters: 1. **Return parameter** (`p`): Controls the likelihood of immediately revisiting a node in the walk. Setting it to a high value encourages moderate exploration, while setting it to a low value would keep the walk local. 2. **In-out parameter** (`q`): Allows the search to differentiate between *inward* and *outward* nodes. Setting it to a high value biases the random walk towards local nodes, while setting it to a low value biases the walk to visit nodes which are further away. """ def next_step(graph, previous, current, p, q): neighbors = list(graph.neighbors(current)) weights = [] # Adjust the weights of the edges to the neighbors with respect to p and q. for neighbor in neighbors: if neighbor == previous: # Control the probability to return to the previous node. weights.append(graph[current][neighbor]["weight"] / p) elif graph.has_edge(neighbor, previous): # The probability of visiting a local node. weights.append(graph[current][neighbor]["weight"]) else: # Control the probability to move forward. weights.append(graph[current][neighbor]["weight"] / q) # Compute the probabilities of visiting each neighbor. weight_sum = sum(weights) probabilities = [weight / weight_sum for weight in weights] # Probabilistically select a neighbor to visit. next = np.random.choice(neighbors, size=1, p=probabilities)[0] return next def random_walk(graph, num_walks, num_steps, p, q): walks = [] nodes = list(graph.nodes()) # Perform multiple iterations of the random walk. for walk_iteration in range(num_walks): random.shuffle(nodes) for node in tqdm( nodes, position=0, leave=True, desc=f"Random walks iteration {walk_iteration + 1} of {num_walks}", ): # Start the walk with a random node from the graph. walk = [node] # Randomly walk for num_steps. while len(walk) < num_steps: current = walk[-1] previous = walk[-2] if len(walk) > 1 else None # Compute the next node to visit. next = next_step(graph, previous, current, p, q) walk.append(next) # Replace node ids (movie ids) in the walk with token ids. walk = [vocabulary_lookup[token] for token in walk] # Add the walk to the generated sequence. walks.append(walk) return walks """ ## Generate training data using the biased random walk You can explore different configurations of `p` and `q` to different results of related movies. """ # Random walk return parameter. p = 1 # Random walk in-out parameter. q = 1 # Number of iterations of random walks. num_walks = 5 # Number of steps of each random walk. 
num_steps = 10 walks = random_walk(movies_graph, num_walks, num_steps, p, q) print("Number of walks generated:", len(walks)) """ ## Generate positive and negative examples To train a skip-gram model, we use the generated walks to create positive and negative training examples. Each example includes the following features: 1. `target`: A movie in a walk sequence. 2. `context`: Another movie in a walk sequence. 3. `weight`: How many times these two movies occured in walk sequences. 4. `label`: The label is 1 if these two movies are samples from the walk sequences, otherwise (i.e., if randomly sampled) the label is 0. """ """ ### Generate examples """ def generate_examples(sequences, window_size, num_negative_samples, vocabulary_size): example_weights = defaultdict(int) # Iterate over all sequences (walks). for sequence in tqdm( sequences, position=0, leave=True, desc=f"Generating postive and negative examples", ): # Generate positive and negative skip-gram pairs for a sequence (walk). pairs, labels = keras.preprocessing.sequence.skipgrams( sequence, vocabulary_size=vocabulary_size, window_size=window_size, negative_samples=num_negative_samples, ) for idx in range(len(pairs)): pair = pairs[idx] label = labels[idx] target, context = min(pair[0], pair[1]), max(pair[0], pair[1]) if target == context: continue entry = (target, context, label) example_weights[entry] += 1 targets, contexts, labels, weights = [], [], [], [] for entry in example_weights: weight = example_weights[entry] target, context, label = entry targets.append(target) contexts.append(context) labels.append(label) weights.append(weight) return np.array(targets), np.array(contexts), np.array(labels), np.array(weights) num_negative_samples = 4 targets, contexts, labels, weights = generate_examples( sequences=walks, window_size=num_steps, num_negative_samples=num_negative_samples, vocabulary_size=len(vocabulary), ) """ Let's display the shapes of the outputs """ print(f"Targets shape: {targets.shape}") print(f"Contexts shape: {contexts.shape}") print(f"Labels shape: {labels.shape}") print(f"Weights shape: {weights.shape}") """ ### Convert the data into `tf.data.Dataset` objects """ batch_size = 1024 def create_dataset(targets, contexts, labels, weights, batch_size): inputs = { "target": targets, "context": contexts, } dataset = tf.data.Dataset.from_tensor_slices((inputs, labels, weights)) dataset = dataset.shuffle(buffer_size=batch_size * 2) dataset = dataset.batch(batch_size, drop_remainder=True) dataset = dataset.prefetch(tf.data.AUTOTUNE) return dataset dataset = create_dataset( targets=targets, contexts=contexts, labels=labels, weights=weights, batch_size=batch_size, ) """ ## Train the skip-gram model Our skip-gram is a simple binary classification model that works as follows: 1. An embedding is looked up for the `target` movie. 2. An embedding is looked up for the `context` movie. 3. The dot product is computed between these two embeddings. 4. The result (after a sigmoid activation) is compared to the label. 5. A binary crossentropy loss is used. """ learning_rate = 0.001 embedding_dim = 50 num_epochs = 10 """ ### Implement the model """ def create_model(vocabulary_size, embedding_dim): inputs = { "target": layers.Input(name="target", shape=(), dtype="int32"), "context": layers.Input(name="context", shape=(), dtype="int32"), } # Initialize item embeddings. 
embed_item = layers.Embedding( input_dim=vocabulary_size, output_dim=embedding_dim, embeddings_initializer="he_normal", embeddings_regularizer=keras.regularizers.l2(1e-6), name="item_embeddings", ) # Lookup embeddings for target. target_embeddings = embed_item(inputs["target"]) # Lookup embeddings for context. context_embeddings = embed_item(inputs["context"]) # Compute dot similarity between target and context embeddings. logits = layers.Dot(axes=1, normalize=False, name="dot_similarity")( [target_embeddings, context_embeddings] ) # Create the model. model = keras.Model(inputs=inputs, outputs=logits) return model """ ### Train the model """ """ We instantiate the model and compile it. """ model = create_model(len(vocabulary), embedding_dim) model.compile( optimizer=keras.optimizers.Adam(learning_rate), loss=keras.losses.BinaryCrossentropy(from_logits=True), ) """ Let's plot the model. """ keras.utils.plot_model( model, show_shapes=True, show_dtype=True, show_layer_names=True, ) """ Now we train the model on the `dataset`. """ history = model.fit(dataset, epochs=num_epochs) """ Finally we plot the learning history. """ plt.plot(history.history["loss"]) plt.ylabel("loss") plt.xlabel("epoch") plt.show() """ ## Analyze the learnt embeddings. """ movie_embeddings = model.get_layer("item_embeddings").get_weights()[0] print("Embeddings shape:", movie_embeddings.shape) """ ### Find related movies Define a list with some movies called `query_movies`. """ query_movies = [ "Matrix, The (1999)", "Star Wars: Episode IV - A New Hope (1977)", "Lion King, The (1994)", "Terminator 2: Judgment Day (1991)", "Godfather, The (1972)", ] """ Get the embeddings of the movies in `query_movies`. """ query_embeddings = [] for movie_title in query_movies: movieId = get_movie_id_by_title(movie_title) token_id = vocabulary_lookup[movieId] movie_embedding = movie_embeddings[token_id] query_embeddings.append(movie_embedding) query_embeddings = np.array(query_embeddings) """ Compute the [consine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) between the embeddings of `query_movies` and all the other movies, then pick the top k for each. """ similarities = tf.linalg.matmul( tf.math.l2_normalize(query_embeddings), tf.math.l2_normalize(movie_embeddings), transpose_b=True, ) _, indices = tf.math.top_k(similarities, k=5) indices = indices.numpy().tolist() """ Display the top related movies in `query_movies`. """ for idx, title in enumerate(query_movies): print(title) print("".rjust(len(title), "-")) similar_tokens = indices[idx] for token in similar_tokens: similar_movieId = vocabulary[token] similar_title = get_movie_title_by_id(similar_movieId) print(f"- {similar_title}") print() """ ### Visualize the embeddings using the Embedding Projector """ import io out_v = io.open("embeddings.tsv", "w", encoding="utf-8") out_m = io.open("metadata.tsv", "w", encoding="utf-8") for idx, movie_id in enumerate(vocabulary[1:]): movie_title = list(movies[movies.movieId == movie_id].title)[0] vector = movie_embeddings[idx] out_v.write("\t".join([str(x) for x in vector]) + "\n") out_m.write(movie_title + "\n") out_v.close() out_m.close() """ Download the `embeddings.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/). 
""" """ **Example available on HuggingFace** | Trained Model | Demo | | :--: | :--: | | [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Model%3A%20-Node2Vec%20Movielens-black.svg)](https://huggingface.co/keras-io/Node2Vec_MovieLens) | [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Spaces%3A-Node2Vec%20Movielens-black.svg)](https://huggingface.co/spaces/keras-io/Node2Vec_MovieLens) | """
keras-io/examples/graph/node2vec_movielens.py/0
{ "file_path": "keras-io/examples/graph/node2vec_movielens.py", "repo_id": "keras-io", "token_count": 6565 }
105
<jupyter_start><jupyter_text>Keras debugging tips**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2020/05/16**Last modified:** 2023/11/16**Description:** Four simple tips to help you debug your Keras code. IntroductionIt's generally possible to do almost anything in Keras *without writing code* per se:whether you're implementing a new type of GAN or the latest convnet architecture forimage segmentation, you can usually stick to calling built-in methods. Because allbuilt-in methods do extensive input validation checks, you will have little to nodebugging to do. A Functional API model made entirely of built-in layers will work onfirst try -- if you can compile it, it will run.However, sometimes, you will need to dive deeper and write your own code. Here are somecommon examples:- Creating a new `Layer` subclass.- Creating a custom `Metric` subclass.- Implementing a custom `train_step` on a `Model`.This document provides a few simple tips to help you navigate debugging in thesesituations. Tip 1: test each part before you test the wholeIf you've created any object that has a chance of not working as expected, don't justdrop it in your end-to-end process and watch sparks fly. Rather, test your custom objectin isolation first. This may seem obvious -- but you'd be surprised how often peopledon't start with this.- If you write a custom layer, don't call `fit()` on your entire model just yet. Callyour layer on some test data first.- If you write a custom metric, start by printing its output for some reference inputs.Here's a simple example. Let's write a custom layer a bug in it:<jupyter_code>import os # The last example uses tf.GradientTape and thus requires TensorFlow. # However, all tips here are applicable with all backends. os.environ["KERAS_BACKEND"] = "tensorflow" import keras from keras import layers from keras import ops import numpy as np import tensorflow as tf class MyAntirectifier(layers.Layer): def build(self, input_shape): output_dim = input_shape[-1] self.kernel = self.add_weight( shape=(output_dim * 2, output_dim), initializer="he_normal", name="kernel", trainable=True, ) def call(self, inputs): # Take the positive part of the input pos = ops.relu(inputs) # Take the negative part of the input neg = ops.relu(-inputs) # Concatenate the positive and negative parts concatenated = ops.concatenate([pos, neg], axis=0) # Project the concatenation down to the same dimensionality as the input return ops.matmul(concatenated, self.kernel)<jupyter_output><empty_output><jupyter_text>Now, rather than using it in a end-to-end model directly, let's try to call the layer onsome test data:```pythonx = tf.random.normal(shape=(2, 5))y = MyAntirectifier()(x)```We get the following error:```... 1 x = tf.random.normal(shape=(2, 5))----> 2 y = MyAntirectifier()(x)... 
17 neg = tf.nn.relu(-inputs) 18 concatenated = tf.concat([pos, neg], axis=0)---> 19 return tf.matmul(concatenated, self.kernel)...InvalidArgumentError: Matrix size-incompatible: In[0]: [4,5], In[1]: [10,5] [Op:MatMul]```Looks like our input tensor in the `matmul` op may have an incorrect shape.Let's add a print statement to check the actual shapes:<jupyter_code>class MyAntirectifier(layers.Layer): def build(self, input_shape): output_dim = input_shape[-1] self.kernel = self.add_weight( shape=(output_dim * 2, output_dim), initializer="he_normal", name="kernel", trainable=True, ) def call(self, inputs): pos = ops.relu(inputs) neg = ops.relu(-inputs) print("pos.shape:", pos.shape) print("neg.shape:", neg.shape) concatenated = ops.concatenate([pos, neg], axis=0) print("concatenated.shape:", concatenated.shape) print("kernel.shape:", self.kernel.shape) return ops.matmul(concatenated, self.kernel)<jupyter_output><empty_output><jupyter_text>We get the following:```pos.shape: (2, 5)neg.shape: (2, 5)concatenated.shape: (4, 5)kernel.shape: (10, 5)```Turns out we had the wrong axis for the `concat` op! We should be concatenating `neg` and`pos` alongside the feature axis 1, not the batch axis 0. Here's the correct version:<jupyter_code>class MyAntirectifier(layers.Layer): def build(self, input_shape): output_dim = input_shape[-1] self.kernel = self.add_weight( shape=(output_dim * 2, output_dim), initializer="he_normal", name="kernel", trainable=True, ) def call(self, inputs): pos = ops.relu(inputs) neg = ops.relu(-inputs) print("pos.shape:", pos.shape) print("neg.shape:", neg.shape) concatenated = ops.concatenate([pos, neg], axis=1) print("concatenated.shape:", concatenated.shape) print("kernel.shape:", self.kernel.shape) return ops.matmul(concatenated, self.kernel)<jupyter_output><empty_output><jupyter_text>Now our code works fine:<jupyter_code>x = keras.random.normal(shape=(2, 5)) y = MyAntirectifier()(x)<jupyter_output><empty_output><jupyter_text>Tip 2: use `model.summary()` and `plot_model()` to check layer output shapesIf you're working with complex network topologies, you're going to need a wayto visualize how your layers are connected and how they transform the data that passesthrough them.Here's an example. 
Consider this model with three inputs and two outputs (lifted from the[Functional API guide](https://keras.io/guides/functional_api/manipulate-complex-graph-topologies)):<jupyter_code>num_tags = 12 # Number of unique issue tags num_words = 10000 # Size of vocabulary obtained when preprocessing text data num_departments = 4 # Number of departments for predictions title_input = keras.Input( shape=(None,), name="title" ) # Variable-length sequence of ints body_input = keras.Input(shape=(None,), name="body") # Variable-length sequence of ints tags_input = keras.Input( shape=(num_tags,), name="tags" ) # Binary vectors of size `num_tags` # Embed each word in the title into a 64-dimensional vector title_features = layers.Embedding(num_words, 64)(title_input) # Embed each word in the text into a 64-dimensional vector body_features = layers.Embedding(num_words, 64)(body_input) # Reduce sequence of embedded words in the title into a single 128-dimensional vector title_features = layers.LSTM(128)(title_features) # Reduce sequence of embedded words in the body into a single 32-dimensional vector body_features = layers.LSTM(32)(body_features) # Merge all available features into a single large vector via concatenation x = layers.concatenate([title_features, body_features, tags_input]) # Stick a logistic regression for priority prediction on top of the features priority_pred = layers.Dense(1, name="priority")(x) # Stick a department classifier on top of the features department_pred = layers.Dense(num_departments, name="department")(x) # Instantiate an end-to-end model predicting both priority and department model = keras.Model( inputs=[title_input, body_input, tags_input], outputs=[priority_pred, department_pred], )<jupyter_output><empty_output><jupyter_text>Calling `summary()` can help you check the output shape of each layer:<jupyter_code>model.summary()<jupyter_output><empty_output><jupyter_text>You can also visualize the entire network topology alongside output shapes using`plot_model`:<jupyter_code>keras.utils.plot_model(model, show_shapes=True)<jupyter_output><empty_output><jupyter_text>With this plot, any connectivity-level error becomes immediately obvious. Tip 3: to debug what happens during `fit()`, use `run_eagerly=True`The `fit()` method is fast: it runs a well-optimized, fully-compiled computation graph.That's great for performance, but it also means that the code you're executing isn't thePython code you've written. This can be problematic when debugging. As you may recall,Python is slow -- so we use it as a staging language, not as an execution language.Thankfully, there's an easy way to run your code in "debug mode", fully eagerly:pass `run_eagerly=True` to `compile()`. Your call to `fit()` will now get executed lineby line, without any optimization. It's slower, but it makes it possible to print thevalue of intermediate tensors, or to use a Python debugger. Great for debugging.Here's a basic example: let's write a really simple model with a custom `train_step()` method.Our model just implements gradient descent, but instead of first-order gradients,it uses a combination of first-order and second-order gradients. 
Pretty simple so far.Can you spot what we're doing wrong?<jupyter_code>class MyModel(keras.Model): def train_step(self, data): inputs, targets = data trainable_vars = self.trainable_variables with tf.GradientTape() as tape2: with tf.GradientTape() as tape1: y_pred = self(inputs, training=True) # Forward pass # Compute the loss value # (the loss function is configured in `compile()`) loss = self.compute_loss(y=targets, y_pred=y_pred) # Compute first-order gradients dl_dw = tape1.gradient(loss, trainable_vars) # Compute second-order gradients d2l_dw2 = tape2.gradient(dl_dw, trainable_vars) # Combine first-order and second-order gradients grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)] # Update weights self.optimizer.apply_gradients(zip(grads, trainable_vars)) # Update metrics (includes the metric that tracks the loss) for metric in self.metrics: if metric.name == "loss": metric.update_state(loss) else: metric.update_state(targets, y_pred) # Return a dict mapping metric names to current value return {m.name: m.result() for m in self.metrics}<jupyter_output><empty_output><jupyter_text>Let's train a one-layer model on MNIST with this custom loss function.We pick, somewhat at random, a batch size of 1024 and a learning rate of 0.1. The generalidea being to use larger batches and a larger learning rate than usual, since our"improved" gradients should lead us to quicker convergence.<jupyter_code># Construct an instance of MyModel def get_model(): inputs = keras.Input(shape=(784,)) intermediate = layers.Dense(256, activation="relu")(inputs) outputs = layers.Dense(10, activation="softmax")(intermediate) model = MyModel(inputs, outputs) return model # Prepare data (x_train, y_train), _ = keras.datasets.mnist.load_data() x_train = np.reshape(x_train, (-1, 784)) / 255 model = get_model() model.compile( optimizer=keras.optimizers.SGD(learning_rate=1e-2), loss="sparse_categorical_crossentropy", ) model.fit(x_train, y_train, epochs=3, batch_size=1024, validation_split=0.1)<jupyter_output><empty_output><jupyter_text>Oh no, it doesn't converge! 
Something is not working as planned.Time for some step-by-step printing of what's going on with our gradients.We add various `print` statements in the `train_step` method, and we make sure to pass`run_eagerly=True` to `compile()` to run our code step-by-step, eagerly.<jupyter_code>class MyModel(keras.Model): def train_step(self, data): print() print("----Start of step: %d" % (self.step_counter,)) self.step_counter += 1 inputs, targets = data trainable_vars = self.trainable_variables with tf.GradientTape() as tape2: with tf.GradientTape() as tape1: y_pred = self(inputs, training=True) # Forward pass # Compute the loss value # (the loss function is configured in `compile()`) loss = self.compute_loss(y=targets, y_pred=y_pred) # Compute first-order gradients dl_dw = tape1.gradient(loss, trainable_vars) # Compute second-order gradients d2l_dw2 = tape2.gradient(dl_dw, trainable_vars) print("Max of dl_dw[0]: %.4f" % tf.reduce_max(dl_dw[0])) print("Min of dl_dw[0]: %.4f" % tf.reduce_min(dl_dw[0])) print("Mean of dl_dw[0]: %.4f" % tf.reduce_mean(dl_dw[0])) print("-") print("Max of d2l_dw2[0]: %.4f" % tf.reduce_max(d2l_dw2[0])) print("Min of d2l_dw2[0]: %.4f" % tf.reduce_min(d2l_dw2[0])) print("Mean of d2l_dw2[0]: %.4f" % tf.reduce_mean(d2l_dw2[0])) # Combine first-order and second-order gradients grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)] # Update weights self.optimizer.apply_gradients(zip(grads, trainable_vars)) # Update metrics (includes the metric that tracks the loss) for metric in self.metrics: if metric.name == "loss": metric.update_state(loss) else: metric.update_state(targets, y_pred) # Return a dict mapping metric names to current value return {m.name: m.result() for m in self.metrics} model = get_model() model.compile( optimizer=keras.optimizers.SGD(learning_rate=1e-2), loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], run_eagerly=True, ) model.step_counter = 0 # We pass epochs=1 and steps_per_epoch=10 to only run 10 steps of training. 
model.fit(x_train, y_train, epochs=1, batch_size=1024, verbose=0, steps_per_epoch=10)<jupyter_output><empty_output><jupyter_text>What did we learn?- The first order and second order gradients can have values that differ by orders ofmagnitudes.- Sometimes, they may not even have the same sign.- Their values can vary greatly at each step.This leads us to an obvious idea: let's normalize the gradients before combining them.<jupyter_code>class MyModel(keras.Model): def train_step(self, data): inputs, targets = data trainable_vars = self.trainable_variables with tf.GradientTape() as tape2: with tf.GradientTape() as tape1: y_pred = self(inputs, training=True) # Forward pass # Compute the loss value # (the loss function is configured in `compile()`) loss = self.compute_loss(y=targets, y_pred=y_pred) # Compute first-order gradients dl_dw = tape1.gradient(loss, trainable_vars) # Compute second-order gradients d2l_dw2 = tape2.gradient(dl_dw, trainable_vars) dl_dw = [tf.math.l2_normalize(w) for w in dl_dw] d2l_dw2 = [tf.math.l2_normalize(w) for w in d2l_dw2] # Combine first-order and second-order gradients grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)] # Update weights self.optimizer.apply_gradients(zip(grads, trainable_vars)) # Update metrics (includes the metric that tracks the loss) for metric in self.metrics: if metric.name == "loss": metric.update_state(loss) else: metric.update_state(targets, y_pred) # Return a dict mapping metric names to current value return {m.name: m.result() for m in self.metrics} model = get_model() model.compile( optimizer=keras.optimizers.SGD(learning_rate=1e-2), loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) model.fit(x_train, y_train, epochs=5, batch_size=1024, validation_split=0.1)<jupyter_output><empty_output><jupyter_text>Now, training converges! It doesn't work well at all, but at least the model learnssomething.After spending a few minutes tuning parameters, we get to the following configurationthat works somewhat well (achieves 97% validation accuracy and seems reasonably robust tooverfitting):- Use `0.2 * w1 + 0.8 * w2` for combining gradients.- Use a learning rate that decays linearly over time.I'm not going to say that the idea works -- this isn't at all how you're supposed to dosecond-order optimization (pointers: see the Newton & Gauss-Newton methods, quasi-Newtonmethods, and BFGS). But hopefully this demonstration gave you an idea of how you candebug your way out of uncomfortable training situations.Remember: use `run_eagerly=True` for debugging what happens in `fit()`. 
And when your codeis finally working as expected, make sure to remove this flag in order to get the bestruntime performance!Here's our final training run:<jupyter_code>class MyModel(keras.Model): def train_step(self, data): inputs, targets = data trainable_vars = self.trainable_variables with tf.GradientTape() as tape2: with tf.GradientTape() as tape1: y_pred = self(inputs, training=True) # Forward pass # Compute the loss value # (the loss function is configured in `compile()`) loss = self.compute_loss(y=targets, y_pred=y_pred) # Compute first-order gradients dl_dw = tape1.gradient(loss, trainable_vars) # Compute second-order gradients d2l_dw2 = tape2.gradient(dl_dw, trainable_vars) dl_dw = [tf.math.l2_normalize(w) for w in dl_dw] d2l_dw2 = [tf.math.l2_normalize(w) for w in d2l_dw2] # Combine first-order and second-order gradients grads = [0.2 * w1 + 0.8 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)] # Update weights self.optimizer.apply_gradients(zip(grads, trainable_vars)) # Update metrics (includes the metric that tracks the loss) for metric in self.metrics: if metric.name == "loss": metric.update_state(loss) else: metric.update_state(targets, y_pred) # Return a dict mapping metric names to current value return {m.name: m.result() for m in self.metrics} model = get_model() lr = learning_rate = keras.optimizers.schedules.InverseTimeDecay( initial_learning_rate=0.1, decay_steps=25, decay_rate=0.1 ) model.compile( optimizer=keras.optimizers.SGD(lr), loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) model.fit(x_train, y_train, epochs=50, batch_size=2048, validation_split=0.1)<jupyter_output><empty_output>
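<jupyter_text>As a closing aside, it can be worth sanity-checking the gradient normalization itself on a toy tensor, in the spirit of Tip 1 (test each part in isolation). The short sketch below is illustrative only (it is not part of the training code) and simply confirms that `tf.math.l2_normalize` rescales a tensor to unit L2 norm, which is why the first-order and second-order gradients end up on a comparable scale before being combined.<jupyter_code>toy_grad = tf.constant([[3.0, 4.0], [0.0, 0.0]])
normalized_grad = tf.math.l2_normalize(toy_grad)
print("Norm before:", tf.norm(toy_grad).numpy())
print("Norm after:", tf.norm(normalized_grad).numpy())<jupyter_output><empty_output>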
keras-io/examples/keras_recipes/ipynb/debugging_tips.ipynb/0
{ "file_path": "keras-io/examples/keras_recipes/ipynb/debugging_tips.ipynb", "repo_id": "keras-io", "token_count": 6998 }
106
# Creating TFRecords **Author:** [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)<br> **Date created:** 2021/02/27<br> **Last modified:** 2023/12/20<br> **Description:** Converting data to the TFRecord format. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/creating_tfrecords.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/creating_tfrecords.py) --- ## Introduction The TFRecord format is a simple format for storing a sequence of binary records. Converting your data into TFRecord has many advantages, such as: - **More efficient storage**: the TFRecord data can take up less space than the original data; it can also be partitioned into multiple files. - **Fast I/O**: the TFRecord format can be read with parallel I/O operations, which is useful for [TPUs](https://www.tensorflow.org/guide/tpu) or multiple hosts. - **Self-contained files**: the TFRecord data can be read from a single source—for example, the [COCO2017](https://cocodataset.org/) dataset originally stores data in two folders ("images" and "annotations"). An important use case of the TFRecord data format is training on TPUs. First, TPUs are fast enough to benefit from optimized I/O operations. In addition, TPUs require data to be stored remotely (e.g. on Google Cloud Storage) and using the TFRecord format makes it easier to load the data without batch-downloading. Performance using the TFRecord format can be further improved if you also use it with the [tf.data](https://www.tensorflow.org/guide/data) API. In this example you will learn how to convert data of different types (image, text, and numeric) into TFRecord. **Reference** - [TFRecord and tf.train.Example](https://www.tensorflow.org/tutorials/load_data/tfrecord) --- ## Dependencies ```python import os os.environ["KERAS_BACKEND"] = "tensorflow" import keras import json import pprint import tensorflow as tf import matplotlib.pyplot as plt ``` --- ## Download the COCO2017 dataset We will be using the [COCO2017](https://cocodataset.org/) dataset, because it has many different types of features, including images, floating point data, and lists. It will serve as a good example of how to encode different features into the TFRecord format. This dataset has two sets of fields: images and annotation meta-data. 
The images are a collection of JPG files and the meta-data are stored in a JSON file which, according to the [official site](https://cocodataset.org/#format-data), contains the following properties: ``` id: int, image_id: int, category_id: int, segmentation: RLE or [polygon], object segmentation mask bbox: [x,y,width,height], object bounding box coordinates area: float, area of the bounding box iscrowd: 0 or 1, is single object or a collection ``` ```python root_dir = "datasets" tfrecords_dir = "tfrecords" images_dir = os.path.join(root_dir, "val2017") annotations_dir = os.path.join(root_dir, "annotations") annotation_file = os.path.join(annotations_dir, "instances_val2017.json") images_url = "http://images.cocodataset.org/zips/val2017.zip" annotations_url = ( "http://images.cocodataset.org/annotations/annotations_trainval2017.zip" ) # Download image files if not os.path.exists(images_dir): image_zip = keras.utils.get_file( "images.zip", cache_dir=os.path.abspath("."), origin=images_url, extract=True, ) os.remove(image_zip) # Download caption annotation files if not os.path.exists(annotations_dir): annotation_zip = keras.utils.get_file( "captions.zip", cache_dir=os.path.abspath("."), origin=annotations_url, extract=True, ) os.remove(annotation_zip) print("The COCO dataset has been downloaded and extracted successfully.") with open(annotation_file, "r") as f: annotations = json.load(f)["annotations"] print(f"Number of images: {len(annotations)}") ``` <div class="k-default-codeblock"> ``` Downloading data from http://images.cocodataset.org/zips/val2017.zip 815585330/815585330 ━━━━━━━━━━━━━━━━━━━━ 79s 0us/step Downloading data from http://images.cocodataset.org/annotations/annotations_trainval2017.zip 252907541/252907541 ━━━━━━━━━━━━━━━━━━━━ 5s 0us/step The COCO dataset has been downloaded and extracted successfully. Number of images: 36781 ``` </div> ### Contents of the COCO2017 dataset ```python pprint.pprint(annotations[60]) ``` <div class="k-default-codeblock"> ``` {'area': 367.89710000000014, 'bbox': [265.67, 222.31, 26.48, 14.71], 'category_id': 72, 'id': 34096, 'image_id': 525083, 'iscrowd': 0, 'segmentation': [[267.51, 222.31, 292.15, 222.31, 291.05, 237.02, 265.67, 237.02]]} ``` </div> --- ## Parameters `num_samples` is the number of data samples on each TFRecord file. `num_tfrecords` is total number of TFRecords that we will create. 
```python num_samples = 4096 num_tfrecords = len(annotations) // num_samples if len(annotations) % num_samples: num_tfrecords += 1 # add one record if there are any remaining samples if not os.path.exists(tfrecords_dir): os.makedirs(tfrecords_dir) # creating TFRecords output folder ``` --- ## Define TFRecords helper functions ```python def image_feature(value): """Returns a bytes_list from a string / byte.""" return tf.train.Feature( bytes_list=tf.train.BytesList(value=[tf.io.encode_jpeg(value).numpy()]) ) def bytes_feature(value): """Returns a bytes_list from a string / byte.""" return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.encode()])) def float_feature(value): """Returns a float_list from a float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) def int64_feature(value): """Returns an int64_list from a bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def float_feature_list(value): """Returns a list of float_list from a float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def create_example(image, path, example): feature = { "image": image_feature(image), "path": bytes_feature(path), "area": float_feature(example["area"]), "bbox": float_feature_list(example["bbox"]), "category_id": int64_feature(example["category_id"]), "id": int64_feature(example["id"]), "image_id": int64_feature(example["image_id"]), } return tf.train.Example(features=tf.train.Features(feature=feature)) def parse_tfrecord_fn(example): feature_description = { "image": tf.io.FixedLenFeature([], tf.string), "path": tf.io.FixedLenFeature([], tf.string), "area": tf.io.FixedLenFeature([], tf.float32), "bbox": tf.io.VarLenFeature(tf.float32), "category_id": tf.io.FixedLenFeature([], tf.int64), "id": tf.io.FixedLenFeature([], tf.int64), "image_id": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, feature_description) example["image"] = tf.io.decode_jpeg(example["image"], channels=3) example["bbox"] = tf.sparse.to_dense(example["bbox"]) return example ``` --- ## Generate data in the TFRecord format Let's generate the COCO2017 data in the TFRecord format. The format will be `file_{number}.tfrec` (this is optional, but including the number sequences in the file names can make counting easier). 
```python for tfrec_num in range(num_tfrecords): samples = annotations[(tfrec_num * num_samples) : ((tfrec_num + 1) * num_samples)] with tf.io.TFRecordWriter( tfrecords_dir + "/file_%.2i-%i.tfrec" % (tfrec_num, len(samples)) ) as writer: for sample in samples: image_path = f"{images_dir}/{sample['image_id']:012d}.jpg" image = tf.io.decode_jpeg(tf.io.read_file(image_path)) example = create_example(image, image_path, sample) writer.write(example.SerializeToString()) ``` --- ## Explore one sample from the generated TFRecord ```python raw_dataset = tf.data.TFRecordDataset(f"{tfrecords_dir}/file_00-{num_samples}.tfrec") parsed_dataset = raw_dataset.map(parse_tfrecord_fn) for features in parsed_dataset.take(1): for key in features.keys(): if key != "image": print(f"{key}: {features[key]}") print(f"Image shape: {features['image'].shape}") plt.figure(figsize=(7, 7)) plt.imshow(features["image"].numpy()) plt.show() ``` <div class="k-default-codeblock"> ``` bbox: [473.07 395.93 38.65 28.67] area: 702.1057739257812 category_id: 18 id: 1768 image_id: 289343 path: b'datasets/val2017/000000289343.jpg' Image shape: (640, 529, 3) ``` </div> ![png](/img/examples/keras_recipes/creating_tfrecords/creating_tfrecords_14_1.png) --- ## Train a simple model using the generated TFRecords Another advantage of TFRecord is that you are able to add many features to it and later use only a few of them, in this case, we are going to use only `image` and `category_id`. --- ## Define dataset helper functions ```python def prepare_sample(features): image = keras.ops.image.resize(features["image"], size=(224, 224)) return image, features["category_id"] def get_dataset(filenames, batch_size): dataset = ( tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTOTUNE) .map(parse_tfrecord_fn, num_parallel_calls=AUTOTUNE) .map(prepare_sample, num_parallel_calls=AUTOTUNE) .shuffle(batch_size * 10) .batch(batch_size) .prefetch(AUTOTUNE) ) return dataset train_filenames = tf.io.gfile.glob(f"{tfrecords_dir}/*.tfrec") batch_size = 32 epochs = 1 steps_per_epoch = 50 AUTOTUNE = tf.data.AUTOTUNE input_tensor = keras.layers.Input(shape=(224, 224, 3), name="image") model = keras.applications.EfficientNetB0( input_tensor=input_tensor, weights=None, classes=91 ) model.compile( optimizer=keras.optimizers.Adam(), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[keras.metrics.SparseCategoricalAccuracy()], ) model.fit( x=get_dataset(train_filenames, batch_size), epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=1, ) ``` <div class="k-default-codeblock"> ``` 50/50 ━━━━━━━━━━━━━━━━━━━━ 146s 2s/step - loss: 3.9206 - sparse_categorical_accuracy: 0.1690 <keras.src.callbacks.history.History at 0x7f70684c27a0> ``` </div> --- ## Conclusion This example demonstrates that instead of reading images and annotations from different sources you can have your data coming from a single source thanks to TFRecord. This process can make storing and reading data simpler and more efficient. For more information, you can go to the [TFRecord and tf.train.Example](https://www.tensorflow.org/tutorials/load_data/tfrecord) tutorial.
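---
## Appendix: verifying the record count (optional)

As an optional sanity check (not required for training), you can count the serialized examples across all generated shards and compare the total against `len(annotations)`. This is a quick way to confirm that no samples were dropped while writing the TFRecord files.

```python
all_tfrec_files = tf.io.gfile.glob(f"{tfrecords_dir}/*.tfrec")
total_records = sum(
    1 for _ in tf.data.TFRecordDataset(all_tfrec_files).as_numpy_iterator()
)
print(f"Total serialized examples: {total_records}")
```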
keras-io/examples/keras_recipes/md/creating_tfrecords.md/0
{ "file_path": "keras-io/examples/keras_recipes/md/creating_tfrecords.md", "repo_id": "keras-io", "token_count": 4280 }
107
""" Title: Estimating required sample size for model training Author: [JacoVerster](https://twitter.com/JacoVerster) Date created: 2021/05/20 Last modified: 2021/06/06 Description: Modeling the relationship between training set size and model accuracy. Accelerator: GPU """ """ # Introduction In many real-world scenarios, the amount image data available to train a deep learning model is limited. This is especially true in the medical imaging domain, where dataset creation is costly. One of the first questions that usually comes up when approaching a new problem is: **"how many images will we need to train a good enough machine learning model?"** In most cases, a small set of samples is available, and we can use it to model the relationship between training data size and model performance. Such a model can be used to estimate the optimal number of images needed to arrive at a sample size that would achieve the required model performance. A systematic review of [Sample-Size Determination Methodologies](https://www.researchgate.net/publication/335779941_Sample-Size_Determination_Methodologies_for_Machine_Learning_in_Medical_Imaging_Research_A_Systematic_Review) by Balki et al. provides examples of several sample-size determination methods. In this example, a balanced subsampling scheme is used to determine the optimal sample size for our model. This is done by selecting a random subsample consisting of Y number of images and training the model using the subsample. The model is then evaluated on an independent test set. This process is repeated N times for each subsample with replacement to allow for the construction of a mean and confidence interval for the observed performance. """ """ ## Setup """ import os os.environ["KERAS_BACKEND"] = "tensorflow" import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import keras from keras import layers import tensorflow_datasets as tfds # Define seed and fixed variables seed = 42 keras.utils.set_random_seed(seed) AUTO = tf.data.AUTOTUNE """ ## Load TensorFlow dataset and convert to NumPy arrays We'll be using the [TF Flowers dataset](https://www.tensorflow.org/datasets/catalog/tf_flowers). 
""" # Specify dataset parameters dataset_name = "tf_flowers" batch_size = 64 image_size = (224, 224) # Load data from tfds and split 10% off for a test set (train_data, test_data), ds_info = tfds.load( dataset_name, split=["train[:90%]", "train[90%:]"], shuffle_files=True, as_supervised=True, with_info=True, ) # Extract number of classes and list of class names num_classes = ds_info.features["label"].num_classes class_names = ds_info.features["label"].names print(f"Number of classes: {num_classes}") print(f"Class names: {class_names}") # Convert datasets to NumPy arrays def dataset_to_array(dataset, image_size, num_classes): images, labels = [], [] for img, lab in dataset.as_numpy_iterator(): images.append(tf.image.resize(img, image_size).numpy()) labels.append(tf.one_hot(lab, num_classes)) return np.array(images), np.array(labels) img_train, label_train = dataset_to_array(train_data, image_size, num_classes) img_test, label_test = dataset_to_array(test_data, image_size, num_classes) num_train_samples = len(img_train) print(f"Number of training samples: {num_train_samples}") """ ## Plot a few examples from the test set """ plt.figure(figsize=(16, 12)) for n in range(30): ax = plt.subplot(5, 6, n + 1) plt.imshow(img_test[n].astype("uint8")) plt.title(np.array(class_names)[label_test[n] == True][0]) plt.axis("off") """ ## Augmentation Define image augmentation using keras preprocessing layers and apply them to the training set. """ # Define image augmentation model image_augmentation = keras.Sequential( [ layers.RandomFlip(mode="horizontal"), layers.RandomRotation(factor=0.1), layers.RandomZoom(height_factor=(-0.1, -0)), layers.RandomContrast(factor=0.1), ], ) # Apply the augmentations to the training images and plot a few examples img_train = image_augmentation(img_train).numpy() plt.figure(figsize=(16, 12)) for n in range(30): ax = plt.subplot(5, 6, n + 1) plt.imshow(img_train[n].astype("uint8")) plt.title(np.array(class_names)[label_train[n] == True][0]) plt.axis("off") """ ## Define model building & training functions We create a few convenience functions to build a transfer-learning model, compile and train it and unfreeze layers for fine-tuning. """ def build_model(num_classes, img_size=image_size[0], top_dropout=0.3): """Creates a classifier based on pre-trained MobileNetV2. Arguments: num_classes: Int, number of classese to use in the softmax layer. img_size: Int, square size of input images (defaults is 224). top_dropout: Int, value for dropout layer (defaults is 0.3). Returns: Uncompiled Keras model. """ # Create input and pre-processing layers for MobileNetV2 inputs = layers.Input(shape=(img_size, img_size, 3)) x = layers.Rescaling(scale=1.0 / 127.5, offset=-1)(inputs) model = keras.applications.MobileNetV2( include_top=False, weights="imagenet", input_tensor=x ) # Freeze the pretrained weights model.trainable = False # Rebuild top x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output) x = layers.Dropout(top_dropout)(x) outputs = layers.Dense(num_classes, activation="softmax")(x) model = keras.Model(inputs, outputs) print("Trainable weights:", len(model.trainable_weights)) print("Non_trainable weights:", len(model.non_trainable_weights)) return model def compile_and_train( model, training_data, training_labels, metrics=[keras.metrics.AUC(name="auc"), "acc"], optimizer=keras.optimizers.Adam(), patience=5, epochs=5, ): """Compiles and trains the model. Arguments: model: Uncompiled Keras model. training_data: NumPy Array, trainig data. 
training_labels: NumPy Array, trainig labels. metrics: Keras/TF metrics, requires at least 'auc' metric (default is `[keras.metrics.AUC(name='auc'), 'acc']`). optimizer: Keras/TF optimizer (defaults is `keras.optimizers.Adam()). patience: Int, epochsfor EarlyStopping patience (defaults is 5). epochs: Int, number of epochs to train (default is 5). Returns: Training history for trained Keras model. """ stopper = keras.callbacks.EarlyStopping( monitor="val_auc", mode="max", min_delta=0, patience=patience, verbose=1, restore_best_weights=True, ) model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=metrics) history = model.fit( x=training_data, y=training_labels, batch_size=batch_size, epochs=epochs, validation_split=0.1, callbacks=[stopper], ) return history def unfreeze(model, block_name, verbose=0): """Unfreezes Keras model layers. Arguments: model: Keras model. block_name: Str, layer name for example block_name = 'block4'. Checks if supplied string is in the layer name. verbose: Int, 0 means silent, 1 prints out layers trainability status. Returns: Keras model with all layers after (and including) the specified block_name to trainable, excluding BatchNormalization layers. """ # Unfreeze from block_name onwards set_trainable = False for layer in model.layers: if block_name in layer.name: set_trainable = True if set_trainable and not isinstance(layer, layers.BatchNormalization): layer.trainable = True if verbose == 1: print(layer.name, "trainable") else: if verbose == 1: print(layer.name, "NOT trainable") print("Trainable weights:", len(model.trainable_weights)) print("Non-trainable weights:", len(model.non_trainable_weights)) return model """ ## Define iterative training function To train a model over several subsample sets we need to create an iterative training function. """ def train_model(training_data, training_labels): """Trains the model as follows: - Trains only the top layers for 10 epochs. - Unfreezes deeper layers. - Train for 20 more epochs. Arguments: training_data: NumPy Array, trainig data. training_labels: NumPy Array, trainig labels. Returns: Model accuracy. """ model = build_model(num_classes) # Compile and train top layers history = compile_and_train( model, training_data, training_labels, metrics=[keras.metrics.AUC(name="auc"), "acc"], optimizer=keras.optimizers.Adam(), patience=3, epochs=10, ) # Unfreeze model from block 10 onwards model = unfreeze(model, "block_10") # Compile and train for 20 epochs with a lower learning rate fine_tune_epochs = 20 total_epochs = history.epoch[-1] + fine_tune_epochs history_fine = compile_and_train( model, training_data, training_labels, metrics=[keras.metrics.AUC(name="auc"), "acc"], optimizer=keras.optimizers.Adam(learning_rate=1e-4), patience=5, epochs=total_epochs, ) # Calculate model accuracy on the test set _, _, acc = model.evaluate(img_test, label_test) return np.round(acc, 4) """ ## Train models iteratively Now that we have model building functions and supporting iterative functions we can train the model over several subsample splits. - We select the subsample splits as 5%, 10%, 25% and 50% of the downloaded dataset. We pretend that only 50% of the actual data is available at present. - We train the model 5 times from scratch at each split and record the accuracy values. Note that this trains 20 models and will take some time. Make sure you have a GPU runtime active. To keep this example lightweight, sample data from a previous training run is provided. 
""" def train_iteratively(sample_splits=[0.05, 0.1, 0.25, 0.5], iter_per_split=5): """Trains a model iteratively over several sample splits. Arguments: sample_splits: List/NumPy array, contains fractions of the trainins set to train over. iter_per_split: Int, number of times to train a model per sample split. Returns: Training accuracy for all splits and iterations and the number of samples used for training at each split. """ # Train all the sample models and calculate accuracy train_acc = [] sample_sizes = [] for fraction in sample_splits: print(f"Fraction split: {fraction}") # Repeat training 3 times for each sample size sample_accuracy = [] num_samples = int(num_train_samples * fraction) for i in range(iter_per_split): print(f"Run {i+1} out of {iter_per_split}:") # Create fractional subsets rand_idx = np.random.randint(num_train_samples, size=num_samples) train_img_subset = img_train[rand_idx, :] train_label_subset = label_train[rand_idx, :] # Train model and calculate accuracy accuracy = train_model(train_img_subset, train_label_subset) print(f"Accuracy: {accuracy}") sample_accuracy.append(accuracy) train_acc.append(sample_accuracy) sample_sizes.append(num_samples) return train_acc, sample_sizes # Running the above function produces the following outputs train_acc = [ [0.8202, 0.7466, 0.8011, 0.8447, 0.8229], [0.861, 0.8774, 0.8501, 0.8937, 0.891], [0.891, 0.9237, 0.8856, 0.9101, 0.891], [0.8937, 0.9373, 0.9128, 0.8719, 0.9128], ] sample_sizes = [165, 330, 825, 1651] """ ## Learning curve We now plot the learning curve by fitting an exponential curve through the mean accuracy points. We use TF to fit an exponential function through the data. We then extrapolate the learning curve to the predict the accuracy of a model trained on the whole training set. """ def fit_and_predict(train_acc, sample_sizes, pred_sample_size): """Fits a learning curve to model training accuracy results. Arguments: train_acc: List/Numpy Array, training accuracy for all model training splits and iterations. sample_sizes: List/Numpy array, number of samples used for training at each split. pred_sample_size: Int, sample size to predict model accuracy based on fitted learning curve. 
""" x = sample_sizes mean_acc = tf.convert_to_tensor([np.mean(i) for i in train_acc]) error = [np.std(i) for i in train_acc] # Define mean squared error cost and exponential curve fit functions mse = keras.losses.MeanSquaredError() def exp_func(x, a, b): return a * x**b # Define variables, learning rate and number of epochs for fitting with TF a = tf.Variable(0.0) b = tf.Variable(0.0) learning_rate = 0.01 training_epochs = 5000 # Fit the exponential function to the data for epoch in range(training_epochs): with tf.GradientTape() as tape: y_pred = exp_func(x, a, b) cost_function = mse(y_pred, mean_acc) # Get gradients and compute adjusted weights gradients = tape.gradient(cost_function, [a, b]) a.assign_sub(gradients[0] * learning_rate) b.assign_sub(gradients[1] * learning_rate) print(f"Curve fit weights: a = {a.numpy()} and b = {b.numpy()}.") # We can now estimate the accuracy for pred_sample_size max_acc = exp_func(pred_sample_size, a, b).numpy() # Print predicted x value and append to plot values print(f"A model accuracy of {max_acc} is predicted for {pred_sample_size} samples.") x_cont = np.linspace(x[0], pred_sample_size, 100) # Build the plot fig, ax = plt.subplots(figsize=(12, 6)) ax.errorbar(x, mean_acc, yerr=error, fmt="o", label="Mean acc & std dev.") ax.plot(x_cont, exp_func(x_cont, a, b), "r-", label="Fitted exponential curve.") ax.set_ylabel("Model clasification accuracy.", fontsize=12) ax.set_xlabel("Training sample size.", fontsize=12) ax.set_xticks(np.append(x, pred_sample_size)) ax.set_yticks(np.append(mean_acc, max_acc)) ax.set_xticklabels(list(np.append(x, pred_sample_size)), rotation=90, fontsize=10) ax.yaxis.set_tick_params(labelsize=10) ax.set_title("Learning curve: model accuracy vs sample size.", fontsize=14) ax.legend(loc=(0.75, 0.75), fontsize=10) ax.xaxis.grid(True) ax.yaxis.grid(True) plt.tight_layout() plt.show() # The mean absolute error (MAE) is calculated for curve fit to see how well # it fits the data. The lower the error the better the fit. mae = keras.losses.MeanAbsoluteError() print(f"The mae for the curve fit is {mae(mean_acc, exp_func(x, a, b)).numpy()}.") # We use the whole training set to predict the model accuracy fit_and_predict(train_acc, sample_sizes, pred_sample_size=num_train_samples) """ From the extrapolated curve we can see that 3303 images will yield an estimated accuracy of about 95%. Now, let's use all the data (3303 images) and train the model to see if our prediction was accurate! """ # Now train the model with full dataset to get the actual accuracy accuracy = train_model(img_train, label_train) print(f"A model accuracy of {accuracy} is reached on {num_train_samples} images!") """ ## Conclusion We see that a model accuracy of about 94-96%* is reached using 3303 images. This is quite close to our estimate! Even though we used only 50% of the dataset (1651 images) we were able to model the training behaviour of our model and predict the model accuracy for a given amount of images. This same methodology can be used to predict the amount of images needed to reach a desired accuracy. This is very useful when a smaller set of data is available, and it has been shown that convergence on a deep learning model is possible, but more images are needed. The image count prediction can be used to plan and budget for further image collection initiatives. """
keras-io/examples/keras_recipes/sample_size_estimate.py/0
{ "file_path": "keras-io/examples/keras_recipes/sample_size_estimate.py", "repo_id": "keras-io", "token_count": 5969 }
108
<jupyter_start><jupyter_text>Data Parallel Training with KerasNLP and tf.distribute**Author:** Anshuman Mishra**Date created:** 2023/07/07**Last modified:** 2023/07/07**Description:** Data Parallel training with KerasNLP and tf.distribute. Introduction Distributed training is a technique used to train deep learning models on multiple devices or machines simultaneously. It helps to reduce training time and allows for training larger models with more data. KerasNLP is a library that provides tools and utilities for natural language processing tasks, including distributed training. In this tutorial, we will use KerasNLP to train a BERT-based masked language model (MLM) on the wikitext-2 dataset (a 2 million word dataset of Wikipedia articles). The MLM task involves predicting the masked words in a sentence, which helps the model learn contextual representations of words. This guide focuses on data parallelism, in particular synchronous data parallelism, where each accelerator (a GPU or TPU) holds a complete replica of the model, and sees a different partial batch of the input data. Partial gradients are computed on each device, aggregated, and used to compute a global gradient update. Specifically, this guide teaches you how to use the `tf.distribute` API to train Keras models on multiple GPUs, with minimal changes to your code, in the following two setups: - On multiple GPUs (typically 2 to 8) installed on a single machine (single host, multi-device training). This is the most common setup for researchers and small-scale industry workflows. - On a cluster of many machines, each hosting one or multiple GPUs (multi-worker distributed training). This is a good setup for large-scale industry workflows, e.g. training high-resolution text summarization models on billion word datasets on 20-100 GPUs.<jupyter_code>!pip install -q --upgrade keras-nlp !pip install -q --upgrade keras # Upgrade to Keras 3.<jupyter_output><empty_output><jupyter_text>Imports<jupyter_code>import os os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf import keras import keras_nlp<jupyter_output><empty_output><jupyter_text>Before we start any training, let's configure our single GPU to show up as two logical devices. When you are training with two or more physical GPUs, this is totally unnecessary. This is just a trick to show real distributed training on the default Colab GPU runtime, which has only one GPU available.<jupyter_code>!nvidia-smi --query-gpu=memory.total --format=csv,noheader physical_devices = tf.config.list_physical_devices("GPU") tf.config.set_logical_device_configuration( physical_devices[0], [ tf.config.LogicalDeviceConfiguration(memory_limit=15360 // 2), tf.config.LogicalDeviceConfiguration(memory_limit=15360 // 2), ], ) logical_devices = tf.config.list_logical_devices("GPU") logical_devices EPOCHS = 3<jupyter_output><empty_output><jupyter_text>To do single-host, multi-device synchronous training with a Keras model, you would use the `tf.distribute.MirroredStrategy` API. Here's how it works: - Instantiate a `MirroredStrategy`, optionally configuring which specific devices you want to use (by default the strategy will use all GPUs available). - Use the strategy object to open a scope, and within this scope, create all the Keras objects you need that contain variables.
Typically, that means **creating & compiling themodel** inside the distribution scope.- Train the model via `fit()` as usual.<jupyter_code>strategy = tf.distribute.MirroredStrategy() print(f"Number of devices: {strategy.num_replicas_in_sync}")<jupyter_output><empty_output><jupyter_text>Base batch size and learning rate<jupyter_code>base_batch_size = 32 base_learning_rate = 1e-4<jupyter_output><empty_output><jupyter_text>Calculate scaled batch size and learning rate<jupyter_code>scaled_batch_size = base_batch_size * strategy.num_replicas_in_sync scaled_learning_rate = base_learning_rate * strategy.num_replicas_in_sync<jupyter_output><empty_output><jupyter_text>Now, we need to download and preprocess the wikitext-2 dataset. This dataset will beused for pretraining the BERT model. We will filter out short lines to ensure that thedata has enough context for training.<jupyter_code>keras.utils.get_file( origin="https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip", extract=True, ) wiki_dir = os.path.expanduser("~/.keras/datasets/wikitext-2/") # Load wikitext-103 and filter out short lines. wiki_train_ds = ( tf.data.TextLineDataset( wiki_dir + "wiki.train.tokens", ) .filter(lambda x: tf.strings.length(x) > 100) .shuffle(buffer_size=500) .batch(scaled_batch_size) .cache() .prefetch(tf.data.AUTOTUNE) ) wiki_val_ds = ( tf.data.TextLineDataset(wiki_dir + "wiki.valid.tokens") .filter(lambda x: tf.strings.length(x) > 100) .shuffle(buffer_size=500) .batch(scaled_batch_size) .cache() .prefetch(tf.data.AUTOTUNE) ) wiki_test_ds = ( tf.data.TextLineDataset(wiki_dir + "wiki.test.tokens") .filter(lambda x: tf.strings.length(x) > 100) .shuffle(buffer_size=500) .batch(scaled_batch_size) .cache() .prefetch(tf.data.AUTOTUNE) )<jupyter_output><empty_output><jupyter_text>In the above code, we download the wikitext-2 dataset and extract it. Then, we definethree datasets: wiki_train_ds, wiki_val_ds, and wiki_test_ds. These datasets arefiltered to remove short lines and are batched for efficient training. It's a common practice to use a decayed learning rate in NLP training/tuning. We'lluse `PolynomialDecay` schedule here.<jupyter_code>total_training_steps = sum(1 for _ in wiki_train_ds.as_numpy_iterator()) * EPOCHS lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=scaled_learning_rate, decay_steps=total_training_steps, end_learning_rate=0.0, ) class PrintLR(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): print( f"\nLearning rate for epoch {epoch + 1} is {model_dist.optimizer.learning_rate.numpy()}" )<jupyter_output><empty_output><jupyter_text>Let's also make a callback to TensorBoard, this will enable visualization of differentmetrics while we train the model in later part of this tutorial. We put all the callbackstogether as follows:<jupyter_code>callbacks = [ tf.keras.callbacks.TensorBoard(log_dir="./logs"), PrintLR(), ] print(tf.config.list_physical_devices("GPU"))<jupyter_output><empty_output><jupyter_text>With the datasets prepared, we now initialize and compile our model and optimizer withinthe `strategy.scope()`:<jupyter_code>with strategy.scope(): # Everything that creates variables should be under the strategy scope. # In general this is only model construction & `compile()`. 
model_dist = keras_nlp.models.BertMaskedLM.from_preset("bert_tiny_en_uncased") # This line just sets the pooled_dense layer as non-trainable; we do this to avoid # warnings about this layer being unused model_dist.get_layer("bert_backbone").get_layer("pooled_dense").trainable = False model_dist.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.AdamW(learning_rate=scaled_learning_rate), weighted_metrics=[keras.metrics.SparseCategoricalAccuracy()], jit_compile=False, ) model_dist.fit( wiki_train_ds, validation_data=wiki_val_ds, epochs=EPOCHS, callbacks=callbacks )<jupyter_output><empty_output><jupyter_text>After fitting our model under the scope, we evaluate it normally!<jupyter_code>model_dist.evaluate(wiki_test_ds)<jupyter_output><empty_output>
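<jupyter_text>As a rough sketch (not part of the original notebook), the same single-host recipe extends to the multi-worker setup mentioned in the introduction by swapping the strategy. `tf.distribute.MultiWorkerMirroredStrategy` reads the cluster layout from the `TF_CONFIG` environment variable, which you would set differently on each worker; the host names and ports below are placeholders, not real addresses.<jupyter_code># Hypothetical multi-worker setup; TF_CONFIG must be set on every worker.
# import json
# os.environ["TF_CONFIG"] = json.dumps({
#     "cluster": {"worker": ["host1:12345", "host2:12345"]},
#     "task": {"type": "worker", "index": 0},
# })
multi_worker_strategy = tf.distribute.MultiWorkerMirroredStrategy()
with multi_worker_strategy.scope():
    # Model construction & compile() go here, exactly as inside the
    # MirroredStrategy scope above; fit() is then called outside the scope.
    pass<jupyter_output><empty_output>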
keras-io/examples/nlp/ipynb/data_parallel_training_with_keras_nlp.ipynb/0
{ "file_path": "keras-io/examples/nlp/ipynb/data_parallel_training_with_keras_nlp.ipynb", "repo_id": "keras-io", "token_count": 2489 }
109
<jupyter_start><jupyter_text>Semantic Similarity with KerasNLP**Author:** [Anshuman Mishra](https://github.com/shivance/)**Date created:** 2023/02/25**Last modified:** 2023/02/25**Description:** Use pretrained models from KerasNLP for the Semantic Similarity Task. Introduction Semantic similarity refers to the task of determining the degree of similarity between two sentences in terms of their meaning. We already saw in [this](https://keras.io/examples/nlp/semantic_similarity_with_bert/) example how to use SNLI (Stanford Natural Language Inference) corpus to predict sentence semantic similarity with the HuggingFace Transformers library. In this tutorial we will learn how to use [KerasNLP](https://keras.io/keras_nlp/), an extension of the core Keras API, for the same task. Furthermore, we will discover how KerasNLP effectively reduces boilerplate code and simplifies the process of building and utilizing models. For more information on KerasNLP, please refer to [KerasNLP's official documentation](https://keras.io/keras_nlp/). This guide is broken down into the following parts: 1. *Setup*, task definition, and establishing a baseline. 2. *Establishing baseline* with BERT. 3. *Saving and Reloading* the model. 4. *Performing inference* with the model. 5. *Improving accuracy* with RoBERTa Setup The following guide uses [Keras Core](https://keras.io/keras_core/) to work in any of `tensorflow`, `jax` or `torch`. Support for Keras Core is baked into KerasNLP, simply change the `KERAS_BACKEND` environment variable below to change the backend you would like to use. We select the `jax` backend below, which will give us a particularly fast train step.<jupyter_code>!pip install -q --upgrade keras-nlp !pip install -q --upgrade keras # Upgrade to Keras 3. import numpy as np import tensorflow as tf import keras import keras_nlp import tensorflow_datasets as tfds<jupyter_output><empty_output><jupyter_text>To load the SNLI dataset, we use the tensorflow-datasets library, which contains over 550,000 samples in total. However, to ensure that this example runs quickly, we use only 20% of the training samples. Overview of SNLI Dataset Every sample in the dataset contains three components: `hypothesis`, `premise`, and `label`. The premise represents the original caption provided to the author of the pair, while the hypothesis refers to the hypothesis caption created by the author of the pair. The label is assigned by annotators to indicate the similarity between the two sentences. The dataset contains three possible similarity label values: Contradiction, Entailment, and Neutral. Contradiction represents completely dissimilar sentences, while Entailment denotes sentences with similar meaning. Lastly, Neutral refers to sentences where no clear similarity or dissimilarity can be established between them.<jupyter_code>snli_train = tfds.load("snli", split="train[:20%]") snli_val = tfds.load("snli", split="validation") snli_test = tfds.load("snli", split="test") # Here's an example of what our training samples look like, where we randomly select # four samples: sample = snli_test.batch(4).take(1).get_single_element() sample<jupyter_output><empty_output><jupyter_text>Preprocessing In our dataset, we have identified that some samples have missing or incorrectly labeled data, which is denoted by a value of -1.
To ensure the accuracy and reliability of our model,we simply filter out these samples from our dataset.<jupyter_code>def filter_labels(sample): return sample["label"] >= 0<jupyter_output><empty_output><jupyter_text>Here's a utility function that splits the example into an `(x, y)` tuple that is suitablefor `model.fit()`. By default, `keras_nlp.models.BertClassifier` will tokenize and packtogether raw strings using a `"[SEP]"` token during training. Therefore, this labelsplitting is all the data preparation that we need to perform.<jupyter_code>def split_labels(sample): x = (sample["hypothesis"], sample["premise"]) y = sample["label"] return x, y train_ds = ( snli_train.filter(filter_labels) .map(split_labels, num_parallel_calls=tf.data.AUTOTUNE) .batch(16) ) val_ds = ( snli_val.filter(filter_labels) .map(split_labels, num_parallel_calls=tf.data.AUTOTUNE) .batch(16) ) test_ds = ( snli_test.filter(filter_labels) .map(split_labels, num_parallel_calls=tf.data.AUTOTUNE) .batch(16) )<jupyter_output><empty_output><jupyter_text>Establishing baseline with BERT.We use the BERT model from KerasNLP to establish a baseline for our semantic similaritytask. The `keras_nlp.models.BertClassifier` class attaches a classification head to the BERTBackbone, mapping the backbone outputs to a logit output suitable for a classification task.This significantly reduces the need for custom code.KerasNLP models have built-in tokenization capabilities that handle tokenization by defaultbased on the selected model. However, users can also use custom preprocessing techniquesas per their specific needs. If we pass a tuple as input, the model will tokenize all thestrings and concatenate them with a `"[SEP]"` separator.We use this model with pretrained weights, and we can use the `from_preset()` methodto use our own preprocessor. For the SNLI dataset, we set `num_classes` to 3.<jupyter_code>bert_classifier = keras_nlp.models.BertClassifier.from_preset( "bert_tiny_en_uncased", num_classes=3 )<jupyter_output><empty_output><jupyter_text>Please note that the BERT Tiny model has only 4,386,307 trainable parameters.KerasNLP task models come with compilation defaults. We can now train the model we justinstantiated by calling the `fit()` method.<jupyter_code>bert_classifier.fit(train_ds, validation_data=val_ds, epochs=1)<jupyter_output><empty_output><jupyter_text>Our BERT classifier achieved an accuracy of around 76% on the validation split. Now,let's evaluate its performance on the test split. Evaluate the performance of the trained model on test data.<jupyter_code>bert_classifier.evaluate(test_ds)<jupyter_output><empty_output><jupyter_text>Our baseline BERT model achieved a similar accuracy of around 76% on the test split.Now, let's try to improve its performance by recompiling the model with a slightlyhigher learning rate.<jupyter_code>bert_classifier = keras_nlp.models.BertClassifier.from_preset( "bert_tiny_en_uncased", num_classes=3 ) bert_classifier.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=keras.optimizers.Adam(5e-5), metrics=["accuracy"], ) bert_classifier.fit(train_ds, validation_data=val_ds, epochs=1) bert_classifier.evaluate(test_ds)<jupyter_output><empty_output><jupyter_text>Just tweaking the learning rate alone was not enough to boost performance, whichstayed right around 76%. 
Let's try again, but this time with`keras.optimizers.AdamW`, and a learning rate schedule.<jupyter_code>class TriangularSchedule(keras.optimizers.schedules.LearningRateSchedule): """Linear ramp up for `warmup` steps, then linear decay to zero at `total` steps.""" def __init__(self, rate, warmup, total): self.rate = rate self.warmup = warmup self.total = total def get_config(self): config = {"rate": self.rate, "warmup": self.warmup, "total": self.total} return config def __call__(self, step): step = keras.ops.cast(step, dtype="float32") rate = keras.ops.cast(self.rate, dtype="float32") warmup = keras.ops.cast(self.warmup, dtype="float32") total = keras.ops.cast(self.total, dtype="float32") warmup_rate = rate * step / self.warmup cooldown_rate = rate * (total - step) / (total - warmup) triangular_rate = keras.ops.minimum(warmup_rate, cooldown_rate) return keras.ops.maximum(triangular_rate, 0.0) bert_classifier = keras_nlp.models.BertClassifier.from_preset( "bert_tiny_en_uncased", num_classes=3 ) # Get the total count of training batches. # This requires walking the dataset to filter all -1 labels. epochs = 3 total_steps = sum(1 for _ in train_ds.as_numpy_iterator()) * epochs warmup_steps = int(total_steps * 0.2) bert_classifier.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=keras.optimizers.AdamW( TriangularSchedule(1e-4, warmup_steps, total_steps) ), metrics=["accuracy"], ) bert_classifier.fit(train_ds, validation_data=val_ds, epochs=epochs)<jupyter_output><empty_output><jupyter_text>Success! With the learning rate scheduler and the `AdamW` optimizer, our validationaccuracy improved to around 79%.Now, let's evaluate our final model on the test set and see how it performs.<jupyter_code>bert_classifier.evaluate(test_ds)<jupyter_output><empty_output><jupyter_text>Our Tiny BERT model achieved an accuracy of approximately 79% on the test setwith the use of a learning rate scheduler. This is a significant improvement overour previous results. Fine-tuning a pretrained BERTmodel can be a powerful tool in natural language processing tasks, and even asmall model like Tiny BERT can achieve impressive results.Let's save our model for nowand move on to learning how to perform inference with it. Save and Reload the model<jupyter_code>bert_classifier.save("bert_classifier.keras") restored_model = keras.models.load_model("bert_classifier.keras") restored_model.evaluate(test_ds)<jupyter_output><empty_output><jupyter_text>Performing inference with the model.Let's see how to perform inference with KerasNLP models<jupyter_code># Convert to Hypothesis-Premise pair, for forward pass through model sample = (sample["hypothesis"], sample["premise"]) sample<jupyter_output><empty_output><jupyter_text>The default preprocessor in KerasNLP models handles input tokenization automatically,so we don't need to perform tokenization explicitly.<jupyter_code>predictions = bert_classifier.predict(sample) def softmax(x): return np.exp(x) / np.exp(x).sum(axis=0) # Get the class predictions with maximum probabilities predictions = softmax(predictions)<jupyter_output><empty_output><jupyter_text>Improving accuracy with RoBERTaNow that we have established a baseline, we can attempt to improve our resultsby experimenting with different models. 
Thanks to KerasNLP, fine-tuning a RoBERTa checkpoint on the same dataset is easy with just a few lines of code.<jupyter_code># Initializing a RoBERTa from preset roberta_classifier = keras_nlp.models.RobertaClassifier.from_preset( "roberta_base_en", num_classes=3 ) roberta_classifier.fit(train_ds, validation_data=val_ds, epochs=1) roberta_classifier.evaluate(test_ds)<jupyter_output><empty_output><jupyter_text>The RoBERTa base model has significantly more trainable parameters than the BERT Tiny model, with almost 30 times as many at 124,645,635 parameters. As a result, it took approximately 1.5 hours to train on a P100 GPU. However, the performance improvement was substantial, with accuracy increasing to 88% on both the validation and test splits. With RoBERTa, we were able to fit a maximum batch size of 16 on our P100 GPU. Despite using a different model, the steps to perform inference with RoBERTa are the same as with BERT!<jupyter_code>predictions = roberta_classifier.predict(sample) print(tf.math.argmax(predictions, axis=1).numpy())<jupyter_output><empty_output>
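<jupyter_text>As a small optional addition (not in the original notebook), the predicted class indices can be mapped back to human-readable SNLI label names by reading them from the tensorflow-datasets metadata rather than hard-coding a label order; the snippet below is a sketch that reuses the `predictions` computed above.<jupyter_code># Hypothetical helper: look up label names from the dataset metadata.
label_names = tfds.builder("snli").info.features["label"].names
for idx in tf.math.argmax(predictions, axis=1).numpy():
    print(label_names[int(idx)])<jupyter_output><empty_output>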
keras-io/examples/nlp/ipynb/semantic_similarity_with_keras_nlp.ipynb/0
{ "file_path": "keras-io/examples/nlp/ipynb/semantic_similarity_with_keras_nlp.ipynb", "repo_id": "keras-io", "token_count": 3474 }
110
# Character-level recurrent sequence-to-sequence model **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2017/09/29<br> **Last modified:** 2023/11/22<br> **Description:** Character-level recurrent sequence-to-sequence model. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/lstm_seq2seq.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/lstm_seq2seq.py) --- ## Introduction This example demonstrates how to implement a basic character-level recurrent sequence-to-sequence model. We apply it to translating short English sentences into short French sentences, character-by-character. Note that it is fairly unusual to do character-level machine translation, as word-level models are more common in this domain. **Summary of the algorithm** - We start with input sequences from a domain (e.g. English sentences) and corresponding target sequences from another domain (e.g. French sentences). - An encoder LSTM turns input sequences to 2 state vectors (we keep the last LSTM state and discard the outputs). - A decoder LSTM is trained to turn the target sequences into the same sequence but offset by one timestep in the future, a training process called "teacher forcing" in this context. It uses as initial state the state vectors from the encoder. Effectively, the decoder learns to generate `targets[t+1...]` given `targets[...t]`, conditioned on the input sequence. - In inference mode, when we want to decode unknown input sequences, we: - Encode the input sequence into state vectors - Start with a target sequence of size 1 (just the start-of-sequence character) - Feed the state vectors and 1-char target sequence to the decoder to produce predictions for the next character - Sample the next character using these predictions (we simply use argmax). - Append the sampled character to the target sequence - Repeat until we generate the end-of-sequence character or we hit the character limit. --- ## Setup ```python import numpy as np import keras import os from pathlib import Path ``` --- ## Download the data ```python fpath = keras.utils.get_file(origin="http://www.manythings.org/anki/fra-eng.zip") dirpath = Path(fpath).parent.absolute() os.system(f"unzip -q {fpath} -d {dirpath}") ``` <div class="k-default-codeblock"> ``` 0 ``` </div> --- ## Configuration ```python batch_size = 64 # Batch size for training. epochs = 100 # Number of epochs to train for. latent_dim = 256 # Latent dimensionality of the encoding space. num_samples = 10000 # Number of samples to train on. # Path to the data txt file on disk. data_path = os.path.join(dirpath, "fra.txt") ``` --- ## Prepare the data ```python # Vectorize the data. input_texts = [] target_texts = [] input_characters = set() target_characters = set() with open(data_path, "r", encoding="utf-8") as f: lines = f.read().split("\n") for line in lines[: min(num_samples, len(lines) - 1)]: input_text, target_text, _ = line.split("\t") # We use "tab" as the "start sequence" character # for the targets, and "\n" as "end sequence" character. 
target_text = "\t" + target_text + "\n" input_texts.append(input_text) target_texts.append(target_text) for char in input_text: if char not in input_characters: input_characters.add(char) for char in target_text: if char not in target_characters: target_characters.add(char) input_characters = sorted(list(input_characters)) target_characters = sorted(list(target_characters)) num_encoder_tokens = len(input_characters) num_decoder_tokens = len(target_characters) max_encoder_seq_length = max([len(txt) for txt in input_texts]) max_decoder_seq_length = max([len(txt) for txt in target_texts]) print("Number of samples:", len(input_texts)) print("Number of unique input tokens:", num_encoder_tokens) print("Number of unique output tokens:", num_decoder_tokens) print("Max sequence length for inputs:", max_encoder_seq_length) print("Max sequence length for outputs:", max_decoder_seq_length) input_token_index = dict([(char, i) for i, char in enumerate(input_characters)]) target_token_index = dict([(char, i) for i, char in enumerate(target_characters)]) encoder_input_data = np.zeros( (len(input_texts), max_encoder_seq_length, num_encoder_tokens), dtype="float32", ) decoder_input_data = np.zeros( (len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype="float32", ) decoder_target_data = np.zeros( (len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype="float32", ) for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)): for t, char in enumerate(input_text): encoder_input_data[i, t, input_token_index[char]] = 1.0 encoder_input_data[i, t + 1 :, input_token_index[" "]] = 1.0 for t, char in enumerate(target_text): # decoder_target_data is ahead of decoder_input_data by one timestep decoder_input_data[i, t, target_token_index[char]] = 1.0 if t > 0: # decoder_target_data will be ahead by one timestep # and will not include the start character. decoder_target_data[i, t - 1, target_token_index[char]] = 1.0 decoder_input_data[i, t + 1 :, target_token_index[" "]] = 1.0 decoder_target_data[i, t:, target_token_index[" "]] = 1.0 ``` <div class="k-default-codeblock"> ``` Number of samples: 10000 Number of unique input tokens: 70 Number of unique output tokens: 93 Max sequence length for inputs: 14 Max sequence length for outputs: 59 ``` </div> --- ## Build the model ```python # Define an input sequence and process it. encoder_inputs = keras.Input(shape=(None, num_encoder_tokens)) encoder = keras.layers.LSTM(latent_dim, return_state=True) encoder_outputs, state_h, state_c = encoder(encoder_inputs) # We discard `encoder_outputs` and only keep the states. encoder_states = [state_h, state_c] # Set up the decoder, using `encoder_states` as initial state. decoder_inputs = keras.Input(shape=(None, num_decoder_tokens)) # We set up our decoder to return full output sequences, # and to return internal states as well. We don't use the # return states in the training model, but we will use them in inference. 
decoder_lstm = keras.layers.LSTM(latent_dim, return_sequences=True, return_state=True) decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states) decoder_dense = keras.layers.Dense(num_decoder_tokens, activation="softmax") decoder_outputs = decoder_dense(decoder_outputs) # Define the model that will turn # `encoder_input_data` & `decoder_input_data` into `decoder_target_data` model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs) ``` --- ## Train the model ```python model.compile( optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"] ) model.fit( [encoder_input_data, decoder_input_data], decoder_target_data, batch_size=batch_size, epochs=epochs, validation_split=0.2, ) # Save model model.save("s2s_model.keras") ``` <div class="k-default-codeblock"> ``` Epoch 1/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 5s 21ms/step - accuracy: 0.7338 - loss: 1.5405 - val_accuracy: 0.7138 - val_loss: 1.0745 Epoch 2/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 2s 10ms/step - accuracy: 0.7470 - loss: 0.9546 - val_accuracy: 0.7188 - val_loss: 1.0219 Epoch 3/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 2s 10ms/step - accuracy: 0.7590 - loss: 0.8659 - val_accuracy: 0.7482 - val_loss: 0.8677 Epoch 4/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 2s 10ms/step - accuracy: 0.7878 - loss: 0.7588 - val_accuracy: 0.7744 - val_loss: 0.7864 Epoch 5/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 2s 10ms/step - accuracy: 0.7957 - loss: 0.7092 - val_accuracy: 0.7904 - val_loss: 0.7256 Epoch 6/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 2s 10ms/step - accuracy: 0.8151 - loss: 0.6375 - val_accuracy: 0.8003 - val_loss: 0.6926 Epoch 7/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 2s 10ms/step - accuracy: 0.8217 - loss: 0.6095 - val_accuracy: 0.8081 - val_loss: 0.6633 Epoch 8/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8299 - loss: 0.5818 - val_accuracy: 0.8146 - val_loss: 0.6355 Epoch 9/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8346 - loss: 0.5632 - val_accuracy: 0.8179 - val_loss: 0.6285 Epoch 10/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8378 - loss: 0.5496 - val_accuracy: 0.8233 - val_loss: 0.6056 Epoch 11/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8450 - loss: 0.5301 - val_accuracy: 0.8300 - val_loss: 0.5913 Epoch 12/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8487 - loss: 0.5148 - val_accuracy: 0.8324 - val_loss: 0.5805 Epoch 13/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8537 - loss: 0.4996 - val_accuracy: 0.8354 - val_loss: 0.5718 Epoch 14/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8570 - loss: 0.4874 - val_accuracy: 0.8388 - val_loss: 0.5535 Epoch 15/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8603 - loss: 0.4749 - val_accuracy: 0.8428 - val_loss: 0.5451 Epoch 16/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8636 - loss: 0.4642 - val_accuracy: 0.8448 - val_loss: 0.5332 Epoch 17/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8658 - loss: 0.4551 - val_accuracy: 0.8473 - val_loss: 0.5260 Epoch 18/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8689 - loss: 0.4443 - val_accuracy: 0.8465 - val_loss: 0.5236 Epoch 19/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8711 - loss: 0.4363 - val_accuracy: 0.8531 - val_loss: 0.5078 Epoch 20/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8731 - loss: 0.4285 - val_accuracy: 0.8508 - val_loss: 0.5121 Epoch 21/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8759 - loss: 0.4180 - val_accuracy: 0.8546 - 
val_loss: 0.5005 Epoch 22/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8788 - loss: 0.4075 - val_accuracy: 0.8550 - val_loss: 0.4981 Epoch 23/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8799 - loss: 0.4043 - val_accuracy: 0.8563 - val_loss: 0.4918 Epoch 24/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8820 - loss: 0.3960 - val_accuracy: 0.8584 - val_loss: 0.4870 Epoch 25/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8830 - loss: 0.3927 - val_accuracy: 0.8605 - val_loss: 0.4794 Epoch 26/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8852 - loss: 0.3862 - val_accuracy: 0.8607 - val_loss: 0.4784 Epoch 27/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8877 - loss: 0.3767 - val_accuracy: 0.8616 - val_loss: 0.4753 Epoch 28/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8890 - loss: 0.3730 - val_accuracy: 0.8633 - val_loss: 0.4685 Epoch 29/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8897 - loss: 0.3695 - val_accuracy: 0.8633 - val_loss: 0.4685 Epoch 30/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8924 - loss: 0.3604 - val_accuracy: 0.8648 - val_loss: 0.4664 Epoch 31/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8946 - loss: 0.3538 - val_accuracy: 0.8658 - val_loss: 0.4613 Epoch 32/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8948 - loss: 0.3526 - val_accuracy: 0.8668 - val_loss: 0.4618 Epoch 33/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8972 - loss: 0.3442 - val_accuracy: 0.8662 - val_loss: 0.4597 Epoch 34/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8969 - loss: 0.3435 - val_accuracy: 0.8672 - val_loss: 0.4594 Epoch 35/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.8996 - loss: 0.3364 - val_accuracy: 0.8673 - val_loss: 0.4569 Epoch 36/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9003 - loss: 0.3340 - val_accuracy: 0.8677 - val_loss: 0.4601 Epoch 37/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9024 - loss: 0.3260 - val_accuracy: 0.8671 - val_loss: 0.4569 Epoch 38/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9048 - loss: 0.3200 - val_accuracy: 0.8685 - val_loss: 0.4540 Epoch 39/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9051 - loss: 0.3187 - val_accuracy: 0.8692 - val_loss: 0.4545 Epoch 40/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9071 - loss: 0.3119 - val_accuracy: 0.8708 - val_loss: 0.4490 Epoch 41/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9085 - loss: 0.3064 - val_accuracy: 0.8706 - val_loss: 0.4506 Epoch 42/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9092 - loss: 0.3061 - val_accuracy: 0.8711 - val_loss: 0.4484 Epoch 43/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9100 - loss: 0.3011 - val_accuracy: 0.8718 - val_loss: 0.4485 Epoch 44/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9101 - loss: 0.3007 - val_accuracy: 0.8716 - val_loss: 0.4509 Epoch 45/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9126 - loss: 0.2920 - val_accuracy: 0.8723 - val_loss: 0.4474 Epoch 46/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9144 - loss: 0.2881 - val_accuracy: 0.8714 - val_loss: 0.4505 Epoch 47/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9155 - loss: 0.2829 - val_accuracy: 0.8727 - val_loss: 0.4487 Epoch 48/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9158 - loss: 0.2816 - val_accuracy: 0.8725 - val_loss: 0.4519 
Epoch 49/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9174 - loss: 0.2763 - val_accuracy: 0.8739 - val_loss: 0.4454 Epoch 50/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9188 - loss: 0.2706 - val_accuracy: 0.8738 - val_loss: 0.4473 Epoch 51/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9199 - loss: 0.2682 - val_accuracy: 0.8716 - val_loss: 0.4542 Epoch 52/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9202 - loss: 0.2665 - val_accuracy: 0.8725 - val_loss: 0.4533 Epoch 53/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9228 - loss: 0.2579 - val_accuracy: 0.8735 - val_loss: 0.4485 Epoch 54/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9230 - loss: 0.2580 - val_accuracy: 0.8735 - val_loss: 0.4507 Epoch 55/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9237 - loss: 0.2546 - val_accuracy: 0.8737 - val_loss: 0.4579 Epoch 56/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9253 - loss: 0.2482 - val_accuracy: 0.8749 - val_loss: 0.4496 Epoch 57/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9264 - loss: 0.2448 - val_accuracy: 0.8755 - val_loss: 0.4503 Epoch 58/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9271 - loss: 0.2426 - val_accuracy: 0.8747 - val_loss: 0.4526 Epoch 59/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9289 - loss: 0.2380 - val_accuracy: 0.8750 - val_loss: 0.4543 Epoch 60/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9292 - loss: 0.2358 - val_accuracy: 0.8745 - val_loss: 0.4563 Epoch 61/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9297 - loss: 0.2339 - val_accuracy: 0.8750 - val_loss: 0.4555 Epoch 62/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9308 - loss: 0.2299 - val_accuracy: 0.8741 - val_loss: 0.4590 Epoch 63/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9324 - loss: 0.2259 - val_accuracy: 0.8761 - val_loss: 0.4611 Epoch 64/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9329 - loss: 0.2247 - val_accuracy: 0.8751 - val_loss: 0.4608 Epoch 65/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9344 - loss: 0.2187 - val_accuracy: 0.8756 - val_loss: 0.4628 Epoch 66/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9354 - loss: 0.2156 - val_accuracy: 0.8750 - val_loss: 0.4664 Epoch 67/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9360 - loss: 0.2136 - val_accuracy: 0.8751 - val_loss: 0.4665 Epoch 68/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9370 - loss: 0.2093 - val_accuracy: 0.8751 - val_loss: 0.4688 Epoch 69/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9385 - loss: 0.2057 - val_accuracy: 0.8747 - val_loss: 0.4757 Epoch 70/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9388 - loss: 0.2039 - val_accuracy: 0.8752 - val_loss: 0.4748 Epoch 71/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9393 - loss: 0.2020 - val_accuracy: 0.8749 - val_loss: 0.4749 Epoch 72/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9403 - loss: 0.1991 - val_accuracy: 0.8756 - val_loss: 0.4754 Epoch 73/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9417 - loss: 0.1946 - val_accuracy: 0.8752 - val_loss: 0.4774 Epoch 74/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9427 - loss: 0.1911 - val_accuracy: 0.8746 - val_loss: 0.4809 Epoch 75/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9430 - loss: 0.1900 - val_accuracy: 0.8746 - val_loss: 0.4809 Epoch 76/100 
125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9443 - loss: 0.1856 - val_accuracy: 0.8749 - val_loss: 0.4836 Epoch 77/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9438 - loss: 0.1867 - val_accuracy: 0.8759 - val_loss: 0.4866 Epoch 78/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9454 - loss: 0.1811 - val_accuracy: 0.8751 - val_loss: 0.4869 Epoch 79/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9462 - loss: 0.1788 - val_accuracy: 0.8767 - val_loss: 0.4899 Epoch 80/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9467 - loss: 0.1777 - val_accuracy: 0.8754 - val_loss: 0.4932 Epoch 81/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9474 - loss: 0.1748 - val_accuracy: 0.8758 - val_loss: 0.4932 Epoch 82/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9481 - loss: 0.1731 - val_accuracy: 0.8751 - val_loss: 0.5027 Epoch 83/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9484 - loss: 0.1708 - val_accuracy: 0.8748 - val_loss: 0.5012 Epoch 84/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9491 - loss: 0.1675 - val_accuracy: 0.8748 - val_loss: 0.5091 Epoch 85/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9514 - loss: 0.1624 - val_accuracy: 0.8744 - val_loss: 0.5082 Epoch 86/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9508 - loss: 0.1627 - val_accuracy: 0.8733 - val_loss: 0.5159 Epoch 87/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9517 - loss: 0.1606 - val_accuracy: 0.8749 - val_loss: 0.5139 Epoch 88/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9519 - loss: 0.1579 - val_accuracy: 0.8746 - val_loss: 0.5189 Epoch 89/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9526 - loss: 0.1565 - val_accuracy: 0.8752 - val_loss: 0.5171 Epoch 90/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9531 - loss: 0.1549 - val_accuracy: 0.8750 - val_loss: 0.5169 Epoch 91/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9543 - loss: 0.1506 - val_accuracy: 0.8740 - val_loss: 0.5182 Epoch 92/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9547 - loss: 0.1497 - val_accuracy: 0.8752 - val_loss: 0.5207 Epoch 93/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9554 - loss: 0.1471 - val_accuracy: 0.8750 - val_loss: 0.5293 Epoch 94/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9560 - loss: 0.1467 - val_accuracy: 0.8749 - val_loss: 0.5298 Epoch 95/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9563 - loss: 0.1449 - val_accuracy: 0.8746 - val_loss: 0.5309 Epoch 96/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9571 - loss: 0.1421 - val_accuracy: 0.8728 - val_loss: 0.5391 Epoch 97/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9577 - loss: 0.1390 - val_accuracy: 0.8755 - val_loss: 0.5318 Epoch 98/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9583 - loss: 0.1375 - val_accuracy: 0.8744 - val_loss: 0.5433 Epoch 99/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9591 - loss: 0.1363 - val_accuracy: 0.8746 - val_loss: 0.5359 Epoch 100/100 125/125 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - accuracy: 0.9592 - loss: 0.1351 - val_accuracy: 0.8738 - val_loss: 0.5482 ``` </div> --- ## Run inference (sampling) 1. encode input and retrieve initial decoder state 2. run one step of decoder with this initial state and a "start of sequence" token as target. Output will be the next target token. 3. 
Repeat with the current target token and current states ```python # Define sampling models # Restore the model and construct the encoder and decoder. model = keras.models.load_model("s2s_model.keras") encoder_inputs = model.input[0] # input_1 encoder_outputs, state_h_enc, state_c_enc = model.layers[2].output # lstm_1 encoder_states = [state_h_enc, state_c_enc] encoder_model = keras.Model(encoder_inputs, encoder_states) decoder_inputs = model.input[1] # input_2 decoder_state_input_h = keras.Input(shape=(latent_dim,)) decoder_state_input_c = keras.Input(shape=(latent_dim,)) decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c] decoder_lstm = model.layers[3] decoder_outputs, state_h_dec, state_c_dec = decoder_lstm( decoder_inputs, initial_state=decoder_states_inputs ) decoder_states = [state_h_dec, state_c_dec] decoder_dense = model.layers[4] decoder_outputs = decoder_dense(decoder_outputs) decoder_model = keras.Model( [decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states ) # Reverse-lookup token index to decode sequences back to # something readable. reverse_input_char_index = dict((i, char) for char, i in input_token_index.items()) reverse_target_char_index = dict((i, char) for char, i in target_token_index.items()) def decode_sequence(input_seq): # Encode the input as state vectors. states_value = encoder_model.predict(input_seq, verbose=0) # Generate empty target sequence of length 1. target_seq = np.zeros((1, 1, num_decoder_tokens)) # Populate the first character of target sequence with the start character. target_seq[0, 0, target_token_index["\t"]] = 1.0 # Sampling loop for a batch of sequences # (to simplify, here we assume a batch of size 1). stop_condition = False decoded_sentence = "" while not stop_condition: output_tokens, h, c = decoder_model.predict( [target_seq] + states_value, verbose=0 ) # Sample a token sampled_token_index = np.argmax(output_tokens[0, -1, :]) sampled_char = reverse_target_char_index[sampled_token_index] decoded_sentence += sampled_char # Exit condition: either hit max length # or find stop character. if sampled_char == "\n" or len(decoded_sentence) > max_decoder_seq_length: stop_condition = True # Update the target sequence (of length 1). target_seq = np.zeros((1, 1, num_decoder_tokens)) target_seq[0, 0, sampled_token_index] = 1.0 # Update states states_value = [h, c] return decoded_sentence ``` You can now generate decoded sentences as such: ```python for seq_index in range(20): # Take one sequence (part of the training set) # for trying out decoding. input_seq = encoder_input_data[seq_index : seq_index + 1] decoded_sentence = decode_sequence(input_seq) print("-") print("Input sentence:", input_texts[seq_index]) print("Decoded sentence:", decoded_sentence) ``` <div class="k-default-codeblock"> ``` - Input sentence: Go. Decoded sentence: Va ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Go. Decoded sentence: Va ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Go. Decoded sentence: Va ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Go. Decoded sentence: Va ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Hi. Decoded sentence: Salut. ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Hi. Decoded sentence: Salut. ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run! Decoded sentence: Fuyez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run! Decoded sentence: Fuyez ! 
``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run! Decoded sentence: Fuyez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run! Decoded sentence: Fuyez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run! Decoded sentence: Fuyez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run! Decoded sentence: Fuyez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run! Decoded sentence: Fuyez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run! Decoded sentence: Fuyez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run. Decoded sentence: Courez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run. Decoded sentence: Courez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run. Decoded sentence: Courez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run. Decoded sentence: Courez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run. Decoded sentence: Courez ! ``` </div> <div class="k-default-codeblock"> ``` - Input sentence: Run. Decoded sentence: Courez ! ``` </div>
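As a small extension (not part of the original example), you can also decode a brand-new English sentence by one-hot encoding it with the same `input_token_index` used for the training data. The `encode_input_sentence` helper below is hypothetical; characters that never appeared in the training inputs are simply mapped to the space character here, which is one possible (lossy) choice.

```python
def encode_input_sentence(sentence):
    # One-hot encode a raw string the same way as `encoder_input_data` above.
    encoded = np.zeros(
        (1, max_encoder_seq_length, num_encoder_tokens), dtype="float32"
    )
    for t, char in enumerate(sentence[:max_encoder_seq_length]):
        encoded[0, t, input_token_index.get(char, input_token_index[" "])] = 1.0
    # Pad the remaining timesteps with the space character, as in training.
    encoded[0, len(sentence):, input_token_index[" "]] = 1.0
    return encoded


print("Decoded sentence:", decode_sequence(encode_input_sentence("Wait!")))
```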
keras-io/examples/nlp/md/lstm_seq2seq.md/0
{ "file_path": "keras-io/examples/nlp/md/lstm_seq2seq.md", "repo_id": "keras-io", "token_count": 11768 }
111
""" Title: Question Answering with Hugging Face Transformers Author: Matthew Carrigan and Merve Noyan Date created: 13/01/2022 Last modified: 13/01/2022 Description: Question answering implementation using Keras and Hugging Face Transformers. Accelerator: GPU """ """ ## Introduction to Question Answering Question answering is a common NLP task with several variants. In some variants, the task is multiple-choice: A list of possible answers are supplied with each question, and the model simply needs to return a probability distribution over the options. A more challenging variant of question answering, which is more applicable to real-life tasks, is when the options are not provided. Instead, the model is given an input document -- called context -- and a question about the document, and it must extract the span of text in the document that contains the answer. In this case, the model is not computing a probability distribution over answers, but two probability distributions over the tokens in the document text, representing the start and end of the span containing the answer. This variant is called "extractive question answering". Extractive question answering is a very challenging NLP task, and the dataset size required to train such a model from scratch when the questions and answers are natural language is prohibitively huge. As a result, question answering (like almost all NLP tasks) benefits enormously from starting from a strong pretrained foundation model - starting from a strong pretrained language model can reduce the dataset size required to reach a given accuracy by multiple orders of magnitude, enabling you to reach very strong performance with surprisingly reasonable datasets. Starting with a pretrained model adds difficulties, though - where do you get the model from? How do you ensure that your input data is preprocessed and tokenized the same way as the original model? How do you modify the model to add an output head that matches your task of interest? In this example, we'll show you how to load a model from the Hugging Face [🤗Transformers](https://github.com/huggingface/transformers) library to tackle this challenge. We'll also load a benchmark question answering dataset from the [🤗Datasets](https://github.com/huggingface/datasets) library - this is another open-source repository containing a wide range of datasets across many modalities, from NLP to vision and beyond. Note, though, that there is no requirement that these libraries must be used with each other. If you want to train a model from [🤗Transformers](https://github.com/huggingface/transformers) on your own data, or you want to load data from [🤗 Datasets](https://github.com/huggingface/datasets) and train your own entirely unrelated models with it, that is of course possible (and highly encouraged!) """ """ ## Installing the requirements """ """shell pip install git+https://github.com/huggingface/transformers.git pip install datasets pip install huggingface-hub """ """ ## Loading the dataset """ """ We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the SQUAD question answering dataset using `load_dataset()`. """ from datasets import load_dataset datasets = load_dataset("squad") """ The `datasets` object itself is a `DatasetDict`, which contains one key for the training, validation and test set. We can see the training, validation and test sets all have a column for the context, the question and the answers to those questions. 
To access an actual element, you need to select a split first, then give an index. We can see the answers are indicated by their start position in the text and their full text, which is a substring of the context as we mentioned above. Let's take a look at what a single training example looks like. """ print(datasets["train"][0]) """ ## Preprocessing the training data """ """ Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires. To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure: - We get a tokenizer that corresponds to the model architecture we want to use. - We download the vocabulary used when pretraining this specific checkpoint. That vocabulary will be cached, so it's not downloaded again the next time we run the cell. The `from_pretrained()` method expects the name of a model. If you're unsure which model to pick, don't panic! The list of models to choose from can be bewildering, but in general there is a simple tradeoff: Larger models are slower and consume more memory, but usually yield slightly better final accuracies after fine-tuning. For this example, we have chosen the (relatively) lightweight `"distilbert"`, a smaller, distilled version of the famous BERT language model. If you absolutely must have the highest possible accuracy for an important task, though, and you have the GPU memory (and free time) to handle it, you may prefer to use a larger model, such as `"roberta-large"`. Newer and even larger models than `"roberta"` exist in [🤗 Transformers](https://github.com/huggingface/transformers), but we leave the task of finding and training them as an exercise to readers who are either particularly masochistic or have 40GB of VRAM to throw around. """ from transformers import AutoTokenizer model_checkpoint = "distilbert-base-cased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) """ Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), but you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. One specific issue for the preprocessing in question answering is how to deal with very long documents. We usually truncate them in other tasks, when they are longer than the model maximum sentence length, but here, removing part of the the context might result in losing the answer we are looking for. To deal with this, we will allow one (long) example in our dataset to give several input features, each of length shorter than the maximum length of the model (or the one we set as a hyper-parameter). Also, just in case the answer lies at the point we split a long context, we allow some overlap between the features we generate controlled by the hyper-parameter `doc_stride`. If we simply truncate with a fixed size (`max_length`), we will lose information. We want to avoid truncating the question, and instead only truncate the context to ensure the task remains solvable. 
To do that, we'll set `truncation` to `"only_second"`, so that only the second sequence (the context) in each pair is truncated. To get the list of features capped by the maximum length, we need to set `return_overflowing_tokens` to True and pass the `doc_stride` to `stride`. To see which feature of the original context contain the answer, we can return `"offset_mapping"`. """ max_length = 384 # The maximum length of a feature (question and context) doc_stride = ( 128 # The authorized overlap between two part of the context when splitting ) # it is needed. """ In the case of impossible answers (the answer is in another feature given by an example with a long context), we set the cls index for both the start and end position. We could also simply discard those examples from the training set if the flag `allow_impossible_answers` is `False`. Since the preprocessing is already complex enough as it is, we've kept is simple for this part. """ def prepare_train_features(examples): # Tokenize our examples with truncation and padding, but keep the overflows using a # stride. This results in one example possible giving several features when a context is long, # each of those features having a context that overlaps a bit the context of the previous # feature. examples["question"] = [q.lstrip() for q in examples["question"]] examples["context"] = [c.lstrip() for c in examples["context"]] tokenized_examples = tokenizer( examples["question"], examples["context"], truncation="only_second", max_length=max_length, stride=doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a # map from a feature to its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # The offset mappings will give us a map from token to character position in the original # context. This will help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop("offset_mapping") # Let's label those examples! tokenized_examples["start_positions"] = [] tokenized_examples["end_positions"] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples["input_ids"][i] cls_index = input_ids.index(tokenizer.cls_token_id) # Grab the sequence corresponding to that example (to know what is the context and what # is the question). sequence_ids = tokenized_examples.sequence_ids(i) # One example can give several spans, this is the index of the example containing this # span of text. sample_index = sample_mapping[i] answers = examples["answers"][sample_index] # If no answers are given, set the cls_index as answer. if len(answers["answer_start"]) == 0: tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Start/end character index of the answer in the text. start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != 1: token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != 1: token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the # CLS index). 
if not ( offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char ): tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Otherwise move the token_start_index and token_end_index to the two ends of the # answer. # Note: we could go after the last offset if the answer is the last word (edge # case). while ( token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char ): token_start_index += 1 tokenized_examples["start_positions"].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples["end_positions"].append(token_end_index + 1) return tokenized_examples """ To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map()` method of our `Dataset` object, which will apply the function on all the elements of. We'll use `batched=True` to encode the texts in batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. We also use the `remove_columns` argument to remove the columns that existed before tokenization was applied - this ensures that the only features remaining are the ones we actually want to pass to our model. """ tokenized_datasets = datasets.map( prepare_train_features, batched=True, remove_columns=datasets["train"].column_names, num_proc=3, ) """ Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map()` to not use the cached files and force the preprocessing to be applied again. Because all our data has been padded or truncated to the same length, and it is not too large, we can now simply convert it to a dict of numpy arrays, ready for training. Although we will not use it here, 🤗 Datasets have a `to_tf_dataset()` helper method designed to assist you when the data cannot be easily converted to arrays, such as when it has variable sequence lengths, or is too large to fit in memory. This method wraps a `tf.data.Dataset` around the underlying 🤗 Dataset, streaming samples from the underlying dataset and batching them on the fly, thus minimizing wasted memory and computation from unnecessary padding. If your use-case requires it, please see the [docs](https://huggingface.co/docs/transformers/custom_datasets#finetune-with-tensorflow) on to_tf_dataset and data collator for an example. If not, feel free to follow this example and simply convert to dicts! """ train_set = tokenized_datasets["train"].with_format("numpy")[ : ] # Load the whole dataset as a dict of numpy arrays validation_set = tokenized_datasets["validation"].with_format("numpy")[:] """ ## Fine-tuning the model """ """ That was a lot of work! But now that our data is ready, everything is going to run very smoothly. First, we download the pretrained model and fine-tune it. Since our task is question answering, we use the `TFAutoModelForQuestionAnswering` class. 
Like with the tokenizer, the `from_pretrained()` method will download and cache the model for us: """ from transformers import TFAutoModelForQuestionAnswering model = TFAutoModelForQuestionAnswering.from_pretrained(model_checkpoint) """ The warning is telling us we are throwing away some weights and newly initializing some others. Don't panic! This is absolutely normal. Recall that models like BERT and Distilbert are trained on a **language modeling** task, but we're loading the model as a `TFAutoModelForQuestionAnswering`, which means we want the model to perform a **question answering** task. This change requires the final output layer or "head" to be removed and replaced with a new head suited for the new task. The `from_pretrained` method will handle all of this for us, and the warning is there simply to remind us that some model surgery has been performed, and that the model will not generate useful predictions until the newly-initialized layers have been fine-tuned on some data. Next, we can create an optimizer and specify a loss function. You can usually get slightly better performance by using learning rate decay and decoupled weight decay, but for the purposes of this example the standard `Adam` optimizer will work fine. Note, however, that when fine-tuning a pretrained transformer model you will generally want to use a low learning rate! We find the best results are obtained with values in the range 1e-5 to 1e-4, and training may completely diverge at the default Adam learning rate of 1e-3. """ import tensorflow as tf from tensorflow import keras optimizer = keras.optimizers.Adam(learning_rate=5e-5) """ And now we just compile and fit the model. As a convenience, all 🤗 Transformers models come with a default loss which matches their output head, although you're of course free to use your own. Because the built-in loss is computed internally during the forward pass, when using it you may find that some Keras metrics misbehave or give unexpected outputs. This is an area of very active development in 🤗 Transformers, though, so hopefully we'll have a good solution to that issue soon! For now, though, let's use the built-in loss without any metrics. To get the built-in loss, simply leave out the `loss` argument to `compile`. """ # Optionally uncomment the next line for float16 training keras.mixed_precision.set_global_policy("mixed_float16") model.compile(optimizer=optimizer) """ And now we can train our model. Note that we're not passing separate labels - the labels are keys in the input dict, to make them visible to the model during the forward pass so it can compute the built-in loss. """ model.fit(train_set, validation_data=validation_set, epochs=1) """ And we're done! Let's give it a try, using some text from the keras.io frontpage: """ context = """Keras is an API designed for human beings, not machines. Keras follows best practices for reducing cognitive load: it offers consistent & simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear & actionable error messages. It also has extensive documentation and developer guides. """ question = "What is Keras?" inputs = tokenizer([context], [question], return_tensors="np") outputs = model(inputs) start_position = tf.argmax(outputs.start_logits, axis=1) end_position = tf.argmax(outputs.end_logits, axis=1) print(int(start_position), int(end_position[0])) """ Looks like our model thinks the answer is the span from tokens 1 to 12 (inclusive). No prizes for guessing which tokens those are! 
""" answer = inputs["input_ids"][0, int(start_position) : int(end_position) + 1] print(answer) """ And now we can use the `tokenizer.decode()` method to turn those token IDs back into text: """ print(tokenizer.decode(answer)) """ And that's it! Remember that this example was designed to be quick to run rather than state-of-the-art, and the model trained here will certainly make mistakes. If you use a larger model to base your training on, and you take time to tune the hyperparameters appropriately, you'll find that you can achieve much better losses (and correspondingly more accurate answers). Finally, you can push the model to the HuggingFace Hub. By pushing this model you will have: - A nice model card generated for you containing hyperparameters and metrics of the model training, - A web API for inference calls, - A widget in the model page that enables others to test your model. This model is currently hosted [here](https://huggingface.co/keras-io/transformers-qa) and we have prepared a separate neat UI for you [here](https://huggingface.co/spaces/keras-io/keras-qa). ```python model.push_to_hub("transformers-qa", organization="keras-io") tokenizer.push_to_hub("transformers-qa", organization="keras-io") ``` If you have non-Transformers based Keras models, you can also push them with `push_to_hub_keras`. You can use `from_pretrained_keras` to load easily. ```python from huggingface_hub.keras_mixin import push_to_hub_keras push_to_hub_keras( model=model, repo_url="https://huggingface.co/your-username/your-awesome-model" ) from_pretrained_keras("your-username/your-awesome-model") # load your model ``` """
keras-io/examples/nlp/question_answering.py/0
{ "file_path": "keras-io/examples/nlp/question_answering.py", "repo_id": "keras-io", "token_count": 5852 }
112
<jupyter_start><jupyter_text>Deep Q-Learning for Atari Breakout **Author:** [Jacob Chapman](https://twitter.com/jacoblchapman) and [Mathias Lechner](https://twitter.com/MLech20) **Date created:** 2020/05/23 **Last modified:** 2020/06/17 **Description:** Play Atari Breakout with a Deep Q-Network. Introduction This script shows an implementation of Deep Q-Learning on the `BreakoutNoFrameskip-v4` environment. Deep Q-Learning As an agent takes actions and moves through an environment, it learns to map the observed state of the environment to an action. An agent will choose an action in a given state based on a "Q-value", an estimate of the expected long-term reward of taking that action in that state. A Q-Learning agent learns to perform its task such that the recommended action maximizes the potential future rewards. This method is considered an "Off-Policy" method, meaning its Q values are updated assuming that the best action was chosen, even if the best action was not chosen. Atari Breakout In this environment, a board moves along the bottom of the screen, returning a ball that will destroy blocks at the top of the screen. The aim of the game is to remove all blocks and break out of the level. The agent must learn to control the board by moving left and right, returning the ball and removing all the blocks without the ball passing the board. Note The Deepmind paper trained for "a total of 50 million frames (that is, around 38 days of game experience in total)". However, this script will give good results at around 10 million frames, which are processed in less than 24 hours on a modern machine. References - [Q-Learning](https://link.springer.com/content/pdf/10.1007/BF00992698.pdf) - [Deep Q-Learning](https://deepmind.com/research/publications/human-level-control-through-deep-reinforcement-learning) Setup<jupyter_code>from baselines.common.atari_wrappers import make_atari, wrap_deepmind
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Configuration parameters for the whole setup
seed = 42
gamma = 0.99  # Discount factor for past rewards
epsilon = 1.0  # Epsilon greedy parameter
epsilon_min = 0.1  # Minimum epsilon greedy parameter
epsilon_max = 1.0  # Maximum epsilon greedy parameter
epsilon_interval = (
    epsilon_max - epsilon_min
)  # Rate at which to reduce chance of random action being taken
batch_size = 32  # Size of batch taken from replay buffer
max_steps_per_episode = 10000

# Use the Baseline Atari environment because of Deepmind helper functions
env = make_atari("BreakoutNoFrameskip-v4")
# Wrap the frames, grayscale, stack four frames and scale to a smaller ratio
env = wrap_deepmind(env, frame_stack=True, scale=True)
env.seed(seed)<jupyter_output><empty_output><jupyter_text>Implement the Deep Q-Network This network learns an approximation of the Q-table, which is a mapping between the states and actions that an agent will take. For every state we'll have four actions that can be taken. The environment provides the state, and the action is chosen by selecting the larger of the four Q-values predicted in the output layer.<jupyter_code>num_actions = 4


def create_q_model():
    # Network defined by the Deepmind paper
    inputs = layers.Input(shape=(84, 84, 4,))

    # Convolutions on the frames on the screen
    layer1 = layers.Conv2D(32, 8, strides=4, activation="relu")(inputs)
    layer2 = layers.Conv2D(64, 4, strides=2, activation="relu")(layer1)
    layer3 = layers.Conv2D(64, 3, strides=1, activation="relu")(layer2)

    layer4 = layers.Flatten()(layer3)

    layer5 = layers.Dense(512, activation="relu")(layer4)
    action = layers.Dense(num_actions, activation="linear")(layer5)

    return keras.Model(inputs=inputs, outputs=action)


# The first model makes the predictions for Q-values which are used to
# make an action.
model = create_q_model()
# Build a target model for the prediction of future rewards.
# The weights of a target model get updated every 10000 steps thus when the
# loss between the Q-values is calculated the target Q-value is stable.
model_target = create_q_model()<jupyter_output><empty_output><jupyter_text>Train<jupyter_code># In the Deepmind paper they use RMSProp, however the Adam optimizer
# improves training time
optimizer = keras.optimizers.Adam(learning_rate=0.00025, clipnorm=1.0)

# Experience replay buffers
action_history = []
state_history = []
state_next_history = []
rewards_history = []
done_history = []
episode_reward_history = []
running_reward = 0
episode_count = 0
frame_count = 0
# Number of frames to take random action and observe output
epsilon_random_frames = 50000
# Number of frames for exploration
epsilon_greedy_frames = 1000000.0
# Maximum replay length
# Note: The Deepmind paper suggests 1000000 however this causes memory issues
max_memory_length = 100000
# Train the model after 4 actions
update_after_actions = 4
# How often to update the target network
update_target_network = 10000
# Using huber loss for stability
loss_function = keras.losses.Huber()

while True:  # Run until solved
    state = np.array(env.reset())
    episode_reward = 0

    for timestep in range(1, max_steps_per_episode):
        # env.render(); Adding this line would show the attempts
        # of the agent in a pop up window.
frame_count += 1 # Use epsilon-greedy for exploration if frame_count < epsilon_random_frames or epsilon > np.random.rand(1)[0]: # Take random action action = np.random.choice(num_actions) else: # Predict action Q-values # From environment state state_tensor = tf.convert_to_tensor(state) state_tensor = tf.expand_dims(state_tensor, 0) action_probs = model(state_tensor, training=False) # Take best action action = tf.argmax(action_probs[0]).numpy() # Decay probability of taking random action epsilon -= epsilon_interval / epsilon_greedy_frames epsilon = max(epsilon, epsilon_min) # Apply the sampled action in our environment state_next, reward, done, _ = env.step(action) state_next = np.array(state_next) episode_reward += reward # Save actions and states in replay buffer action_history.append(action) state_history.append(state) state_next_history.append(state_next) done_history.append(done) rewards_history.append(reward) state = state_next # Update every fourth frame and once batch size is over 32 if frame_count % update_after_actions == 0 and len(done_history) > batch_size: # Get indices of samples for replay buffers indices = np.random.choice(range(len(done_history)), size=batch_size) # Using list comprehension to sample from replay buffer state_sample = np.array([state_history[i] for i in indices]) state_next_sample = np.array([state_next_history[i] for i in indices]) rewards_sample = [rewards_history[i] for i in indices] action_sample = [action_history[i] for i in indices] done_sample = tf.convert_to_tensor( [float(done_history[i]) for i in indices] ) # Build the updated Q-values for the sampled future states # Use the target model for stability future_rewards = model_target.predict(state_next_sample) # Q value = reward + discount factor * expected future reward updated_q_values = rewards_sample + gamma * tf.reduce_max( future_rewards, axis=1 ) # If final frame set the last value to -1 updated_q_values = updated_q_values * (1 - done_sample) - done_sample # Create a mask so we only calculate loss on the updated Q-values masks = tf.one_hot(action_sample, num_actions) with tf.GradientTape() as tape: # Train the model on the states and updated Q-values q_values = model(state_sample) # Apply the masks to the Q-values to get the Q-value for action taken q_action = tf.reduce_sum(tf.multiply(q_values, masks), axis=1) # Calculate loss between new Q-value and old Q-value loss = loss_function(updated_q_values, q_action) # Backpropagation grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) if frame_count % update_target_network == 0: # update the the target network with new weights model_target.set_weights(model.get_weights()) # Log details template = "running reward: {:.2f} at episode {}, frame count {}" print(template.format(running_reward, episode_count, frame_count)) # Limit the state and reward history if len(rewards_history) > max_memory_length: del rewards_history[:1] del state_history[:1] del state_next_history[:1] del action_history[:1] del done_history[:1] if done: break # Update running reward to check condition for solving episode_reward_history.append(episode_reward) if len(episode_reward_history) > 100: del episode_reward_history[:1] running_reward = np.mean(episode_reward_history) episode_count += 1 if running_reward > 40: # Condition to consider the task solved print("Solved at episode {}!".format(episode_count)) break<jupyter_output><empty_output>
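<jupyter_text>Optional: a minimal sketch (not part of the original example) of how one could roll out the trained policy greedily, i.e. with exploration disabled, to sanity-check its behavior. It only assumes the `env`, `model` and `max_steps_per_episode` objects defined above.<jupyter_code># Hedged sketch: play one episode with the greedy policy (no epsilon-greedy exploration).
state = np.array(env.reset())
greedy_reward = 0.0
for _ in range(1, max_steps_per_episode):
    state_tensor = tf.expand_dims(tf.convert_to_tensor(state), 0)
    q_values = model(state_tensor, training=False)
    action = int(tf.argmax(q_values[0]).numpy())  # always take the highest-Q action
    state, reward, done, _ = env.step(action)
    state = np.array(state)
    greedy_reward += reward
    if done:
        break
print("Greedy episode reward:", greedy_reward)<jupyter_output><empty_output>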
keras-io/examples/rl/ipynb/deep_q_network_breakout.ipynb/0
{ "file_path": "keras-io/examples/rl/ipynb/deep_q_network_breakout.ipynb", "repo_id": "keras-io", "token_count": 3406 }
113
<jupyter_start><jupyter_text>Structured data classification from scratch**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2020/06/09**Last modified:** 2020/06/09**Description:** Binary classification of structured data including numerical and categorical features. IntroductionThis example demonstrates how to do structured data classification, starting from a rawCSV file. Our data includes both numerical and categorical features. We will use Keraspreprocessing layers to normalize the numerical features and vectorize the categoricalones.Note that this example should be run with TensorFlow 2.5 or higher. The dataset[Our dataset](https://archive.ics.uci.edu/ml/datasets/heart+Disease) is provided by theCleveland Clinic Foundation for Heart Disease.It's a CSV file with 303 rows. Each row contains information about a patient (a**sample**), and each column describes an attribute of the patient (a **feature**). Weuse the features to predict whether a patient has a heart disease (**binaryclassification**).Here's the description of each feature:Column| Description| Feature Type------------|--------------------|----------------------Age | Age in years | NumericalSex | (1 = male; 0 = female) | CategoricalCP | Chest pain type (0, 1, 2, 3, 4) | CategoricalTrestbpd | Resting blood pressure (in mm Hg on admission) | NumericalChol | Serum cholesterol in mg/dl | NumericalFBS | fasting blood sugar in 120 mg/dl (1 = true; 0 = false) | CategoricalRestECG | Resting electrocardiogram results (0, 1, 2) | CategoricalThalach | Maximum heart rate achieved | NumericalExang | Exercise induced angina (1 = yes; 0 = no) | CategoricalOldpeak | ST depression induced by exercise relative to rest | NumericalSlope | Slope of the peak exercise ST segment | NumericalCA | Number of major vessels (0-3) colored by fluoroscopy | Both numerical & categoricalThal | 3 = normal; 6 = fixed defect; 7 = reversible defect | CategoricalTarget | Diagnosis of heart disease (1 = true; 0 = false) | Target Setup<jupyter_code>import os # TensorFlow is the only backend that supports string inputs. 
os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf import pandas as pd import keras from keras import layers<jupyter_output><empty_output><jupyter_text>Preparing the dataLet's download the data and load it into a Pandas dataframe:<jupyter_code>file_url = "http://storage.googleapis.com/download.tensorflow.org/data/heart.csv" dataframe = pd.read_csv(file_url)<jupyter_output><empty_output><jupyter_text>The dataset includes 303 samples with 14 columns per sample (13 features, plus the targetlabel):<jupyter_code>dataframe.shape<jupyter_output><empty_output><jupyter_text>Here's a preview of a few samples:<jupyter_code>dataframe.head()<jupyter_output><empty_output><jupyter_text>The last column, "target", indicates whether the patient has a heart disease (1) or not(0).Let's split the data into a training and validation set:<jupyter_code>val_dataframe = dataframe.sample(frac=0.2, random_state=1337) train_dataframe = dataframe.drop(val_dataframe.index) print( f"Using {len(train_dataframe)} samples for training " f"and {len(val_dataframe)} for validation" )<jupyter_output><empty_output><jupyter_text>Let's generate `tf.data.Dataset` objects for each dataframe:<jupyter_code>def dataframe_to_dataset(dataframe): dataframe = dataframe.copy() labels = dataframe.pop("target") ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels)) ds = ds.shuffle(buffer_size=len(dataframe)) return ds train_ds = dataframe_to_dataset(train_dataframe) val_ds = dataframe_to_dataset(val_dataframe)<jupyter_output><empty_output><jupyter_text>Each `Dataset` yields a tuple `(input, target)` where `input` is a dictionary of featuresand `target` is the value `0` or `1`:<jupyter_code>for x, y in train_ds.take(1): print("Input:", x) print("Target:", y)<jupyter_output><empty_output><jupyter_text>Let's batch the datasets:<jupyter_code>train_ds = train_ds.batch(32) val_ds = val_ds.batch(32)<jupyter_output><empty_output><jupyter_text>Feature preprocessing with Keras layersThe following features are categorical features encoded as integers:- `sex`- `cp`- `fbs`- `restecg`- `exang`- `ca`We will encode these features using **one-hot encoding**. We have two optionshere: - Use `CategoryEncoding()`, which requires knowing the range of input values and will error on input outside the range. - Use `IntegerLookup()` which will build a lookup table for inputs and reserve an output index for unkown input values.For this example, we want a simple solution that will handle out of range inputsat inference, so we will use `IntegerLookup()`.We also have a categorical feature encoded as a string: `thal`. 
We will create anindex of all possible features and encode output using the `StringLookup()` layer.Finally, the following feature are continuous numerical features:- `age`- `trestbps`- `chol`- `thalach`- `oldpeak`- `slope`For each of these features, we will use a `Normalization()` layer to make sure the meanof each feature is 0 and its standard deviation is 1.Below, we define 3 utility functions to do the operations:- `encode_numerical_feature` to apply featurewise normalization to numerical features.- `encode_string_categorical_feature` to first turn string inputs into integer indices,then one-hot encode these integer indices.- `encode_integer_categorical_feature` to one-hot encode integer categorical features.<jupyter_code>def encode_numerical_feature(feature, name, dataset): # Create a Normalization layer for our feature normalizer = layers.Normalization() # Prepare a Dataset that only yields our feature feature_ds = dataset.map(lambda x, y: x[name]) feature_ds = feature_ds.map(lambda x: tf.expand_dims(x, -1)) # Learn the statistics of the data normalizer.adapt(feature_ds) # Normalize the input feature encoded_feature = normalizer(feature) return encoded_feature def encode_categorical_feature(feature, name, dataset, is_string): lookup_class = layers.StringLookup if is_string else layers.IntegerLookup # Create a lookup layer which will turn strings into integer indices lookup = lookup_class(output_mode="binary") # Prepare a Dataset that only yields our feature feature_ds = dataset.map(lambda x, y: x[name]) feature_ds = feature_ds.map(lambda x: tf.expand_dims(x, -1)) # Learn the set of possible string values and assign them a fixed integer index lookup.adapt(feature_ds) # Turn the string input into integer indices encoded_feature = lookup(feature) return encoded_feature<jupyter_output><empty_output><jupyter_text>Build a modelWith this done, we can create our end-to-end model:<jupyter_code># Categorical features encoded as integers sex = keras.Input(shape=(1,), name="sex", dtype="int64") cp = keras.Input(shape=(1,), name="cp", dtype="int64") fbs = keras.Input(shape=(1,), name="fbs", dtype="int64") restecg = keras.Input(shape=(1,), name="restecg", dtype="int64") exang = keras.Input(shape=(1,), name="exang", dtype="int64") ca = keras.Input(shape=(1,), name="ca", dtype="int64") # Categorical feature encoded as string thal = keras.Input(shape=(1,), name="thal", dtype="string") # Numerical features age = keras.Input(shape=(1,), name="age") trestbps = keras.Input(shape=(1,), name="trestbps") chol = keras.Input(shape=(1,), name="chol") thalach = keras.Input(shape=(1,), name="thalach") oldpeak = keras.Input(shape=(1,), name="oldpeak") slope = keras.Input(shape=(1,), name="slope") all_inputs = [ sex, cp, fbs, restecg, exang, ca, thal, age, trestbps, chol, thalach, oldpeak, slope, ] # Integer categorical features sex_encoded = encode_categorical_feature(sex, "sex", train_ds, False) cp_encoded = encode_categorical_feature(cp, "cp", train_ds, False) fbs_encoded = encode_categorical_feature(fbs, "fbs", train_ds, False) restecg_encoded = encode_categorical_feature(restecg, "restecg", train_ds, False) exang_encoded = encode_categorical_feature(exang, "exang", train_ds, False) ca_encoded = encode_categorical_feature(ca, "ca", train_ds, False) # String categorical features thal_encoded = encode_categorical_feature(thal, "thal", train_ds, True) # Numerical features age_encoded = encode_numerical_feature(age, "age", train_ds) trestbps_encoded = encode_numerical_feature(trestbps, "trestbps", train_ds) 
chol_encoded = encode_numerical_feature(chol, "chol", train_ds) thalach_encoded = encode_numerical_feature(thalach, "thalach", train_ds) oldpeak_encoded = encode_numerical_feature(oldpeak, "oldpeak", train_ds) slope_encoded = encode_numerical_feature(slope, "slope", train_ds) all_features = layers.concatenate( [ sex_encoded, cp_encoded, fbs_encoded, restecg_encoded, exang_encoded, slope_encoded, ca_encoded, thal_encoded, age_encoded, trestbps_encoded, chol_encoded, thalach_encoded, oldpeak_encoded, ] ) x = layers.Dense(32, activation="relu")(all_features) x = layers.Dropout(0.5)(x) output = layers.Dense(1, activation="sigmoid")(x) model = keras.Model(all_inputs, output) model.compile("adam", "binary_crossentropy", metrics=["accuracy"])<jupyter_output><empty_output><jupyter_text>Let's visualize our connectivity graph:<jupyter_code># `rankdir='LR'` is to make the graph horizontal. keras.utils.plot_model(model, show_shapes=True, rankdir="LR")<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>model.fit(train_ds, epochs=50, validation_data=val_ds)<jupyter_output><empty_output><jupyter_text>We quickly get to 80% validation accuracy. Inference on new dataTo get a prediction for a new sample, you can simply call `model.predict()`. There arejust two things you need to do:1. wrap scalars into a list so as to have a batch dimension (models only process batchesof data, not single samples)2. Call `convert_to_tensor` on each feature<jupyter_code>sample = { "age": 60, "sex": 1, "cp": 1, "trestbps": 145, "chol": 233, "fbs": 1, "restecg": 2, "thalach": 150, "exang": 0, "oldpeak": 2.3, "slope": 3, "ca": 0, "thal": "fixed", } input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()} predictions = model.predict(input_dict) print( f"This particular patient had a {100 * predictions[0][0]:.1f} " "percent probability of having a heart disease, " "as evaluated by our model." )<jupyter_output><empty_output>
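<jupyter_text>Optional: a small sketch that is not part of the original example. The predicted probability can be turned into a hard class decision with a simple threshold; the 0.5 cut-off below is just an illustrative choice.<jupyter_code># Hedged sketch: convert the predicted probability into a 0/1 decision.
predicted_class = int(predictions[0][0] >= 0.5)
print("Predicted class (1 = heart disease):", predicted_class)<jupyter_output><empty_output>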
keras-io/examples/structured_data/ipynb/structured_data_classification_from_scratch.ipynb/0
{ "file_path": "keras-io/examples/structured_data/ipynb/structured_data_classification_from_scratch.ipynb", "repo_id": "keras-io", "token_count": 3577 }
114
""" Title: Structured data classification from scratch Author: [fchollet](https://twitter.com/fchollet) Date created: 2020/06/09 Last modified: 2020/06/09 Description: Binary classification of structured data including numerical and categorical features. Accelerator: GPU """ """ ## Introduction This example demonstrates how to do structured data classification, starting from a raw CSV file. Our data includes both numerical and categorical features. We will use Keras preprocessing layers to normalize the numerical features and vectorize the categorical ones. Note that this example should be run with TensorFlow 2.5 or higher. ### The dataset [Our dataset](https://archive.ics.uci.edu/ml/datasets/heart+Disease) is provided by the Cleveland Clinic Foundation for Heart Disease. It's a CSV file with 303 rows. Each row contains information about a patient (a **sample**), and each column describes an attribute of the patient (a **feature**). We use the features to predict whether a patient has a heart disease (**binary classification**). Here's the description of each feature: Column| Description| Feature Type ------------|--------------------|---------------------- Age | Age in years | Numerical Sex | (1 = male; 0 = female) | Categorical CP | Chest pain type (0, 1, 2, 3, 4) | Categorical Trestbpd | Resting blood pressure (in mm Hg on admission) | Numerical Chol | Serum cholesterol in mg/dl | Numerical FBS | fasting blood sugar in 120 mg/dl (1 = true; 0 = false) | Categorical RestECG | Resting electrocardiogram results (0, 1, 2) | Categorical Thalach | Maximum heart rate achieved | Numerical Exang | Exercise induced angina (1 = yes; 0 = no) | Categorical Oldpeak | ST depression induced by exercise relative to rest | Numerical Slope | Slope of the peak exercise ST segment | Numerical CA | Number of major vessels (0-3) colored by fluoroscopy | Both numerical & categorical Thal | 3 = normal; 6 = fixed defect; 7 = reversible defect | Categorical Target | Diagnosis of heart disease (1 = true; 0 = false) | Target """ """ ## Setup """ import os # TensorFlow is the only backend that supports string inputs. os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf import pandas as pd import keras from keras import layers """ ## Preparing the data Let's download the data and load it into a Pandas dataframe: """ file_url = "http://storage.googleapis.com/download.tensorflow.org/data/heart.csv" dataframe = pd.read_csv(file_url) """ The dataset includes 303 samples with 14 columns per sample (13 features, plus the target label): """ dataframe.shape """ Here's a preview of a few samples: """ dataframe.head() """ The last column, "target", indicates whether the patient has a heart disease (1) or not (0). 
Let's split the data into a training and validation set: """ val_dataframe = dataframe.sample(frac=0.2, random_state=1337) train_dataframe = dataframe.drop(val_dataframe.index) print( f"Using {len(train_dataframe)} samples for training " f"and {len(val_dataframe)} for validation" ) """ Let's generate `tf.data.Dataset` objects for each dataframe: """ def dataframe_to_dataset(dataframe): dataframe = dataframe.copy() labels = dataframe.pop("target") ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels)) ds = ds.shuffle(buffer_size=len(dataframe)) return ds train_ds = dataframe_to_dataset(train_dataframe) val_ds = dataframe_to_dataset(val_dataframe) """ Each `Dataset` yields a tuple `(input, target)` where `input` is a dictionary of features and `target` is the value `0` or `1`: """ for x, y in train_ds.take(1): print("Input:", x) print("Target:", y) """ Let's batch the datasets: """ train_ds = train_ds.batch(32) val_ds = val_ds.batch(32) """ ## Feature preprocessing with Keras layers The following features are categorical features encoded as integers: - `sex` - `cp` - `fbs` - `restecg` - `exang` - `ca` We will encode these features using **one-hot encoding**. We have two options here: - Use `CategoryEncoding()`, which requires knowing the range of input values and will error on input outside the range. - Use `IntegerLookup()` which will build a lookup table for inputs and reserve an output index for unkown input values. For this example, we want a simple solution that will handle out of range inputs at inference, so we will use `IntegerLookup()`. We also have a categorical feature encoded as a string: `thal`. We will create an index of all possible features and encode output using the `StringLookup()` layer. Finally, the following feature are continuous numerical features: - `age` - `trestbps` - `chol` - `thalach` - `oldpeak` - `slope` For each of these features, we will use a `Normalization()` layer to make sure the mean of each feature is 0 and its standard deviation is 1. Below, we define 3 utility functions to do the operations: - `encode_numerical_feature` to apply featurewise normalization to numerical features. - `encode_string_categorical_feature` to first turn string inputs into integer indices, then one-hot encode these integer indices. - `encode_integer_categorical_feature` to one-hot encode integer categorical features. 
""" def encode_numerical_feature(feature, name, dataset): # Create a Normalization layer for our feature normalizer = layers.Normalization() # Prepare a Dataset that only yields our feature feature_ds = dataset.map(lambda x, y: x[name]) feature_ds = feature_ds.map(lambda x: tf.expand_dims(x, -1)) # Learn the statistics of the data normalizer.adapt(feature_ds) # Normalize the input feature encoded_feature = normalizer(feature) return encoded_feature def encode_categorical_feature(feature, name, dataset, is_string): lookup_class = layers.StringLookup if is_string else layers.IntegerLookup # Create a lookup layer which will turn strings into integer indices lookup = lookup_class(output_mode="binary") # Prepare a Dataset that only yields our feature feature_ds = dataset.map(lambda x, y: x[name]) feature_ds = feature_ds.map(lambda x: tf.expand_dims(x, -1)) # Learn the set of possible string values and assign them a fixed integer index lookup.adapt(feature_ds) # Turn the string input into integer indices encoded_feature = lookup(feature) return encoded_feature """ ## Build a model With this done, we can create our end-to-end model: """ # Categorical features encoded as integers sex = keras.Input(shape=(1,), name="sex", dtype="int64") cp = keras.Input(shape=(1,), name="cp", dtype="int64") fbs = keras.Input(shape=(1,), name="fbs", dtype="int64") restecg = keras.Input(shape=(1,), name="restecg", dtype="int64") exang = keras.Input(shape=(1,), name="exang", dtype="int64") ca = keras.Input(shape=(1,), name="ca", dtype="int64") # Categorical feature encoded as string thal = keras.Input(shape=(1,), name="thal", dtype="string") # Numerical features age = keras.Input(shape=(1,), name="age") trestbps = keras.Input(shape=(1,), name="trestbps") chol = keras.Input(shape=(1,), name="chol") thalach = keras.Input(shape=(1,), name="thalach") oldpeak = keras.Input(shape=(1,), name="oldpeak") slope = keras.Input(shape=(1,), name="slope") all_inputs = [ sex, cp, fbs, restecg, exang, ca, thal, age, trestbps, chol, thalach, oldpeak, slope, ] # Integer categorical features sex_encoded = encode_categorical_feature(sex, "sex", train_ds, False) cp_encoded = encode_categorical_feature(cp, "cp", train_ds, False) fbs_encoded = encode_categorical_feature(fbs, "fbs", train_ds, False) restecg_encoded = encode_categorical_feature(restecg, "restecg", train_ds, False) exang_encoded = encode_categorical_feature(exang, "exang", train_ds, False) ca_encoded = encode_categorical_feature(ca, "ca", train_ds, False) # String categorical features thal_encoded = encode_categorical_feature(thal, "thal", train_ds, True) # Numerical features age_encoded = encode_numerical_feature(age, "age", train_ds) trestbps_encoded = encode_numerical_feature(trestbps, "trestbps", train_ds) chol_encoded = encode_numerical_feature(chol, "chol", train_ds) thalach_encoded = encode_numerical_feature(thalach, "thalach", train_ds) oldpeak_encoded = encode_numerical_feature(oldpeak, "oldpeak", train_ds) slope_encoded = encode_numerical_feature(slope, "slope", train_ds) all_features = layers.concatenate( [ sex_encoded, cp_encoded, fbs_encoded, restecg_encoded, exang_encoded, slope_encoded, ca_encoded, thal_encoded, age_encoded, trestbps_encoded, chol_encoded, thalach_encoded, oldpeak_encoded, ] ) x = layers.Dense(32, activation="relu")(all_features) x = layers.Dropout(0.5)(x) output = layers.Dense(1, activation="sigmoid")(x) model = keras.Model(all_inputs, output) model.compile("adam", "binary_crossentropy", metrics=["accuracy"]) """ Let's visualize our connectivity 
graph: """ # `rankdir='LR'` is to make the graph horizontal. keras.utils.plot_model(model, show_shapes=True, rankdir="LR") """ ## Train the model """ model.fit(train_ds, epochs=50, validation_data=val_ds) """ We quickly get to 80% validation accuracy. """ """ ## Inference on new data To get a prediction for a new sample, you can simply call `model.predict()`. There are just two things you need to do: 1. wrap scalars into a list so as to have a batch dimension (models only process batches of data, not single samples) 2. Call `convert_to_tensor` on each feature """ sample = { "age": 60, "sex": 1, "cp": 1, "trestbps": 145, "chol": 233, "fbs": 1, "restecg": 2, "thalach": 150, "exang": 0, "oldpeak": 2.3, "slope": 3, "ca": 0, "thal": "fixed", } input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()} predictions = model.predict(input_dict) print( f"This particular patient had a {100 * predictions[0][0]:.1f} " "percent probability of having a heart disease, " "as evaluated by our model." )
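
"""
Optional: a small sketch that is not part of the original example. Since the model was
compiled with an accuracy metric, its overall validation performance can also be
reported with `model.evaluate` on the validation dataset built above.
"""

# Hedged sketch: report loss and accuracy on the validation set.
val_loss, val_accuracy = model.evaluate(val_ds, verbose=0)
print(f"Validation accuracy: {val_accuracy:.3f}")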
keras-io/examples/structured_data/structured_data_classification_from_scratch.py/0
{ "file_path": "keras-io/examples/structured_data/structured_data_classification_from_scratch.py", "repo_id": "keras-io", "token_count": 3444 }
115
<jupyter_start><jupyter_text>Timeseries anomaly detection using an Autoencoder**Author:** [pavithrasv](https://github.com/pavithrasv)**Date created:** 2020/05/31**Last modified:** 2020/05/31**Description:** Detect anomalies in a timeseries using an Autoencoder. IntroductionThis script demonstrates how you can use a reconstruction convolutionalautoencoder model to detect anomalies in timeseries data. Setup<jupyter_code>import numpy as np import pandas as pd import keras from keras import layers from matplotlib import pyplot as plt<jupyter_output><empty_output><jupyter_text>Load the dataWe will use the [Numenta Anomaly Benchmark(NAB)](https://www.kaggle.com/boltzmannbrain/nab) dataset. It provides artificialtimeseries data containing labeled anomalous periods of behavior. Data areordered, timestamped, single-valued metrics.We will use the `art_daily_small_noise.csv` file for training and the`art_daily_jumpsup.csv` file for testing. The simplicity of this datasetallows us to demonstrate anomaly detection effectively.<jupyter_code>master_url_root = "https://raw.githubusercontent.com/numenta/NAB/master/data/" df_small_noise_url_suffix = "artificialNoAnomaly/art_daily_small_noise.csv" df_small_noise_url = master_url_root + df_small_noise_url_suffix df_small_noise = pd.read_csv( df_small_noise_url, parse_dates=True, index_col="timestamp" ) df_daily_jumpsup_url_suffix = "artificialWithAnomaly/art_daily_jumpsup.csv" df_daily_jumpsup_url = master_url_root + df_daily_jumpsup_url_suffix df_daily_jumpsup = pd.read_csv( df_daily_jumpsup_url, parse_dates=True, index_col="timestamp" )<jupyter_output><empty_output><jupyter_text>Quick look at the data<jupyter_code>print(df_small_noise.head()) print(df_daily_jumpsup.head())<jupyter_output><empty_output><jupyter_text>Visualize the data Timeseries data without anomaliesWe will use the following data for training.<jupyter_code>fig, ax = plt.subplots() df_small_noise.plot(legend=False, ax=ax) plt.show()<jupyter_output><empty_output><jupyter_text>Timeseries data with anomaliesWe will use the following data for testing and see if the sudden jump up in thedata is detected as an anomaly.<jupyter_code>fig, ax = plt.subplots() df_daily_jumpsup.plot(legend=False, ax=ax) plt.show()<jupyter_output><empty_output><jupyter_text>Prepare training dataGet data values from the training timeseries data file and normalize the`value` data. We have a `value` for every 5 mins for 14 days.- 24 * 60 / 5 = **288 timesteps per day**- 288 * 14 = **4032 data points** in total<jupyter_code># Normalize and save the mean and std we get, # for normalizing test data. training_mean = df_small_noise.mean() training_std = df_small_noise.std() df_training_value = (df_small_noise - training_mean) / training_std print("Number of training samples:", len(df_training_value))<jupyter_output><empty_output><jupyter_text>Create sequencesCreate sequences combining `TIME_STEPS` contiguous data values from thetraining data.<jupyter_code>TIME_STEPS = 288 # Generated training sequences for use in the model. def create_sequences(values, time_steps=TIME_STEPS): output = [] for i in range(len(values) - time_steps + 1): output.append(values[i : (i + time_steps)]) return np.stack(output) x_train = create_sequences(df_training_value.values) print("Training input shape: ", x_train.shape)<jupyter_output><empty_output><jupyter_text>Build a modelWe will build a convolutional reconstruction autoencoder model. 
The model willtake input of shape `(batch_size, sequence_length, num_features)` and returnoutput of the same shape. In this case, `sequence_length` is 288 and`num_features` is 1.<jupyter_code>model = keras.Sequential( [ layers.Input(shape=(x_train.shape[1], x_train.shape[2])), layers.Conv1D( filters=32, kernel_size=7, padding="same", strides=2, activation="relu", ), layers.Dropout(rate=0.2), layers.Conv1D( filters=16, kernel_size=7, padding="same", strides=2, activation="relu", ), layers.Conv1DTranspose( filters=16, kernel_size=7, padding="same", strides=2, activation="relu", ), layers.Dropout(rate=0.2), layers.Conv1DTranspose( filters=32, kernel_size=7, padding="same", strides=2, activation="relu", ), layers.Conv1DTranspose(filters=1, kernel_size=7, padding="same"), ] ) model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse") model.summary()<jupyter_output><empty_output><jupyter_text>Train the modelPlease note that we are using `x_train` as both the input and the targetsince this is a reconstruction model.<jupyter_code>history = model.fit( x_train, x_train, epochs=50, batch_size=128, validation_split=0.1, callbacks=[ keras.callbacks.EarlyStopping(monitor="val_loss", patience=5, mode="min") ], )<jupyter_output><empty_output><jupyter_text>Let's plot training and validation loss to see how the training went.<jupyter_code>plt.plot(history.history["loss"], label="Training Loss") plt.plot(history.history["val_loss"], label="Validation Loss") plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>Detecting anomaliesWe will detect anomalies by determining how well our model can reconstructthe input data.1. Find MAE loss on training samples.2. Find max MAE loss value. This is the worst our model has performed tryingto reconstruct a sample. We will make this the `threshold` for anomalydetection.3. If the reconstruction loss for a sample is greater than this `threshold`value then we can infer that the model is seeing a pattern that it isn'tfamiliar with. We will label this sample as an `anomaly`.<jupyter_code># Get train MAE loss. x_train_pred = model.predict(x_train) train_mae_loss = np.mean(np.abs(x_train_pred - x_train), axis=1) plt.hist(train_mae_loss, bins=50) plt.xlabel("Train MAE loss") plt.ylabel("No of samples") plt.show() # Get reconstruction loss threshold. threshold = np.max(train_mae_loss) print("Reconstruction error threshold: ", threshold)<jupyter_output><empty_output><jupyter_text>Compare recontructionJust for fun, let's see how our model has recontructed the first sample.This is the 288 timesteps from day 1 of our training dataset.<jupyter_code># Checking how the first sequence is learnt plt.plot(x_train[0]) plt.plot(x_train_pred[0]) plt.show()<jupyter_output><empty_output><jupyter_text>Prepare test data<jupyter_code>df_test_value = (df_daily_jumpsup - training_mean) / training_std fig, ax = plt.subplots() df_test_value.plot(legend=False, ax=ax) plt.show() # Create sequences from test values. x_test = create_sequences(df_test_value.values) print("Test input shape: ", x_test.shape) # Get test MAE loss. x_test_pred = model.predict(x_test) test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1) test_mae_loss = test_mae_loss.reshape((-1)) plt.hist(test_mae_loss, bins=50) plt.xlabel("test MAE loss") plt.ylabel("No of samples") plt.show() # Detect all the samples which are anomalies. 
anomalies = test_mae_loss > threshold print("Number of anomaly samples: ", np.sum(anomalies)) print("Indices of anomaly samples: ", np.where(anomalies))<jupyter_output><empty_output><jupyter_text>Plot anomaliesWe now know the samples of the data which are anomalies. With this, we willfind the corresponding `timestamps` from the original test data. We will beusing the following method to do that:Let's say time_steps = 3 and we have 10 training values. Our `x_train` willlook like this:- 0, 1, 2- 1, 2, 3- 2, 3, 4- 3, 4, 5- 4, 5, 6- 5, 6, 7- 6, 7, 8- 7, 8, 9All except the initial and the final time_steps-1 data values, will appear in`time_steps` number of samples. So, if we know that the samples[(3, 4, 5), (4, 5, 6), (5, 6, 7)] are anomalies, we can say that the data point5 is an anomaly.<jupyter_code># data i is an anomaly if samples [(i - timesteps + 1) to (i)] are anomalies anomalous_data_indices = [] for data_idx in range(TIME_STEPS - 1, len(df_test_value) - TIME_STEPS + 1): if np.all(anomalies[data_idx - TIME_STEPS + 1 : data_idx]): anomalous_data_indices.append(data_idx)<jupyter_output><empty_output><jupyter_text>Let's overlay the anomalies on the original test data plot.<jupyter_code>df_subset = df_daily_jumpsup.iloc[anomalous_data_indices] fig, ax = plt.subplots() df_daily_jumpsup.plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") plt.show()<jupyter_output><empty_output>
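<jupyter_text>Optional: a tiny sketch (not part of the original example) that simply prints the timestamps flagged as anomalous, using the `df_subset` dataframe built above.<jupyter_code># Hedged sketch: list the anomalous timestamps detected above.
print(df_subset.index)<jupyter_output><empty_output>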
keras-io/examples/timeseries/ipynb/timeseries_anomaly_detection.ipynb/0
{ "file_path": "keras-io/examples/timeseries/ipynb/timeseries_anomaly_detection.ipynb", "repo_id": "keras-io", "token_count": 3212 }
116
""" Title: Timeseries forecasting for weather prediction Authors: [Prabhanshu Attri](https://prabhanshu.com/github), [Yashika Sharma](https://github.com/yashika51), [Kristi Takach](https://github.com/ktakattack), [Falak Shah](https://github.com/falaktheoptimist) Date created: 2020/06/23 Last modified: 2023/11/22 Description: This notebook demonstrates how to do timeseries forecasting using a LSTM model. Accelerator: GPU """ """ ## Setup """ import pandas as pd import matplotlib.pyplot as plt import keras """ ## Climate Data Time-Series We will be using Jena Climate dataset recorded by the [Max Planck Institute for Biogeochemistry](https://www.bgc-jena.mpg.de/wetter/). The dataset consists of 14 features such as temperature, pressure, humidity etc, recorded once per 10 minutes. **Location**: Weather Station, Max Planck Institute for Biogeochemistry in Jena, Germany **Time-frame Considered**: Jan 10, 2009 - December 31, 2016 The table below shows the column names, their value formats, and their description. Index| Features |Format |Description -----|---------------|-------------------|----------------------- 1 |Date Time |01.01.2009 00:10:00|Date-time reference 2 |p (mbar) |996.52 |The pascal SI derived unit of pressure used to quantify internal pressure. Meteorological reports typically state atmospheric pressure in millibars. 3 |T (degC) |-8.02 |Temperature in Celsius 4 |Tpot (K) |265.4 |Temperature in Kelvin 5 |Tdew (degC) |-8.9 |Temperature in Celsius relative to humidity. Dew Point is a measure of the absolute amount of water in the air, the DP is the temperature at which the air cannot hold all the moisture in it and water condenses. 6 |rh (%) |93.3 |Relative Humidity is a measure of how saturated the air is with water vapor, the %RH determines the amount of water contained within collection objects. 7 |VPmax (mbar) |3.33 |Saturation vapor pressure 8 |VPact (mbar) |3.11 |Vapor pressure 9 |VPdef (mbar) |0.22 |Vapor pressure deficit 10 |sh (g/kg) |1.94 |Specific humidity 11 |H2OC (mmol/mol)|3.12 |Water vapor concentration 12 |rho (g/m ** 3) |1307.75 |Airtight 13 |wv (m/s) |1.03 |Wind speed 14 |max. wv (m/s) |1.75 |Maximum wind speed 15 |wd (deg) |152.3 |Wind direction in degrees """ from zipfile import ZipFile uri = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip" zip_path = keras.utils.get_file(origin=uri, fname="jena_climate_2009_2016.csv.zip") zip_file = ZipFile(zip_path) zip_file.extractall() csv_path = "jena_climate_2009_2016.csv" df = pd.read_csv(csv_path) """ ## Raw Data Visualization To give us a sense of the data we are working with, each feature has been plotted below. This shows the distinct pattern of each feature over the time period from 2009 to 2016. It also shows where anomalies are present, which will be addressed during normalization. """ titles = [ "Pressure", "Temperature", "Temperature in Kelvin", "Temperature (dew point)", "Relative Humidity", "Saturation vapor pressure", "Vapor pressure", "Vapor pressure deficit", "Specific humidity", "Water vapor concentration", "Airtight", "Wind speed", "Maximum wind speed", "Wind direction in degrees", ] feature_keys = [ "p (mbar)", "T (degC)", "Tpot (K)", "Tdew (degC)", "rh (%)", "VPmax (mbar)", "VPact (mbar)", "VPdef (mbar)", "sh (g/kg)", "H2OC (mmol/mol)", "rho (g/m**3)", "wv (m/s)", "max. 
wv (m/s)", "wd (deg)", ] colors = [ "blue", "orange", "green", "red", "purple", "brown", "pink", "gray", "olive", "cyan", ] date_time_key = "Date Time" def show_raw_visualization(data): time_data = data[date_time_key] fig, axes = plt.subplots( nrows=7, ncols=2, figsize=(15, 20), dpi=80, facecolor="w", edgecolor="k" ) for i in range(len(feature_keys)): key = feature_keys[i] c = colors[i % (len(colors))] t_data = data[key] t_data.index = time_data t_data.head() ax = t_data.plot( ax=axes[i // 2, i % 2], color=c, title="{} - {}".format(titles[i], key), rot=25, ) ax.legend([titles[i]]) plt.tight_layout() show_raw_visualization(df) """ ## Data Preprocessing Here we are picking ~300,000 data points for training. Observation is recorded every 10 mins, that means 6 times per hour. We will resample one point per hour since no drastic change is expected within 60 minutes. We do this via the `sampling_rate` argument in `timeseries_dataset_from_array` utility. We are tracking data from past 720 timestamps (720/6=120 hours). This data will be used to predict the temperature after 72 timestamps (72/6=12 hours). Since every feature has values with varying ranges, we do normalization to confine feature values to a range of `[0, 1]` before training a neural network. We do this by subtracting the mean and dividing by the standard deviation of each feature. 71.5 % of the data will be used to train the model, i.e. 300,693 rows. `split_fraction` can be changed to alter this percentage. The model is shown data for first 5 days i.e. 720 observations, that are sampled every hour. The temperature after 72 (12 hours * 6 observation per hour) observation will be used as a label. """ split_fraction = 0.715 train_split = int(split_fraction * int(df.shape[0])) step = 6 past = 720 future = 72 learning_rate = 0.001 batch_size = 256 epochs = 10 def normalize(data, train_split): data_mean = data[:train_split].mean(axis=0) data_std = data[:train_split].std(axis=0) return (data - data_mean) / data_std """ We can see from the correlation heatmap, few parameters like Relative Humidity and Specific Humidity are redundant. Hence we will be using select features, not all. """ print( "The selected parameters are:", ", ".join([titles[i] for i in [0, 1, 5, 7, 8, 10, 11]]), ) selected_features = [feature_keys[i] for i in [0, 1, 5, 7, 8, 10, 11]] features = df[selected_features] features.index = df[date_time_key] features.head() features = normalize(features.values, train_split) features = pd.DataFrame(features) features.head() train_data = features.loc[0 : train_split - 1] val_data = features.loc[train_split:] """ # Training dataset The training dataset labels starts from the 792nd observation (720 + 72). """ start = past + future end = start + train_split x_train = train_data[[i for i in range(7)]].values y_train = features.iloc[start:end][[1]] sequence_length = int(past / step) """ The `timeseries_dataset_from_array` function takes in a sequence of data-points gathered at equal intervals, along with time series parameters such as length of the sequences/windows, spacing between two sequence/windows, etc., to produce batches of sub-timeseries inputs and targets sampled from the main timeseries. 
""" dataset_train = keras.preprocessing.timeseries_dataset_from_array( x_train, y_train, sequence_length=sequence_length, sampling_rate=step, batch_size=batch_size, ) """ ## Validation dataset The validation dataset must not contain the last 792 rows as we won't have label data for those records, hence 792 must be subtracted from the end of the data. The validation label dataset must start from 792 after train_split, hence we must add past + future (792) to label_start. """ x_end = len(val_data) - past - future label_start = train_split + past + future x_val = val_data.iloc[:x_end][[i for i in range(7)]].values y_val = features.iloc[label_start:][[1]] dataset_val = keras.preprocessing.timeseries_dataset_from_array( x_val, y_val, sequence_length=sequence_length, sampling_rate=step, batch_size=batch_size, ) for batch in dataset_train.take(1): inputs, targets = batch print("Input shape:", inputs.numpy().shape) print("Target shape:", targets.numpy().shape) """ ## Training """ inputs = keras.layers.Input(shape=(inputs.shape[1], inputs.shape[2])) lstm_out = keras.layers.LSTM(32)(inputs) outputs = keras.layers.Dense(1)(lstm_out) model = keras.Model(inputs=inputs, outputs=outputs) model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss="mse") model.summary() """ We'll use the `ModelCheckpoint` callback to regularly save checkpoints, and the `EarlyStopping` callback to interrupt training when the validation loss is not longer improving. """ path_checkpoint = "model_checkpoint.weights.h5" es_callback = keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=5) modelckpt_callback = keras.callbacks.ModelCheckpoint( monitor="val_loss", filepath=path_checkpoint, verbose=1, save_weights_only=True, save_best_only=True, ) history = model.fit( dataset_train, epochs=epochs, validation_data=dataset_val, callbacks=[es_callback, modelckpt_callback], ) """ We can visualize the loss with the function below. After one point, the loss stops decreasing. """ def visualize_loss(history, title): loss = history.history["loss"] val_loss = history.history["val_loss"] epochs = range(len(loss)) plt.figure() plt.plot(epochs, loss, "b", label="Training loss") plt.plot(epochs, val_loss, "r", label="Validation loss") plt.title(title) plt.xlabel("Epochs") plt.ylabel("Loss") plt.legend() plt.show() visualize_loss(history, "Training and Validation Loss") """ ## Prediction The trained model above is now able to make predictions for 5 sets of values from validation set. """ def show_plot(plot_data, delta, title): labels = ["History", "True Future", "Model Prediction"] marker = [".-", "rx", "go"] time_steps = list(range(-(plot_data[0].shape[0]), 0)) if delta: future = delta else: future = 0 plt.title(title) for i, val in enumerate(plot_data): if i: plt.plot(future, plot_data[i], marker[i], markersize=10, label=labels[i]) else: plt.plot(time_steps, plot_data[i].flatten(), marker[i], label=labels[i]) plt.legend() plt.xlim([time_steps[0], (future + 5) * 2]) plt.xlabel("Time-Step") plt.show() return for x, y in dataset_val.take(5): show_plot( [x[0][:, 1].numpy(), y[0].numpy(), model.predict(x)[0]], 12, "Single Step Prediction", )
keras-io/examples/timeseries/timeseries_weather_forecasting.py/0
{ "file_path": "keras-io/examples/timeseries/timeseries_weather_forecasting.py", "repo_id": "keras-io", "token_count": 4061 }
117
<jupyter_start><jupyter_text>Consistency training with supervision **Author:** [Sayak Paul](https://twitter.com/RisingSayak) **Date created:** 2021/04/13 **Last modified:** 2021/04/19 **Description:** Training with consistency regularization for robustness against data distribution shifts. Deep learning models excel in many image recognition tasks when the data is independent and identically distributed (i.i.d.). However, they can suffer from performance degradation caused by subtle distribution shifts in the input data (such as random noise, contrast change, and blurring). So, naturally, there arises a question of why that is. As discussed in [A Fourier Perspective on Model Robustness in Computer Vision](https://arxiv.org/pdf/1906.08988.pdf), there's no reason for deep learning models to be robust against such shifts. Standard model training procedures (such as standard image classification training workflows) *don't* enable a model to learn beyond what's fed to it in the form of training data. In this example, we will be training an image classification model enforcing a sense of *consistency* inside it by doing the following: * Train a standard image classification model. * Train an _equal or larger_ model on a noisy version of the dataset (augmented using [RandAugment](https://arxiv.org/abs/1909.13719)). * To do this, we will first obtain predictions of the previous model on the clean images of the dataset. * We will then use these predictions and train the second model to match these predictions on the noisy variant of the same images. This is identical to the workflow of [*Knowledge Distillation*](https://keras.io/examples/vision/knowledge_distillation/), but since the student model is equal or larger in size this process is also referred to as ***Self-Training***. This overall training workflow finds its roots in works like [FixMatch](https://arxiv.org/abs/2001.07685), [Unsupervised Data Augmentation for Consistency Training](https://arxiv.org/abs/1904.12848), and [Noisy Student Training](https://arxiv.org/abs/1911.04252). Since this training process encourages a model to yield consistent predictions for clean as well as noisy images, it's often referred to as *consistency training* or *training with consistency regularization*.
Although the example focuses on using consistency training to enhancethe robustness of models to common corruptions this example can also serve a templatefor performing _weakly supervised learning_.This example requires TensorFlow 2.4 or higher, as well as TensorFlow Hub and TensorFlowModels, which can be installed using the following command:<jupyter_code>!pip install -q tf-models-official tensorflow-addons<jupyter_output><empty_output><jupyter_text>Imports and setup<jupyter_code>from official.vision.image_classification.augment import RandAugment from tensorflow.keras import layers import tensorflow as tf import tensorflow_addons as tfa import matplotlib.pyplot as plt tf.random.set_seed(42)<jupyter_output><empty_output><jupyter_text>Define hyperparameters<jupyter_code>AUTO = tf.data.AUTOTUNE BATCH_SIZE = 128 EPOCHS = 5 CROP_TO = 72 RESIZE_TO = 96<jupyter_output><empty_output><jupyter_text>Load the CIFAR-10 dataset<jupyter_code>(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() val_samples = 49500 new_train_x, new_y_train = x_train[: val_samples + 1], y_train[: val_samples + 1] val_x, val_y = x_train[val_samples:], y_train[val_samples:]<jupyter_output><empty_output><jupyter_text>Create TensorFlow `Dataset` objects<jupyter_code># Initialize `RandAugment` object with 2 layers of # augmentation transforms and strength of 9. augmenter = RandAugment(num_layers=2, magnitude=9)<jupyter_output><empty_output><jupyter_text>For training the teacher model, we will only be using two geometric augmentationtransforms: random horizontal flip and random crop.<jupyter_code>def preprocess_train(image, label, noisy=True): image = tf.image.random_flip_left_right(image) # We first resize the original image to a larger dimension # and then we take random crops from it. image = tf.image.resize(image, [RESIZE_TO, RESIZE_TO]) image = tf.image.random_crop(image, [CROP_TO, CROP_TO, 3]) if noisy: image = augmenter.distort(image) return image, label def preprocess_test(image, label): image = tf.image.resize(image, [CROP_TO, CROP_TO]) return image, label train_ds = tf.data.Dataset.from_tensor_slices((new_train_x, new_y_train)) validation_ds = tf.data.Dataset.from_tensor_slices((val_x, val_y)) test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))<jupyter_output><empty_output><jupyter_text>We make sure `train_clean_ds` and `train_noisy_ds` are shuffled using the *same* seed toensure their orders are exactly the same. This will be helpful during training thestudent model.<jupyter_code># This dataset will be used to train the first model. train_clean_ds = ( train_ds.shuffle(BATCH_SIZE * 10, seed=42) .map(lambda x, y: (preprocess_train(x, y, noisy=False)), num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) # This prepares the `Dataset` object to use RandAugment. train_noisy_ds = ( train_ds.shuffle(BATCH_SIZE * 10, seed=42) .map(preprocess_train, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) validation_ds = ( validation_ds.map(preprocess_test, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) test_ds = ( test_ds.map(preprocess_test, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) # This dataset will be used to train the second model. 
consistency_training_ds = tf.data.Dataset.zip((train_clean_ds, train_noisy_ds))<jupyter_output><empty_output><jupyter_text>Visualize the datasets<jupyter_code>sample_images, sample_labels = next(iter(train_clean_ds)) plt.figure(figsize=(10, 10)) for i, image in enumerate(sample_images[:9]): ax = plt.subplot(3, 3, i + 1) plt.imshow(image.numpy().astype("int")) plt.axis("off") sample_images, sample_labels = next(iter(train_noisy_ds)) plt.figure(figsize=(10, 10)) for i, image in enumerate(sample_images[:9]): ax = plt.subplot(3, 3, i + 1) plt.imshow(image.numpy().astype("int")) plt.axis("off")<jupyter_output><empty_output><jupyter_text>Define a model building utility functionWe now define our model building utility. Our model is based on the [ResNet50V2 architecture](https://arxiv.org/abs/1603.05027).<jupyter_code>def get_training_model(num_classes=10): resnet50_v2 = tf.keras.applications.ResNet50V2( weights=None, include_top=False, input_shape=(CROP_TO, CROP_TO, 3), ) model = tf.keras.Sequential( [ layers.Input((CROP_TO, CROP_TO, 3)), layers.Rescaling(scale=1.0 / 127.5, offset=-1), resnet50_v2, layers.GlobalAveragePooling2D(), layers.Dense(num_classes), ] ) return model<jupyter_output><empty_output><jupyter_text>In the interest of reproducibility, we serialize the initial random weights of theteacher network.<jupyter_code>initial_teacher_model = get_training_model() initial_teacher_model.save_weights("initial_teacher_model.h5")<jupyter_output><empty_output><jupyter_text>Train the teacher modelAs noted in Noisy Student Training, if the teacher model is trained with *geometricensembling* and when the student model is forced to mimic that, it leads to betterperformance. The original work uses [Stochastic Depth](https://arxiv.org/abs/1603.09382)and [Dropout](https://jmlr.org/papers/v15/srivastava14a.html) to bring in the ensemblingpart but for this example, we will use [Stochastic Weight Averaging](https://arxiv.org/abs/1803.05407)(SWA) which also resembles geometric ensembling.<jupyter_code># Define the callbacks. reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(patience=3) early_stopping = tf.keras.callbacks.EarlyStopping( patience=10, restore_best_weights=True ) # Initialize SWA from tf-hub. SWA = tfa.optimizers.SWA # Compile and train the teacher model. teacher_model = get_training_model() teacher_model.load_weights("initial_teacher_model.h5") teacher_model.compile( # Notice that we are wrapping our optimizer within SWA optimizer=SWA(tf.keras.optimizers.Adam()), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"], ) history = teacher_model.fit( train_clean_ds, epochs=EPOCHS, validation_data=validation_ds, callbacks=[reduce_lr, early_stopping], ) # Evaluate the teacher model on the test set. 
_, acc = teacher_model.evaluate(test_ds, verbose=0) print(f"Test accuracy: {acc*100}%")<jupyter_output><empty_output><jupyter_text>Define a self-training utilityFor this part, we will borrow the `Distiller` class from [this Keras Example](https://keras.io/examples/vision/knowledge_distillation/).<jupyter_code># Majority of the code is taken from: # https://keras.io/examples/vision/knowledge_distillation/ class SelfTrainer(tf.keras.Model): def __init__(self, student, teacher): super().__init__() self.student = student self.teacher = teacher def compile( self, optimizer, metrics, student_loss_fn, distillation_loss_fn, temperature=3, ): super().compile(optimizer=optimizer, metrics=metrics) self.student_loss_fn = student_loss_fn self.distillation_loss_fn = distillation_loss_fn self.temperature = temperature def train_step(self, data): # Since our dataset is a zip of two independent datasets, # after initially parsing them, we segregate the # respective images and labels next. clean_ds, noisy_ds = data clean_images, _ = clean_ds noisy_images, y = noisy_ds # Forward pass of teacher teacher_predictions = self.teacher(clean_images, training=False) with tf.GradientTape() as tape: # Forward pass of student student_predictions = self.student(noisy_images, training=True) # Compute losses student_loss = self.student_loss_fn(y, student_predictions) distillation_loss = self.distillation_loss_fn( tf.nn.softmax(teacher_predictions / self.temperature, axis=1), tf.nn.softmax(student_predictions / self.temperature, axis=1), ) total_loss = (student_loss + distillation_loss) / 2 # Compute gradients trainable_vars = self.student.trainable_variables gradients = tape.gradient(total_loss, trainable_vars) # Update weights self.optimizer.apply_gradients(zip(gradients, trainable_vars)) # Update the metrics configured in `compile()` self.compiled_metrics.update_state( y, tf.nn.softmax(student_predictions, axis=1) ) # Return a dict of performance results = {m.name: m.result() for m in self.metrics} results.update({"total_loss": total_loss}) return results def test_step(self, data): # During inference, we only pass a dataset consisting images and labels. x, y = data # Compute predictions y_prediction = self.student(x, training=False) # Update the metrics self.compiled_metrics.update_state(y, tf.nn.softmax(y_prediction, axis=1)) # Return a dict of performance results = {m.name: m.result() for m in self.metrics} return results<jupyter_output><empty_output><jupyter_text>The only difference in this implementation is the way loss is being calculated. **Insteadof weighted the distillation loss and student loss differently we are taking theiraverage following Noisy Student Training**. Train the student model<jupyter_code># Define the callbacks. # We are using a larger decay factor to stabilize the training. reduce_lr = tf.keras.callbacks.ReduceLROnPlateau( patience=3, factor=0.5, monitor="val_accuracy" ) early_stopping = tf.keras.callbacks.EarlyStopping( patience=10, restore_best_weights=True, monitor="val_accuracy" ) # Compile and train the student model. self_trainer = SelfTrainer(student=get_training_model(), teacher=teacher_model) self_trainer.compile( # Notice we are *not* using SWA here. 
optimizer="adam", metrics=["accuracy"], student_loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), distillation_loss_fn=tf.keras.losses.KLDivergence(), temperature=10, ) history = self_trainer.fit( consistency_training_ds, epochs=EPOCHS, validation_data=validation_ds, callbacks=[reduce_lr, early_stopping], ) # Evaluate the student model. acc = self_trainer.evaluate(test_ds, verbose=0) print(f"Test accuracy from student model: {acc*100}%")<jupyter_output><empty_output>
keras-io/examples/vision/ipynb/consistency_training.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/consistency_training.ipynb", "repo_id": "keras-io", "token_count": 4540 }
118
<jupyter_start><jupyter_text>Image Captioning**Author:** [A_K_Nain](https://twitter.com/A_K_Nain)**Date created:** 2021/05/29**Last modified:** 2021/10/31**Description:** Implement an image captioning model using a CNN and a Transformer. Setup<jupyter_code>import os os.environ["KERAS_BACKEND"] = "tensorflow" import re import numpy as np import matplotlib.pyplot as plt import tensorflow as tf import keras from keras import layers from keras.applications import efficientnet from keras.layers import TextVectorization keras.utils.set_random_seed(111)<jupyter_output><empty_output><jupyter_text>Download the datasetWe will be using the Flickr8K dataset for this tutorial. This dataset comprises over8,000 images, that are each paired with five different captions.<jupyter_code>!wget -q https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_Dataset.zip !wget -q https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_text.zip !unzip -qq Flickr8k_Dataset.zip !unzip -qq Flickr8k_text.zip !rm Flickr8k_Dataset.zip Flickr8k_text.zip # Path to the images IMAGES_PATH = "Flicker8k_Dataset" # Desired image dimensions IMAGE_SIZE = (299, 299) # Vocabulary size VOCAB_SIZE = 10000 # Fixed length allowed for any sequence SEQ_LENGTH = 25 # Dimension for the image embeddings and token embeddings EMBED_DIM = 512 # Per-layer units in the feed-forward network FF_DIM = 512 # Other training parameters BATCH_SIZE = 64 EPOCHS = 30 AUTOTUNE = tf.data.AUTOTUNE<jupyter_output><empty_output><jupyter_text>Preparing the dataset<jupyter_code>def load_captions_data(filename): """Loads captions (text) data and maps them to corresponding images. Args: filename: Path to the text file containing caption data. Returns: caption_mapping: Dictionary mapping image names and the corresponding captions text_data: List containing all the available captions """ with open(filename) as caption_file: caption_data = caption_file.readlines() caption_mapping = {} text_data = [] images_to_skip = set() for line in caption_data: line = line.rstrip("\n") # Image name and captions are separated using a tab img_name, caption = line.split("\t") # Each image is repeated five times for the five different captions. # Each image name has a suffix `#(caption_number)` img_name = img_name.split("#")[0] img_name = os.path.join(IMAGES_PATH, img_name.strip()) # We will remove caption that are either too short to too long tokens = caption.strip().split() if len(tokens) < 5 or len(tokens) > SEQ_LENGTH: images_to_skip.add(img_name) continue if img_name.endswith("jpg") and img_name not in images_to_skip: # We will add a start and an end token to each caption caption = "<start> " + caption.strip() + " <end>" text_data.append(caption) if img_name in caption_mapping: caption_mapping[img_name].append(caption) else: caption_mapping[img_name] = [caption] for img_name in images_to_skip: if img_name in caption_mapping: del caption_mapping[img_name] return caption_mapping, text_data def train_val_split(caption_data, train_size=0.8, shuffle=True): """Split the captioning dataset into train and validation sets. Args: caption_data (dict): Dictionary containing the mapped caption data train_size (float): Fraction of all the full dataset to use as training data shuffle (bool): Whether to shuffle the dataset before splitting Returns: Traning and validation datasets as two separated dicts """ # 1. Get the list of all image names all_images = list(caption_data.keys()) # 2. Shuffle if necessary if shuffle: np.random.shuffle(all_images) # 3. 
Split into training and validation sets train_size = int(len(caption_data) * train_size) training_data = { img_name: caption_data[img_name] for img_name in all_images[:train_size] } validation_data = { img_name: caption_data[img_name] for img_name in all_images[train_size:] } # 4. Return the splits return training_data, validation_data # Load the dataset captions_mapping, text_data = load_captions_data("Flickr8k.token.txt") # Split the dataset into training and validation sets train_data, valid_data = train_val_split(captions_mapping) print("Number of training samples: ", len(train_data)) print("Number of validation samples: ", len(valid_data))<jupyter_output><empty_output><jupyter_text>Vectorizing the text dataWe'll use the `TextVectorization` layer to vectorize the text data,that is to say, to turn theoriginal strings into integer sequences where each integer represents the index ofa word in a vocabulary. We will use a custom string standardization scheme(strip punctuation characters except ``) and the defaultsplitting scheme (split on whitespace).<jupyter_code>def custom_standardization(input_string): lowercase = tf.strings.lower(input_string) return tf.strings.regex_replace(lowercase, "[%s]" % re.escape(strip_chars), "") strip_chars = "!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~" strip_chars = strip_chars.replace("<", "") strip_chars = strip_chars.replace(">", "") vectorization = TextVectorization( max_tokens=VOCAB_SIZE, output_mode="int", output_sequence_length=SEQ_LENGTH, standardize=custom_standardization, ) vectorization.adapt(text_data) # Data augmentation for image data image_augmentation = keras.Sequential( [ layers.RandomFlip("horizontal"), layers.RandomRotation(0.2), layers.RandomContrast(0.3), ] )<jupyter_output><empty_output><jupyter_text>Building a `tf.data.Dataset` pipeline for trainingWe will generate pairs of images and corresponding captions using a `tf.data.Dataset` object.The pipeline consists of two steps:1. Read the image from the disk2. Tokenize all the five captions corresponding to the image<jupyter_code>def decode_and_resize(img_path): img = tf.io.read_file(img_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, IMAGE_SIZE) img = tf.image.convert_image_dtype(img, tf.float32) return img def process_input(img_path, captions): return decode_and_resize(img_path), vectorization(captions) def make_dataset(images, captions): dataset = tf.data.Dataset.from_tensor_slices((images, captions)) dataset = dataset.shuffle(BATCH_SIZE * 8) dataset = dataset.map(process_input, num_parallel_calls=AUTOTUNE) dataset = dataset.batch(BATCH_SIZE).prefetch(AUTOTUNE) return dataset # Pass the list of images and the list of corresponding captions train_dataset = make_dataset(list(train_data.keys()), list(train_data.values())) valid_dataset = make_dataset(list(valid_data.keys()), list(valid_data.values()))<jupyter_output><empty_output><jupyter_text>Building the modelOur image captioning architecture consists of three models:1. A CNN: used to extract the image features2. A TransformerEncoder: The extracted image features are then passed to a Transformer based encoder that generates a new representation of the inputs3. 
A TransformerDecoder: This model takes the encoder output and the text data (sequences) as inputs and tries to learn to generate the caption.<jupyter_code>def get_cnn_model(): base_model = efficientnet.EfficientNetB0( input_shape=(*IMAGE_SIZE, 3), include_top=False, weights="imagenet", ) # We freeze our feature extractor base_model.trainable = False base_model_out = base_model.output base_model_out = layers.Reshape((-1, base_model_out.shape[-1]))(base_model_out) cnn_model = keras.models.Model(base_model.input, base_model_out) return cnn_model class TransformerEncoderBlock(layers.Layer): def __init__(self, embed_dim, dense_dim, num_heads, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.dense_dim = dense_dim self.num_heads = num_heads self.attention_1 = layers.MultiHeadAttention( num_heads=num_heads, key_dim=embed_dim, dropout=0.0 ) self.layernorm_1 = layers.LayerNormalization() self.layernorm_2 = layers.LayerNormalization() self.dense_1 = layers.Dense(embed_dim, activation="relu") def call(self, inputs, training, mask=None): inputs = self.layernorm_1(inputs) inputs = self.dense_1(inputs) attention_output_1 = self.attention_1( query=inputs, value=inputs, key=inputs, attention_mask=None, training=training, ) out_1 = self.layernorm_2(inputs + attention_output_1) return out_1 class PositionalEmbedding(layers.Layer): def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs): super().__init__(**kwargs) self.token_embeddings = layers.Embedding( input_dim=vocab_size, output_dim=embed_dim ) self.position_embeddings = layers.Embedding( input_dim=sequence_length, output_dim=embed_dim ) self.sequence_length = sequence_length self.vocab_size = vocab_size self.embed_dim = embed_dim self.embed_scale = tf.math.sqrt(tf.cast(embed_dim, tf.float32)) def call(self, inputs): length = tf.shape(inputs)[-1] positions = tf.range(start=0, limit=length, delta=1) embedded_tokens = self.token_embeddings(inputs) embedded_tokens = embedded_tokens * self.embed_scale embedded_positions = self.position_embeddings(positions) return embedded_tokens + embedded_positions def compute_mask(self, inputs, mask=None): return tf.math.not_equal(inputs, 0) class TransformerDecoderBlock(layers.Layer): def __init__(self, embed_dim, ff_dim, num_heads, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.ff_dim = ff_dim self.num_heads = num_heads self.attention_1 = layers.MultiHeadAttention( num_heads=num_heads, key_dim=embed_dim, dropout=0.1 ) self.attention_2 = layers.MultiHeadAttention( num_heads=num_heads, key_dim=embed_dim, dropout=0.1 ) self.ffn_layer_1 = layers.Dense(ff_dim, activation="relu") self.ffn_layer_2 = layers.Dense(embed_dim) self.layernorm_1 = layers.LayerNormalization() self.layernorm_2 = layers.LayerNormalization() self.layernorm_3 = layers.LayerNormalization() self.embedding = PositionalEmbedding( embed_dim=EMBED_DIM, sequence_length=SEQ_LENGTH, vocab_size=VOCAB_SIZE, ) self.out = layers.Dense(VOCAB_SIZE, activation="softmax") self.dropout_1 = layers.Dropout(0.3) self.dropout_2 = layers.Dropout(0.5) self.supports_masking = True def call(self, inputs, encoder_outputs, training, mask=None): inputs = self.embedding(inputs) causal_mask = self.get_causal_attention_mask(inputs) if mask is not None: padding_mask = tf.cast(mask[:, :, tf.newaxis], dtype=tf.int32) combined_mask = tf.cast(mask[:, tf.newaxis, :], dtype=tf.int32) combined_mask = tf.minimum(combined_mask, causal_mask) attention_output_1 = self.attention_1( query=inputs, value=inputs, key=inputs, 
attention_mask=combined_mask, training=training, ) out_1 = self.layernorm_1(inputs + attention_output_1) attention_output_2 = self.attention_2( query=out_1, value=encoder_outputs, key=encoder_outputs, attention_mask=padding_mask, training=training, ) out_2 = self.layernorm_2(out_1 + attention_output_2) ffn_out = self.ffn_layer_1(out_2) ffn_out = self.dropout_1(ffn_out, training=training) ffn_out = self.ffn_layer_2(ffn_out) ffn_out = self.layernorm_3(ffn_out + out_2, training=training) ffn_out = self.dropout_2(ffn_out, training=training) preds = self.out(ffn_out) return preds def get_causal_attention_mask(self, inputs): input_shape = tf.shape(inputs) batch_size, sequence_length = input_shape[0], input_shape[1] i = tf.range(sequence_length)[:, tf.newaxis] j = tf.range(sequence_length) mask = tf.cast(i >= j, dtype="int32") mask = tf.reshape(mask, (1, input_shape[1], input_shape[1])) mult = tf.concat( [ tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32), ], axis=0, ) return tf.tile(mask, mult) class ImageCaptioningModel(keras.Model): def __init__( self, cnn_model, encoder, decoder, num_captions_per_image=5, image_aug=None, ): super().__init__() self.cnn_model = cnn_model self.encoder = encoder self.decoder = decoder self.loss_tracker = keras.metrics.Mean(name="loss") self.acc_tracker = keras.metrics.Mean(name="accuracy") self.num_captions_per_image = num_captions_per_image self.image_aug = image_aug def calculate_loss(self, y_true, y_pred, mask): loss = self.loss(y_true, y_pred) mask = tf.cast(mask, dtype=loss.dtype) loss *= mask return tf.reduce_sum(loss) / tf.reduce_sum(mask) def calculate_accuracy(self, y_true, y_pred, mask): accuracy = tf.equal(y_true, tf.argmax(y_pred, axis=2)) accuracy = tf.math.logical_and(mask, accuracy) accuracy = tf.cast(accuracy, dtype=tf.float32) mask = tf.cast(mask, dtype=tf.float32) return tf.reduce_sum(accuracy) / tf.reduce_sum(mask) def _compute_caption_loss_and_acc(self, img_embed, batch_seq, training=True): encoder_out = self.encoder(img_embed, training=training) batch_seq_inp = batch_seq[:, :-1] batch_seq_true = batch_seq[:, 1:] mask = tf.math.not_equal(batch_seq_true, 0) batch_seq_pred = self.decoder( batch_seq_inp, encoder_out, training=training, mask=mask ) loss = self.calculate_loss(batch_seq_true, batch_seq_pred, mask) acc = self.calculate_accuracy(batch_seq_true, batch_seq_pred, mask) return loss, acc def train_step(self, batch_data): batch_img, batch_seq = batch_data batch_loss = 0 batch_acc = 0 if self.image_aug: batch_img = self.image_aug(batch_img) # 1. Get image embeddings img_embed = self.cnn_model(batch_img) # 2. Pass each of the five captions one by one to the decoder # along with the encoder outputs and compute the loss as well as accuracy # for each caption. for i in range(self.num_captions_per_image): with tf.GradientTape() as tape: loss, acc = self._compute_caption_loss_and_acc( img_embed, batch_seq[:, i, :], training=True ) # 3. Update loss and accuracy batch_loss += loss batch_acc += acc # 4. Get the list of all the trainable weights train_vars = ( self.encoder.trainable_variables + self.decoder.trainable_variables ) # 5. Get the gradients grads = tape.gradient(loss, train_vars) # 6. Update the trainable weights self.optimizer.apply_gradients(zip(grads, train_vars)) # 7. Update the trackers batch_acc /= float(self.num_captions_per_image) self.loss_tracker.update_state(batch_loss) self.acc_tracker.update_state(batch_acc) # 8. 
Return the loss and accuracy values return { "loss": self.loss_tracker.result(), "acc": self.acc_tracker.result(), } def test_step(self, batch_data): batch_img, batch_seq = batch_data batch_loss = 0 batch_acc = 0 # 1. Get image embeddings img_embed = self.cnn_model(batch_img) # 2. Pass each of the five captions one by one to the decoder # along with the encoder outputs and compute the loss as well as accuracy # for each caption. for i in range(self.num_captions_per_image): loss, acc = self._compute_caption_loss_and_acc( img_embed, batch_seq[:, i, :], training=False ) # 3. Update batch loss and batch accuracy batch_loss += loss batch_acc += acc batch_acc /= float(self.num_captions_per_image) # 4. Update the trackers self.loss_tracker.update_state(batch_loss) self.acc_tracker.update_state(batch_acc) # 5. Return the loss and accuracy values return { "loss": self.loss_tracker.result(), "acc": self.acc_tracker.result(), } @property def metrics(self): # We need to list our metrics here so the `reset_states()` can be # called automatically. return [self.loss_tracker, self.acc_tracker] cnn_model = get_cnn_model() encoder = TransformerEncoderBlock(embed_dim=EMBED_DIM, dense_dim=FF_DIM, num_heads=1) decoder = TransformerDecoderBlock(embed_dim=EMBED_DIM, ff_dim=FF_DIM, num_heads=2) caption_model = ImageCaptioningModel( cnn_model=cnn_model, encoder=encoder, decoder=decoder, image_aug=image_augmentation, )<jupyter_output><empty_output><jupyter_text>Model training<jupyter_code># Define the loss function cross_entropy = keras.losses.SparseCategoricalCrossentropy( from_logits=False, reduction=None, ) # EarlyStopping criteria early_stopping = keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True) # Learning Rate Scheduler for the optimizer class LRSchedule(keras.optimizers.schedules.LearningRateSchedule): def __init__(self, post_warmup_learning_rate, warmup_steps): super().__init__() self.post_warmup_learning_rate = post_warmup_learning_rate self.warmup_steps = warmup_steps def __call__(self, step): global_step = tf.cast(step, tf.float32) warmup_steps = tf.cast(self.warmup_steps, tf.float32) warmup_progress = global_step / warmup_steps warmup_learning_rate = self.post_warmup_learning_rate * warmup_progress return tf.cond( global_step < warmup_steps, lambda: warmup_learning_rate, lambda: self.post_warmup_learning_rate, ) # Create a learning rate schedule num_train_steps = len(train_dataset) * EPOCHS num_warmup_steps = num_train_steps // 15 lr_schedule = LRSchedule(post_warmup_learning_rate=1e-4, warmup_steps=num_warmup_steps) # Compile the model caption_model.compile(optimizer=keras.optimizers.Adam(lr_schedule), loss=cross_entropy) # Fit the model caption_model.fit( train_dataset, epochs=EPOCHS, validation_data=valid_dataset, callbacks=[early_stopping], )<jupyter_output><empty_output><jupyter_text>Check sample predictions<jupyter_code>vocab = vectorization.get_vocabulary() index_lookup = dict(zip(range(len(vocab)), vocab)) max_decoded_sentence_length = SEQ_LENGTH - 1 valid_images = list(valid_data.keys()) def generate_caption(): # Select a random image from the validation dataset sample_img = np.random.choice(valid_images) # Read the image from the disk sample_img = decode_and_resize(sample_img) img = sample_img.numpy().clip(0, 255).astype(np.uint8) plt.imshow(img) plt.show() # Pass the image to the CNN img = tf.expand_dims(sample_img, 0) img = caption_model.cnn_model(img) # Pass the image features to the Transformer encoder encoded_img = caption_model.encoder(img, training=False) # Generate 
the caption using the Transformer decoder decoded_caption = "<start> " for i in range(max_decoded_sentence_length): tokenized_caption = vectorization([decoded_caption])[:, :-1] mask = tf.math.not_equal(tokenized_caption, 0) predictions = caption_model.decoder( tokenized_caption, encoded_img, training=False, mask=mask ) sampled_token_index = np.argmax(predictions[0, i, :]) sampled_token = index_lookup[sampled_token_index] if sampled_token == "<end>": break decoded_caption += " " + sampled_token decoded_caption = decoded_caption.replace("<start> ", "") decoded_caption = decoded_caption.replace(" <end>", "").strip() print("Predicted Caption: ", decoded_caption) # Check predictions for a few samples generate_caption() generate_caption() generate_caption()<jupyter_output><empty_output>
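To make the masking logic easier to follow, here is a small standalone sketch (not part of the original example; the `masked_loss` helper and toy tensors are illustrative) mirroring `calculate_loss` in `ImageCaptioningModel`: padding positions (token id 0) are excluded from the average so short captions are not penalized for their padding.

```python
import tensorflow as tf
import keras


def masked_loss(y_true, y_pred):
    # Per-token cross-entropy, shape (batch, seq_len).
    per_token = keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
    # Padding tokens (id 0) are masked out of the average.
    mask = tf.cast(tf.math.not_equal(y_true, 0), per_token.dtype)
    return tf.reduce_sum(per_token * mask) / tf.reduce_sum(mask)


# Toy example: 1 caption of 4 tokens over a 5-word vocabulary,
# with the last position being padding.
y_true = tf.constant([[2, 4, 1, 0]])
logits = tf.random.normal((1, 4, 5))
y_pred = tf.nn.softmax(logits, axis=-1)  # the decoder outputs softmax probabilities
print(masked_loss(y_true, y_pred))
```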
keras-io/examples/vision/ipynb/image_captioning.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/image_captioning.ipynb", "repo_id": "keras-io", "token_count": 8984 }
119
<jupyter_start><jupyter_text>Simple MNIST convnet**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2015/06/19**Last modified:** 2020/04/21**Description:** A simple convnet that achieves ~99% test accuracy on MNIST. Setup<jupyter_code>import numpy as np import keras from keras import layers<jupyter_output><empty_output><jupyter_text>Prepare the data<jupyter_code># Model / data parameters num_classes = 10 input_shape = (28, 28, 1) # Load the data and split it between train and test sets (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 # Make sure images have shape (28, 28, 1) x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) print("x_train shape:", x_train.shape) print(x_train.shape[0], "train samples") print(x_test.shape[0], "test samples") # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes)<jupyter_output><empty_output><jupyter_text>Build the model<jupyter_code>model = keras.Sequential( [ keras.Input(shape=input_shape), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Flatten(), layers.Dropout(0.5), layers.Dense(num_classes, activation="softmax"), ] ) model.summary()<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>batch_size = 128 epochs = 15 model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)<jupyter_output><empty_output><jupyter_text>Evaluate the trained model<jupyter_code>score = model.evaluate(x_test, y_test, verbose=0) print("Test loss:", score[0]) print("Test accuracy:", score[1])<jupyter_output><empty_output>
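As a small follow-up sketch (not part of the original example, and assuming the cells above have been run), the trained model can be used for inference; since the labels were one-hot encoded with `to_categorical` and the model ends in a softmax, `argmax` recovers the predicted digit:

```python
import numpy as np

# Predict class probabilities for a few test images.
probs = model.predict(x_test[:5])              # shape: (5, 10)
predicted_digits = np.argmax(probs, axis=1)    # most probable class per image
true_digits = np.argmax(y_test[:5], axis=1)    # undo the one-hot encoding
print("predicted:", predicted_digits)
print("true:     ", true_digits)
```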
keras-io/examples/vision/ipynb/mnist_convnet.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/mnist_convnet.ipynb", "repo_id": "keras-io", "token_count": 841 }
120
<jupyter_start><jupyter_text>Segment Anything Model with 🤗Transformers**Authors:** [Merve Noyan](https://twitter.com/mervenoyann) & [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2023/07/11**Last modified:** 2023/07/11**Description:** Fine-tuning Segment Anything Model using Keras and 🤗 Transformers. IntroductionLarge language models (LLMs) make it easy for the end users to apply them to variousapplications through "prompting". For example if we wanted an LLM to predict thesentiment of the following sentence -- "That movie was amazing, I thoroughly enjoyed it"-- we'd do prompt the LLM with something like:> What's the sentiment of the following sentence: "That movie was amazing, I thoroughlyenjoyed it"?In return, the LLM would return sentiment token.But when it comes to visual recognition tasks, how can we engineer "visual" cues toprompt foundation vision models? For example, we could have an input image and prompt themodel with bounding box on that image and ask it to perform segmentation. The boundingbox would serve as our visual prompt here.In the [Segment Anything Model](https://segment-anything.com/) (dubbed as SAM),researchers from Meta extended the space of language prompting to visual prompting. SAMis capable of performing zero-shot segmentation with a prompt input, inspired by largelanguage models. The prompt here can be a set of foreground/background points, free text,a box or a mask. There are many downstream segmentation tasks, including semanticsegmentation and edge detection. The goal of SAM is to enable all of these downstreamsegmentation tasks through prompting.In this example, we'll learn how to use the SAM model from 🤗 Transformers for performinginference and fine-tuning. Installation<jupyter_code>!!pip install -q git+https://github.com/huggingface/transformers<jupyter_output><empty_output><jupyter_text>Let's import everything we need for this example.<jupyter_code>from tensorflow import keras from transformers import TFSamModel, SamProcessor import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.python.ops.numpy_ops import np_config from PIL import Image import requests import glob import os<jupyter_output><empty_output><jupyter_text>SAM in a few wordsSAM has the following components:||:--:|| Image taken from the official[SAM blog post](https://ai.facebook.com/blog/segment-anything-foundation-model-image-segmentation/) || The image encoder is responsible for computing image embeddings. When interacting withSAM, we compute the image embedding one time (as the image encoder is heavy) and thenreuse it with different prompts mentioned above (points, bounding boxes, masks).Points and boxes (so-called sparse prompts) go through a lightweight prompt encoder,while masks (dense prompts) go through a convolutional layer. We couple the imageembedding extracted from the image encoder and the prompt embedding and both go to alightweight mask decoder. The decoder is responsible for predicting the mask.| ||:--:|| Figure taken from the [SAM paper](https://arxiv.org/abs/2304.02643) | SAM was pre-trained to predict a _valid_ mask for any acceptable prompt. This requirementallows SAM to output a valid mask even when the prompt is ambiguous to understand -- thismakes SAM ambiguity-aware. 
Moreover, SAM predicts multiple masks for a single prompt.We highly encourage you to check out the [SAM paper](https://arxiv.org/abs/2304.02643)and the[blog post](https://ai.facebook.com/blog/segment-anything-foundation-model-image-segmentation/)to learn more about the additional details of SAM and the dataset used to pre-trained it. Running inference with SAMThere are three checkpoints for SAM:* [sam-vit-base](https://huggingface.co/facebook/sam-vit-base)* [sam-vit-large](https://huggingface.co/facebook/sam-vit-large)* [sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge).We load `sam-vit-base` in[`TFSamModel`](https://huggingface.co/docs/transformers/main/model_doc/samtransformers.TFSamModel).We also need `SamProcessor`for the associated checkpoint.<jupyter_code>model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base")<jupyter_output><empty_output><jupyter_text>Next, we write some utility functions for visualization. Most of these functions aretaken from[this notebook](https://github.com/huggingface/notebooks/blob/main/examples/segment_anything.ipynb).<jupyter_code>np_config.enable_numpy_behavior() def show_mask(mask, ax, random_color=False): if random_color: color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) else: color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6]) h, w = mask.shape[-2:] mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) ax.imshow(mask_image) def show_box(box, ax): x0, y0 = box[0], box[1] w, h = box[2] - box[0], box[3] - box[1] ax.add_patch( plt.Rectangle((x0, y0), w, h, edgecolor="green", facecolor=(0, 0, 0, 0), lw=2) ) def show_boxes_on_image(raw_image, boxes): plt.figure(figsize=(10, 10)) plt.imshow(raw_image) for box in boxes: show_box(box, plt.gca()) plt.axis("on") plt.show() def show_points_on_image(raw_image, input_points, input_labels=None): plt.figure(figsize=(10, 10)) plt.imshow(raw_image) input_points = np.array(input_points) if input_labels is None: labels = np.ones_like(input_points[:, 0]) else: labels = np.array(input_labels) show_points(input_points, labels, plt.gca()) plt.axis("on") plt.show() def show_points_and_boxes_on_image(raw_image, boxes, input_points, input_labels=None): plt.figure(figsize=(10, 10)) plt.imshow(raw_image) input_points = np.array(input_points) if input_labels is None: labels = np.ones_like(input_points[:, 0]) else: labels = np.array(input_labels) show_points(input_points, labels, plt.gca()) for box in boxes: show_box(box, plt.gca()) plt.axis("on") plt.show() def show_points_and_boxes_on_image(raw_image, boxes, input_points, input_labels=None): plt.figure(figsize=(10, 10)) plt.imshow(raw_image) input_points = np.array(input_points) if input_labels is None: labels = np.ones_like(input_points[:, 0]) else: labels = np.array(input_labels) show_points(input_points, labels, plt.gca()) for box in boxes: show_box(box, plt.gca()) plt.axis("on") plt.show() def show_points(coords, labels, ax, marker_size=375): pos_points = coords[labels == 1] neg_points = coords[labels == 0] ax.scatter( pos_points[:, 0], pos_points[:, 1], color="green", marker="*", s=marker_size, edgecolor="white", linewidth=1.25, ) ax.scatter( neg_points[:, 0], neg_points[:, 1], color="red", marker="*", s=marker_size, edgecolor="white", linewidth=1.25, ) def show_masks_on_image(raw_image, masks, scores): if len(masks[0].shape) == 4: final_masks = tf.squeeze(masks[0]) if scores.shape[0] == 1: final_scores = tf.squeeze(scores) nb_predictions = scores.shape[-1] fig, axes 
= plt.subplots(1, nb_predictions, figsize=(15, 15)) for i, (mask, score) in enumerate(zip(final_masks, final_scores)): mask = tf.stop_gradient(mask) axes[i].imshow(np.array(raw_image)) show_mask(mask, axes[i]) axes[i].title.set_text(f"Mask {i+1}, Score: {score.numpy().item():.3f}") axes[i].axis("off") plt.show()<jupyter_output><empty_output><jupyter_text>We will segment a car image using a point prompt. Make sure to set `return_tensors` to`tf` when calling the processor.Let's load an image of a car and segment it.<jupyter_code>img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") plt.imshow(raw_image) plt.show()<jupyter_output><empty_output><jupyter_text>Let's now define a set of points we will use as the prompt.<jupyter_code>input_points = [[[450, 600]]] # Visualize a single point. show_points_on_image(raw_image, input_points[0])<jupyter_output><empty_output><jupyter_text>And segment:<jupyter_code># Preprocess the input image. inputs = processor(raw_image, input_points=input_points, return_tensors="tf") # Predict for segmentation with the prompt. outputs = model(**inputs)<jupyter_output><empty_output><jupyter_text>`outputs` has got two attributes of our interest:* `outputs.pred_masks`: which denotes the predicted masks.* `outputs.iou_scores`: which denotes the IoU scores associated with the masks. Let's post-process the masks and visualize them with their IoU scores:<jupyter_code>masks = processor.image_processor.post_process_masks( outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"], return_tensors="tf", ) show_masks_on_image(raw_image, masks, outputs.iou_scores)<jupyter_output><empty_output><jupyter_text>And there we go!As can be noticed, all the masks are _valid_ masks for the point prompt we provided.SAM is flexible enough to support different visual prompts and we encourage you to checkout [thisnotebook](https://github.com/huggingface/notebooks/blob/main/examples/segment_anything.ipynb) to know more about them! Fine-tuningWe'll use [this dataset](https://huggingface.co/datasets/nielsr/breast-cancer) consistingof breast cancer scans. In the medical imaging domain, being able to segment the cellscontaining malignancy is an important task. Data preparationLet's first get the dataset.<jupyter_code>remote_path = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/breast-cancer-dataset.tar.gz" dataset_path = keras.utils.get_file( "breast-cancer-dataset.tar.gz", remote_path, untar=True )<jupyter_output><empty_output><jupyter_text>Let's now visualize a sample from the dataset.*(The `show_mask()` utility is taken from[this notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SAM/Fine_tune_SAM_(segment_anything)_on_a_custom_dataset.ipynb))*<jupyter_code>def show_mask(mask, ax, random_color=False): if random_color: color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) else: color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6]) h, w = mask.shape[-2:] mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) ax.imshow(mask_image) # Load all the image and label paths. image_paths = sorted(glob.glob(os.path.join(dataset_path, "images/*.png"))) label_paths = sorted(glob.glob(os.path.join(dataset_path, "labels/*.png"))) # Load the image and label. idx = 15 image = Image.open(image_paths[idx]) label = Image.open(label_paths[idx]) image = np.array(image) ground_truth_seg = np.array(label) # Display. 
fig, axes = plt.subplots() axes.imshow(image) show_mask(ground_truth_seg, axes) axes.title.set_text(f"Ground truth mask") axes.axis("off") plt.show() tf.shape(ground_truth_seg)<jupyter_output><empty_output><jupyter_text>Preparing `tf.data.Dataset`We now write a generator class to prepare the images and the segmentation masks using the`processor` utilized above. We will leverage this generator class to create a`tf.data.Dataset` object for our training set by using`tf.data.Dataset.from_generator()`. Utilities of this class have been adapted from[this notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SAM/Fine_tune_SAM_(segment_anything)_on_a_custom_dataset.ipynb).The generator is responsible for yielding the preprocessed images and the segmentationmasks, and some other metadata needed by the SAM model.<jupyter_code>class Generator: """Generator class for processing the images and the masks for SAM fine-tuning.""" def __init__(self, dataset_path, processor): self.dataset_path = dataset_path self.image_paths = sorted( glob.glob(os.path.join(self.dataset_path, "images/*.png")) ) self.label_paths = sorted( glob.glob(os.path.join(self.dataset_path, "labels/*.png")) ) self.processor = processor def __call__(self): for image_path, label_path in zip(self.image_paths, self.label_paths): image = np.array(Image.open(image_path)) ground_truth_mask = np.array(Image.open(label_path)) # get bounding box prompt prompt = self.get_bounding_box(ground_truth_mask) # prepare image and prompt for the model inputs = self.processor(image, input_boxes=[[prompt]], return_tensors="np") # remove batch dimension which the processor adds by default inputs = {k: v.squeeze(0) for k, v in inputs.items()} # add ground truth segmentation inputs["ground_truth_mask"] = ground_truth_mask yield inputs def get_bounding_box(self, ground_truth_map): # get bounding box from mask y_indices, x_indices = np.where(ground_truth_map > 0) x_min, x_max = np.min(x_indices), np.max(x_indices) y_min, y_max = np.min(y_indices), np.max(y_indices) # add perturbation to bounding box coordinates H, W = ground_truth_map.shape x_min = max(0, x_min - np.random.randint(0, 20)) x_max = min(W, x_max + np.random.randint(0, 20)) y_min = max(0, y_min - np.random.randint(0, 20)) y_max = min(H, y_max + np.random.randint(0, 20)) bbox = [x_min, y_min, x_max, y_max] return bbox<jupyter_output><empty_output><jupyter_text>`get_bounding_box()` is responsible for turning the ground-truth segmentation maps intobounding boxes. These bounding boxes are fed to SAM as prompts (along with the originalimages) during fine-tuning and SAM is then trained to predict valid masks.The advantage of this first creating a generator and then using it to create a`tf.data.Dataset` is the flexbility. Sometimes, we may need to use utitlities from otherlibraries ([`albumentations`](https://albumentations.ai/), for example) which may notcome in native TensorFlow implementations. By using this workflow, we can easilyaccommodate such use case.But the non-TF counterparts might introduce performance bottlenecks, though. However, forour example, it should work just fine.Now, we prepare the `tf.data.Dataset` from our training set.<jupyter_code># Define the output signature of the generator class. 
output_signature = { "pixel_values": tf.TensorSpec(shape=(3, None, None), dtype=tf.float32), "original_sizes": tf.TensorSpec(shape=(None,), dtype=tf.int64), "reshaped_input_sizes": tf.TensorSpec(shape=(None,), dtype=tf.int64), "input_boxes": tf.TensorSpec(shape=(None, None), dtype=tf.float64), "ground_truth_mask": tf.TensorSpec(shape=(None, None), dtype=tf.int32), } # Prepare the dataset object. train_dataset_gen = Generator(dataset_path, processor) train_ds = tf.data.Dataset.from_generator( train_dataset_gen, output_signature=output_signature )<jupyter_output><empty_output><jupyter_text>Next, we configure the dataset for performance.<jupyter_code>auto = tf.data.AUTOTUNE batch_size = 2 shuffle_buffer = 4 train_ds = ( train_ds.cache() .shuffle(shuffle_buffer) .batch(batch_size) .prefetch(buffer_size=auto) )<jupyter_output><empty_output><jupyter_text>Take a single batch of data and inspect the shapes of the elements present inside of it.<jupyter_code>sample = next(iter(train_ds)) for k in sample: print(k, sample[k].shape, sample[k].dtype, isinstance(sample[k], tf.Tensor))<jupyter_output><empty_output><jupyter_text>Training We will now write DICE loss. This implementation is based on[MONAI DICE loss](https://docs.monai.io/en/stable/_modules/monai/losses/dice.htmlDiceLoss).<jupyter_code>def dice_loss(y_true, y_pred, smooth=1e-5): y_pred = tf.sigmoid(y_pred) reduce_axis = list(range(2, len(y_pred.shape))) if batch_size > 1: # reducing spatial dimensions and batch reduce_axis = [0] + reduce_axis intersection = tf.reduce_sum(y_true * y_pred, axis=reduce_axis) y_true_sq = tf.math.pow(y_true, 2) y_pred_sq = tf.math.pow(y_pred, 2) ground_o = tf.reduce_sum(y_true_sq, axis=reduce_axis) pred_o = tf.reduce_sum(y_pred_sq, axis=reduce_axis) denominator = ground_o + pred_o # calculate DICE coefficient loss = 1.0 - (2.0 * intersection + 1e-5) / (denominator + 1e-5) loss = tf.reduce_mean(loss) return loss<jupyter_output><empty_output><jupyter_text>Fine-tuning SAMWe will now fine-tune SAM's decoder part. We will freeze the vision encoder and promptencoder layers.<jupyter_code># initialize SAM model and optimizer sam = TFSamModel.from_pretrained("facebook/sam-vit-base") optimizer = keras.optimizers.Adam(1e-5) for layer in sam.layers: if layer.name in ["vision_encoder", "prompt_encoder"]: layer.trainable = False @tf.function def train_step(inputs): with tf.GradientTape() as tape: # pass inputs to SAM model outputs = sam( pixel_values=inputs["pixel_values"], input_boxes=inputs["input_boxes"], multimask_output=False, training=True, ) predicted_masks = tf.squeeze(outputs.pred_masks, 1) ground_truth_masks = tf.cast(inputs["ground_truth_mask"], tf.float32) # calculate loss over predicted and ground truth masks loss = dice_loss(tf.expand_dims(ground_truth_masks, 1), predicted_masks) # update trainable variables trainable_vars = sam.trainable_variables grads = tape.gradient(loss, trainable_vars) optimizer.apply_gradients(zip(grads, trainable_vars)) return loss<jupyter_output><empty_output><jupyter_text>We can now run the training for three epochs. We might have a warning about gradientsnot existing on IoU prediction head of mask decoder, we can safely ignore that.<jupyter_code># run training for epoch in range(3): for inputs in train_ds: loss = train_step(inputs) print(f"Epoch {epoch + 1}: Loss = {loss}")<jupyter_output><empty_output><jupyter_text>Serialize the model We serialized the model and pushed for you below. 
The `push_to_hub` method serializes the model, generates a model card, and pushes it to the Hugging Face Hub, so that other people can load the model using the `from_pretrained` method to run inference or fine-tune it further. We also need to push the same preprocessor to the repository. Find the model and the preprocessor [here](https://huggingface.co/merve/sam-finetuned).<jupyter_code># sam.push_to_hub("merve/sam-finetuned") # processor.push_to_hub("merve/sam-finetuned")<jupyter_output><empty_output><jupyter_text>We can now run inference with the model.<jupyter_code># Load another image for inference. idx = 20 raw_image_inference = Image.open(image_paths[idx]) # process the image and infer preprocessed_img = processor(raw_image_inference) outputs = sam(preprocessed_img)<jupyter_output><empty_output><jupyter_text>Lastly, we can visualize the results.<jupyter_code>infer_masks = outputs["pred_masks"] iou_scores = outputs["iou_scores"] show_masks_on_image(raw_image_inference, masks=infer_masks, scores=iou_scores)<jupyter_output><empty_output>
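As a quick sanity check of the `dice_loss` defined earlier (this snippet is not part of the original notebook and assumes the cells above have been run), identical masks should give a loss near 0 and disjoint masks a loss near 1. Because `dice_loss` applies a sigmoid to the predictions, we feed large positive/negative logits rather than raw 0/1 masks:

```python
import tensorflow as tf

y_true = tf.constant([[[[1.0, 1.0], [0.0, 0.0]]]])   # shape: (batch, 1, H, W)
perfect_logits = (y_true * 2.0 - 1.0) * 20.0         # sigmoid(logits) ~ y_true
disjoint_logits = -perfect_logits                    # sigmoid(logits) ~ 1 - y_true

print(dice_loss(y_true, perfect_logits))   # close to 0
print(dice_loss(y_true, disjoint_logits))  # close to 1
```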
keras-io/examples/vision/ipynb/sam.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/sam.ipynb", "repo_id": "keras-io", "token_count": 7123 }
121
<jupyter_start><jupyter_text>Train a Vision Transformer on small datasets**Author:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498)**Date created:** 2022/01/07**Last modified:** 2022/01/10**Description:** Training a ViT from scratch on smaller datasets with shifted patch tokenization and locality self-attention. IntroductionIn the academic paper[An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929),the authors mention that Vision Transformers (ViT) are data-hungry. Therefore,pretraining a ViT on a large-sized dataset like JFT300M and fine-tuningit on medium-sized datasets (like ImageNet) is the only way to beatstate-of-the-art Convolutional Neural Network models.The self-attention layer of ViT lacks **locality inductive bias** (the notion thatimage pixels are locally correlated and that their correlation maps are translation-invariant).This is the reason why ViTs need more data. On the other hand, CNNs look at images throughspatial sliding windows, which helps them get better results with smaller datasets.In the academic paper[Vision Transformer for Small-Size Datasets](https://arxiv.org/abs/2112.13492v1),the authors set out to tackle the problem of locality inductive bias in ViTs.The main ideas are:- **Shifted Patch Tokenization**- **Locality Self Attention**This example implements the ideas of the paper. A large part of thisexample is inspired from[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/)._Note_: This example requires TensorFlow 2.6 or higher, as well as[TensorFlow Addons](https://www.tensorflow.org/addons), which can beinstalled using the following command:```pythonpip install -qq -U tensorflow-addons``` Setup<jupyter_code>import math import numpy as np import tensorflow as tf from tensorflow import keras import tensorflow_addons as tfa import matplotlib.pyplot as plt from tensorflow.keras import layers # Setting seed for reproducibiltiy SEED = 42 keras.utils.set_random_seed(SEED)<jupyter_output><empty_output><jupyter_text>Prepare the data<jupyter_code>NUM_CLASSES = 100 INPUT_SHAPE = (32, 32, 3) (x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data() print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}") print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")<jupyter_output><empty_output><jupyter_text>Configure the hyperparametersThe hyperparameters are different from the paper. Feel free to tunethe hyperparameters yourself.<jupyter_code># DATA BUFFER_SIZE = 512 BATCH_SIZE = 256 # AUGMENTATION IMAGE_SIZE = 72 PATCH_SIZE = 6 NUM_PATCHES = (IMAGE_SIZE // PATCH_SIZE) ** 2 # OPTIMIZER LEARNING_RATE = 0.001 WEIGHT_DECAY = 0.0001 # TRAINING EPOCHS = 50 # ARCHITECTURE LAYER_NORM_EPS = 1e-6 TRANSFORMER_LAYERS = 8 PROJECTION_DIM = 64 NUM_HEADS = 4 TRANSFORMER_UNITS = [ PROJECTION_DIM * 2, PROJECTION_DIM, ] MLP_HEAD_UNITS = [2048, 1024]<jupyter_output><empty_output><jupyter_text>Use data augmentationA snippet from the paper:*"According to DeiT, various techniques are required to effectivelytrain ViTs. Thus, we applied data augmentations such as CutMix, Mixup,Auto Augment, Repeated Augment to all models."*In this example, we will focus solely on the novelty of the approachand not on reproducing the paper results. For this reason, wedon't use the mentioned data augmentation schemes. 
Please feelfree to add to or remove from the augmentation pipeline.<jupyter_code>data_augmentation = keras.Sequential( [ layers.Normalization(), layers.Resizing(IMAGE_SIZE, IMAGE_SIZE), layers.RandomFlip("horizontal"), layers.RandomRotation(factor=0.02), layers.RandomZoom(height_factor=0.2, width_factor=0.2), ], name="data_augmentation", ) # Compute the mean and the variance of the training data for normalization. data_augmentation.layers[0].adapt(x_train)<jupyter_output><empty_output><jupyter_text>Implement Shifted Patch TokenizationIn a ViT pipeline, the input images are divided into patches that arethen linearly projected into tokens. Shifted patch tokenization (STP)is introduced to combat the low receptive field of ViTs. The stepsfor Shifted Patch Tokenization are as follows:- Start with an image.- Shift the image in diagonal directions.- Concat the diagonally shifted images with the original image.- Extract patches of the concatenated images.- Flatten the spatial dimension of all patches.- Layer normalize the flattened patches and then project it.| || :--: || Shifted Patch Tokenization [Source](https://arxiv.org/abs/2112.13492v1) |<jupyter_code>class ShiftedPatchTokenization(layers.Layer): def __init__( self, image_size=IMAGE_SIZE, patch_size=PATCH_SIZE, num_patches=NUM_PATCHES, projection_dim=PROJECTION_DIM, vanilla=False, **kwargs, ): super().__init__(**kwargs) self.vanilla = vanilla # Flag to swtich to vanilla patch extractor self.image_size = image_size self.patch_size = patch_size self.half_patch = patch_size // 2 self.flatten_patches = layers.Reshape((num_patches, -1)) self.projection = layers.Dense(units=projection_dim) self.layer_norm = layers.LayerNormalization(epsilon=LAYER_NORM_EPS) def crop_shift_pad(self, images, mode): # Build the diagonally shifted images if mode == "left-up": crop_height = self.half_patch crop_width = self.half_patch shift_height = 0 shift_width = 0 elif mode == "left-down": crop_height = 0 crop_width = self.half_patch shift_height = self.half_patch shift_width = 0 elif mode == "right-up": crop_height = self.half_patch crop_width = 0 shift_height = 0 shift_width = self.half_patch else: crop_height = 0 crop_width = 0 shift_height = self.half_patch shift_width = self.half_patch # Crop the shifted images and pad them crop = tf.image.crop_to_bounding_box( images, offset_height=crop_height, offset_width=crop_width, target_height=self.image_size - self.half_patch, target_width=self.image_size - self.half_patch, ) shift_pad = tf.image.pad_to_bounding_box( crop, offset_height=shift_height, offset_width=shift_width, target_height=self.image_size, target_width=self.image_size, ) return shift_pad def call(self, images): if not self.vanilla: # Concat the shifted images with the original image images = tf.concat( [ images, self.crop_shift_pad(images, mode="left-up"), self.crop_shift_pad(images, mode="left-down"), self.crop_shift_pad(images, mode="right-up"), self.crop_shift_pad(images, mode="right-down"), ], axis=-1, ) # Patchify the images and flatten it patches = tf.image.extract_patches( images=images, sizes=[1, self.patch_size, self.patch_size, 1], strides=[1, self.patch_size, self.patch_size, 1], rates=[1, 1, 1, 1], padding="VALID", ) flat_patches = self.flatten_patches(patches) if not self.vanilla: # Layer normalize the flat patches and linearly project it tokens = self.layer_norm(flat_patches) tokens = self.projection(tokens) else: # Linearly project the flat patches tokens = self.projection(flat_patches) return (tokens, 
patches)<jupyter_output><empty_output><jupyter_text>Visualize the patches<jupyter_code># Get a random image from the training dataset # and resize the image image = x_train[np.random.choice(range(x_train.shape[0]))] resized_image = tf.image.resize( tf.convert_to_tensor([image]), size=(IMAGE_SIZE, IMAGE_SIZE) ) # Vanilla patch maker: This takes an image and divides into # patches as in the original ViT paper (token, patch) = ShiftedPatchTokenization(vanilla=True)(resized_image / 255.0) (token, patch) = (token[0], patch[0]) n = patch.shape[0] count = 1 plt.figure(figsize=(4, 4)) for row in range(n): for col in range(n): plt.subplot(n, n, count) count = count + 1 image = tf.reshape(patch[row][col], (PATCH_SIZE, PATCH_SIZE, 3)) plt.imshow(image) plt.axis("off") plt.show() # Shifted Patch Tokenization: This layer takes the image, shifts it # diagonally and then extracts patches from the concatinated images (token, patch) = ShiftedPatchTokenization(vanilla=False)(resized_image / 255.0) (token, patch) = (token[0], patch[0]) n = patch.shape[0] shifted_images = ["ORIGINAL", "LEFT-UP", "LEFT-DOWN", "RIGHT-UP", "RIGHT-DOWN"] for index, name in enumerate(shifted_images): print(name) count = 1 plt.figure(figsize=(4, 4)) for row in range(n): for col in range(n): plt.subplot(n, n, count) count = count + 1 image = tf.reshape(patch[row][col], (PATCH_SIZE, PATCH_SIZE, 5 * 3)) plt.imshow(image[..., 3 * index : 3 * index + 3]) plt.axis("off") plt.show()<jupyter_output><empty_output><jupyter_text>Implement the patch encoding layerThis layer accepts projected patches and then adds positionalinformation to them.<jupyter_code>class PatchEncoder(layers.Layer): def __init__( self, num_patches=NUM_PATCHES, projection_dim=PROJECTION_DIM, **kwargs ): super().__init__(**kwargs) self.num_patches = num_patches self.position_embedding = layers.Embedding( input_dim=num_patches, output_dim=projection_dim ) self.positions = tf.range(start=0, limit=self.num_patches, delta=1) def call(self, encoded_patches): encoded_positions = self.position_embedding(self.positions) encoded_patches = encoded_patches + encoded_positions return encoded_patches<jupyter_output><empty_output><jupyter_text>Implement Locality Self AttentionThe regular attention equation is stated below.| || :--: || [Source](https://towardsdatascience.com/attention-is-all-you-need-discovering-the-transformer-paper-73e5ff5e0634) |The attention module takes a query, key, and value. First, we compute thesimilarity between the query and key via a dot product. Then, the resultis scaled by the square root of the key dimension. The scaling preventsthe softmax function from having an overly small gradient. Softmax is thenapplied to the scaled dot product to produce the attention weights.The value is then modulated via the attention weights.In self-attention, query, key and value come from the same input.The dot product would result in large self-token relations rather thaninter-token relations. 
This also means that the softmax gives higherprobabilities to self-token relations than the inter-token relations.To combat this, the authors propose masking the diagonal of the dot product.This way, we force the attention module to pay more attention to theinter-token relations.The scaling factor is a constant in the regular attention module.This acts like a temperature term that can modulate the softmax function.The authors suggest a learnable temperature term instead of a constant.| || :--: || Locality Self Attention [Source](https://arxiv.org/abs/2112.13492v1) |The above two pointers make the Locality Self Attention. We have subclassed the[`layers.MultiHeadAttention`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention)and implemented the trainable temperature. The attention mask is builtat a later stage.<jupyter_code>class MultiHeadAttentionLSA(tf.keras.layers.MultiHeadAttention): def __init__(self, **kwargs): super().__init__(**kwargs) # The trainable temperature term. The initial value is # the square root of the key dimension. self.tau = tf.Variable(math.sqrt(float(self._key_dim)), trainable=True) def _compute_attention(self, query, key, value, attention_mask=None, training=None): query = tf.multiply(query, 1.0 / self.tau) attention_scores = tf.einsum(self._dot_product_equation, key, query) attention_scores = self._masked_softmax(attention_scores, attention_mask) attention_scores_dropout = self._dropout_layer( attention_scores, training=training ) attention_output = tf.einsum( self._combine_equation, attention_scores_dropout, value ) return attention_output, attention_scores<jupyter_output><empty_output><jupyter_text>Implement the MLP<jupyter_code>def mlp(x, hidden_units, dropout_rate): for units in hidden_units: x = layers.Dense(units, activation=tf.nn.gelu)(x) x = layers.Dropout(dropout_rate)(x) return x # Build the diagonal attention mask diag_attn_mask = 1 - tf.eye(NUM_PATCHES) diag_attn_mask = tf.cast([diag_attn_mask], dtype=tf.int8)<jupyter_output><empty_output><jupyter_text>Build the ViT<jupyter_code>def create_vit_classifier(vanilla=False): inputs = layers.Input(shape=INPUT_SHAPE) # Augment data. augmented = data_augmentation(inputs) # Create patches. (tokens, _) = ShiftedPatchTokenization(vanilla=vanilla)(augmented) # Encode patches. encoded_patches = PatchEncoder()(tokens) # Create multiple layers of the Transformer block. for _ in range(TRANSFORMER_LAYERS): # Layer normalization 1. x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) # Create a multi-head attention layer. if not vanilla: attention_output = MultiHeadAttentionLSA( num_heads=NUM_HEADS, key_dim=PROJECTION_DIM, dropout=0.1 )(x1, x1, attention_mask=diag_attn_mask) else: attention_output = layers.MultiHeadAttention( num_heads=NUM_HEADS, key_dim=PROJECTION_DIM, dropout=0.1 )(x1, x1) # Skip connection 1. x2 = layers.Add()([attention_output, encoded_patches]) # Layer normalization 2. x3 = layers.LayerNormalization(epsilon=1e-6)(x2) # MLP. x3 = mlp(x3, hidden_units=TRANSFORMER_UNITS, dropout_rate=0.1) # Skip connection 2. encoded_patches = layers.Add()([x3, x2]) # Create a [batch_size, projection_dim] tensor. representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) representation = layers.Flatten()(representation) representation = layers.Dropout(0.5)(representation) # Add MLP. features = mlp(representation, hidden_units=MLP_HEAD_UNITS, dropout_rate=0.5) # Classify outputs. logits = layers.Dense(NUM_CLASSES)(features) # Create the Keras model. 
model = keras.Model(inputs=inputs, outputs=logits) return model<jupyter_output><empty_output><jupyter_text>Compile, train, and evaluate the mode<jupyter_code># Some code is taken from: # https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2. class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule): def __init__( self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps ): super().__init__() self.learning_rate_base = learning_rate_base self.total_steps = total_steps self.warmup_learning_rate = warmup_learning_rate self.warmup_steps = warmup_steps self.pi = tf.constant(np.pi) def __call__(self, step): if self.total_steps < self.warmup_steps: raise ValueError("Total_steps must be larger or equal to warmup_steps.") cos_annealed_lr = tf.cos( self.pi * (tf.cast(step, tf.float32) - self.warmup_steps) / float(self.total_steps - self.warmup_steps) ) learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr) if self.warmup_steps > 0: if self.learning_rate_base < self.warmup_learning_rate: raise ValueError( "Learning_rate_base must be larger or equal to " "warmup_learning_rate." ) slope = ( self.learning_rate_base - self.warmup_learning_rate ) / self.warmup_steps warmup_rate = slope * tf.cast(step, tf.float32) + self.warmup_learning_rate learning_rate = tf.where( step < self.warmup_steps, warmup_rate, learning_rate ) return tf.where( step > self.total_steps, 0.0, learning_rate, name="learning_rate" ) def run_experiment(model): total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS) warmup_epoch_percentage = 0.10 warmup_steps = int(total_steps * warmup_epoch_percentage) scheduled_lrs = WarmUpCosine( learning_rate_base=LEARNING_RATE, total_steps=total_steps, warmup_learning_rate=0.0, warmup_steps=warmup_steps, ) optimizer = tfa.optimizers.AdamW( learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY ) model.compile( optimizer=optimizer, loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[ keras.metrics.SparseCategoricalAccuracy(name="accuracy"), keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"), ], ) history = model.fit( x=x_train, y=y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_split=0.1, ) _, accuracy, top_5_accuracy = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE) print(f"Test accuracy: {round(accuracy * 100, 2)}%") print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%") return history # Run experiments with the vanilla ViT vit = create_vit_classifier(vanilla=True) history = run_experiment(vit) # Run experiments with the Shifted Patch Tokenization and # Locality Self Attention modified ViT vit_sl = create_vit_classifier(vanilla=False) history = run_experiment(vit_sl)<jupyter_output><empty_output>
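<jupyter_text>As an optional sanity check (an extra cell, not part of the original training workflow), the `WarmUpCosine` schedule defined above can be visualized by evaluating it over the training steps. The cell only reuses objects already defined in this notebook (`x_train`, `BATCH_SIZE`, `EPOCHS`, `LEARNING_RATE`, `WarmUpCosine`). Note that `run_experiment` above builds `scheduled_lrs` but passes the constant `LEARNING_RATE` to `tfa.optimizers.AdamW`; to actually train with the warmup-cosine behavior, pass `scheduled_lrs` as the optimizer's `learning_rate`.<jupyter_code>total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS)
warmup_steps = int(total_steps * 0.10)
lr_schedule = WarmUpCosine(
    learning_rate_base=LEARNING_RATE,
    total_steps=total_steps,
    warmup_learning_rate=0.0,
    warmup_steps=warmup_steps,
)

# Evaluate the schedule at every training step: a linear warmup followed by
# a cosine decay down to zero.
lrs = [lr_schedule(step).numpy() for step in range(total_steps)]
plt.plot(lrs)
plt.xlabel("Step")
plt.ylabel("Learning rate")
plt.show()<jupyter_output><empty_output>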
keras-io/examples/vision/ipynb/vit_small_ds.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/vit_small_ds.ipynb", "repo_id": "keras-io", "token_count": 7337 }
122
# Class Attention Image Transformers with LayerScale **Author:** [Sayak Paul](https://twitter.com/RisingSayak)<br> **Date created:** 2022/09/19<br> **Last modified:** 2022/11/21<br> **Description:** Implementing an image transformer equipped with Class Attention and LayerScale. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/cait.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/cait.py) --- ## Introduction In this tutorial, we implement the CaiT (Class-Attention in Image Transformers) proposed in [Going deeper with Image Transformers](https://arxiv.org/abs/2103.17239) by Touvron et al. Depth scaling, i.e. increasing the model depth for obtaining better performance and generalization has been quite successful for convolutional neural networks ([Tan et al.](https://arxiv.org/abs/1905.11946), [Dollár et al.](https://arxiv.org/abs/2103.06877), for example). But applying the same model scaling principles to Vision Transformers ([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)) doesn't translate equally well -- their performance gets saturated quickly with depth scaling. Note that one assumption here is that the underlying pre-training dataset is always kept fixed when performing model scaling. In the CaiT paper, the authors investigate this phenomenon and propose modifications to the vanilla ViT (Vision Transformers) architecture to mitigate this problem. The tutorial is structured like so: * Implementation of the individual blocks of CaiT * Collating all the blocks to create the CaiT model * Loading a pre-trained CaiT model * Obtaining prediction results * Visualization of the different attention layers of CaiT The readers are assumed to be familiar with Vision Transformers already. Here is an implementation of Vision Transformers in Keras: [Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/). --- ## Imports ```python import os os.environ["KERAS_BACKEND"] = "tensorflow" import io import typing from urllib.request import urlopen import matplotlib.pyplot as plt import numpy as np import PIL import keras from keras import layers from keras import ops ``` --- ## The LayerScale layer We begin by implementing a **LayerScale** layer which is one of the two modifications proposed in the CaiT paper. When increasing the depth of the ViT models, they meet with optimization instability and eventually don't converge. The residual connections within each Transformer block introduce information bottleneck. When there is an increased amount of depth, this bottleneck can quickly explode and deviate the optimization pathway for the underlying model. The following equations denote where residual connections are added within a Transformer block: <div align="center"> <img src="https://i.ibb.co/jWV5bFb/image.png"/> </div> where, **SA** stands for self-attention, **FFN** stands for feed-forward network, and **eta** denotes the LayerNorm operator ([Ba et al.](https://arxiv.org/abs/1607.06450)). LayerScale is formally implemented like so: <div align="center"> <img src="https://i.ibb.co/VYDWNn9/image.png"/> </div> where, the lambdas are learnable parameters and are initialized with a very small value ({0.1, 1e-5, 1e-6}). 
**diag** represents a diagonal matrix. Intuitively, LayerScale helps control the contribution of the residual branches. The learnable parameters of LayerScale are initialized to a small value to let the branches act like identity functions and then let them figure out the degrees of interactions during the training. The diagonal matrix additionally helps control the contributions of the individual dimensions of the residual inputs as it is applied on a per-channel basis. The practical implementation of LayerScale is simpler than it might sound. ```python class LayerScale(layers.Layer): """LayerScale as introduced in CaiT: https://arxiv.org/abs/2103.17239. Args: init_values (float): value to initialize the diagonal matrix of LayerScale. projection_dim (int): projection dimension used in LayerScale. """ def __init__(self, init_values: float, projection_dim: int, **kwargs): super().__init__(**kwargs) self.gamma = self.add_weight( shape=(projection_dim,), initializer=keras.initializers.Constant(init_values), ) def call(self, x, training=False): return x * self.gamma ``` --- ## Stochastic depth layer Since its introduction ([Huang et al.](https://arxiv.org/abs/1603.09382)), Stochastic Depth has become a favorite component in almost all modern neural network architectures. CaiT is no exception. Discussing Stochastic Depth is out of scope for this notebook. You can refer to [this resource](https://paperswithcode.com/method/stochastic-depth) in case you need a refresher. ```python class StochasticDepth(layers.Layer): """Stochastic Depth layer (https://arxiv.org/abs/1603.09382). Reference: https://github.com/rwightman/pytorch-image-models """ def __init__(self, drop_prob: float, **kwargs): super().__init__(**kwargs) self.drop_prob = drop_prob self.seed_generator = keras.random.SeedGenerator(1337) def call(self, x, training=False): if training: keep_prob = 1 - self.drop_prob shape = (ops.shape(x)[0],) + (1,) * (len(x.shape) - 1) random_tensor = keep_prob + ops.random.uniform( shape, minval=0, maxval=1, seed=self.seed_generator ) random_tensor = ops.floor(random_tensor) return (x / keep_prob) * random_tensor return x ``` --- ## Class attention The vanilla ViT uses self-attention (SA) layers for modelling how the image patches and the _learnable_ CLS token interact with each other. The CaiT authors propose to decouple the attention layers responsible for attending to the image patches and the CLS tokens. When using ViTs for any discriminative tasks (classification, for example), we usually take the representations belonging to the CLS token and then pass them to the task-specific heads. This is as opposed to using something like global average pooling as is typically done in convolutional neural networks. The interactions between the CLS token and other image patches are processed uniformly through self-attention layers. As the CaiT authors point out, this setup has got an entangled effect. On one hand, the self-attention layers are responsible for modelling the image patches. On the other hand, they're also responsible for summarizing the modelled information via the CLS token so that it's useful for the learning objective. To help disentangle these two things, the authors propose to: * Introduce the CLS token at a later stage in the network. * Model the interaction between the CLS token and the representations related to the image patches through a separate set of attention layers. The authors call this **Class Attention** (CA). 
The figure below (taken from the original paper) depicts this idea: <div align="center"> <img src="https://i.imgur.com/cxeooHr.png"/ width=350> </div> This is achieved by treating the CLS token embeddings as the queries in the CA layers. CLS token embeddings and the image patch embeddings are fed as keys as well values. **Note** that "embeddings" and "representations" have been used interchangeably here. ```python class ClassAttention(layers.Layer): """Class attention as proposed in CaiT: https://arxiv.org/abs/2103.17239. Args: projection_dim (int): projection dimension for the query, key, and value of attention. num_heads (int): number of attention heads. dropout_rate (float): dropout rate to be used for dropout in the attention scores as well as the final projected outputs. """ def __init__( self, projection_dim: int, num_heads: int, dropout_rate: float, **kwargs ): super().__init__(**kwargs) self.num_heads = num_heads head_dim = projection_dim // num_heads self.scale = head_dim**-0.5 self.q = layers.Dense(projection_dim) self.k = layers.Dense(projection_dim) self.v = layers.Dense(projection_dim) self.attn_drop = layers.Dropout(dropout_rate) self.proj = layers.Dense(projection_dim) self.proj_drop = layers.Dropout(dropout_rate) def call(self, x, training=False): batch_size, num_patches, num_channels = ( ops.shape(x)[0], ops.shape(x)[1], ops.shape(x)[2], ) # Query projection. `cls_token` embeddings are queries. q = ops.expand_dims(self.q(x[:, 0]), axis=1) q = ops.reshape( q, (batch_size, 1, self.num_heads, num_channels // self.num_heads) ) # Shape: (batch_size, 1, num_heads, dimension_per_head) q = ops.transpose(q, axes=[0, 2, 1, 3]) scale = ops.cast(self.scale, dtype=q.dtype) q = q * scale # Key projection. Patch embeddings as well the cls embedding are used as keys. k = self.k(x) k = ops.reshape( k, (batch_size, num_patches, self.num_heads, num_channels // self.num_heads) ) # Shape: (batch_size, num_tokens, num_heads, dimension_per_head) k = ops.transpose(k, axes=[0, 2, 3, 1]) # Value projection. Patch embeddings as well the cls embedding are used as values. v = self.v(x) v = ops.reshape( v, (batch_size, num_patches, self.num_heads, num_channels // self.num_heads) ) v = ops.transpose(v, axes=[0, 2, 1, 3]) # Calculate attention scores between cls_token embedding and patch embeddings. attn = ops.matmul(q, k) attn = ops.nn.softmax(attn, axis=-1) attn = self.attn_drop(attn, training=training) x_cls = ops.matmul(attn, v) x_cls = ops.transpose(x_cls, axes=[0, 2, 1, 3]) x_cls = ops.reshape(x_cls, (batch_size, 1, num_channels)) x_cls = self.proj(x_cls) x_cls = self.proj_drop(x_cls, training=training) return x_cls, attn ``` --- ## Talking Head Attention The CaiT authors use the Talking Head attention ([Shazeer et al.](https://arxiv.org/abs/2003.02436)) instead of the vanilla scaled dot-product multi-head attention used in the original Transformer paper ([Vaswani et al.](https://papers.nips.cc/paper/7181-attention-is-all-you-need)). They introduce two linear projections before and after the softmax operations for obtaining better results. For a more rigorous treatment of the Talking Head attention and the vanilla attention mechanisms, please refer to their respective papers (linked above). ```python class TalkingHeadAttention(layers.Layer): """Talking-head attention as proposed in CaiT: https://arxiv.org/abs/2003.02436. Args: projection_dim (int): projection dimension for the query, key, and value of attention. num_heads (int): number of attention heads. 
dropout_rate (float): dropout rate to be used for dropout in the attention scores as well as the final projected outputs. """ def __init__( self, projection_dim: int, num_heads: int, dropout_rate: float, **kwargs ): super().__init__(**kwargs) self.num_heads = num_heads head_dim = projection_dim // self.num_heads self.scale = head_dim**-0.5 self.qkv = layers.Dense(projection_dim * 3) self.attn_drop = layers.Dropout(dropout_rate) self.proj = layers.Dense(projection_dim) self.proj_l = layers.Dense(self.num_heads) self.proj_w = layers.Dense(self.num_heads) self.proj_drop = layers.Dropout(dropout_rate) def call(self, x, training=False): B, N, C = ops.shape(x)[0], ops.shape(x)[1], ops.shape(x)[2] # Project the inputs all at once. qkv = self.qkv(x) # Reshape the projected output so that they're segregated in terms of # query, key, and value projections. qkv = ops.reshape(qkv, (B, N, 3, self.num_heads, C // self.num_heads)) # Transpose so that the `num_heads` becomes the leading dimensions. # Helps to better segregate the representation sub-spaces. qkv = ops.transpose(qkv, axes=[2, 0, 3, 1, 4]) scale = ops.cast(self.scale, dtype=qkv.dtype) q, k, v = qkv[0] * scale, qkv[1], qkv[2] # Obtain the raw attention scores. attn = ops.matmul(q, ops.transpose(k, axes=[0, 1, 3, 2])) # Linear projection of the similarities between the query and key projections. attn = self.proj_l(ops.transpose(attn, axes=[0, 2, 3, 1])) # Normalize the attention scores. attn = ops.transpose(attn, axes=[0, 3, 1, 2]) attn = ops.nn.softmax(attn, axis=-1) # Linear projection on the softmaxed scores. attn = self.proj_w(ops.transpose(attn, axes=[0, 2, 3, 1])) attn = ops.transpose(attn, axes=[0, 3, 1, 2]) attn = self.attn_drop(attn, training=training) # Final set of projections as done in the vanilla attention mechanism. x = ops.matmul(attn, v) x = ops.transpose(x, axes=[0, 2, 1, 3]) x = ops.reshape(x, (B, N, C)) x = self.proj(x) x = self.proj_drop(x, training=training) return x, attn ``` --- ## Feed-forward Network Next, we implement the feed-forward network which is one of the components within a Transformer block. ```python def mlp(x, dropout_rate: float, hidden_units: typing.List[int]): """FFN for a Transformer block.""" for idx, units in enumerate(hidden_units): x = layers.Dense( units, activation=ops.nn.gelu if idx == 0 else None, bias_initializer=keras.initializers.RandomNormal(stddev=1e-6), )(x) x = layers.Dropout(dropout_rate)(x) return x ``` --- ## Other blocks In the next two cells, we implement the remaining blocks as standalone functions: * `LayerScaleBlockClassAttention()` which returns a `keras.Model`. It is a Transformer block equipped with Class Attention, LayerScale, and Stochastic Depth. It operates on the CLS embeddings and the image patch embeddings. * `LayerScaleBlock()` which returns a `keras.model`. It is also a Transformer block that operates only on the embeddings of the image patches. It is equipped with LayerScale and Stochastic Depth. ```python def LayerScaleBlockClassAttention( projection_dim: int, num_heads: int, layer_norm_eps: float, init_values: float, mlp_units: typing.List[int], dropout_rate: float, sd_prob: float, name: str, ): """Pre-norm transformer block meant to be applied to the embeddings of the cls token and the embeddings of image patches. Includes LayerScale and Stochastic Depth. Args: projection_dim (int): projection dimension to be used in the Transformer blocks and patch projection layer. num_heads (int): number of attention heads. 
layer_norm_eps (float): epsilon to be used for Layer Normalization. init_values (float): initial value for the diagonal matrix used in LayerScale. mlp_units (List[int]): dimensions of the feed-forward network used in the Transformer blocks. dropout_rate (float): dropout rate to be used for dropout in the attention scores as well as the final projected outputs. sd_prob (float): stochastic depth rate. name (str): a name identifier for the block. Returns: A keras.Model instance. """ x = keras.Input((None, projection_dim)) x_cls = keras.Input((None, projection_dim)) inputs = keras.layers.Concatenate(axis=1)([x_cls, x]) # Class attention (CA). x1 = layers.LayerNormalization(epsilon=layer_norm_eps)(inputs) attn_output, attn_scores = ClassAttention(projection_dim, num_heads, dropout_rate)( x1 ) attn_output = ( LayerScale(init_values, projection_dim)(attn_output) if init_values else attn_output ) attn_output = StochasticDepth(sd_prob)(attn_output) if sd_prob else attn_output x2 = keras.layers.Add()([x_cls, attn_output]) # FFN. x3 = layers.LayerNormalization(epsilon=layer_norm_eps)(x2) x4 = mlp(x3, hidden_units=mlp_units, dropout_rate=dropout_rate) x4 = LayerScale(init_values, projection_dim)(x4) if init_values else x4 x4 = StochasticDepth(sd_prob)(x4) if sd_prob else x4 outputs = keras.layers.Add()([x2, x4]) return keras.Model([x, x_cls], [outputs, attn_scores], name=name) def LayerScaleBlock( projection_dim: int, num_heads: int, layer_norm_eps: float, init_values: float, mlp_units: typing.List[int], dropout_rate: float, sd_prob: float, name: str, ): """Pre-norm transformer block meant to be applied to the embeddings of the image patches. Includes LayerScale and Stochastic Depth. Args: projection_dim (int): projection dimension to be used in the Transformer blocks and patch projection layer. num_heads (int): number of attention heads. layer_norm_eps (float): epsilon to be used for Layer Normalization. init_values (float): initial value for the diagonal matrix used in LayerScale. mlp_units (List[int]): dimensions of the feed-forward network used in the Transformer blocks. dropout_rate (float): dropout rate to be used for dropout in the attention scores as well as the final projected outputs. sd_prob (float): stochastic depth rate. name (str): a name identifier for the block. Returns: A keras.Model instance. """ encoded_patches = keras.Input((None, projection_dim)) # Self-attention. x1 = layers.LayerNormalization(epsilon=layer_norm_eps)(encoded_patches) attn_output, attn_scores = TalkingHeadAttention( projection_dim, num_heads, dropout_rate )(x1) attn_output = ( LayerScale(init_values, projection_dim)(attn_output) if init_values else attn_output ) attn_output = StochasticDepth(sd_prob)(attn_output) if sd_prob else attn_output x2 = layers.Add()([encoded_patches, attn_output]) # FFN. x3 = layers.LayerNormalization(epsilon=layer_norm_eps)(x2) x4 = mlp(x3, hidden_units=mlp_units, dropout_rate=dropout_rate) x4 = LayerScale(init_values, projection_dim)(x4) if init_values else x4 x4 = StochasticDepth(sd_prob)(x4) if sd_prob else x4 outputs = layers.Add()([x2, x4]) return keras.Model(encoded_patches, [outputs, attn_scores], name=name) ``` Given all these blocks, we are now ready to collate them into the final CaiT model. --- ## Putting the pieces together: The CaiT model ```python class CaiT(keras.Model): """CaiT model. Args: projection_dim (int): projection dimension to be used in the Transformer blocks and patch projection layer. patch_size (int): patch size of the input images. 
num_patches (int): number of patches after extracting the image patches. init_values (float): initial value for the diagonal matrix used in LayerScale. mlp_units: (List[int]): dimensions of the feed-forward network used in the Transformer blocks. sa_ffn_layers (int): number of self-attention Transformer blocks. ca_ffn_layers (int): number of class-attention Transformer blocks. num_heads (int): number of attention heads. layer_norm_eps (float): epsilon to be used for Layer Normalization. dropout_rate (float): dropout rate to be used for dropout in the attention scores as well as the final projected outputs. sd_prob (float): stochastic depth rate. global_pool (str): denotes how to pool the representations coming out of the final Transformer block. pre_logits (bool): if set to True then don't add a classification head. num_classes (int): number of classes to construct the final classification layer with. """ def __init__( self, projection_dim: int, patch_size: int, num_patches: int, init_values: float, mlp_units: typing.List[int], sa_ffn_layers: int, ca_ffn_layers: int, num_heads: int, layer_norm_eps: float, dropout_rate: float, sd_prob: float, global_pool: str, pre_logits: bool, num_classes: int, **kwargs, ): if global_pool not in ["token", "avg"]: raise ValueError( 'Invalid value received for `global_pool`, should be either `"token"` or `"avg"`.' ) super().__init__(**kwargs) # Responsible for patchifying the input images and the linearly projecting them. self.projection = keras.Sequential( [ layers.Conv2D( filters=projection_dim, kernel_size=(patch_size, patch_size), strides=(patch_size, patch_size), padding="VALID", name="conv_projection", kernel_initializer="lecun_normal", ), layers.Reshape( target_shape=(-1, projection_dim), name="flatten_projection", ), ], name="projection", ) # CLS token and the positional embeddings. self.cls_token = self.add_weight( shape=(1, 1, projection_dim), initializer="zeros" ) self.pos_embed = self.add_weight( shape=(1, num_patches, projection_dim), initializer="zeros" ) # Projection dropout. self.pos_drop = layers.Dropout(dropout_rate, name="projection_dropout") # Stochastic depth schedule. dpr = [sd_prob for _ in range(sa_ffn_layers)] # Self-attention (SA) Transformer blocks operating only on the image patch # embeddings. self.blocks = [ LayerScaleBlock( projection_dim=projection_dim, num_heads=num_heads, layer_norm_eps=layer_norm_eps, init_values=init_values, mlp_units=mlp_units, dropout_rate=dropout_rate, sd_prob=dpr[i], name=f"sa_ffn_block_{i}", ) for i in range(sa_ffn_layers) ] # Class Attention (CA) Transformer blocks operating on the CLS token and image patch # embeddings. self.blocks_token_only = [ LayerScaleBlockClassAttention( projection_dim=projection_dim, num_heads=num_heads, layer_norm_eps=layer_norm_eps, init_values=init_values, mlp_units=mlp_units, dropout_rate=dropout_rate, name=f"ca_ffn_block_{i}", sd_prob=0.0, # No Stochastic Depth in the class attention layers. ) for i in range(ca_ffn_layers) ] # Pre-classification layer normalization. self.norm = layers.LayerNormalization(epsilon=layer_norm_eps, name="head_norm") # Representation pooling for classification head. self.global_pool = global_pool # Classification head. self.pre_logits = pre_logits self.num_classes = num_classes if not pre_logits: self.head = layers.Dense(num_classes, name="classification_head") def call(self, x, training=False): # Notice how CLS token is not added here. x = self.projection(x) x = x + self.pos_embed x = self.pos_drop(x) # SA+FFN layers. 
sa_ffn_attn = {} for blk in self.blocks: x, attn_scores = blk(x) sa_ffn_attn[f"{blk.name}_att"] = attn_scores # CA+FFN layers. ca_ffn_attn = {} cls_tokens = ops.tile(self.cls_token, (ops.shape(x)[0], 1, 1)) for blk in self.blocks_token_only: cls_tokens, attn_scores = blk([x, cls_tokens]) ca_ffn_attn[f"{blk.name}_att"] = attn_scores x = ops.concatenate([cls_tokens, x], axis=1) x = self.norm(x) # Always return the attention scores from the SA+FFN and CA+FFN layers # for convenience. if self.global_pool: x = ( ops.reduce_mean(x[:, 1:], axis=1) if self.global_pool == "avg" else x[:, 0] ) return ( (x, sa_ffn_attn, ca_ffn_attn) if self.pre_logits else (self.head(x), sa_ffn_attn, ca_ffn_attn) ) ``` Having the SA and CA layers segregated this way helps the model to focus on underlying objectives more concretely: * model dependencies in between the image patches * summarize the information from the image patches in a CLS token that can be used for the task at hand Now that we have defined the CaiT model, it's time to test it. We will start by defining a model configuration that will be passed to our `CaiT` class for initialization. --- ## Defining Model Configuration ```python def get_config( image_size: int = 224, patch_size: int = 16, projection_dim: int = 192, sa_ffn_layers: int = 24, ca_ffn_layers: int = 2, num_heads: int = 4, mlp_ratio: int = 4, layer_norm_eps=1e-6, init_values: float = 1e-5, dropout_rate: float = 0.0, sd_prob: float = 0.0, global_pool: str = "token", pre_logits: bool = False, num_classes: int = 1000, ) -> typing.Dict: """Default configuration for CaiT models (cait_xxs24_224). Reference: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cait.py """ config = {} # Patchification and projection. config["patch_size"] = patch_size config["num_patches"] = (image_size // patch_size) ** 2 # LayerScale. config["init_values"] = init_values # Dropout and Stochastic Depth. config["dropout_rate"] = dropout_rate config["sd_prob"] = sd_prob # Shared across different blocks and layers. config["layer_norm_eps"] = layer_norm_eps config["projection_dim"] = projection_dim config["mlp_units"] = [ projection_dim * mlp_ratio, projection_dim, ] # Attention layers. config["num_heads"] = num_heads config["sa_ffn_layers"] = sa_ffn_layers config["ca_ffn_layers"] = ca_ffn_layers # Representation pooling and task specific parameters. config["global_pool"] = global_pool config["pre_logits"] = pre_logits config["num_classes"] = num_classes return config ``` Most of the configuration variables should sound familiar to you if you already know the ViT architecture. Point of focus is given to `sa_ffn_layers` and `ca_ffn_layers` that control the number of SA-Transformer blocks and CA-Transformer blocks. You can easily amend this `get_config()` method to instantiate a CaiT model for your own dataset. --- ## Model Instantiation ```python image_size = 224 num_channels = 3 batch_size = 2 config = get_config() cait_xxs24_224 = CaiT(**config) dummy_inputs = ops.ones((batch_size, image_size, image_size, num_channels)) _ = cait_xxs24_224(dummy_inputs) ``` We can successfully perform inference with the model. But what about implementation correctness? There are many ways to verify it: * Obtain the performance of the model (given it's been populated with the pre-trained parameters) on the ImageNet-1k validation set (as the pretraining dataset was ImageNet-1k). * Fine-tune the model on a different dataset. 
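Besides these checks, a quick structural sanity check is possible right away. The snippet below is an optional sketch (not part of the original verification workflow): it counts the parameters of the freshly instantiated model, which should land roughly in the ~12M ballpark reported for the CaiT-XXS24 variant.

```python
# Optional sanity check: parameter count of the freshly instantiated model.
# For the CaiT-XXS24 configuration this should be roughly 12M.
num_params = cait_xxs24_224.count_params()
print(f"Number of parameters: {num_params / 1e6:.2f} M")
```

A parameter count only tells us that the architecture has the expected size, though; the real correctness checks are the two points above.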
In order to verify that, we will load another instance of the same model that has been already populated with the pre-trained parameters. Please refer to [this repository](https://github.com/sayakpaul/cait-tf) (developed by the author of this notebook) for more details. Additionally, the repository provides code to verify model performance on the [ImageNet-1k validation set](https://github.com/sayakpaul/cait-tf/tree/main/i1k_eval) as well as [fine-tuning](https://github.com/sayakpaul/cait-tf/blob/main/notebooks/finetune.ipynb). --- ## Load a pretrained model ```python model_gcs_path = "gs://tfhub-modules/sayakpaul/cait_xxs24_224/1/uncompressed" pretrained_model = keras.Sequential( [keras.layers.TFSMLayer(model_gcs_path, call_endpoint="serving_default")] ) ``` --- ## Inference utilities In the next couple of cells, we develop preprocessing utilities needed to run inference with the pretrained model. ```python # The preprocessing transformations include center cropping, and normalizing # the pixel values with the ImageNet-1k training stats (mean and standard deviation). crop_layer = keras.layers.CenterCrop(image_size, image_size) norm_layer = keras.layers.Normalization( mean=[0.485 * 255, 0.456 * 255, 0.406 * 255], variance=[(0.229 * 255) ** 2, (0.224 * 255) ** 2, (0.225 * 255) ** 2], ) def preprocess_image(image, size=image_size): image = np.array(image) image_resized = ops.expand_dims(image, 0) resize_size = int((256 / image_size) * size) image_resized = ops.image.resize( image_resized, (resize_size, resize_size), interpolation="bicubic" ) image_resized = crop_layer(image_resized) return norm_layer(image_resized).numpy() def load_image_from_url(url): image_bytes = io.BytesIO(urlopen(url).read()) image = PIL.Image.open(image_bytes) preprocessed_image = preprocess_image(image) return image, preprocessed_image ``` Now, we retrieve the ImageNet-1k labels and load them as the model we're loading was pretrained on the ImageNet-1k dataset. ```python # ImageNet-1k class labels. imagenet_labels = ( "https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt" ) label_path = keras.utils.get_file(origin=imagenet_labels) with open(label_path, "r") as f: lines = f.readlines() imagenet_labels = [line.rstrip() for line in lines] ``` --- ## Load an Image ```python img_url = "https://i.imgur.com/ErgfLTn.jpg" image, preprocessed_image = load_image_from_url(img_url) # https://unsplash.com/photos/Ho93gVTRWW8 plt.imshow(image) plt.axis("off") plt.show() ``` ![png](/img/examples/vision/cait/cait_33_0.png) --- ## Obtain Predictions ```python outputs = pretrained_model.predict(preprocessed_image) logits = outputs["output_1"] ca_ffn_block_0_att = outputs["output_3_ca_ffn_block_0_att"] ca_ffn_block_1_att = outputs["output_3_ca_ffn_block_1_att"] predicted_label = imagenet_labels[int(np.argmax(logits))] print(predicted_label) ``` <div class="k-default-codeblock"> ``` 1/1 ━━━━━━━━━━━━━━━━━━━━ 30s 30s/step monarch, monarch_butterfly, milkweed_butterfly, Danaus_plexippus WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1700601113.319904 361514 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. ``` </div> Now that we have obtained the predictions (which appear to be as expected), we can further extend our investigation. Following the CaiT authors, we can investigate the attention scores from the attention layers. 
This helps us to get deeper insights into the modifications introduced in the CaiT paper. --- ## Visualizing the Attention Layers We start by inspecting the shape of the attention weights returned by a Class Attention layer. ```python # (batch_size, nb_attention_heads, num_cls_token, seq_length) print("Shape of the attention scores from a class attention block:") print(ca_ffn_block_0_att.shape) ``` <div class="k-default-codeblock"> ``` Shape of the attention scores from a class attention block: (1, 4, 1, 197) ``` </div> The shape denotes we have got attention weights for each of the individual attention heads. They quantify the information about how the CLS token is related to itself and the rest of the image patches. Next, we write a utility to: * Visualize what the individual attention heads in the Class Attention layers are focusing on. This helps us to get an idea of how the _spatial-class relationship_ is induced in the CaiT model. * Obtain a saliency map from the first Class Attention layer that helps to understand how CA layer aggregates information from the region(s) of interest in the images. This utility is referred from Figures 6 and 7 of the original [CaiT paper](https://arxiv.org/abs/2103.17239). This is also a part of [this notebook](https://github.com/sayakpaul/cait-tf/blob/main/notebooks/classification.ipynb) (developed by the author of this tutorial). ```python # Reference: # https://github.com/facebookresearch/dino/blob/main/visualize_attention.py patch_size = 16 def get_cls_attention_map( attention_scores, return_saliency=False, ) -> np.ndarray: """ Returns attention scores from a particular attention block. Args: attention_scores: the attention scores from the attention block to visualize. return_saliency: a boolean flag if set to True also returns the salient representations of the attention block. """ w_featmap = preprocessed_image.shape[2] // patch_size h_featmap = preprocessed_image.shape[1] // patch_size nh = attention_scores.shape[1] # Number of attention heads. # Taking the representations from CLS token. attentions = attention_scores[0, :, 0, 1:].reshape(nh, -1) # Reshape the attention scores to resemble mini patches. attentions = attentions.reshape(nh, w_featmap, h_featmap) if not return_saliency: attentions = attentions.transpose((1, 2, 0)) else: attentions = np.mean(attentions, axis=0) attentions = (attentions - attentions.min()) / ( attentions.max() - attentions.min() ) attentions = np.expand_dims(attentions, -1) # Resize the attention patches to 224x224 (224: 14x16) attentions = ops.image.resize( attentions, size=(h_featmap * patch_size, w_featmap * patch_size), interpolation="bicubic", ) return attentions ``` In the first CA layer, we notice that the model is focusing solely on the region of interest. ```python attentions_ca_block_0 = get_cls_attention_map(ca_ffn_block_0_att) fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(13, 13)) img_count = 0 for i in range(attentions_ca_block_0.shape[-1]): if img_count < attentions_ca_block_0.shape[-1]: axes[i].imshow(attentions_ca_block_0[:, :, img_count]) axes[i].title.set_text(f"Attention head: {img_count}") axes[i].axis("off") img_count += 1 fig.tight_layout() plt.show() ``` ![png](/img/examples/vision/cait/cait_42_0.png) Whereas in the second CA layer, the model is trying to focus more on the context that contains discriminative signals. 
```python
attentions_ca_block_1 = get_cls_attention_map(ca_ffn_block_1_att)

fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(13, 13))
img_count = 0

for i in range(attentions_ca_block_1.shape[-1]):
    if img_count < attentions_ca_block_1.shape[-1]:
        axes[i].imshow(attentions_ca_block_1[:, :, img_count])
        axes[i].title.set_text(f"Attention head: {img_count}")
        axes[i].axis("off")
        img_count += 1

fig.tight_layout()
plt.show()
```

![png](/img/examples/vision/cait/cait_44_0.png)

Finally, we obtain the saliency map for the given image.

```python
saliency_attention = get_cls_attention_map(ca_ffn_block_0_att, return_saliency=True)

image = np.array(image)
image_resized = ops.expand_dims(image, 0)
resize_size = int((256 / 224) * image_size)
image_resized = ops.image.resize(
    image_resized, (resize_size, resize_size), interpolation="bicubic"
)
image_resized = crop_layer(image_resized)

plt.imshow(image_resized.numpy().squeeze().astype("int32"))
plt.imshow(saliency_attention.numpy().squeeze(), cmap="cividis", alpha=0.9)
plt.axis("off")
plt.show()
```

![png](/img/examples/vision/cait/cait_46_0.png)

---

## Conclusion

In this notebook, we implemented the CaiT model. It shows how to mitigate the issues in ViTs when trying to scale their depth while keeping the pretraining dataset fixed. I hope the additional visualizations provided in the notebook spark excitement in the community and that people develop interesting methods to probe what models like ViT learn.

---

## Acknowledgement

Thanks to the ML Developer Programs team at Google for providing Google Cloud Platform support.
keras-io/examples/vision/md/cait.md/0
{ "file_path": "keras-io/examples/vision/md/cait.md", "repo_id": "keras-io", "token_count": 14363 }
123
# Grad-CAM class activation visualization **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2020/04/26<br> **Last modified:** 2021/03/07<br> **Description:** How to obtain a class activation heatmap for an image classification model. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/grad_cam.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/grad_cam.py) Adapted from Deep Learning with Python (2017). --- ## Setup ```python import os os.environ["KERAS_BACKEND"] = "tensorflow" import numpy as np import tensorflow as tf import keras # Display from IPython.display import Image, display import matplotlib as mpl import matplotlib.pyplot as plt ``` --- ## Configurable parameters You can change these to another model. To get the values for `last_conv_layer_name` use `model.summary()` to see the names of all layers in the model. ```python model_builder = keras.applications.xception.Xception img_size = (299, 299) preprocess_input = keras.applications.xception.preprocess_input decode_predictions = keras.applications.xception.decode_predictions last_conv_layer_name = "block14_sepconv2_act" # The local path to our target image img_path = keras.utils.get_file( "african_elephant.jpg", "https://i.imgur.com/Bvro0YD.png" ) display(Image(img_path)) ``` ![jpeg](/img/examples/vision/grad_cam/grad_cam_4_0.jpg) --- ## The Grad-CAM algorithm ```python def get_img_array(img_path, size): # `img` is a PIL image of size 299x299 img = keras.utils.load_img(img_path, target_size=size) # `array` is a float32 Numpy array of shape (299, 299, 3) array = keras.utils.img_to_array(img) # We add a dimension to transform our array into a "batch" # of size (1, 299, 299, 3) array = np.expand_dims(array, axis=0) return array def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None): # First, we create a model that maps the input image to the activations # of the last conv layer as well as the output predictions grad_model = keras.models.Model( model.inputs, [model.get_layer(last_conv_layer_name).output, model.output] ) # Then, we compute the gradient of the top predicted class for our input image # with respect to the activations of the last conv layer with tf.GradientTape() as tape: last_conv_layer_output, preds = grad_model(img_array) if pred_index is None: pred_index = tf.argmax(preds[0]) class_channel = preds[:, pred_index] # This is the gradient of the output neuron (top predicted or chosen) # with regard to the output feature map of the last conv layer grads = tape.gradient(class_channel, last_conv_layer_output) # This is a vector where each entry is the mean intensity of the gradient # over a specific feature map channel pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2)) # We multiply each channel in the feature map array # by "how important this channel is" with regard to the top predicted class # then sum all the channels to obtain the heatmap class activation last_conv_layer_output = last_conv_layer_output[0] heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis] heatmap = tf.squeeze(heatmap) # For visualization purpose, we will also normalize the heatmap between 0 & 1 heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap) return heatmap.numpy() ``` --- ## Let's 
test-drive it ```python # Prepare image img_array = preprocess_input(get_img_array(img_path, size=img_size)) # Make model model = model_builder(weights="imagenet") # Remove last layer's softmax model.layers[-1].activation = None # Print what the top predicted class is preds = model.predict(img_array) print("Predicted:", decode_predictions(preds, top=1)[0]) # Generate class activation heatmap heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name) # Display heatmap plt.matshow(heatmap) plt.show() ``` <div class="k-default-codeblock"> ``` 1/1 ━━━━━━━━━━━━━━━━━━━━ 3s 3s/step Predicted: [('n02504458', 'African_elephant', 9.860664)] ``` </div> ![png](/img/examples/vision/grad_cam/grad_cam_8_3.png) --- ## Create a superimposed visualization ```python def save_and_display_gradcam(img_path, heatmap, cam_path="cam.jpg", alpha=0.4): # Load the original image img = keras.utils.load_img(img_path) img = keras.utils.img_to_array(img) # Rescale heatmap to a range 0-255 heatmap = np.uint8(255 * heatmap) # Use jet colormap to colorize heatmap jet = mpl.colormaps["jet"] # Use RGB values of the colormap jet_colors = jet(np.arange(256))[:, :3] jet_heatmap = jet_colors[heatmap] # Create an image with RGB colorized heatmap jet_heatmap = keras.utils.array_to_img(jet_heatmap) jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0])) jet_heatmap = keras.utils.img_to_array(jet_heatmap) # Superimpose the heatmap on original image superimposed_img = jet_heatmap * alpha + img superimposed_img = keras.utils.array_to_img(superimposed_img) # Save the superimposed image superimposed_img.save(cam_path) # Display Grad CAM display(Image(cam_path)) save_and_display_gradcam(img_path, heatmap) ``` ![jpeg](/img/examples/vision/grad_cam/grad_cam_10_0.jpg) --- ## Let's try another image We will see how the grad cam explains the model's outputs for a multi-label image. Let's try an image with a cat and a dog together, and see how the grad cam behaves. ```python img_path = keras.utils.get_file( "cat_and_dog.jpg", "https://storage.googleapis.com/petbacker/images/blog/2017/dog-and-cat-cover.jpg", ) display(Image(img_path)) # Prepare image img_array = preprocess_input(get_img_array(img_path, size=img_size)) # Print what the two top predicted classes are preds = model.predict(img_array) print("Predicted:", decode_predictions(preds, top=2)[0]) ``` ![jpeg](/img/examples/vision/grad_cam/grad_cam_12_0.jpg) <div class="k-default-codeblock"> ``` 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step Predicted: [('n02112137', 'chow', 4.610808), ('n02124075', 'Egyptian_cat', 4.3835773)] ``` </div> We generate class activation heatmap for "chow," the class index is 260 ```python heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=260) save_and_display_gradcam(img_path, heatmap) ``` ![jpeg](/img/examples/vision/grad_cam/grad_cam_14_0.jpg) We generate class activation heatmap for "egyptian cat," the class index is 285 ```python heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=285) save_and_display_gradcam(img_path, heatmap) ``` ![jpeg](/img/examples/vision/grad_cam/grad_cam_16_0.jpg)
keras-io/examples/vision/md/grad_cam.md/0
{ "file_path": "keras-io/examples/vision/md/grad_cam.md", "repo_id": "keras-io", "token_count": 2686 }
124
# RandAugment for Image Classification for Improved Robustness

**Authors:** [Sayak Paul](https://twitter.com/RisingSayak), [Sachin Prasad](https://github.com/sachinprasadhs)<br>
**Date created:** 2021/03/13<br>
**Last modified:** 2023/12/12<br>
**Description:** RandAugment for training an image classification model with improved robustness.

<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/randaugment.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/randaugment.py)

Data augmentation is a very useful technique that can help to improve the translational invariance of convolutional neural networks (CNN). RandAugment is a stochastic data augmentation routine for vision data and was proposed in [RandAugment: Practical automated data augmentation with a reduced search space](https://arxiv.org/abs/1909.13719). It is composed of strong augmentation transforms like color jitters, Gaussian blurs, saturations, etc. along with more traditional augmentation transforms such as random crops. Its parameters (the number of augmentation transforms applied to each image and their magnitude) are tuned for a given dataset and network architecture. The authors of RandAugment also provide pseudocode of RandAugment in the original paper (Figure 2).

Recently, it has been a key component of works like [Noisy Student Training](https://arxiv.org/abs/1911.04252) and [Unsupervised Data Augmentation for Consistency Training](https://arxiv.org/abs/1904.12848). It has also been central to the success of [EfficientNets](https://arxiv.org/abs/1905.11946).

```python
pip install keras-cv
```

---
## Imports & setup

```python
import os

os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import keras_cv
from keras import ops
from keras import layers
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds

tfds.disable_progress_bar()
keras.utils.set_random_seed(42)
```

---
## Load the CIFAR10 dataset

For this example, we will be using the [CIFAR10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html).

```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
print(f"Total training examples: {len(x_train)}")
print(f"Total test examples: {len(x_test)}")
```

<div class="k-default-codeblock">
```
Total training examples: 50000
Total test examples: 10000
```
</div>

---
## Define hyperparameters

```python
AUTO = tf.data.AUTOTUNE
BATCH_SIZE = 128
EPOCHS = 1
IMAGE_SIZE = 72
```

---
## Initialize `RandAugment` object

Now, we will initialize a `RandAugment` layer from KerasCV (`keras_cv.layers.RandAugment`) with the parameters suggested by the RandAugment authors.
```python rand_augment = keras_cv.layers.RandAugment( value_range=(0, 255), augmentations_per_image=3, magnitude=0.8 ) ``` --- ## Create TensorFlow `Dataset` objects ```python train_ds_rand = ( tf.data.Dataset.from_tensor_slices((x_train, y_train)) .shuffle(BATCH_SIZE * 100) .batch(BATCH_SIZE) .map( lambda x, y: (tf.image.resize(x, (IMAGE_SIZE, IMAGE_SIZE)), y), num_parallel_calls=AUTO, ) .map( lambda x, y: (rand_augment(tf.cast(x, tf.uint8)), y), num_parallel_calls=AUTO, ) .prefetch(AUTO) ) test_ds = ( tf.data.Dataset.from_tensor_slices((x_test, y_test)) .batch(BATCH_SIZE) .map( lambda x, y: (tf.image.resize(x, (IMAGE_SIZE, IMAGE_SIZE)), y), num_parallel_calls=AUTO, ) .prefetch(AUTO) ) ``` For comparison purposes, let's also define a simple augmentation pipeline consisting of random flips, random rotations, and random zoomings. ```python simple_aug = keras.Sequential( [ layers.Resizing(IMAGE_SIZE, IMAGE_SIZE), layers.RandomFlip("horizontal"), layers.RandomRotation(factor=0.02), layers.RandomZoom(height_factor=0.2, width_factor=0.2), ] ) # Now, map the augmentation pipeline to our training dataset train_ds_simple = ( tf.data.Dataset.from_tensor_slices((x_train, y_train)) .shuffle(BATCH_SIZE * 100) .batch(BATCH_SIZE) .map(lambda x, y: (simple_aug(x), y), num_parallel_calls=AUTO) .prefetch(AUTO) ) ``` --- ## Visualize the dataset augmented with RandAugment ```python sample_images, _ = next(iter(train_ds_rand)) plt.figure(figsize=(10, 10)) for i, image in enumerate(sample_images[:9]): ax = plt.subplot(3, 3, i + 1) plt.imshow(image.numpy().astype("int")) plt.axis("off") ``` ![png](/img/examples/vision/randaugment/randaugment_15_0.png) You are encouraged to run the above code block a couple of times to see different variations. --- ## Visualize the dataset augmented with `simple_aug` ```python sample_images, _ = next(iter(train_ds_simple)) plt.figure(figsize=(10, 10)) for i, image in enumerate(sample_images[:9]): ax = plt.subplot(3, 3, i + 1) plt.imshow(image.numpy().astype("int")) plt.axis("off") ``` ![png](/img/examples/vision/randaugment/randaugment_18_0.png) --- ## Define a model building utility function Now, we define a CNN model that is based on the [ResNet50V2 architecture](https://arxiv.org/abs/1603.05027). Also, notice that the network already has a rescaling layer inside it. This eliminates the need to do any separate preprocessing on our dataset and is specifically very useful for deployment purposes. 
```python def get_training_model(): resnet50_v2 = keras.applications.ResNet50V2( weights=None, include_top=True, input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), classes=10, ) model = keras.Sequential( [ layers.Input((IMAGE_SIZE, IMAGE_SIZE, 3)), layers.Rescaling(scale=1.0 / 127.5, offset=-1), resnet50_v2, ] ) return model get_training_model().summary() ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential_1"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ rescaling (<span style="color: #0087ff; text-decoration-color: #0087ff">Rescaling</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">72</span>, <span style="color: #00af00; text-decoration-color: #00af00">72</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ resnet50v2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">23,585,290</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">23,585,290</span> (89.97 MB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">23,539,850</span> (89.80 MB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">45,440</span> (177.50 KB) </pre> We will train this network on two different versions of our dataset: * One augmented with RandAugment. * Another one augmented with `simple_aug`. Since RandAugment is known to enhance the robustness of models to common perturbations and corruptions, we will also evaluate our models on the CIFAR-10-C dataset, proposed in [Benchmarking Neural Network Robustness to Common Corruptions and Perturbations](https://arxiv.org/abs/1903.12261) by Hendrycks et al. The CIFAR-10-C dataset consists of 19 different image corruptions and perturbations (for example speckle noise, fog, Gaussian blur, etc.) that too at varying severity levels. 
For this example we will be using the following configuration: [`cifar10_corrupted/saturate_5`](https://www.tensorflow.org/datasets/catalog/cifar10_corrupted#cifar10_corruptedsaturate_5). The images from this configuration look like so: ![](https://storage.googleapis.com/tfds-data/visualization/fig/cifar10_corrupted-saturate_5-1.0.0.png) In the interest of reproducibility, we serialize the initial random weights of our shallow network. ```python initial_model = get_training_model() initial_model.save_weights("initial.weights.h5") ``` --- ## Train model with RandAugment ```python rand_aug_model = get_training_model() rand_aug_model.load_weights("initial.weights.h5") rand_aug_model.compile( loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"] ) rand_aug_model.fit(train_ds_rand, validation_data=test_ds, epochs=EPOCHS) _, test_acc = rand_aug_model.evaluate(test_ds) print("Test accuracy: {:.2f}%".format(test_acc * 100)) ``` <div class="k-default-codeblock"> ``` 391/391 ━━━━━━━━━━━━━━━━━━━━ 1146s 3s/step - accuracy: 0.1677 - loss: 2.3232 - val_accuracy: 0.2818 - val_loss: 1.9966 79/79 ━━━━━━━━━━━━━━━━━━━━ 39s 489ms/step - accuracy: 0.2803 - loss: 2.0073 Test accuracy: 28.18% ``` </div> --- ## Train model with `simple_aug` ```python simple_aug_model = get_training_model() simple_aug_model.load_weights("initial.weights.h5") simple_aug_model.compile( loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"] ) simple_aug_model.fit(train_ds_simple, validation_data=test_ds, epochs=EPOCHS) _, test_acc = simple_aug_model.evaluate(test_ds) print("Test accuracy: {:.2f}%".format(test_acc * 100)) ``` <div class="k-default-codeblock"> ``` 391/391 ━━━━━━━━━━━━━━━━━━━━ 1132s 3s/step - accuracy: 0.3673 - loss: 1.7929 - val_accuracy: 0.4789 - val_loss: 1.4296 79/79 ━━━━━━━━━━━━━━━━━━━━ 39s 494ms/step - accuracy: 0.4762 - loss: 1.4368 Test accuracy: 47.89% ``` </div> --- ## Load the CIFAR-10-C dataset and evaluate performance ```python # Load and prepare the CIFAR-10-C dataset # (If it's not already downloaded, it takes ~10 minutes of time to download) cifar_10_c = tfds.load("cifar10_corrupted/saturate_5", split="test", as_supervised=True) cifar_10_c = cifar_10_c.batch(BATCH_SIZE).map( lambda x, y: (tf.image.resize(x, (IMAGE_SIZE, IMAGE_SIZE)), y), num_parallel_calls=AUTO, ) # Evaluate `rand_aug_model` _, test_acc = rand_aug_model.evaluate(cifar_10_c, verbose=0) print( "Accuracy with RandAugment on CIFAR-10-C (saturate_5): {:.2f}%".format( test_acc * 100 ) ) # Evaluate `simple_aug_model` _, test_acc = simple_aug_model.evaluate(cifar_10_c, verbose=0) print( "Accuracy with simple_aug on CIFAR-10-C (saturate_5): {:.2f}%".format( test_acc * 100 ) ) ``` <div class="k-default-codeblock"> ``` Downloading and preparing dataset 2.72 GiB (download: 2.72 GiB, generated: Unknown size, total: 2.72 GiB) to /home/sachinprasad/tensorflow_datasets/cifar10_corrupted/saturate_5/1.0.0... Dataset cifar10_corrupted downloaded and prepared to /home/sachinprasad/tensorflow_datasets/cifar10_corrupted/saturate_5/1.0.0. Subsequent calls will reuse this data. Accuracy with RandAugment on CIFAR-10-C (saturate_5): 30.36% Accuracy with simple_aug on CIFAR-10-C (saturate_5): 37.18% ``` </div> For the purpose of this example, we trained the models for only a single epoch. On the CIFAR-10-C dataset, the model with RandAugment can perform better with a higher accuracy (for example, 76.64% in one experiment) compared with the model trained with `simple_aug` (e.g., 64.80%). 
RandAugment can also help stabilize the training. In the notebook, you may notice that, at the expense of increased training time with RandAugment, we are able to carve out far better performance on the CIFAR-10-C dataset. You can experiment on the other corruption and perturbation settings that come with the run the same CIFAR-10-C dataset and see if RandAugment helps. You can also experiment with the different values of `n` and `m` in the `RandAugment` object. In the [original paper](https://arxiv.org/abs/1909.13719), the authors show the impact of the individual augmentation transforms for a particular task and a range of ablation studies. You are welcome to check them out. RandAugment has shown great progress in improving the robustness of deep models for computer vision as shown in works like [Noisy Student Training](https://arxiv.org/abs/1911.04252) and [FixMatch](https://arxiv.org/abs/2001.07685). This makes RandAugment quite a useful recipe for training different vision models. You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/randaugment) and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/randaugment).
keras-io/examples/vision/md/randaugment.md/0
{ "file_path": "keras-io/examples/vision/md/randaugment.md", "repo_id": "keras-io", "token_count": 5350 }
125
# Video Classification with a CNN-RNN Architecture **Author:** [Sayak Paul](https://twitter.com/RisingSayak)<br> **Date created:** 2021/05/28<br> **Last modified:** 2023/12/08<br> **Description:** Training a video classifier with transfer learning and a recurrent model on the UCF101 dataset. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/video_classification.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/video_classification.py) This example demonstrates video classification, an important use-case with applications in recommendations, security, and so on. We will be using the [UCF101 dataset](https://www.crcv.ucf.edu/data/UCF101.php) to build our video classifier. The dataset consists of videos categorized into different actions, like cricket shot, punching, biking, etc. This dataset is commonly used to build action recognizers, which are an application of video classification. A video consists of an ordered sequence of frames. Each frame contains *spatial* information, and the sequence of those frames contains *temporal* information. To model both of these aspects, we use a hybrid architecture that consists of convolutions (for spatial processing) as well as recurrent layers (for temporal processing). Specifically, we'll use a Convolutional Neural Network (CNN) and a Recurrent Neural Network (RNN) consisting of [GRU layers](https://keras.io/api/layers/recurrent_layers/gru/). This kind of hybrid architecture is popularly known as a **CNN-RNN**. This example requires TensorFlow 2.5 or higher, as well as TensorFlow Docs, which can be installed using the following command: ```python !pip install -q git+https://github.com/tensorflow/docs ``` --- ## Data collection In order to keep the runtime of this example relatively short, we will be using a subsampled version of the original UCF101 dataset. You can refer to [this notebook](https://colab.research.google.com/github/sayakpaul/Action-Recognition-in-TensorFlow/blob/main/Data_Preparation_UCF101.ipynb) to know how the subsampling was done. 
```python !!wget -q https://github.com/sayakpaul/Action-Recognition-in-TensorFlow/releases/download/v1.0.0/ucf101_top5.tar.gz !tar xf ucf101_top5.tar.gz ``` --- ## Setup ```python import os import keras from imutils import paths import matplotlib.pyplot as plt import pandas as pd import numpy as np import imageio import cv2 from IPython.display import Image ``` --- ## Define hyperparameters ```python IMG_SIZE = 224 BATCH_SIZE = 64 EPOCHS = 10 MAX_SEQ_LENGTH = 20 NUM_FEATURES = 2048 ``` --- ## Data preparation ```python train_df = pd.read_csv("train.csv") test_df = pd.read_csv("test.csv") print(f"Total videos for training: {len(train_df)}") print(f"Total videos for testing: {len(test_df)}") train_df.sample(10) ``` <div class="k-default-codeblock"> ``` Total videos for training: 594 Total videos for testing: 224 ``` </div> <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } <div class="k-default-codeblock"> ``` .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } ``` </div> </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>video_name</th> <th>tag</th> </tr> </thead> <tbody> <tr> <th>492</th> <td>v_TennisSwing_g10_c03.avi</td> <td>TennisSwing</td> </tr> <tr> <th>536</th> <td>v_TennisSwing_g16_c05.avi</td> <td>TennisSwing</td> </tr> <tr> <th>413</th> <td>v_ShavingBeard_g16_c05.avi</td> <td>ShavingBeard</td> </tr> <tr> <th>268</th> <td>v_Punch_g12_c04.avi</td> <td>Punch</td> </tr> <tr> <th>288</th> <td>v_Punch_g15_c03.avi</td> <td>Punch</td> </tr> <tr> <th>30</th> <td>v_CricketShot_g12_c03.avi</td> <td>CricketShot</td> </tr> <tr> <th>449</th> <td>v_ShavingBeard_g21_c07.avi</td> <td>ShavingBeard</td> </tr> <tr> <th>524</th> <td>v_TennisSwing_g14_c07.avi</td> <td>TennisSwing</td> </tr> <tr> <th>145</th> <td>v_PlayingCello_g12_c01.avi</td> <td>PlayingCello</td> </tr> <tr> <th>566</th> <td>v_TennisSwing_g21_c03.avi</td> <td>TennisSwing</td> </tr> </tbody> </table> </div> One of the many challenges of training video classifiers is figuring out a way to feed the videos to a network. [This blog post](https://blog.coast.ai/five-video-classification-methods-implemented-in-keras-and-tensorflow-99cad29cc0b5) discusses five such methods. Since a video is an ordered sequence of frames, we could just extract the frames and put them in a 3D tensor. But the number of frames may differ from video to video which would prevent us from stacking them into batches (unless we use padding). As an alternative, we can **save video frames at a fixed interval until a maximum frame count is reached**. In this example we will do the following: 1. Capture the frames of a video. 2. Extract frames from the videos until a maximum frame count is reached. 3. In the case, where a video's frame count is lesser than the maximum frame count we will pad the video with zeros. Note that this workflow is identical to [problems involving texts sequences](https://developers.google.com/machine-learning/guides/text-classification/). Videos of the UCF101 dataset is [known](https://www.crcv.ucf.edu/papers/UCF101_CRCV-TR-12-01.pdf) to not contain extreme variations in objects and actions across frames. Because of this, it may be okay to only consider a few frames for the learning task. But this approach may not generalize well to other video classification problems. We will be using [OpenCV's `VideoCapture()` method](https://docs.opencv.org/master/dd/d43/tutorial_py_video_display.html) to read frames from videos. 
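Before looking at the frame-reading utilities below, here is a tiny, illustrative
NumPy sketch of the padding-and-masking idea described above. The names and
numbers are placeholders; the actual logic used for training lives in
`prepare_all_videos()` further down.

```python
import numpy as np

max_seq_length = 5  # illustrative stand-in for `MAX_SEQ_LENGTH`
video_frame_count = 3  # a video shorter than the maximum length

features = np.zeros((max_seq_length, 1), dtype="float32")  # zero-padded features
mask = np.zeros((max_seq_length,), dtype="bool")  # True = real frame, False = padding

length = min(max_seq_length, video_frame_count)
features[:length] = 1.0  # pretend these are the extracted per-frame features
mask[:length] = True

print(features.squeeze())  # [1. 1. 1. 0. 0.]
print(mask)  # [ True  True  True False False]
```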
```python # The following two methods are taken from this tutorial: # https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub def crop_center_square(frame): y, x = frame.shape[0:2] min_dim = min(y, x) start_x = (x // 2) - (min_dim // 2) start_y = (y // 2) - (min_dim // 2) return frame[start_y : start_y + min_dim, start_x : start_x + min_dim] def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE)): cap = cv2.VideoCapture(path) frames = [] try: while True: ret, frame = cap.read() if not ret: break frame = crop_center_square(frame) frame = cv2.resize(frame, resize) frame = frame[:, :, [2, 1, 0]] frames.append(frame) if len(frames) == max_frames: break finally: cap.release() return np.array(frames) ``` We can use a pre-trained network to extract meaningful features from the extracted frames. The [`Keras Applications`](https://keras.io/api/applications/) module provides a number of state-of-the-art models pre-trained on the [ImageNet-1k dataset](http://image-net.org/). We will be using the [InceptionV3 model](https://arxiv.org/abs/1512.00567) for this purpose. ```python def build_feature_extractor(): feature_extractor = keras.applications.InceptionV3( weights="imagenet", include_top=False, pooling="avg", input_shape=(IMG_SIZE, IMG_SIZE, 3), ) preprocess_input = keras.applications.inception_v3.preprocess_input inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3)) preprocessed = preprocess_input(inputs) outputs = feature_extractor(preprocessed) return keras.Model(inputs, outputs, name="feature_extractor") feature_extractor = build_feature_extractor() ``` The labels of the videos are strings. Neural networks do not understand string values, so they must be converted to some numerical form before they are fed to the model. Here we will use the [`StringLookup`](https://keras.io/api/layers/preprocessing_layers/categorical/string_lookup) layer encode the class labels as integers. ```python label_processor = keras.layers.StringLookup( num_oov_indices=0, vocabulary=np.unique(train_df["tag"]) ) print(label_processor.get_vocabulary()) ``` <div class="k-default-codeblock"> ``` ['CricketShot', 'PlayingCello', 'Punch', 'ShavingBeard', 'TennisSwing'] ``` </div> Finally, we can put all the pieces together to create our data processing utility. ```python def prepare_all_videos(df, root_dir): num_samples = len(df) video_paths = df["video_name"].values.tolist() labels = df["tag"].values labels = keras.ops.convert_to_numpy(label_processor(labels[..., None])) # `frame_masks` and `frame_features` are what we will feed to our sequence model. # `frame_masks` will contain a bunch of booleans denoting if a timestep is # masked with padding or not. frame_masks = np.zeros(shape=(num_samples, MAX_SEQ_LENGTH), dtype="bool") frame_features = np.zeros( shape=(num_samples, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32" ) # For each video. for idx, path in enumerate(video_paths): # Gather all its frames and add a batch dimension. frames = load_video(os.path.join(root_dir, path)) frames = frames[None, ...] # Initialize placeholders to store the masks and features of the current video. temp_frame_mask = np.zeros( shape=( 1, MAX_SEQ_LENGTH, ), dtype="bool", ) temp_frame_features = np.zeros( shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32" ) # Extract features from the frames of the current video. 
for i, batch in enumerate(frames): video_length = batch.shape[0] length = min(MAX_SEQ_LENGTH, video_length) for j in range(length): temp_frame_features[i, j, :] = feature_extractor.predict( batch[None, j, :], verbose=0, ) temp_frame_mask[i, :length] = 1 # 1 = not masked, 0 = masked frame_features[idx,] = temp_frame_features.squeeze() frame_masks[idx,] = temp_frame_mask.squeeze() return (frame_features, frame_masks), labels train_data, train_labels = prepare_all_videos(train_df, "train") test_data, test_labels = prepare_all_videos(test_df, "test") print(f"Frame features in train set: {train_data[0].shape}") print(f"Frame masks in train set: {train_data[1].shape}") ``` <div class="k-default-codeblock"> ``` Frame features in train set: (594, 20, 2048) Frame masks in train set: (594, 20) ``` </div> The above code block will take ~20 minutes to execute depending on the machine it's being executed. --- ## The sequence model Now, we can feed this data to a sequence model consisting of recurrent layers like `GRU`. ```python # Utility for our sequence model. def get_sequence_model(): class_vocab = label_processor.get_vocabulary() frame_features_input = keras.Input((MAX_SEQ_LENGTH, NUM_FEATURES)) mask_input = keras.Input((MAX_SEQ_LENGTH,), dtype="bool") # Refer to the following tutorial to understand the significance of using `mask`: # https://keras.io/api/layers/recurrent_layers/gru/ x = keras.layers.GRU(16, return_sequences=True)( frame_features_input, mask=mask_input ) x = keras.layers.GRU(8)(x) x = keras.layers.Dropout(0.4)(x) x = keras.layers.Dense(8, activation="relu")(x) output = keras.layers.Dense(len(class_vocab), activation="softmax")(x) rnn_model = keras.Model([frame_features_input, mask_input], output) rnn_model.compile( loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"] ) return rnn_model # Utility for running experiments. 
def run_experiment(): filepath = "/tmp/video_classifier/ckpt.weights.h5" checkpoint = keras.callbacks.ModelCheckpoint( filepath, save_weights_only=True, save_best_only=True, verbose=1 ) seq_model = get_sequence_model() history = seq_model.fit( [train_data[0], train_data[1]], train_labels, validation_split=0.3, epochs=EPOCHS, callbacks=[checkpoint], ) seq_model.load_weights(filepath) _, accuracy = seq_model.evaluate([test_data[0], test_data[1]], test_labels) print(f"Test accuracy: {round(accuracy * 100, 2)}%") return history, seq_model _, sequence_model = run_experiment() ``` <div class="k-default-codeblock"> ``` Epoch 1/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.3058 - loss: 1.5597 Epoch 1: val_loss improved from inf to 1.78077, saving model to /tmp/video_classifier/ckpt.weights.h5 13/13 ━━━━━━━━━━━━━━━━━━━━ 2s 36ms/step - accuracy: 0.3127 - loss: 1.5531 - val_accuracy: 0.1397 - val_loss: 1.7808 Epoch 2/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.5216 - loss: 1.2704 Epoch 2: val_loss improved from 1.78077 to 1.78026, saving model to /tmp/video_classifier/ckpt.weights.h5 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - accuracy: 0.5226 - loss: 1.2684 - val_accuracy: 0.1788 - val_loss: 1.7803 Epoch 3/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.6189 - loss: 1.1656 Epoch 3: val_loss did not improve from 1.78026 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.6174 - loss: 1.1651 - val_accuracy: 0.2849 - val_loss: 1.8322 Epoch 4/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.6518 - loss: 1.0645 Epoch 4: val_loss did not improve from 1.78026 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - accuracy: 0.6515 - loss: 1.0647 - val_accuracy: 0.2793 - val_loss: 2.0419 Epoch 5/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.6833 - loss: 0.9976 Epoch 5: val_loss did not improve from 1.78026 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.6843 - loss: 0.9965 - val_accuracy: 0.3073 - val_loss: 1.9077 Epoch 6/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.7229 - loss: 0.9312 Epoch 6: val_loss did not improve from 1.78026 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.7241 - loss: 0.9305 - val_accuracy: 0.3017 - val_loss: 2.1513 Epoch 7/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.8023 - loss: 0.9132 Epoch 7: val_loss did not improve from 1.78026 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.8035 - loss: 0.9093 - val_accuracy: 0.3184 - val_loss: 2.1705 Epoch 8/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.8127 - loss: 0.8380 Epoch 8: val_loss did not improve from 1.78026 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.8128 - loss: 0.8356 - val_accuracy: 0.3296 - val_loss: 2.2043 Epoch 9/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.8494 - loss: 0.7641 Epoch 9: val_loss did not improve from 1.78026 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.8494 - loss: 0.7622 - val_accuracy: 0.3017 - val_loss: 2.3734 Epoch 10/10 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.8634 - loss: 0.6883 Epoch 10: val_loss did not improve from 1.78026 13/13 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - accuracy: 0.8649 - loss: 0.6882 - val_accuracy: 0.3240 - val_loss: 2.4410 7/7 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.7816 - loss: 1.0624 Test accuracy: 56.7% ``` </div> **Note**: To keep the runtime of this example relatively short, we just used a few training examples. This number of training examples is low with respect to the sequence model being used that has 99,909 trainable parameters. 
You are encouraged to sample more data from the UCF101 dataset using [the notebook](https://colab.research.google.com/github/sayakpaul/Action-Recognition-in-TensorFlow/blob/main/Data_Preparation_UCF101.ipynb) mentioned above and train the same model. --- ## Inference ```python def prepare_single_video(frames): frames = frames[None, ...] frame_mask = np.zeros( shape=( 1, MAX_SEQ_LENGTH, ), dtype="bool", ) frame_features = np.zeros(shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32") for i, batch in enumerate(frames): video_length = batch.shape[0] length = min(MAX_SEQ_LENGTH, video_length) for j in range(length): frame_features[i, j, :] = feature_extractor.predict(batch[None, j, :]) frame_mask[i, :length] = 1 # 1 = not masked, 0 = masked return frame_features, frame_mask def sequence_prediction(path): class_vocab = label_processor.get_vocabulary() frames = load_video(os.path.join("test", path)) frame_features, frame_mask = prepare_single_video(frames) probabilities = sequence_model.predict([frame_features, frame_mask])[0] for i in np.argsort(probabilities)[::-1]: print(f" {class_vocab[i]}: {probabilities[i] * 100:5.2f}%") return frames # This utility is for visualization. # Referenced from: # https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub def to_gif(images): converted_images = images.astype(np.uint8) imageio.mimsave("animation.gif", converted_images, duration=100) return Image("animation.gif") test_video = np.random.choice(test_df["video_name"].values.tolist()) print(f"Test video path: {test_video}") test_frames = sequence_prediction(test_video) to_gif(test_frames[:MAX_SEQ_LENGTH]) ``` <div class="k-default-codeblock"> ``` Test video path: v_TennisSwing_g03_c01.avi 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 35ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 35ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 32ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 166ms/step CricketShot: 46.99% ShavingBeard: 18.83% TennisSwing: 14.65% Punch: 12.41% PlayingCello: 7.12% <IPython.core.display.Image object> ``` </div> --- ## Next steps * In this example, we made use of transfer learning for extracting meaningful features from video frames. You could also fine-tune the pre-trained network to notice how that affects the end results. * For speed-accuracy trade-offs, you can try out other models present inside `keras.applications`. * Try different combinations of `MAX_SEQ_LENGTH` to observe how that affects the performance. * Train on a higher number of classes and see if you are able to get good performance. * Following [this tutorial](https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub), try a [pre-trained action recognition model](https://arxiv.org/abs/1705.07750) from DeepMind. * Rolling-averaging can be useful technique for video classification and it can be combined with a standard image classification model to infer on videos. 
[This tutorial](https://www.pyimagesearch.com/2019/07/15/video-classification-with-keras-and-deep-learning/)
will help you understand how to use rolling-averaging with an image classifier.
* When there are variations between the frames of a video, not all frames might be equally
important for deciding its category. In those situations, putting a
[self-attention layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Attention) in the
sequence model will likely yield better results (a minimal sketch is shown after this list).
* Following [this book chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-11),
you can implement Transformers-based models for processing videos.
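Here is a minimal sketch of what adding a self-attention layer to the sequence
model could look like. It reuses `label_processor`, `MAX_SEQ_LENGTH` and
`NUM_FEATURES` defined earlier; the choice of `keras.layers.Attention` followed
by average pooling, and the layer sizes, are illustrative and untuned.

```python
def get_attention_sequence_model():
    class_vocab = label_processor.get_vocabulary()

    frame_features_input = keras.Input((MAX_SEQ_LENGTH, NUM_FEATURES))
    mask_input = keras.Input((MAX_SEQ_LENGTH,), dtype="bool")

    x = keras.layers.GRU(16, return_sequences=True)(
        frame_features_input, mask=mask_input
    )
    # Self-attention over the timesteps of the GRU outputs (query = value = x).
    x = keras.layers.Attention()([x, x])
    x = keras.layers.GlobalAveragePooling1D()(x)
    x = keras.layers.Dropout(0.4)(x)
    x = keras.layers.Dense(8, activation="relu")(x)
    output = keras.layers.Dense(len(class_vocab), activation="softmax")(x)

    rnn_model = keras.Model([frame_features_input, mask_input], output)
    rnn_model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer="adam",
        metrics=["accuracy"],
    )
    return rnn_model
```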
keras-io/examples/vision/md/video_classification.md/0
{ "file_path": "keras-io/examples/vision/md/video_classification.md", "repo_id": "keras-io", "token_count": 8378 }
126
""" Title: 3D volumetric rendering with NeRF Authors: [Aritra Roy Gosthipaty](https://twitter.com/arig23498), [Ritwik Raha](https://twitter.com/ritwik_raha) Date created: 2021/08/09 Last modified: 2023/11/13 Description: Minimal implementation of volumetric rendering as shown in NeRF. Accelerator: GPU """ """ ## Introduction In this example, we present a minimal implementation of the research paper [**NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis**](https://arxiv.org/abs/2003.08934) by Ben Mildenhall et. al. The authors have proposed an ingenious way to *synthesize novel views of a scene* by modelling the *volumetric scene function* through a neural network. To help you understand this intuitively, let's start with the following question: *would it be possible to give to a neural network the position of a pixel in an image, and ask the network to predict the color at that position?* | ![2d-train](https://i.imgur.com/DQM92vN.png) | | :---: | | **Figure 1**: A neural network being given coordinates of an image as input and asked to predict the color at the coordinates. | The neural network would hypothetically *memorize* (overfit on) the image. This means that our neural network would have encoded the entire image in its weights. We could query the neural network with each position, and it would eventually reconstruct the entire image. | ![2d-test](https://i.imgur.com/6Qz5Hp1.png) | | :---: | | **Figure 2**: The trained neural network recreates the image from scratch. | A question now arises, how do we extend this idea to learn a 3D volumetric scene? Implementing a similar process as above would require the knowledge of every voxel (volume pixel). Turns out, this is quite a challenging task to do. The authors of the paper propose a minimal and elegant way to learn a 3D scene using a few images of the scene. They discard the use of voxels for training. The network learns to model the volumetric scene, thus generating novel views (images) of the 3D scene that the model was not shown at training time. There are a few prerequisites one needs to understand to fully appreciate the process. We structure the example in such a way that you will have all the required knowledge before starting the implementation. """ """ ## Setup """ import os os.environ["KERAS_BACKEND"] = "tensorflow" # Setting random seed to obtain reproducible results. import tensorflow as tf tf.random.set_seed(42) import keras from keras import layers import os import glob import imageio.v2 as imageio import numpy as np from tqdm import tqdm import matplotlib.pyplot as plt # Initialize global variables. AUTO = tf.data.AUTOTUNE BATCH_SIZE = 5 NUM_SAMPLES = 32 POS_ENCODE_DIMS = 16 EPOCHS = 20 """ ## Download and load the data The `npz` data file contains images, camera poses, and a focal length. The images are taken from multiple camera angles as shown in **Figure 3**. | ![camera-angles](https://i.imgur.com/FLsi2is.png) | | :---: | | **Figure 3**: Multiple camera angles <br> [Source: NeRF](https://arxiv.org/abs/2003.08934) | To understand camera poses in this context we have to first allow ourselves to think that a *camera is a mapping between the real-world and the 2-D image*. 
| ![mapping](https://www.mathworks.com/help/vision/ug/calibration_coordinate_blocks.png) | | :---: | | **Figure 4**: 3-D world to 2-D image mapping through a camera <br> [Source: Mathworks](https://www.mathworks.com/help/vision/ug/camera-calibration.html) | Consider the following equation: <img src="https://i.imgur.com/TQHKx5v.pngg" width="100" height="50"/> Where **x** is the 2-D image point, **X** is the 3-D world point and **P** is the camera-matrix. **P** is a 3 x 4 matrix that plays the crucial role of mapping the real world object onto an image plane. <img src="https://i.imgur.com/chvJct5.png" width="300" height="100"/> The camera-matrix is an *affine transform matrix* that is concatenated with a 3 x 1 column `[image height, image width, focal length]` to produce the *pose matrix*. This matrix is of dimensions 3 x 5 where the first 3 x 3 block is in the camera’s point of view. The axes are `[down, right, backwards]` or `[-y, x, z]` where the camera is facing forwards `-z`. | ![camera-mapping](https://i.imgur.com/kvjqbiO.png) | | :---: | | **Figure 5**: The affine transformation. | The COLMAP frame is `[right, down, forwards]` or `[x, -y, -z]`. Read more about COLMAP [here](https://colmap.github.io/). """ # Download the data if it does not already exist. url = ( "http://cseweb.ucsd.edu/~viscomp/projects/LF/papers/ECCV20/nerf/tiny_nerf_data.npz" ) data = keras.utils.get_file(origin=url) data = np.load(data) images = data["images"] im_shape = images.shape (num_images, H, W, _) = images.shape (poses, focal) = (data["poses"], data["focal"]) # Plot a random image from the dataset for visualization. plt.imshow(images[np.random.randint(low=0, high=num_images)]) plt.show() """ ## Data pipeline Now that you've understood the notion of camera matrix and the mapping from a 3D scene to 2D images, let's talk about the inverse mapping, i.e. from 2D image to the 3D scene. We'll need to talk about volumetric rendering with ray casting and tracing, which are common computer graphics techniques. This section will help you get to speed with these techniques. Consider an image with `N` pixels. We shoot a ray through each pixel and sample some points on the ray. A ray is commonly parameterized by the equation `r(t) = o + td` where `t` is the parameter, `o` is the origin and `d` is the unit directional vector as shown in **Figure 6**. | ![img](https://i.imgur.com/ywrqlzt.gif) | | :---: | | **Figure 6**: `r(t) = o + td` where t is 3 | In **Figure 7**, we consider a ray, and we sample some random points on the ray. These sample points each have a unique location `(x, y, z)` and the ray has a viewing angle `(theta, phi)`. The viewing angle is particularly interesting as we can shoot a ray through a single pixel in a lot of different ways, each with a unique viewing angle. Another interesting thing to notice here is the noise that is added to the sampling process. We add a uniform noise to each sample so that the samples correspond to a continuous distribution. In **Figure 7** the blue points are the evenly distributed samples and the white points `(t1, t2, t3)` are randomly placed between the samples. | ![img](https://i.imgur.com/r9TS2wv.gif) | | :---: | | **Figure 7**: Sampling the points from a ray. | **Figure 8** showcases the entire sampling process in 3D, where you can see the rays coming out of the white image. This means that each pixel will have its corresponding rays and each ray will be sampled at distinct points. 
| ![3-d rays](https://i.imgur.com/hr4D2g2.gif) | | :---: | | **Figure 8**: Shooting rays from all the pixels of an image in 3-D | These sampled points act as the input to the NeRF model. The model is then asked to predict the RGB color and the volume density at that point. | ![3-Drender](https://i.imgur.com/HHb6tlQ.png) | | :---: | | **Figure 9**: Data pipeline <br> [Source: NeRF](https://arxiv.org/abs/2003.08934) | """ def encode_position(x): """Encodes the position into its corresponding Fourier feature. Args: x: The input coordinate. Returns: Fourier features tensors of the position. """ positions = [x] for i in range(POS_ENCODE_DIMS): for fn in [tf.sin, tf.cos]: positions.append(fn(2.0**i * x)) return tf.concat(positions, axis=-1) def get_rays(height, width, focal, pose): """Computes origin point and direction vector of rays. Args: height: Height of the image. width: Width of the image. focal: The focal length between the images and the camera. pose: The pose matrix of the camera. Returns: Tuple of origin point and direction vector for rays. """ # Build a meshgrid for the rays. i, j = tf.meshgrid( tf.range(width, dtype=tf.float32), tf.range(height, dtype=tf.float32), indexing="xy", ) # Normalize the x axis coordinates. transformed_i = (i - width * 0.5) / focal # Normalize the y axis coordinates. transformed_j = (j - height * 0.5) / focal # Create the direction unit vectors. directions = tf.stack([transformed_i, -transformed_j, -tf.ones_like(i)], axis=-1) # Get the camera matrix. camera_matrix = pose[:3, :3] height_width_focal = pose[:3, -1] # Get origins and directions for the rays. transformed_dirs = directions[..., None, :] camera_dirs = transformed_dirs * camera_matrix ray_directions = tf.reduce_sum(camera_dirs, axis=-1) ray_origins = tf.broadcast_to(height_width_focal, tf.shape(ray_directions)) # Return the origins and directions. return (ray_origins, ray_directions) def render_flat_rays(ray_origins, ray_directions, near, far, num_samples, rand=False): """Renders the rays and flattens it. Args: ray_origins: The origin points for rays. ray_directions: The direction unit vectors for the rays. near: The near bound of the volumetric scene. far: The far bound of the volumetric scene. num_samples: Number of sample points in a ray. rand: Choice for randomising the sampling strategy. Returns: Tuple of flattened rays and sample points on each rays. """ # Compute 3D query points. # Equation: r(t) = o+td -> Building the "t" here. t_vals = tf.linspace(near, far, num_samples) if rand: # Inject uniform noise into sample space to make the sampling # continuous. shape = list(ray_origins.shape[:-1]) + [num_samples] noise = tf.random.uniform(shape=shape) * (far - near) / num_samples t_vals = t_vals + noise # Equation: r(t) = o + td -> Building the "r" here. rays = ray_origins[..., None, :] + ( ray_directions[..., None, :] * t_vals[..., None] ) rays_flat = tf.reshape(rays, [-1, 3]) rays_flat = encode_position(rays_flat) return (rays_flat, t_vals) def map_fn(pose): """Maps individual pose to flattened rays and sample points. Args: pose: The pose matrix of the camera. Returns: Tuple of flattened rays and sample points corresponding to the camera pose. """ (ray_origins, ray_directions) = get_rays(height=H, width=W, focal=focal, pose=pose) (rays_flat, t_vals) = render_flat_rays( ray_origins=ray_origins, ray_directions=ray_directions, near=2.0, far=6.0, num_samples=NUM_SAMPLES, rand=True, ) return (rays_flat, t_vals) # Create the training split. 
split_index = int(num_images * 0.8) # Split the images into training and validation. train_images = images[:split_index] val_images = images[split_index:] # Split the poses into training and validation. train_poses = poses[:split_index] val_poses = poses[split_index:] # Make the training pipeline. train_img_ds = tf.data.Dataset.from_tensor_slices(train_images) train_pose_ds = tf.data.Dataset.from_tensor_slices(train_poses) train_ray_ds = train_pose_ds.map(map_fn, num_parallel_calls=AUTO) training_ds = tf.data.Dataset.zip((train_img_ds, train_ray_ds)) train_ds = ( training_ds.shuffle(BATCH_SIZE) .batch(BATCH_SIZE, drop_remainder=True, num_parallel_calls=AUTO) .prefetch(AUTO) ) # Make the validation pipeline. val_img_ds = tf.data.Dataset.from_tensor_slices(val_images) val_pose_ds = tf.data.Dataset.from_tensor_slices(val_poses) val_ray_ds = val_pose_ds.map(map_fn, num_parallel_calls=AUTO) validation_ds = tf.data.Dataset.zip((val_img_ds, val_ray_ds)) val_ds = ( validation_ds.shuffle(BATCH_SIZE) .batch(BATCH_SIZE, drop_remainder=True, num_parallel_calls=AUTO) .prefetch(AUTO) ) """ ## NeRF model The model is a multi-layer perceptron (MLP), with ReLU as its non-linearity. An excerpt from the paper: *"We encourage the representation to be multiview-consistent by restricting the network to predict the volume density sigma as a function of only the location `x`, while allowing the RGB color `c` to be predicted as a function of both location and viewing direction. To accomplish this, the MLP first processes the input 3D coordinate `x` with 8 fully-connected layers (using ReLU activations and 256 channels per layer), and outputs sigma and a 256-dimensional feature vector. This feature vector is then concatenated with the camera ray's viewing direction and passed to one additional fully-connected layer (using a ReLU activation and 128 channels) that output the view-dependent RGB color."* Here we have gone for a minimal implementation and have used 64 Dense units instead of 256 as mentioned in the paper. """ def get_nerf_model(num_layers, num_pos): """Generates the NeRF neural network. Args: num_layers: The number of MLP layers. num_pos: The number of dimensions of positional encoding. Returns: The `keras` model. """ inputs = keras.Input(shape=(num_pos, 2 * 3 * POS_ENCODE_DIMS + 3)) x = inputs for i in range(num_layers): x = layers.Dense(units=64, activation="relu")(x) if i % 4 == 0 and i > 0: # Inject residual connection. x = layers.concatenate([x, inputs], axis=-1) outputs = layers.Dense(units=4)(x) return keras.Model(inputs=inputs, outputs=outputs) def render_rgb_depth(model, rays_flat, t_vals, rand=True, train=True): """Generates the RGB image and depth map from model prediction. Args: model: The MLP model that is trained to predict the rgb and volume density of the volumetric scene. rays_flat: The flattened rays that serve as the input to the NeRF model. t_vals: The sample points for the rays. rand: Choice to randomise the sampling strategy. train: Whether the model is in the training or testing phase. Returns: Tuple of rgb image and depth map. """ # Get the predictions from the nerf model and reshape it. if train: predictions = model(rays_flat) else: predictions = model.predict(rays_flat) predictions = tf.reshape(predictions, shape=(BATCH_SIZE, H, W, NUM_SAMPLES, 4)) # Slice the predictions into rgb and sigma. rgb = tf.sigmoid(predictions[..., :-1]) sigma_a = tf.nn.relu(predictions[..., -1]) # Get the distance of adjacent intervals. 
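    # A short recap (for clarity) of the volume rendering equation implemented
    # below: for samples t_i along a ray,
    #   alpha_i = 1 - exp(-sigma_i * delta_i)   -> opacity of the i-th interval
    #   T_i     = prod_{j < i} (1 - alpha_j)    -> accumulated transmittance
    #   color   = sum_i T_i * alpha_i * rgb_i
    # where delta_i = t_{i+1} - t_i is the interval length computed next.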
delta = t_vals[..., 1:] - t_vals[..., :-1] # delta shape = (num_samples) if rand: delta = tf.concat( [delta, tf.broadcast_to([1e10], shape=(BATCH_SIZE, H, W, 1))], axis=-1 ) alpha = 1.0 - tf.exp(-sigma_a * delta) else: delta = tf.concat( [delta, tf.broadcast_to([1e10], shape=(BATCH_SIZE, 1))], axis=-1 ) alpha = 1.0 - tf.exp(-sigma_a * delta[:, None, None, :]) # Get transmittance. exp_term = 1.0 - alpha epsilon = 1e-10 transmittance = tf.math.cumprod(exp_term + epsilon, axis=-1, exclusive=True) weights = alpha * transmittance rgb = tf.reduce_sum(weights[..., None] * rgb, axis=-2) if rand: depth_map = tf.reduce_sum(weights * t_vals, axis=-1) else: depth_map = tf.reduce_sum(weights * t_vals[:, None, None], axis=-1) return (rgb, depth_map) """ ## Training The training step is implemented as part of a custom `keras.Model` subclass so that we can make use of the `model.fit` functionality. """ class NeRF(keras.Model): def __init__(self, nerf_model): super().__init__() self.nerf_model = nerf_model def compile(self, optimizer, loss_fn): super().compile() self.optimizer = optimizer self.loss_fn = loss_fn self.loss_tracker = keras.metrics.Mean(name="loss") self.psnr_metric = keras.metrics.Mean(name="psnr") def train_step(self, inputs): # Get the images and the rays. (images, rays) = inputs (rays_flat, t_vals) = rays with tf.GradientTape() as tape: # Get the predictions from the model. rgb, _ = render_rgb_depth( model=self.nerf_model, rays_flat=rays_flat, t_vals=t_vals, rand=True ) loss = self.loss_fn(images, rgb) # Get the trainable variables. trainable_variables = self.nerf_model.trainable_variables # Get the gradeints of the trainiable variables with respect to the loss. gradients = tape.gradient(loss, trainable_variables) # Apply the grads and optimize the model. self.optimizer.apply_gradients(zip(gradients, trainable_variables)) # Get the PSNR of the reconstructed images and the source images. psnr = tf.image.psnr(images, rgb, max_val=1.0) # Compute our own metrics self.loss_tracker.update_state(loss) self.psnr_metric.update_state(psnr) return {"loss": self.loss_tracker.result(), "psnr": self.psnr_metric.result()} def test_step(self, inputs): # Get the images and the rays. (images, rays) = inputs (rays_flat, t_vals) = rays # Get the predictions from the model. rgb, _ = render_rgb_depth( model=self.nerf_model, rays_flat=rays_flat, t_vals=t_vals, rand=True ) loss = self.loss_fn(images, rgb) # Get the PSNR of the reconstructed images and the source images. psnr = tf.image.psnr(images, rgb, max_val=1.0) # Compute our own metrics self.loss_tracker.update_state(loss) self.psnr_metric.update_state(psnr) return {"loss": self.loss_tracker.result(), "psnr": self.psnr_metric.result()} @property def metrics(self): return [self.loss_tracker, self.psnr_metric] test_imgs, test_rays = next(iter(train_ds)) test_rays_flat, test_t_vals = test_rays loss_list = [] class TrainMonitor(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): loss = logs["loss"] loss_list.append(loss) test_recons_images, depth_maps = render_rgb_depth( model=self.model.nerf_model, rays_flat=test_rays_flat, t_vals=test_t_vals, rand=True, train=False, ) # Plot the rgb, depth and the loss plot. 
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(20, 5)) ax[0].imshow(keras.utils.array_to_img(test_recons_images[0])) ax[0].set_title(f"Predicted Image: {epoch:03d}") ax[1].imshow(keras.utils.array_to_img(depth_maps[0, ..., None])) ax[1].set_title(f"Depth Map: {epoch:03d}") ax[2].plot(loss_list) ax[2].set_xticks(np.arange(0, EPOCHS + 1, 5.0)) ax[2].set_title(f"Loss Plot: {epoch:03d}") fig.savefig(f"images/{epoch:03d}.png") plt.show() plt.close() num_pos = H * W * NUM_SAMPLES nerf_model = get_nerf_model(num_layers=8, num_pos=num_pos) model = NeRF(nerf_model) model.compile( optimizer=keras.optimizers.Adam(), loss_fn=keras.losses.MeanSquaredError() ) # Create a directory to save the images during training. if not os.path.exists("images"): os.makedirs("images") model.fit( train_ds, validation_data=val_ds, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=[TrainMonitor()], ) def create_gif(path_to_images, name_gif): filenames = glob.glob(path_to_images) filenames = sorted(filenames) images = [] for filename in tqdm(filenames): images.append(imageio.imread(filename)) kargs = {"duration": 0.25} imageio.mimsave(name_gif, images, "GIF", **kargs) create_gif("images/*.png", "training.gif") """ ## Visualize the training step Here we see the training step. With the decreasing loss, the rendered image and the depth maps are getting better. In your local system, you will see the `training.gif` file generated. ![training-20](https://i.imgur.com/ql5OcYA.gif) """ """ ## Inference In this section, we ask the model to build novel views of the scene. The model was given `106` views of the scene in the training step. The collections of training images cannot contain each and every angle of the scene. A trained model can represent the entire 3-D scene with a sparse set of training images. Here we provide different poses to the model and ask for it to give us the 2-D image corresponding to that camera view. If we infer the model for all the 360-degree views, it should provide an overview of the entire scenery from all around. """ # Get the trained NeRF model and infer. nerf_model = model.nerf_model test_recons_images, depth_maps = render_rgb_depth( model=nerf_model, rays_flat=test_rays_flat, t_vals=test_t_vals, rand=True, train=False, ) # Create subplots. fig, axes = plt.subplots(nrows=5, ncols=3, figsize=(10, 20)) for ax, ori_img, recons_img, depth_map in zip( axes, test_imgs, test_recons_images, depth_maps ): ax[0].imshow(keras.utils.array_to_img(ori_img)) ax[0].set_title("Original") ax[1].imshow(keras.utils.array_to_img(recons_img)) ax[1].set_title("Reconstructed") ax[2].imshow(keras.utils.array_to_img(depth_map[..., None]), cmap="inferno") ax[2].set_title("Depth Map") """ ## Render 3D Scene Here we will synthesize novel 3D views and stitch all of them together to render a video encompassing the 360-degree view. 
""" def get_translation_t(t): """Get the translation matrix for movement in t.""" matrix = [ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, t], [0, 0, 0, 1], ] return tf.convert_to_tensor(matrix, dtype=tf.float32) def get_rotation_phi(phi): """Get the rotation matrix for movement in phi.""" matrix = [ [1, 0, 0, 0], [0, tf.cos(phi), -tf.sin(phi), 0], [0, tf.sin(phi), tf.cos(phi), 0], [0, 0, 0, 1], ] return tf.convert_to_tensor(matrix, dtype=tf.float32) def get_rotation_theta(theta): """Get the rotation matrix for movement in theta.""" matrix = [ [tf.cos(theta), 0, -tf.sin(theta), 0], [0, 1, 0, 0], [tf.sin(theta), 0, tf.cos(theta), 0], [0, 0, 0, 1], ] return tf.convert_to_tensor(matrix, dtype=tf.float32) def pose_spherical(theta, phi, t): """ Get the camera to world matrix for the corresponding theta, phi and t. """ c2w = get_translation_t(t) c2w = get_rotation_phi(phi / 180.0 * np.pi) @ c2w c2w = get_rotation_theta(theta / 180.0 * np.pi) @ c2w c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w return c2w rgb_frames = [] batch_flat = [] batch_t = [] # Iterate over different theta value and generate scenes. for index, theta in tqdm(enumerate(np.linspace(0.0, 360.0, 120, endpoint=False))): # Get the camera to world matrix. c2w = pose_spherical(theta, -30.0, 4.0) # ray_oris, ray_dirs = get_rays(H, W, focal, c2w) rays_flat, t_vals = render_flat_rays( ray_oris, ray_dirs, near=2.0, far=6.0, num_samples=NUM_SAMPLES, rand=False ) if index % BATCH_SIZE == 0 and index > 0: batched_flat = tf.stack(batch_flat, axis=0) batch_flat = [rays_flat] batched_t = tf.stack(batch_t, axis=0) batch_t = [t_vals] rgb, _ = render_rgb_depth( nerf_model, batched_flat, batched_t, rand=False, train=False ) temp_rgb = [np.clip(255 * img, 0.0, 255.0).astype(np.uint8) for img in rgb] rgb_frames = rgb_frames + temp_rgb else: batch_flat.append(rays_flat) batch_t.append(t_vals) rgb_video = "rgb_video.mp4" imageio.mimwrite(rgb_video, rgb_frames, fps=30, quality=7, macro_block_size=None) """ ### Visualize the video Here we can see the rendered 360 degree view of the scene. The model has successfully learned the entire volumetric space through the sparse set of images in **only 20 epochs**. You can view the rendered video saved locally, named `rgb_video.mp4`. ![rendered-video](https://i.imgur.com/j2sIkzW.gif) """ """ ## Conclusion We have produced a minimal implementation of NeRF to provide an intuition of its core ideas and methodology. This method has been used in various other works in the computer graphics space. We would like to encourage our readers to use this code as an example and play with the hyperparameters and visualize the outputs. Below we have also provided the outputs of the model trained for more epochs. | Epochs | GIF of the training step | | :--- | :---: | | **100** | ![100-epoch-training](https://i.imgur.com/2k9p8ez.gif) | | **200** | ![200-epoch-training](https://i.imgur.com/l3rG4HQ.gif) | ## Way forward If anyone is interested to go deeper into NeRF, we have built a 3-part blog series at [PyImageSearch](https://pyimagesearch.com/). 
- [Prerequisites of NeRF](https://www.pyimagesearch.com/2021/11/10/computer-graphics-and-deep-learning-with-nerf-using-tensorflow-and-keras-part-1/) - [Concepts of NeRF](https://www.pyimagesearch.com/2021/11/17/computer-graphics-and-deep-learning-with-nerf-using-tensorflow-and-keras-part-2/) - [Implementing NeRF](https://www.pyimagesearch.com/2021/11/24/computer-graphics-and-deep-learning-with-nerf-using-tensorflow-and-keras-part-3/) ## Reference - [NeRF repository](https://github.com/bmild/nerf): The official repository for NeRF. - [NeRF paper](https://arxiv.org/abs/2003.08934): The paper on NeRF. - [Manim Repository](https://github.com/3b1b/manim): We have used manim to build all the animations. - [Mathworks](https://www.mathworks.com/help/vision/ug/camera-calibration.html): Mathworks for the camera calibration article. - [Mathew's video](https://www.youtube.com/watch?v=dPWLybp4LL0): A great video on NeRF. You can try the model on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/NeRF). """
keras-io/examples/vision/nerf.py/0
{ "file_path": "keras-io/examples/vision/nerf.py", "repo_id": "keras-io", "token_count": 9868 }
127
""" Title: Semi-supervised image classification using contrastive pretraining with SimCLR Author: [András Béres](https://www.linkedin.com/in/andras-beres-789190210) Date created: 2021/04/24 Last modified: 2021/04/24 Description: Contrastive pretraining with SimCLR for semi-supervised image classification on the STL-10 dataset. Accelerator: GPU """ """ ## Introduction ### Semi-supervised learning Semi-supervised learning is a machine learning paradigm that deals with **partially labeled datasets**. When applying deep learning in the real world, one usually has to gather a large dataset to make it work well. However, while the cost of labeling scales linearly with the dataset size (labeling each example takes a constant time), model performance only scales [sublinearly](https://arxiv.org/abs/2001.08361) with it. This means that labeling more and more samples becomes less and less cost-efficient, while gathering unlabeled data is generally cheap, as it is usually readily available in large quantities. Semi-supervised learning offers to solve this problem by only requiring a partially labeled dataset, and by being label-efficient by utilizing the unlabeled examples for learning as well. In this example, we will pretrain an encoder with contrastive learning on the [STL-10](https://ai.stanford.edu/~acoates/stl10/) semi-supervised dataset using no labels at all, and then fine-tune it using only its labeled subset. ### Contrastive learning On the highest level, the main idea behind contrastive learning is to **learn representations that are invariant to image augmentations** in a self-supervised manner. One problem with this objective is that it has a trivial degenerate solution: the case where the representations are constant, and do not depend at all on the input images. Contrastive learning avoids this trap by modifying the objective in the following way: it pulls representations of augmented versions/views of the same image closer to each other (contracting positives), while simultaneously pushing different images away from each other (contrasting negatives) in representation space. One such contrastive approach is [SimCLR](https://arxiv.org/abs/2002.05709), which essentially identifies the core components needed to optimize this objective, and can achieve high performance by scaling this simple approach. Another approach is [SimSiam](https://arxiv.org/abs/2011.10566) ([Keras example](https://keras.io/examples/vision/simsiam/)), whose main difference from SimCLR is that the former does not use any negatives in its loss. Therefore, it does not explicitly prevent the trivial solution, and, instead, avoids it implicitly by architecture design (asymmetric encoding paths using a predictor network and batch normalization (BatchNorm) are applied in the final layers). For further reading about SimCLR, check out [the official Google AI blog post](https://ai.googleblog.com/2020/04/advancing-self-supervised-and-semi.html), and for an overview of self-supervised learning across both vision and language check out [this blog post](https://ai.facebook.com/blog/self-supervised-learning-the-dark-matter-of-intelligence/). 
""" """ ## Setup """ import os os.environ["KERAS_BACKEND"] = "tensorflow" # Make sure we are able to handle large datasets import resource low, high = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (high, high)) import math import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_datasets as tfds import keras from keras import layers """ ## Hyperparameter setup """ # Dataset hyperparameters unlabeled_dataset_size = 100000 labeled_dataset_size = 5000 image_channels = 3 # Algorithm hyperparameters num_epochs = 20 batch_size = 525 # Corresponds to 200 steps per epoch width = 128 temperature = 0.1 # Stronger augmentations for contrastive, weaker ones for supervised training contrastive_augmentation = {"min_area": 0.25, "brightness": 0.6, "jitter": 0.2} classification_augmentation = { "min_area": 0.75, "brightness": 0.3, "jitter": 0.1, } """ ## Dataset During training we will simultaneously load a large batch of unlabeled images along with a smaller batch of labeled images. """ def prepare_dataset(): # Labeled and unlabeled samples are loaded synchronously # with batch sizes selected accordingly steps_per_epoch = (unlabeled_dataset_size + labeled_dataset_size) // batch_size unlabeled_batch_size = unlabeled_dataset_size // steps_per_epoch labeled_batch_size = labeled_dataset_size // steps_per_epoch print( f"batch size is {unlabeled_batch_size} (unlabeled) + {labeled_batch_size} (labeled)" ) # Turning off shuffle to lower resource usage unlabeled_train_dataset = ( tfds.load("stl10", split="unlabelled", as_supervised=True, shuffle_files=False) .shuffle(buffer_size=10 * unlabeled_batch_size) .batch(unlabeled_batch_size) ) labeled_train_dataset = ( tfds.load("stl10", split="train", as_supervised=True, shuffle_files=False) .shuffle(buffer_size=10 * labeled_batch_size) .batch(labeled_batch_size) ) test_dataset = ( tfds.load("stl10", split="test", as_supervised=True) .batch(batch_size) .prefetch(buffer_size=tf.data.AUTOTUNE) ) # Labeled and unlabeled datasets are zipped together train_dataset = tf.data.Dataset.zip( (unlabeled_train_dataset, labeled_train_dataset) ).prefetch(buffer_size=tf.data.AUTOTUNE) return train_dataset, labeled_train_dataset, test_dataset # Load STL10 dataset train_dataset, labeled_train_dataset, test_dataset = prepare_dataset() """ ## Image augmentations The two most important image augmentations for contrastive learning are the following: - Cropping: forces the model to encode different parts of the same image similarly, we implement it with the [RandomTranslation](https://keras.io/api/layers/preprocessing_layers/image_augmentation/random_translation/) and [RandomZoom](https://keras.io/api/layers/preprocessing_layers/image_augmentation/random_zoom/) layers - Color jitter: prevents a trivial color histogram-based solution to the task by distorting color histograms. A principled way to implement that is by affine transformations in color space. In this example we use random horizontal flips as well. Stronger augmentations are applied for contrastive learning, along with weaker ones for supervised classification to avoid overfitting on the few labeled examples. We implement random color jitter as a custom preprocessing layer. 
Using preprocessing layers for data augmentation has the following two advantages: - The data augmentation will run on GPU in batches, so the training will not be bottlenecked by the data pipeline in environments with constrained CPU resources (such as a Colab Notebook, or a personal machine) - Deployment is easier as the data preprocessing pipeline is encapsulated in the model, and does not have to be reimplemented when deploying it """ # Distorts the color distibutions of images class RandomColorAffine(layers.Layer): def __init__(self, brightness=0, jitter=0, **kwargs): super().__init__(**kwargs) self.brightness = brightness self.jitter = jitter def get_config(self): config = super().get_config() config.update({"brightness": self.brightness, "jitter": self.jitter}) return config def call(self, images, training=True): if training: batch_size = tf.shape(images)[0] # Same for all colors brightness_scales = 1 + tf.random.uniform( (batch_size, 1, 1, 1), minval=-self.brightness, maxval=self.brightness, ) # Different for all colors jitter_matrices = tf.random.uniform( (batch_size, 1, 3, 3), minval=-self.jitter, maxval=self.jitter ) color_transforms = ( tf.eye(3, batch_shape=[batch_size, 1]) * brightness_scales + jitter_matrices ) images = tf.clip_by_value(tf.matmul(images, color_transforms), 0, 1) return images # Image augmentation module def get_augmenter(min_area, brightness, jitter): zoom_factor = 1.0 - math.sqrt(min_area) return keras.Sequential( [ layers.Rescaling(1 / 255), layers.RandomFlip("horizontal"), layers.RandomTranslation(zoom_factor / 2, zoom_factor / 2), layers.RandomZoom((-zoom_factor, 0.0), (-zoom_factor, 0.0)), RandomColorAffine(brightness, jitter), ] ) def visualize_augmentations(num_images): # Sample a batch from a dataset images = next(iter(train_dataset))[0][0][:num_images] # Apply augmentations augmented_images = zip( images, get_augmenter(**classification_augmentation)(images), get_augmenter(**contrastive_augmentation)(images), get_augmenter(**contrastive_augmentation)(images), ) row_titles = [ "Original:", "Weakly augmented:", "Strongly augmented:", "Strongly augmented:", ] plt.figure(figsize=(num_images * 2.2, 4 * 2.2), dpi=100) for column, image_row in enumerate(augmented_images): for row, image in enumerate(image_row): plt.subplot(4, num_images, row * num_images + column + 1) plt.imshow(image) if column == 0: plt.title(row_titles[row], loc="left") plt.axis("off") plt.tight_layout() visualize_augmentations(num_images=8) """ ## Encoder architecture """ # Define the encoder architecture def get_encoder(): return keras.Sequential( [ layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"), layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"), layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"), layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"), layers.Flatten(), layers.Dense(width, activation="relu"), ], name="encoder", ) """ ## Supervised baseline model A baseline supervised model is trained using random initialization. 
""" # Baseline supervised training with random initialization baseline_model = keras.Sequential( [ get_augmenter(**classification_augmentation), get_encoder(), layers.Dense(10), ], name="baseline_model", ) baseline_model.compile( optimizer=keras.optimizers.Adam(), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")], ) baseline_history = baseline_model.fit( labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset ) print( "Maximal validation accuracy: {:.2f}%".format( max(baseline_history.history["val_acc"]) * 100 ) ) """ ## Self-supervised model for contrastive pretraining We pretrain an encoder on unlabeled images with a contrastive loss. A nonlinear projection head is attached to the top of the encoder, as it improves the quality of representations of the encoder. We use the InfoNCE/NT-Xent/N-pairs loss, which can be interpreted in the following way: 1. We treat each image in the batch as if it had its own class. 2. Then, we have two examples (a pair of augmented views) for each "class". 3. Each view's representation is compared to every possible pair's one (for both augmented versions). 4. We use the temperature-scaled cosine similarity of compared representations as logits. 5. Finally, we use categorical cross-entropy as the "classification" loss The following two metrics are used for monitoring the pretraining performance: - [Contrastive accuracy (SimCLR Table 5)](https://arxiv.org/abs/2002.05709): Self-supervised metric, the ratio of cases in which the representation of an image is more similar to its differently augmented version's one, than to the representation of any other image in the current batch. Self-supervised metrics can be used for hyperparameter tuning even in the case when there are no labeled examples. - [Linear probing accuracy](https://arxiv.org/abs/1603.08511): Linear probing is a popular metric to evaluate self-supervised classifiers. It is computed as the accuracy of a logistic regression classifier trained on top of the encoder's features. In our case, this is done by training a single dense layer on top of the frozen encoder. Note that contrary to traditional approach where the classifier is trained after the pretraining phase, in this example we train it during pretraining. This might slightly decrease its accuracy, but that way we can monitor its value during training, which helps with experimentation and debugging. Another widely used supervised metric is the [KNN accuracy](https://arxiv.org/abs/1805.01978), which is the accuracy of a KNN classifier trained on top of the encoder's features, which is not implemented in this example. 
""" # Define the contrastive model with model-subclassing class ContrastiveModel(keras.Model): def __init__(self): super().__init__() self.temperature = temperature self.contrastive_augmenter = get_augmenter(**contrastive_augmentation) self.classification_augmenter = get_augmenter(**classification_augmentation) self.encoder = get_encoder() # Non-linear MLP as projection head self.projection_head = keras.Sequential( [ keras.Input(shape=(width,)), layers.Dense(width, activation="relu"), layers.Dense(width), ], name="projection_head", ) # Single dense layer for linear probing self.linear_probe = keras.Sequential( [layers.Input(shape=(width,)), layers.Dense(10)], name="linear_probe", ) self.encoder.summary() self.projection_head.summary() self.linear_probe.summary() def compile(self, contrastive_optimizer, probe_optimizer, **kwargs): super().compile(**kwargs) self.contrastive_optimizer = contrastive_optimizer self.probe_optimizer = probe_optimizer # self.contrastive_loss will be defined as a method self.probe_loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) self.contrastive_loss_tracker = keras.metrics.Mean(name="c_loss") self.contrastive_accuracy = keras.metrics.SparseCategoricalAccuracy( name="c_acc" ) self.probe_loss_tracker = keras.metrics.Mean(name="p_loss") self.probe_accuracy = keras.metrics.SparseCategoricalAccuracy(name="p_acc") @property def metrics(self): return [ self.contrastive_loss_tracker, self.contrastive_accuracy, self.probe_loss_tracker, self.probe_accuracy, ] def contrastive_loss(self, projections_1, projections_2): # InfoNCE loss (information noise-contrastive estimation) # NT-Xent loss (normalized temperature-scaled cross entropy) # Cosine similarity: the dot product of the l2-normalized feature vectors projections_1 = tf.math.l2_normalize(projections_1, axis=1) projections_2 = tf.math.l2_normalize(projections_2, axis=1) similarities = ( tf.matmul(projections_1, projections_2, transpose_b=True) / self.temperature ) # The similarity between the representations of two augmented views of the # same image should be higher than their similarity with other views batch_size = tf.shape(projections_1)[0] contrastive_labels = tf.range(batch_size) self.contrastive_accuracy.update_state(contrastive_labels, similarities) self.contrastive_accuracy.update_state( contrastive_labels, tf.transpose(similarities) ) # The temperature-scaled similarities are used as logits for cross-entropy # a symmetrized version of the loss is used here loss_1_2 = keras.losses.sparse_categorical_crossentropy( contrastive_labels, similarities, from_logits=True ) loss_2_1 = keras.losses.sparse_categorical_crossentropy( contrastive_labels, tf.transpose(similarities), from_logits=True ) return (loss_1_2 + loss_2_1) / 2 def train_step(self, data): (unlabeled_images, _), (labeled_images, labels) = data # Both labeled and unlabeled images are used, without labels images = tf.concat((unlabeled_images, labeled_images), axis=0) # Each image is augmented twice, differently augmented_images_1 = self.contrastive_augmenter(images, training=True) augmented_images_2 = self.contrastive_augmenter(images, training=True) with tf.GradientTape() as tape: features_1 = self.encoder(augmented_images_1, training=True) features_2 = self.encoder(augmented_images_2, training=True) # The representations are passed through a projection mlp projections_1 = self.projection_head(features_1, training=True) projections_2 = self.projection_head(features_2, training=True) contrastive_loss = self.contrastive_loss(projections_1, 
projections_2) gradients = tape.gradient( contrastive_loss, self.encoder.trainable_weights + self.projection_head.trainable_weights, ) self.contrastive_optimizer.apply_gradients( zip( gradients, self.encoder.trainable_weights + self.projection_head.trainable_weights, ) ) self.contrastive_loss_tracker.update_state(contrastive_loss) # Labels are only used in evalutation for an on-the-fly logistic regression preprocessed_images = self.classification_augmenter( labeled_images, training=True ) with tf.GradientTape() as tape: # the encoder is used in inference mode here to avoid regularization # and updating the batch normalization paramers if they are used features = self.encoder(preprocessed_images, training=False) class_logits = self.linear_probe(features, training=True) probe_loss = self.probe_loss(labels, class_logits) gradients = tape.gradient(probe_loss, self.linear_probe.trainable_weights) self.probe_optimizer.apply_gradients( zip(gradients, self.linear_probe.trainable_weights) ) self.probe_loss_tracker.update_state(probe_loss) self.probe_accuracy.update_state(labels, class_logits) return {m.name: m.result() for m in self.metrics} def test_step(self, data): labeled_images, labels = data # For testing the components are used with a training=False flag preprocessed_images = self.classification_augmenter( labeled_images, training=False ) features = self.encoder(preprocessed_images, training=False) class_logits = self.linear_probe(features, training=False) probe_loss = self.probe_loss(labels, class_logits) self.probe_loss_tracker.update_state(probe_loss) self.probe_accuracy.update_state(labels, class_logits) # Only the probe metrics are logged at test time return {m.name: m.result() for m in self.metrics[2:]} # Contrastive pretraining pretraining_model = ContrastiveModel() pretraining_model.compile( contrastive_optimizer=keras.optimizers.Adam(), probe_optimizer=keras.optimizers.Adam(), ) pretraining_history = pretraining_model.fit( train_dataset, epochs=num_epochs, validation_data=test_dataset ) print( "Maximal validation accuracy: {:.2f}%".format( max(pretraining_history.history["val_p_acc"]) * 100 ) ) """ ## Supervised finetuning of the pretrained encoder We then finetune the encoder on the labeled examples, by attaching a single randomly initalized fully connected classification layer on its top. 
""" # Supervised finetuning of the pretrained encoder finetuning_model = keras.Sequential( [ get_augmenter(**classification_augmentation), pretraining_model.encoder, layers.Dense(10), ], name="finetuning_model", ) finetuning_model.compile( optimizer=keras.optimizers.Adam(), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")], ) finetuning_history = finetuning_model.fit( labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset ) print( "Maximal validation accuracy: {:.2f}%".format( max(finetuning_history.history["val_acc"]) * 100 ) ) """ ## Comparison against the baseline """ # The classification accuracies of the baseline and the pretraining + finetuning process: def plot_training_curves(pretraining_history, finetuning_history, baseline_history): for metric_key, metric_name in zip(["acc", "loss"], ["accuracy", "loss"]): plt.figure(figsize=(8, 5), dpi=100) plt.plot( baseline_history.history[f"val_{metric_key}"], label="supervised baseline", ) plt.plot( pretraining_history.history[f"val_p_{metric_key}"], label="self-supervised pretraining", ) plt.plot( finetuning_history.history[f"val_{metric_key}"], label="supervised finetuning", ) plt.legend() plt.title(f"Classification {metric_name} during training") plt.xlabel("epochs") plt.ylabel(f"validation {metric_name}") plot_training_curves(pretraining_history, finetuning_history, baseline_history) """ By comparing the training curves, we can see that when using contrastive pretraining, a higher validation accuracy can be reached, paired with a lower validation loss, which means that the pretrained network was able to generalize better when seeing only a small amount of labeled examples. """ """ ## Improving further ### Architecture The experiment in the original paper demonstrated that increasing the width and depth of the models improves performance at a higher rate than for supervised learning. Also, using a [ResNet-50](https://keras.io/api/applications/resnet/#resnet50-function) encoder is quite standard in the literature. However keep in mind, that more powerful models will not only increase training time but will also require more memory and will limit the maximal batch size you can use. It has [been](https://arxiv.org/abs/1905.09272) [reported](https://arxiv.org/abs/1911.05722) that the usage of BatchNorm layers could sometimes degrade performance, as it introduces an intra-batch dependency between samples, which is why I did not have used them in this example. In my experiments however, using BatchNorm, especially in the projection head, improves performance. ### Hyperparameters The hyperparameters used in this example have been tuned manually for this task and architecture. Therefore, without changing them, only marginal gains can be expected from further hyperparameter tuning. However for a different task or model architecture these would need tuning, so here are my notes on the most important ones: - **Batch size**: since the objective can be interpreted as a classification over a batch of images (loosely speaking), the batch size is actually a more important hyperparameter than usual. The higher, the better. - **Temperature**: the temperature defines the "softness" of the softmax distribution that is used in the cross-entropy loss, and is an important hyperparameter. Lower values generally lead to a higher contrastive accuracy. 
A recent trick (in [ALIGN](https://arxiv.org/abs/2102.05918)) is to learn the temperature's value as well (which can be done by defining it as a tf.Variable, and applying gradients on it). Even though this provides a good baseline value, in my experiments the learned temperature was somewhat lower than optimal, as it is optimized with respect to the contrastive loss, which is not a perfect proxy for representation quality. - **Image augmentation strength**: during pretraining stronger augmentations increase the difficulty of the task, however after a point too strong augmentations will degrade performance. During finetuning stronger augmentations reduce overfitting while in my experience too strong augmentations decrease the performance gains from pretraining. The whole data augmentation pipeline can be seen as an important hyperparameter of the algorithm, implementations of other custom image augmentation layers in Keras can be found in [this repository](https://github.com/beresandras/image-augmentation-layers-keras). - **Learning rate schedule**: a constant schedule is used here, but it is quite common in the literature to use a [cosine decay schedule](https://www.tensorflow.org/api_docs/python/tf/keras/experimental/CosineDecay), which can further improve performance. - **Optimizer**: Adam is used in this example, as it provides good performance with default parameters. SGD with momentum requires more tuning, however it could slightly increase performance. """ """ ## Related works Other instance-level (image-level) contrastive learning methods: - [MoCo](https://arxiv.org/abs/1911.05722) ([v2](https://arxiv.org/abs/2003.04297), [v3](https://arxiv.org/abs/2104.02057)): uses a momentum-encoder as well, whose weights are an exponential moving average of the target encoder - [SwAV](https://arxiv.org/abs/2006.09882): uses clustering instead of pairwise comparison - [BarlowTwins](https://arxiv.org/abs/2103.03230): uses a cross correlation-based objective instead of pairwise comparison Keras implementations of **MoCo** and **BarlowTwins** can be found in [this repository](https://github.com/beresandras/contrastive-classification-keras), which includes a Colab notebook. There is also a new line of works, which optimize a similar objective, but without the use of any negatives: - [BYOL](https://arxiv.org/abs/2006.07733): momentum-encoder + no negatives - [SimSiam](https://arxiv.org/abs/2011.10566) ([Keras example](https://keras.io/examples/vision/simsiam/)): no momentum-encoder + no negatives In my experience, these methods are more brittle (they can collapse to a constant representation, I could not get them to work using this encoder architecture). Even though they are generally more dependent on the [model](https://generallyintelligent.ai/understanding-self-supervised-contrastive-learning.html) [architecture](https://arxiv.org/abs/2010.10241), they can improve performance at smaller batch sizes. You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/semi-supervised-classification-simclr) and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/semi-supervised-classification). """
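"""
The learnable temperature trick mentioned in the "Improving further" section above can
be sketched as follows. This is an illustrative sketch rather than part of the tuned
example: it assumes the `ContrastiveModel` class defined earlier and only shows the
lines that would change (the temperature becomes a trainable `tf.Variable`, which is
added to the set of variables that receive gradients in `train_step`).

```python
# In ContrastiveModel.__init__(), make the temperature a trainable scalar:
self.temperature = tf.Variable(temperature, trainable=True, dtype=tf.float32)

# In ContrastiveModel.train_step(), after computing contrastive_loss,
# take gradients with respect to the temperature as well:
train_vars = (
    self.encoder.trainable_weights
    + self.projection_head.trainable_weights
    + [self.temperature]
)
gradients = tape.gradient(contrastive_loss, train_vars)
self.contrastive_optimizer.apply_gradients(zip(gradients, train_vars))
```
"""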
keras-io/examples/vision/semisupervised_simclr.py/0
{ "file_path": "keras-io/examples/vision/semisupervised_simclr.py", "repo_id": "keras-io", "token_count": 9444 }
128
""" Title: Efficient Object Detection with YOLOV8 and KerasCV Author: [Gitesh Chawda](https://twitter.com/gitesh12_) Date created: 2023/06/26 Last modified: 2023/06/26 Description: Train custom YOLOV8 object detection model with KerasCV. Accelerator: GPU """ """ ## Introduction """ """ KerasCV is an extension of Keras for computer vision tasks. In this example, we'll see how to train a YOLOV8 object detection model using KerasCV. KerasCV includes pre-trained models for popular computer vision datasets, such as ImageNet, COCO, and Pascal VOC, which can be used for transfer learning. KerasCV also provides a range of visualization tools for inspecting the intermediate representations learned by the model and for visualizing the results of object detection and segmentation tasks. """ """ If you're interested in learning about object detection using KerasCV, I highly suggest taking a look at the guide created by lukewood. This resource, available at [Object Detection With KerasCV](https://keras.io/guides/keras_cv/object_detection_keras_cv/#object-detection-introduction), provides a comprehensive overview of the fundamental concepts and techniques required for building object detection models with KerasCV. """ """shell pip install --upgrade git+https://github.com/keras-team/keras-cv -q """ """ ## Setup """ import os from tqdm.auto import tqdm import xml.etree.ElementTree as ET import tensorflow as tf from tensorflow import keras import keras_cv from keras_cv import bounding_box from keras_cv import visualization """ ## Load Data """ """ For this guide, we will be utilizing the Self-Driving Car Dataset obtained from [roboflow](https://public.roboflow.com/object-detection/self-driving-car). In order to make the dataset more manageable, I have extracted a subset of the larger dataset, which originally consisted of 15,000 data samples. From this subset, I have chosen 7,316 samples for model training. To simplify the task at hand and focus our efforts, we will be working with a reduced number of object classes. Specifically, we will be considering five primary classes for detection and classification: car, pedestrian, traffic light, biker, and truck. These classes represent some of the most common and significant objects encountered in the context of self-driving cars. By narrowing down the dataset to these specific classes, we can concentrate on building a robust object detection model that can accurately identify and classify these important objects. """ """ The TensorFlow Datasets library provides a convenient way to download and use various datasets, including the object detection dataset. This can be a great option for those who want to quickly start working with the data without having to manually download and preprocess it. You can view various object detection datasets here [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview#object_detection) However, in this code example, we will demonstrate how to load the dataset from scratch using TensorFlow's `tf.data` pipeline. This approach provides more flexibility and allows you to customize the preprocessing steps as needed. Loading custom datasets that are not available in the TensorFlow Datasets library is one of the main advantages of using the `tf.data` pipeline. This approach allows you to create a custom data preprocessing pipeline tailored to the specific needs and requirements of your dataset. 
""" """ ## Hyperparameters """ SPLIT_RATIO = 0.2 BATCH_SIZE = 4 LEARNING_RATE = 0.001 EPOCH = 5 GLOBAL_CLIPNORM = 10.0 """ A dictionary is created to map each class name to a unique numerical identifier. This mapping is used to encode and decode the class labels during training and inference in object detection tasks. """ class_ids = [ "car", "pedestrian", "trafficLight", "biker", "truck", ] class_mapping = dict(zip(range(len(class_ids)), class_ids)) # Path to images and annotations path_images = "/kaggle/input/dataset/data/images/" path_annot = "/kaggle/input/dataset/data/annotations/" # Get all XML file paths in path_annot and sort them xml_files = sorted( [ os.path.join(path_annot, file_name) for file_name in os.listdir(path_annot) if file_name.endswith(".xml") ] ) # Get all JPEG image file paths in path_images and sort them jpg_files = sorted( [ os.path.join(path_images, file_name) for file_name in os.listdir(path_images) if file_name.endswith(".jpg") ] ) """ The function below reads the XML file and finds the image name and path, and then iterates over each object in the XML file to extract the bounding box coordinates and class labels for each object. The function returns three values: the image path, a list of bounding boxes (each represented as a list of four floats: xmin, ymin, xmax, ymax), and a list of class IDs (represented as integers) corresponding to each bounding box. The class IDs are obtained by mapping the class labels to integer values using a dictionary called `class_mapping`. """ def parse_annotation(xml_file): tree = ET.parse(xml_file) root = tree.getroot() image_name = root.find("filename").text image_path = os.path.join(path_images, image_name) boxes = [] classes = [] for obj in root.iter("object"): cls = obj.find("name").text classes.append(cls) bbox = obj.find("bndbox") xmin = float(bbox.find("xmin").text) ymin = float(bbox.find("ymin").text) xmax = float(bbox.find("xmax").text) ymax = float(bbox.find("ymax").text) boxes.append([xmin, ymin, xmax, ymax]) class_ids = [ list(class_mapping.keys())[list(class_mapping.values()).index(cls)] for cls in classes ] return image_path, boxes, class_ids image_paths = [] bbox = [] classes = [] for xml_file in tqdm(xml_files): image_path, boxes, class_ids = parse_annotation(xml_file) image_paths.append(image_path) bbox.append(boxes) classes.append(class_ids) """ Here we are using `tf.ragged.constant` to create ragged tensors from the `bbox` and `classes` lists. A ragged tensor is a type of tensor that can handle varying lengths of data along one or more dimensions. This is useful when dealing with data that has variable-length sequences, such as text or time series data. ```python classes = [ [8, 8, 8, 8, 8], # 5 classes [12, 14, 14, 14], # 4 classes [1], # 1 class [7, 7], # 2 classes ...] ``` ```python bbox = [ [[199.0, 19.0, 390.0, 401.0], [217.0, 15.0, 270.0, 157.0], [393.0, 18.0, 432.0, 162.0], [1.0, 15.0, 226.0, 276.0], [19.0, 95.0, 458.0, 443.0]], #image 1 has 4 objects [[52.0, 117.0, 109.0, 177.0]], #image 2 has 1 object [[88.0, 87.0, 235.0, 322.0], [113.0, 117.0, 218.0, 471.0]], #image 3 has 2 objects ...] ``` In this case, the `bbox` and `classes` lists have different lengths for each image, depending on the number of objects in the image and the corresponding bounding boxes and classes. To handle this variability, ragged tensors are used instead of regular tensors. Later, these ragged tensors are used to create a `tf.data.Dataset` using the `from_tensor_slices` method. 
This method creates a dataset from the input tensors by slicing them along the first dimension. By using ragged tensors, the dataset can handle varying lengths of data for each image and provide a flexible input pipeline for further processing. """ bbox = tf.ragged.constant(bbox) classes = tf.ragged.constant(classes) image_paths = tf.ragged.constant(image_paths) data = tf.data.Dataset.from_tensor_slices((image_paths, classes, bbox)) """ Splitting data in training and validation data """ # Determine the number of validation samples num_val = int(len(xml_files) * SPLIT_RATIO) # Split the dataset into train and validation sets val_data = data.take(num_val) train_data = data.skip(num_val) """ Let's see about data loading and bounding box formatting to get things going. Bounding boxes in KerasCV have a predetermined format. To do this, you must bundle your bounding boxes into a dictionary that complies with the requirements listed below: ```python bounding_boxes = { # num_boxes may be a Ragged dimension 'boxes': Tensor(shape=[batch, num_boxes, 4]), 'classes': Tensor(shape=[batch, num_boxes]) } ``` The dictionary has two keys, `'boxes'` and `'classes'`, each of which maps to a TensorFlow RaggedTensor or Tensor object. The `'boxes'` Tensor has a shape of `[batch, num_boxes, 4]`, where batch is the number of images in the batch and num_boxes is the maximum number of bounding boxes in any image. The 4 represents the four values needed to define a bounding box: xmin, ymin, xmax, ymax. The `'classes'` Tensor has a shape of `[batch, num_boxes]`, where each element represents the class label for the corresponding bounding box in the `'boxes'` Tensor. The num_boxes dimension may be ragged, which means that the number of boxes may vary across images in the batch. Final dict should be: ```python {"images": images, "bounding_boxes": bounding_boxes} ``` """ def load_image(image_path): image = tf.io.read_file(image_path) image = tf.image.decode_jpeg(image, channels=3) return image def load_dataset(image_path, classes, bbox): # Read Image image = load_image(image_path) bounding_boxes = { "classes": tf.cast(classes, dtype=tf.float32), "boxes": bbox, } return {"images": tf.cast(image, tf.float32), "bounding_boxes": bounding_boxes} """ Here we create a layer that resizes images to 640x640 pixels, while maintaining the original aspect ratio. The bounding boxes associated with the image are specified in the `xyxy` format. If necessary, the resized image will be padded with zeros to maintain the original aspect ratio. Bounding Box Formats supported by KerasCV: 1. CENTER_XYWH 2. XYWH 3. XYXY 4. REL_XYXY 5. REL_XYWH 6. YXYX 7. REL_YXYX You can read more about KerasCV bounding box formats in [docs](https://keras.io/api/keras_cv/bounding_box/formats/). Furthermore, it is possible to perform format conversion between any two pairs: ```python boxes = keras_cv.bounding_box.convert_format( bounding_box, images=image, source="xyxy", # Original Format target="xywh", # Target Format (to which we want to convert) ) ``` """ """ ## Data Augmentation One of the most challenging tasks when constructing object detection pipelines is data augmentation. It involves applying various transformations to the input images to increase the diversity of the training data and improve the model's ability to generalize. However, when working with object detection tasks, it becomes even more complex as these transformations need to be aware of the underlying bounding boxes and update them accordingly. 
KerasCV provides native support for bounding box augmentation. KerasCV offers an extensive collection of data augmentation layers specifically designed to handle bounding boxes. These layers intelligently adjust the bounding box coordinates as the image is transformed, ensuring that the bounding boxes remain accurate and aligned with the augmented images. By leveraging KerasCV's capabilities, developers can conveniently integrate bounding box-friendly data augmentation into their object detection pipelines. By performing on-the-fly augmentation within a tf.data pipeline, the process becomes seamless and efficient, enabling better training and more accurate object detection results. """ augmenter = keras.Sequential( layers=[ keras_cv.layers.RandomFlip(mode="horizontal", bounding_box_format="xyxy"), keras_cv.layers.RandomShear( x_factor=0.2, y_factor=0.2, bounding_box_format="xyxy" ), keras_cv.layers.JitteredResize( target_size=(640, 640), scale_factor=(0.75, 1.3), bounding_box_format="xyxy" ), ] ) """ ## Creating Training Dataset """ train_ds = train_data.map(load_dataset, num_parallel_calls=tf.data.AUTOTUNE) train_ds = train_ds.shuffle(BATCH_SIZE * 4) train_ds = train_ds.ragged_batch(BATCH_SIZE, drop_remainder=True) train_ds = train_ds.map(augmenter, num_parallel_calls=tf.data.AUTOTUNE) """ ## Creating Validation Dataset """ resizing = keras_cv.layers.JitteredResize( target_size=(640, 640), scale_factor=(0.75, 1.3), bounding_box_format="xyxy", ) val_ds = val_data.map(load_dataset, num_parallel_calls=tf.data.AUTOTUNE) val_ds = val_ds.shuffle(BATCH_SIZE * 4) val_ds = val_ds.ragged_batch(BATCH_SIZE, drop_remainder=True) val_ds = val_ds.map(resizing, num_parallel_calls=tf.data.AUTOTUNE) """ ## Visualization """ def visualize_dataset(inputs, value_range, rows, cols, bounding_box_format): inputs = next(iter(inputs.take(1))) images, bounding_boxes = inputs["images"], inputs["bounding_boxes"] visualization.plot_bounding_box_gallery( images, value_range=value_range, rows=rows, cols=cols, y_true=bounding_boxes, scale=5, font_scale=0.7, bounding_box_format=bounding_box_format, class_mapping=class_mapping, ) visualize_dataset( train_ds, bounding_box_format="xyxy", value_range=(0, 255), rows=2, cols=2 ) visualize_dataset( val_ds, bounding_box_format="xyxy", value_range=(0, 255), rows=2, cols=2 ) """ We need to extract the inputs from the preprocessing dictionary and get them ready to be fed into the model. """ def dict_to_tuple(inputs): return inputs["images"], inputs["bounding_boxes"] train_ds = train_ds.map(dict_to_tuple, num_parallel_calls=tf.data.AUTOTUNE) train_ds = train_ds.prefetch(tf.data.AUTOTUNE) val_ds = val_ds.map(dict_to_tuple, num_parallel_calls=tf.data.AUTOTUNE) val_ds = val_ds.prefetch(tf.data.AUTOTUNE) """ ## Creating Model """ """ YOLOv8 is a cutting-edge YOLO model that is used for a variety of computer vision tasks, such as object detection, image classification, and instance segmentation. Ultralytics, the creators of YOLOv5, also developed YOLOv8, which incorporates many improvements and changes in architecture and developer experience compared to its predecessor. YOLOv8 is the latest state-of-the-art model that is highly regarded in the industry. """ """ Below table compares the performance metrics of five different YOLOv8 models with different sizes (measured in pixels): YOLOv8n, YOLOv8s, YOLOv8m, YOLOv8l, and YOLOv8x. 
The metrics include mean average precision (mAP) values at different
intersection-over-union (IoU) thresholds for validation data, inference speed on CPU
with ONNX format and A100 TensorRT, number of parameters, and number of floating-point
operations (FLOPs) (both in millions and billions, respectively). As the size of the
model increases, the mAP, parameters, and FLOPs generally increase while the speed
decreases. YOLOv8x has the highest mAP, parameters, and FLOPs but also the slowest
inference speed, while YOLOv8n has the smallest size, fastest inference speed, and
lowest mAP, parameters, and FLOPs.

| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ------- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
| YOLOv8n | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 |
| YOLOv8s | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 |
| YOLOv8m | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 |
| YOLOv8l | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
| YOLOv8x | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
"""

"""
You can read more about YOLOV8 and its architecture in this
[RoboFlow Blog](https://blog.roboflow.com/whats-new-in-yolov8/)
"""

"""
First, we will create an instance of a backbone which will be used by our YOLOV8
detector class.

YOLOV8 Backbones available in KerasCV:

1. Without Weights:
    1. yolo_v8_xs_backbone
    2. yolo_v8_s_backbone
    3. yolo_v8_m_backbone
    4. yolo_v8_l_backbone
    5. yolo_v8_xl_backbone
2. With pre-trained COCO weights:
    1. yolo_v8_xs_backbone_coco
    2. yolo_v8_s_backbone_coco
    3. yolo_v8_m_backbone_coco
    4. yolo_v8_l_backbone_coco
    5. yolo_v8_xl_backbone_coco
"""

backbone = keras_cv.models.YOLOV8Backbone.from_preset(
    "yolo_v8_s_backbone_coco"  # We will use the YOLOV8 small backbone with COCO weights
)

"""
Next, let's build a YOLOV8 model using the `YOLOV8Detector`, which accepts a feature
extractor as the `backbone` argument, a `num_classes` argument that specifies the number
of object classes to detect based on the size of the `class_mapping` list, a
`bounding_box_format` argument that informs the model of the format of the bbox in the
dataset, and finally, an `fpn_depth` argument that specifies the depth of the feature
pyramid network (FPN).

It is simple to build a YOLOV8 using any of the aforementioned backbones thanks to
KerasCV.
"""

yolo = keras_cv.models.YOLOV8Detector(
    num_classes=len(class_mapping),
    bounding_box_format="xyxy",
    backbone=backbone,
    fpn_depth=1,
)

"""
## Compile the Model
"""

"""
Losses used for YOLOV8

1. Classification Loss: This loss function calculates the discrepancy between anticipated
class probabilities and actual class probabilities. In this instance,
`binary_crossentropy`, a common choice for binary classification problems, is utilized.
We use binary crossentropy since each detected object is either classified as belonging
or not belonging to a certain object class (such as a person, a car, etc.).

2. Box Loss: `box_loss` is the loss function used to measure the difference between the
predicted bounding boxes and the ground truth. In this case, the Complete IoU (CIoU)
metric is used, which not only measures the overlap between predicted and ground truth
bounding boxes but also considers the difference in aspect ratio, center distance, and
box size.
Together, these loss functions help optimize the model for object detection by minimizing the difference between the predicted and ground truth class probabilities and bounding boxes. """ optimizer = tf.keras.optimizers.Adam( learning_rate=LEARNING_RATE, global_clipnorm=GLOBAL_CLIPNORM, ) yolo.compile( optimizer=optimizer, classification_loss="binary_crossentropy", box_loss="ciou" ) """ ## COCO Metric Callback We will be using `BoxCOCOMetrics` from KerasCV to evaluate the model and calculate the Map(Mean Average Precision) score, Recall and Precision. We also save our model when the mAP score improves. """ class EvaluateCOCOMetricsCallback(keras.callbacks.Callback): def __init__(self, data, save_path): super().__init__() self.data = data self.metrics = keras_cv.metrics.BoxCOCOMetrics( bounding_box_format="xyxy", evaluate_freq=1e9, ) self.save_path = save_path self.best_map = -1.0 def on_epoch_end(self, epoch, logs): self.metrics.reset_state() for batch in self.data: images, y_true = batch[0], batch[1] y_pred = self.model.predict(images, verbose=0) self.metrics.update_state(y_true, y_pred) metrics = self.metrics.result(force=True) logs.update(metrics) current_map = metrics["MaP"] if current_map > self.best_map: self.best_map = current_map self.model.save(self.save_path) # Save the model when mAP improves return logs """ ## Train the Model """ yolo.fit( train_ds, validation_data=val_ds, epochs=3, callbacks=[EvaluateCOCOMetricsCallback(val_ds, "model.h5")], ) """ ## Visualize Predictions """ def visualize_detections(model, dataset, bounding_box_format): images, y_true = next(iter(dataset.take(1))) y_pred = model.predict(images) y_pred = bounding_box.to_ragged(y_pred) visualization.plot_bounding_box_gallery( images, value_range=(0, 255), bounding_box_format=bounding_box_format, y_true=y_true, y_pred=y_pred, scale=4, rows=2, cols=2, show=True, font_scale=0.7, class_mapping=class_mapping, ) visualize_detections(yolo, dataset=val_ds, bounding_box_format="xyxy")
keras-io/examples/vision/yolov8.py/0
{ "file_path": "keras-io/examples/vision/yolov8.py", "repo_id": "keras-io", "token_count": 7615 }
129
""" Title: Introduction to Keras for engineers Author: [fchollet](https://twitter.com/fchollet) Date created: 2023/07/10 Last modified: 2023/07/10 Description: First contact with Keras 3. Accelerator: GPU """ """ ## Introduction Keras 3 is a deep learning framework works with TensorFlow, JAX, and PyTorch interchangeably. This notebook will walk you through key Keras 3 workflows. Let's start by installing Keras 3: """ """shell pip install keras --upgrade --quiet """ """ ## Setup We're going to be using the JAX backend here -- but you can edit the string below to `"tensorflow"` or `"torch"` and hit "Restart runtime", and the whole notebook will run just the same! This entire guide is backend-agnostic. """ import numpy as np import os os.environ["KERAS_BACKEND"] = "jax" # Note that Keras should only be imported after the backend # has been configured. The backend cannot be changed once the # package is imported. import keras """ ## A first example: A MNIST convnet Let's start with the Hello World of ML: training a convnet to classify MNIST digits. Here's the data: """ # Load the data and split it between train and test sets (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 # Make sure images have shape (28, 28, 1) x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) print("x_train shape:", x_train.shape) print("y_train shape:", y_train.shape) print(x_train.shape[0], "train samples") print(x_test.shape[0], "test samples") """ Here's our model. Different model-building options that Keras offers include: - [The Sequential API](https://keras.io/guides/sequential_model/) (what we use below) - [The Functional API](https://keras.io/guides/functional_api/) (most typical) - [Writing your own models yourself via subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) (for advanced use cases) """ # Model parameters num_classes = 10 input_shape = (28, 28, 1) model = keras.Sequential( [ keras.layers.Input(shape=input_shape), keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), keras.layers.MaxPooling2D(pool_size=(2, 2)), keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"), keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"), keras.layers.GlobalAveragePooling2D(), keras.layers.Dropout(0.5), keras.layers.Dense(num_classes, activation="softmax"), ] ) """ Here's our model summary: """ model.summary() """ We use the `compile()` method to specify the optimizer, loss function, and the metrics to monitor. Note that with the JAX and TensorFlow backends, XLA compilation is turned on by default. """ model.compile( loss=keras.losses.SparseCategoricalCrossentropy(), optimizer=keras.optimizers.Adam(learning_rate=1e-3), metrics=[ keras.metrics.SparseCategoricalAccuracy(name="acc"), ], ) """ Let's train and evaluate the model. We'll set aside a validation split of 15% of the data during training to monitor generalization on unseen data. 
""" batch_size = 128 epochs = 20 callbacks = [ keras.callbacks.ModelCheckpoint(filepath="model_at_epoch_{epoch}.keras"), keras.callbacks.EarlyStopping(monitor="val_loss", patience=2), ] model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.15, callbacks=callbacks, ) score = model.evaluate(x_test, y_test, verbose=0) """ During training, we were saving a model at the end of each epoch. You can also save the model in its latest state like this: """ model.save("final_model.keras") """ And reload it like this: """ model = keras.saving.load_model("final_model.keras") """ Next, you can query predictions of class probabilities with `predict()`: """ predictions = model.predict(x_test) """ That's it for the basics! """ """ ## Writing cross-framework custom components Keras enables you to write custom Layers, Models, Metrics, Losses, and Optimizers that work across TensorFlow, JAX, and PyTorch with the same codebase. Let's take a look at custom layers first. The `keras.ops` namespace contains: - An implementation of the NumPy API, e.g. `keras.ops.stack` or `keras.ops.matmul`. - A set of neural network specific ops that are absent from NumPy, such as `keras.ops.conv` or `keras.ops.binary_crossentropy`. Let's make a custom `Dense` layer that works with all backends: """ class MyDense(keras.layers.Layer): def __init__(self, units, activation=None, name=None): super().__init__(name=name) self.units = units self.activation = keras.activations.get(activation) def build(self, input_shape): input_dim = input_shape[-1] self.w = self.add_weight( shape=(input_dim, self.units), initializer=keras.initializers.GlorotNormal(), name="kernel", trainable=True, ) self.b = self.add_weight( shape=(self.units,), initializer=keras.initializers.Zeros(), name="bias", trainable=True, ) def call(self, inputs): # Use Keras ops to create backend-agnostic layers/metrics/etc. x = keras.ops.matmul(inputs, self.w) + self.b return self.activation(x) """ Next, let's make a custom `Dropout` layer that relies on the `keras.random` namespace: """ class MyDropout(keras.layers.Layer): def __init__(self, rate, name=None): super().__init__(name=name) self.rate = rate # Use seed_generator for managing RNG state. # It is a state element and its seed variable is # tracked as part of `layer.variables`. self.seed_generator = keras.random.SeedGenerator(1337) def call(self, inputs): # Use `keras.random` for random ops. 
return keras.random.dropout(inputs, self.rate, seed=self.seed_generator) """ Next, let's write a custom subclassed model that uses our two custom layers: """ class MyModel(keras.Model): def __init__(self, num_classes): super().__init__() self.conv_base = keras.Sequential( [ keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), keras.layers.MaxPooling2D(pool_size=(2, 2)), keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"), keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"), keras.layers.GlobalAveragePooling2D(), ] ) self.dp = MyDropout(0.5) self.dense = MyDense(num_classes, activation="softmax") def call(self, x): x = self.conv_base(x) x = self.dp(x) return self.dense(x) """ Let's compile it and fit it: """ model = MyModel(num_classes=10) model.compile( loss=keras.losses.SparseCategoricalCrossentropy(), optimizer=keras.optimizers.Adam(learning_rate=1e-3), metrics=[ keras.metrics.SparseCategoricalAccuracy(name="acc"), ], ) model.fit( x_train, y_train, batch_size=batch_size, epochs=1, # For speed validation_split=0.15, ) """ ## Training models on arbitrary data sources All Keras models can be trained and evaluated on a wide variety of data sources, independently of the backend you're using. This includes: - NumPy arrays - Pandas dataframes - TensorFlow `tf.data.Dataset` objects - PyTorch `DataLoader` objects - Keras `PyDataset` objects They all work whether you're using TensorFlow, JAX, or PyTorch as your Keras backend. Let's try it out with PyTorch `DataLoaders`: """ import torch # Create a TensorDataset train_torch_dataset = torch.utils.data.TensorDataset( torch.from_numpy(x_train), torch.from_numpy(y_train) ) val_torch_dataset = torch.utils.data.TensorDataset( torch.from_numpy(x_test), torch.from_numpy(y_test) ) # Create a DataLoader train_dataloader = torch.utils.data.DataLoader( train_torch_dataset, batch_size=batch_size, shuffle=True ) val_dataloader = torch.utils.data.DataLoader( val_torch_dataset, batch_size=batch_size, shuffle=False ) model = MyModel(num_classes=10) model.compile( loss=keras.losses.SparseCategoricalCrossentropy(), optimizer=keras.optimizers.Adam(learning_rate=1e-3), metrics=[ keras.metrics.SparseCategoricalAccuracy(name="acc"), ], ) model.fit(train_dataloader, epochs=1, validation_data=val_dataloader) """ Now let's try this out with `tf.data`: """ import tensorflow as tf train_dataset = ( tf.data.Dataset.from_tensor_slices((x_train, y_train)) .batch(batch_size) .prefetch(tf.data.AUTOTUNE) ) test_dataset = ( tf.data.Dataset.from_tensor_slices((x_test, y_test)) .batch(batch_size) .prefetch(tf.data.AUTOTUNE) ) model = MyModel(num_classes=10) model.compile( loss=keras.losses.SparseCategoricalCrossentropy(), optimizer=keras.optimizers.Adam(learning_rate=1e-3), metrics=[ keras.metrics.SparseCategoricalAccuracy(name="acc"), ], ) model.fit(train_dataset, epochs=1, validation_data=test_dataset) """ ## Further reading This concludes our short overview of the new multi-backend capabilities of Keras 3. Next, you can learn about: ### How to customize what happens in `fit()` Want to implement a non-standard training algorithm yourself but still want to benefit from the power and usability of `fit()`? 
It's easy to customize `fit()` to support arbitrary use cases:

- [Customizing what happens in `fit()` with TensorFlow](http://keras.io/guides/custom_train_step_in_tensorflow/)
- [Customizing what happens in `fit()` with JAX](http://keras.io/guides/custom_train_step_in_jax/)
- [Customizing what happens in `fit()` with PyTorch](http://keras.io/guides/custom_train_step_in_torch/)

### How to write custom training loops

- [Writing a training loop from scratch in TensorFlow](http://keras.io/guides/writing_a_custom_training_loop_in_tensorflow/)
- [Writing a training loop from scratch in JAX](http://keras.io/guides/writing_a_custom_training_loop_in_jax/)
- [Writing a training loop from scratch in PyTorch](http://keras.io/guides/writing_a_custom_training_loop_in_torch/)

### How to distribute training

- [Guide to distributed training with TensorFlow](http://keras.io/guides/distributed_training_with_tensorflow/)
- [JAX distributed training example](https://github.com/keras-team/keras/blob/master/examples/demo_jax_distributed.py)
- [PyTorch distributed training example](https://github.com/keras-team/keras/blob/master/examples/demo_torch_multi_gpu.py)

Enjoy the library! 🚀
"""
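"""
As a brief appendix: the list of supported data sources earlier in this guide also
mentions Keras `PyDataset` objects, which were not demonstrated above. The sketch below
is illustrative only -- a minimal `PyDataset` written for this guide's MNIST arrays, not
an official reference implementation:
"""


class MNISTPyDataset(keras.utils.PyDataset):
    def __init__(self, x, y, batch_size, **kwargs):
        super().__init__(**kwargs)
        self.x, self.y = x, y
        self.batch_size = batch_size

    def __len__(self):
        # Number of batches per epoch.
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        # Return one full batch of (inputs, targets).
        start = idx * self.batch_size
        stop = start + self.batch_size
        return self.x[start:stop], self.y[start:stop]


# A PyDataset instance can be passed straight to `fit()`, like the loaders above:
# model.fit(MNISTPyDataset(x_train, y_train, batch_size=128), epochs=1)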
keras-io/guides/intro_to_keras_for_engineers.py/0
{ "file_path": "keras-io/guides/intro_to_keras_for_engineers.py", "repo_id": "keras-io", "token_count": 4165 }
130
<jupyter_start><jupyter_text>Classification with KerasCV**Author:** [lukewood](https://lukewood.xyz)**Date created:** 03/28/2023**Last modified:** 03/28/2023**Description:** Use KerasCV to train powerful image classifiers. Classification is the process of predicting a categorical label for a giveninput image.While classification is a relatively straightforward computer vision task,modern approaches still are built of several complex components.Luckily, KerasCV provides APIs to construct commonly used components.This guide demonstrates KerasCV's modular approach to solving imageclassification problems at three levels of complexity:- Inference with a pretrained classifier- Fine-tuning a pretrained backbone- Training a image classifier from scratchKerasCV uses Keras 3 to work with any of TensorFlow, PyTorch or Jax. In theguide below, we will use the `jax` backend. This guide runs inTensorFlow or PyTorch backends with zero changes, simply update the`KERAS_BACKEND` below.We use Professor Keras, the official Keras mascot, as avisual reference for the complexity of the material:<jupyter_code>!pip install -q --upgrade keras-cv !pip install -q --upgrade keras # Upgrade to Keras 3. import os os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"] import json import math import numpy as np import keras from keras import losses from keras import ops from keras import optimizers from keras.optimizers import schedules from keras import metrics import keras_cv # Import tensorflow for `tf.data` and its preprocessing functions import tensorflow as tf import tensorflow_datasets as tfds<jupyter_output><empty_output><jupyter_text>Inference with a pretrained classifierLet's get started with the simplest KerasCV API: a pretrained classifier.In this example, we will construct a classifier that waspretrained on the ImageNet dataset.We'll use this model to solve the age old "Cat or Dog" problem.The highest level module in KerasCV is a *task*. 
A *task* is a `keras.Model`consisting of a (generally pretrained) backbone model and task-specific layers.Here's an example using `keras_cv.models.ImageClassifier` with anEfficientNetV2B0 Backbone.EfficientNetV2B0 is a great starting model when constructing an imageclassification pipeline.This architecture manages to achieve high accuracy, while using aparameter count of 7M.If an EfficientNetV2B0 is not powerful enough for the task you are hoping tosolve, be sure to check out [KerasCV's other available Backbones](https://github.com/keras-team/keras-cv/tree/master/keras_cv/models/backbones)!<jupyter_code>classifier = keras_cv.models.ImageClassifier.from_preset( "efficientnetv2_b0_imagenet_classifier" )<jupyter_output><empty_output><jupyter_text>You may notice a small deviation from the old `keras.applications` API; whereyou would construct the class with `EfficientNetV2B0(weights="imagenet")`.While the old API was great for classification, it did not scale effectively toother use cases that required complex architectures, like object deteciton andsemantic segmentation.Now that our classifier is built, let's apply it to this cute cat picture!<jupyter_code>filepath = keras.utils.get_file(origin="https://i.imgur.com/9i63gLN.jpg") image = keras.utils.load_img(filepath) image = np.array(image) keras_cv.visualization.plot_image_gallery( np.array([image]), rows=1, cols=1, value_range=(0, 255), show=True, scale=4 )<jupyter_output><empty_output><jupyter_text>Next, let's get some predictions from our classifier:<jupyter_code>predictions = classifier.predict(np.expand_dims(image, axis=0))<jupyter_output><empty_output><jupyter_text>Predictions come in the form of softmax-ed category rankings.We can find the index of the top classes using a simple argsort function:<jupyter_code>top_classes = predictions[0].argsort(axis=-1)<jupyter_output><empty_output><jupyter_text>In order to decode the class mappings, we can construct a mapping fromcategory indices to ImageNet class names.For convenience, I've stored the ImageNet class mapping in a GitHub gist.Let's download and load it now.<jupyter_code>classes = keras.utils.get_file( origin="https://gist.githubusercontent.com/LukeWood/62eebcd5c5c4a4d0e0b7845780f76d55/raw/fde63e5e4c09e2fa0a3436680f436bdcb8325aac/ImagenetClassnames.json" ) with open(classes, "rb") as f: classes = json.load(f)<jupyter_output><empty_output><jupyter_text>Now we can simply look up the class names via index:<jupyter_code>top_two = [classes[str(i)] for i in top_classes[-2:]] print("Top two classes are:", top_two)<jupyter_output><empty_output><jupyter_text>Great! Both of these appear to be correct!However, one of the classes is "Velvet".We're trying to classify Cats VS Dogs.We don't care about the velvet blanket!Ideally, we'd have a classifier that only performs computation to determine ifan image is a cat or a dog, and has all of its resources dedicated to this task.This can be solved by fine tuning our own classifier. 
Fine tuning a pretrained classifierWhen labeled images specific to our task are available, fine-tuning a customclassifier can improve performance.If we want to train a Cats vs Dogs Classifier, using explicitly labeled Cat vsDog data should perform better than the generic classifier!For many tasks, no relevant pretrained modelwill be available (e.g., categorizing images specific to your application).First, let's get started by loading some data:<jupyter_code>BATCH_SIZE = 32 IMAGE_SIZE = (224, 224) AUTOTUNE = tf.data.AUTOTUNE tfds.disable_progress_bar() data, dataset_info = tfds.load("cats_vs_dogs", with_info=True, as_supervised=True) train_steps_per_epoch = dataset_info.splits["train"].num_examples // BATCH_SIZE train_dataset = data["train"] num_classes = dataset_info.features["label"].num_classes resizing = keras_cv.layers.Resizing( IMAGE_SIZE[0], IMAGE_SIZE[1], crop_to_aspect_ratio=True ) def preprocess_inputs(image, label): image = tf.cast(image, tf.float32) # Staticly resize images as we only iterate the dataset once. return resizing(image), tf.one_hot(label, num_classes) # Shuffle the dataset to increase diversity of batches. # 10*BATCH_SIZE follows the assumption that bigger machines can handle bigger # shuffle buffers. train_dataset = train_dataset.shuffle( 10 * BATCH_SIZE, reshuffle_each_iteration=True ).map(preprocess_inputs, num_parallel_calls=AUTOTUNE) train_dataset = train_dataset.batch(BATCH_SIZE) images = next(iter(train_dataset.take(1)))[0] keras_cv.visualization.plot_image_gallery(images, value_range=(0, 255))<jupyter_output><empty_output><jupyter_text>Meow!Next let's construct our model.The use of imagenet in the preset name indicates that the backbone waspretrained on the ImageNet dataset.Pretrained backbones extract more information from our labeled examples byleveraging patterns extracted from potentially much larger datasets.Next lets put together our classifier:<jupyter_code>model = keras_cv.models.ImageClassifier.from_preset( "efficientnetv2_b0_imagenet", num_classes=2 ) model.compile( loss="categorical_crossentropy", optimizer=keras.optimizers.SGD(learning_rate=0.01), metrics=["accuracy"], )<jupyter_output><empty_output><jupyter_text>Here our classifier is just a simple `keras.Sequential`.All that is left to do is call `model.fit()`:<jupyter_code>model.fit(train_dataset)<jupyter_output><empty_output><jupyter_text>Let's look at how our model performs after the fine tuning:<jupyter_code>predictions = model.predict(np.expand_dims(image, axis=0)) classes = {0: "cat", 1: "dog"} print("Top class is:", classes[predictions[0].argmax()])<jupyter_output><empty_output><jupyter_text>Awesome - looks like the model correctly classified the image. Train a Classifier from ScratchNow that we've gotten our hands dirty with classification, let's take on onelast task: training a classification model from scratch!A standard benchmark for image classification is the ImageNet dataset, howeverdue to licensing constraints we will use the CalTech 101 image classificationdataset in this tutorial.While we use the simpler CalTech 101 dataset in this guide, the same trainingtemplate may be used on ImageNet to achieve near state-of-the-art scores.Let's start out by tackling data loading:<jupyter_code>NUM_CLASSES = 101 # Change epochs to 100~ to fully train. 
EPOCHS = 1 def package_inputs(image, label): return {"images": image, "labels": tf.one_hot(label, NUM_CLASSES)} train_ds, eval_ds = tfds.load( "caltech101", split=["train", "test"], as_supervised="true" ) train_ds = train_ds.map(package_inputs, num_parallel_calls=tf.data.AUTOTUNE) eval_ds = eval_ds.map(package_inputs, num_parallel_calls=tf.data.AUTOTUNE) train_ds = train_ds.shuffle(BATCH_SIZE * 16)<jupyter_output><empty_output><jupyter_text>The CalTech101 dataset has different sizes for every image, so we use the`ragged_batch()` API to batch them together while maintaining each individualimage's shape information.<jupyter_code>train_ds = train_ds.ragged_batch(BATCH_SIZE) eval_ds = eval_ds.ragged_batch(BATCH_SIZE) batch = next(iter(train_ds.take(1))) image_batch = batch["images"] label_batch = batch["labels"] keras_cv.visualization.plot_image_gallery( image_batch.to_tensor(), rows=3, cols=3, value_range=(0, 255), show=True, )<jupyter_output><empty_output><jupyter_text>Data AugmentationIn our previous finetuning exmaple, we performed a static resizing operation anddid not utilize any image augmentation.This is because a single pass over the training set was sufficient to achievedecent results.When training to solve a more difficult task, you'll want to include dataaugmentation in your data pipeline.Data augmentation is a technique to make your model robust to changes in inputdata such as lighting, cropping, and orientation.KerasCV includes some of the most useful augmentations in the `keras_cv.layers`API.Creating an optimal pipeline of augmentations is an art, but in this section ofthe guide we'll offer some tips on best practices for classification.One caveat to be aware of with image data augmentation is that you must be carefulto not shift your augmented data distribution too far from the original datadistribution.The goal is to prevent overfitting and increase generalization,but samples that lie completely out of the data distribution simply add noise tothe training process.The first augmentation we'll use is `RandomFlip`.This augmentation behaves more or less how you'd expect: it either flips theimage or not.While this augmentation is useful in CalTech101 and ImageNet, it should be notedthat it should not be used on tasks where the data distribution is not verticalmirror invariant.An example of a dataset where this occurs is MNIST hand written digits.Flipping a `6` over thevertical axis will make the digit appear more like a `7` than a `6`, but thelabel will still show a `6`.<jupyter_code>random_flip = keras_cv.layers.RandomFlip() augmenters = [random_flip] image_batch = random_flip(image_batch) keras_cv.visualization.plot_image_gallery( image_batch.to_tensor(), rows=3, cols=3, value_range=(0, 255), show=True, )<jupyter_output><empty_output><jupyter_text>Half of the images have been flipped!The next augmentation we'll use is `RandomCropAndResize`.This operation selects a random subset of the image, then resizes it to theprovided target size.By using this augmentation, we force our classifier to become spatially invariant.Additionally, this layer accepts an `aspect_ratio_factor` which can be used todistort the aspect ratio of the image.While this can improve model performance, it should be used with caution.It is very easy for an aspect ratio distortion to shift a sample too far fromthe original training set's data distribution.Remember - the goal of data augmentation is to produce more training samplesthat align with the data distribution of your training set!`RandomCropAndResize` also 
can handle `tf.RaggedTensor` inputs. In theCalTech101 image dataset images come in a wide variety of sizes.As such they cannot easily be batched together into a dense training batch.Luckily, `RandomCropAndResize` handles the Ragged -> Dense conversion processfor you!Let's add a `RandomCropAndResize` to our set of augmentations:<jupyter_code>crop_and_resize = keras_cv.layers.RandomCropAndResize( target_size=IMAGE_SIZE, crop_area_factor=(0.8, 1.0), aspect_ratio_factor=(0.9, 1.1), ) augmenters += [crop_and_resize] image_batch = crop_and_resize(image_batch) keras_cv.visualization.plot_image_gallery( image_batch, rows=3, cols=3, value_range=(0, 255), show=True, )<jupyter_output><empty_output><jupyter_text>Great! We are now working with a batch of dense images.Next up, lets include some spatial and color-based jitter to our training set.This will allow us to produce a classifier that is robust to lighting flickers,shadows, and more.There are limitless ways to augment an image by altering color and spatialfeatures, but perhaps the most battle tested technique is[`RandAugment`](https://arxiv.org/abs/1909.13719).`RandAugment` is actually a set of 10 different augmentations:`AutoContrast`, `Equalize`, `Solarize`, `RandomColorJitter`, `RandomContrast`,`RandomBrightness`, `ShearX`, `ShearY`, `TranslateX` and `TranslateY`.At inference time, `num_augmentations` augmenters are sampled for each image,and random magnitude factors are sampled for each.These augmentations are then applied sequentially.KerasCV makes tuning these parameters easy using the `augmentations_per_image`and `magnitude` parameters!Let's take it for a spin:<jupyter_code>rand_augment = keras_cv.layers.RandAugment( augmentations_per_image=3, value_range=(0, 255), magnitude=0.3, magnitude_stddev=0.2, rate=1.0, ) augmenters += [rand_augment] image_batch = rand_augment(image_batch) keras_cv.visualization.plot_image_gallery( image_batch, rows=3, cols=3, value_range=(0, 255), show=True, )<jupyter_output><empty_output><jupyter_text>Looks great; but we're not done yet!What if an image is missing one critical feature of a class? For example, whatif a leaf is blocking the view of a cat's ear, but our classifier learned toclassify cats simply by observing their ears?One easy approach to tackling this is to use `RandomCutout`, which randomlystrips out a sub-section of the image:<jupyter_code>random_cutout = keras_cv.layers.RandomCutout(width_factor=0.4, height_factor=0.4) keras_cv.visualization.plot_image_gallery( random_cutout(image_batch), rows=3, cols=3, value_range=(0, 255), show=True, )<jupyter_output><empty_output><jupyter_text>While this tackles the problem reasonably well, it can cause the classifier todevelop responses to borders between features and black pixel areas caused bythe cutout.[`CutMix`](https://arxiv.org/abs/1905.04899) solves the same issue by usinga more complex (and more effective) technique.Instead of replacing the cut-out areas with black pixels, `CutMix` replacesthese regions with regions of other images sampled from within your trainingset!Following this replacement, the image's classification label is updated to be ablend of the original and mixed image's class label.What does this look like in practice? 
Let's check it out:<jupyter_code>cut_mix = keras_cv.layers.CutMix() # CutMix needs to modify both images and labels inputs = {"images": image_batch, "labels": label_batch} keras_cv.visualization.plot_image_gallery( cut_mix(inputs)["images"], rows=3, cols=3, value_range=(0, 255), show=True, )<jupyter_output><empty_output><jupyter_text>Let's hold off from adding it to our augmenter for a minute - more on thatsoon!Next, let's look into `MixUp()`.Unfortunately, while `MixUp()` has been empirically shown to *substantially*improve both the robustness and the generalization of the trained model,it is not well-understood why such improvement occurs... buta little alchemy never hurt anyone!`MixUp()` works by sampling two images from a batch, then proceeding toliterally blend together their pixel intensities as well as their classificationlabels.Let's see it in action:<jupyter_code>mix_up = keras_cv.layers.MixUp() # MixUp needs to modify both images and labels inputs = {"images": image_batch, "labels": label_batch} keras_cv.visualization.plot_image_gallery( mix_up(inputs)["images"], rows=3, cols=3, value_range=(0, 255), show=True, )<jupyter_output><empty_output><jupyter_text>If you look closely, you'll see that the images have been blended together.Instead of applying `CutMix()` and `MixUp()` to every image, we instead pickone or the other to apply to each batch.This can be expressed using `keras_cv.layers.RandomChoice()`<jupyter_code>cut_mix_or_mix_up = keras_cv.layers.RandomChoice([cut_mix, mix_up], batchwise=True) augmenters += [cut_mix_or_mix_up]<jupyter_output><empty_output><jupyter_text>Now let's apply our final augmenter to the training data:<jupyter_code>def create_augmenter_fn(augmenters): def augmenter_fn(inputs): for augmenter in augmenters: inputs = augmenter(inputs) return inputs return augmenter_fn augmenter_fn = create_augmenter_fn(augmenters) train_ds = train_ds.map(augmenter_fn, num_parallel_calls=tf.data.AUTOTUNE) image_batch = next(iter(train_ds.take(1)))["images"] keras_cv.visualization.plot_image_gallery( image_batch, rows=3, cols=3, value_range=(0, 255), show=True, )<jupyter_output><empty_output><jupyter_text>We also need to resize our evaluation set to get dense batches of the image sizeexpected by our model. We use the deterministic `keras_cv.layers.Resizing` inthis case to avoid adding noise to our evaluation metric.<jupyter_code>inference_resizing = keras_cv.layers.Resizing( IMAGE_SIZE[0], IMAGE_SIZE[1], crop_to_aspect_ratio=True ) eval_ds = eval_ds.map(inference_resizing, num_parallel_calls=tf.data.AUTOTUNE) image_batch = next(iter(eval_ds.take(1)))["images"] keras_cv.visualization.plot_image_gallery( image_batch, rows=3, cols=3, value_range=(0, 255), show=True, )<jupyter_output><empty_output><jupyter_text>Finally, lets unpackage our datasets and prepare to pass them to `model.fit()`,which accepts a tuple of `(images, labels)`.<jupyter_code>def unpackage_dict(inputs): return inputs["images"], inputs["labels"] train_ds = train_ds.map(unpackage_dict, num_parallel_calls=tf.data.AUTOTUNE) eval_ds = eval_ds.map(unpackage_dict, num_parallel_calls=tf.data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Data augmentation is by far the hardest piece of training a modernclassifier.Congratulations on making it this far! Optimizer TuningTo achieve optimal performance, we need to use a learning rate schedule insteadof a single learning rate. 
While we won't go into detail on the Cosine decaywith warmup schedule used here, [you can read more about ithere](https://scorrea92.medium.com/cosine-learning-rate-decay-e8b50aa455b).<jupyter_code>def lr_warmup_cosine_decay( global_step, warmup_steps, hold=0, total_steps=0, start_lr=0.0, target_lr=1e-2, ): # Cosine decay learning_rate = ( 0.5 * target_lr * ( 1 + ops.cos( math.pi * ops.convert_to_tensor( global_step - warmup_steps - hold, dtype="float32" ) / ops.convert_to_tensor( total_steps - warmup_steps - hold, dtype="float32" ) ) ) ) warmup_lr = target_lr * (global_step / warmup_steps) if hold > 0: learning_rate = ops.where( global_step > warmup_steps + hold, learning_rate, target_lr ) learning_rate = ops.where(global_step < warmup_steps, warmup_lr, learning_rate) return learning_rate class WarmUpCosineDecay(schedules.LearningRateSchedule): def __init__(self, warmup_steps, total_steps, hold, start_lr=0.0, target_lr=1e-2): super().__init__() self.start_lr = start_lr self.target_lr = target_lr self.warmup_steps = warmup_steps self.total_steps = total_steps self.hold = hold def __call__(self, step): lr = lr_warmup_cosine_decay( global_step=step, total_steps=self.total_steps, warmup_steps=self.warmup_steps, start_lr=self.start_lr, target_lr=self.target_lr, hold=self.hold, ) return ops.where(step > self.total_steps, 0.0, lr)<jupyter_output><empty_output><jupyter_text>The schedule looks a as we expect.Next let's construct this optimizer:<jupyter_code>total_images = 9000 total_steps = (total_images // BATCH_SIZE) * EPOCHS warmup_steps = int(0.1 * total_steps) hold_steps = int(0.45 * total_steps) schedule = WarmUpCosineDecay( start_lr=0.05, target_lr=1e-2, warmup_steps=warmup_steps, total_steps=total_steps, hold=hold_steps, ) optimizer = optimizers.SGD( weight_decay=5e-4, learning_rate=schedule, momentum=0.9, )<jupyter_output><empty_output><jupyter_text>At long last, we can now build our model and call `fit()`!`keras_cv.models.EfficientNetV2B0Backbone()` is a convenience alias for`keras_cv.models.EfficientNetV2Backbone.from_preset('efficientnetv2_b0')`.Note that this preset does not come with any pretrained weights.<jupyter_code>backbone = keras_cv.models.EfficientNetV2B0Backbone() model = keras.Sequential( [ backbone, keras.layers.GlobalMaxPooling2D(), keras.layers.Dropout(rate=0.5), keras.layers.Dense(101, activation="softmax"), ] )<jupyter_output><empty_output><jupyter_text>Since the labels produced by MixUp() and CutMix() are somewhat artificial, weemploy label smoothing to prevent the model from overfitting to artifacts ofthis augmentation process.<jupyter_code>loss = losses.CategoricalCrossentropy(label_smoothing=0.1)<jupyter_output><empty_output><jupyter_text>Let's compile our model:<jupyter_code>model.compile( loss=loss, optimizer=optimizer, metrics=[ metrics.CategoricalAccuracy(), metrics.TopKCategoricalAccuracy(k=5), ], )<jupyter_output><empty_output><jupyter_text>and finally call fit().<jupyter_code>model.fit( train_ds, epochs=EPOCHS, validation_data=eval_ds, )<jupyter_output><empty_output>
keras-io/guides/ipynb/keras_cv/classification_with_keras_cv.ipynb/0
{ "file_path": "keras-io/guides/ipynb/keras_cv/classification_with_keras_cv.ipynb", "repo_id": "keras-io", "token_count": 7468 }
131
<jupyter_start><jupyter_text>Visualize the hyperparameter tuning process **Author:** Haifeng Jin **Date created:** 2021/06/25 **Last modified:** 2021/06/05 **Description:** Using TensorBoard to visualize the hyperparameter tuning process in KerasTuner.<jupyter_code>!pip install keras-tuner -q<jupyter_output><empty_output><jupyter_text>Introduction KerasTuner prints the logs to screen, including the values of the hyperparameters in each trial, for the user to monitor the progress. However, reading the logs is not intuitive enough to sense the influence the hyperparameters have on the results. Therefore, we provide a method to visualize the hyperparameter values and the corresponding evaluation results with interactive figures using TensorBoard. [TensorBoard](https://www.tensorflow.org/tensorboard) is a useful tool for visualizing machine learning experiments. It can monitor the losses and metrics during the model training and visualize the model architectures. Running KerasTuner with TensorBoard will give you additional features for visualizing hyperparameter tuning results using its HParams plugin. We will use a simple example of tuning a model for the MNIST image classification dataset to show how to use KerasTuner with TensorBoard. The first step is to download and format the data.<jupyter_code>import numpy as np import keras_tuner import keras from keras import layers (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Normalize the pixel values to the range of [0, 1]. x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 # Add the channel dimension to the images. x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) # Print the shapes of the data. print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape)<jupyter_output><empty_output><jupyter_text>Then, we write a `build_model` function to build the model with hyperparameters and return the model. The hyperparameters include the type of model to use (multi-layer perceptron or convolutional neural network), the number of layers, the number of units or filters, and whether to use dropout.<jupyter_code>def build_model(hp): inputs = keras.Input(shape=(28, 28, 1)) # Model type can be MLP or CNN. model_type = hp.Choice("model_type", ["mlp", "cnn"]) x = inputs if model_type == "mlp": x = layers.Flatten()(x) # Number of layers of the MLP is a hyperparameter. for i in range(hp.Int("mlp_layers", 1, 3)): # Number of units of each layer are # different hyperparameters with different names. x = layers.Dense( units=hp.Int(f"units_{i}", 32, 128, step=32), activation="relu", )(x) else: # Number of layers of the CNN is also a hyperparameter. for i in range(hp.Int("cnn_layers", 1, 3)): x = layers.Conv2D( hp.Int(f"filters_{i}", 32, 128, step=32), kernel_size=(3, 3), activation="relu", )(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Flatten()(x) # A hyperparameter for whether to use a dropout layer. if hp.Boolean("dropout"): x = layers.Dropout(0.5)(x) # The last layer contains 10 units, # which is the same as the number of classes. outputs = layers.Dense(units=10, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) # Compile the model. model.compile( loss="sparse_categorical_crossentropy", metrics=["accuracy"], optimizer="adam", ) return model<jupyter_output><empty_output><jupyter_text>We can do a quick test of the models to check if they build successfully for both CNN and MLP.<jupyter_code># Initialize the `HyperParameters` and set the values.
hp = keras_tuner.HyperParameters() hp.values["model_type"] = "cnn" # Build the model using the `HyperParameters`. model = build_model(hp) # Test if the model runs with our data. model(x_train[:100]) # Print a summary of the model. model.summary() # Do the same for the MLP model. hp.values["model_type"] = "mlp" model = build_model(hp) model(x_train[:100]) model.summary()<jupyter_output><empty_output><jupyter_text>Initialize the `RandomSearch` tuner with 10 trials, using validation accuracy as the metric for selecting models.<jupyter_code>tuner = keras_tuner.RandomSearch( build_model, max_trials=10, # Do not resume the previous search in the same directory. overwrite=True, objective="val_accuracy", # Set a directory to store the intermediate results. directory="/tmp/tb", )<jupyter_output><empty_output><jupyter_text>Start the search by calling `tuner.search(...)`. To use TensorBoard, we need to pass a `keras.callbacks.TensorBoard` instance to the callbacks.<jupyter_code>tuner.search( x_train, y_train, validation_split=0.2, epochs=2, # Use the TensorBoard callback. # The logs will be written to "/tmp/tb_logs". callbacks=[keras.callbacks.TensorBoard("/tmp/tb_logs")], )<jupyter_output><empty_output>
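<jupyter_text>After the search finishes, you can also query the results programmatically instead of (or in addition to) browsing them in TensorBoard. This is a minimal sketch that assumes the `tuner` defined above has completed its search.<jupyter_code># Retrieve the best hyperparameter values found during the search.
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hps.values)
# Print a summary of the search results.
tuner.results_summary()<jupyter_output><empty_output>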
keras-io/guides/ipynb/keras_tuner/visualize_tuning.ipynb/0
{ "file_path": "keras-io/guides/ipynb/keras_tuner/visualize_tuning.ipynb", "repo_id": "keras-io", "token_count": 1844 }
132
""" Title: Visualize the hyperparameter tuning process Author: Haifeng Jin Date created: 2021/06/25 Last modified: 2021/06/05 Description: Using TensorBoard to visualize the hyperparameter tuning process in KerasTuner. Accelerator: GPU """ """shell pip install keras-tuner -q """ """ ## Introduction KerasTuner prints the logs to screen including the values of the hyperparameters in each trial for the user to monitor the progress. However, reading the logs is not intuitive enough to sense the influences of hyperparameters have on the results, Therefore, we provide a method to visualize the hyperparameter values and the corresponding evaluation results with interactive figures using TensorBaord. [TensorBoard](https://www.tensorflow.org/tensorboard) is a useful tool for visualizing the machine learning experiments. It can monitor the losses and metrics during the model training and visualize the model architectures. Running KerasTuner with TensorBoard will give you additional features for visualizing hyperparameter tuning results using its HParams plugin. """ """ We will use a simple example of tuning a model for the MNIST image classification dataset to show how to use KerasTuner with TensorBoard. The first step is to download and format the data. """ import numpy as np import keras_tuner import keras from keras import layers (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Normalize the pixel values to the range of [0, 1]. x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 # Add the channel dimension to the images. x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) # Print the shapes of the data. print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape) """ Then, we write a `build_model` function to build the model with hyperparameters and return the model. The hyperparameters include the type of model to use (multi-layer perceptron or convolutional neural network), the number of layers, the number of units or filters, whether to use dropout. """ def build_model(hp): inputs = keras.Input(shape=(28, 28, 1)) # Model type can be MLP or CNN. model_type = hp.Choice("model_type", ["mlp", "cnn"]) x = inputs if model_type == "mlp": x = layers.Flatten()(x) # Number of layers of the MLP is a hyperparameter. for i in range(hp.Int("mlp_layers", 1, 3)): # Number of units of each layer are # different hyperparameters with different names. x = layers.Dense( units=hp.Int(f"units_{i}", 32, 128, step=32), activation="relu", )(x) else: # Number of layers of the CNN is also a hyperparameter. for i in range(hp.Int("cnn_layers", 1, 3)): x = layers.Conv2D( hp.Int(f"filters_{i}", 32, 128, step=32), kernel_size=(3, 3), activation="relu", )(x) x = layers.MaxPooling2D(pool_size=(2, 2))(x) x = layers.Flatten()(x) # A hyperparamter for whether to use dropout layer. if hp.Boolean("dropout"): x = layers.Dropout(0.5)(x) # The last layer contains 10 units, # which is the same as the number of classes. outputs = layers.Dense(units=10, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) # Compile the model. model.compile( loss="sparse_categorical_crossentropy", metrics=["accuracy"], optimizer="adam", ) return model """ We can do a quick test of the models to check if it build successfully for both CNN and MLP. """ # Initialize the `HyperParameters` and set the values. hp = keras_tuner.HyperParameters() hp.values["model_type"] = "cnn" # Build the model using the `HyperParameters`. 
model = build_model(hp) # Test if the model runs with our data. model(x_train[:100]) # Print a summary of the model. model.summary() # Do the same for the MLP model. hp.values["model_type"] = "mlp" model = build_model(hp) model(x_train[:100]) model.summary() """ Initialize the `RandomSearch` tuner with 10 trials, using validation accuracy as the metric for selecting models. """ tuner = keras_tuner.RandomSearch( build_model, max_trials=10, # Do not resume the previous search in the same directory. overwrite=True, objective="val_accuracy", # Set a directory to store the intermediate results. directory="/tmp/tb", ) """ Start the search by calling `tuner.search(...)`. To use TensorBoard, we need to pass a `keras.callbacks.TensorBoard` instance to the callbacks. """ tuner.search( x_train, y_train, validation_split=0.2, epochs=2, # Use the TensorBoard callback. # The logs will be written to "/tmp/tb_logs". callbacks=[keras.callbacks.TensorBoard("/tmp/tb_logs")], ) """ If running in Colab, the following two commands will show you the TensorBoard inside Colab. `%load_ext tensorboard` `%tensorboard --logdir /tmp/tb_logs` You have access to all the common features of TensorBoard. For example, you can view the loss and metrics curves and visualize the computational graph of the models in different trials. ![Loss and metrics curves](https://i.imgur.com/ShulDtI.png) ![Computational graphs](https://i.imgur.com/8sRiT1I.png) In addition to these features, we also have an HParams tab, in which there are three views. In the table view, you can view the 10 different trials in a table with the different hyperparameter values and evaluation metrics. ![Table view](https://i.imgur.com/OMcQdOw.png) On the left side, you can specify the filters for certain hyperparameters. For example, you can specify to only view the MLP models without the dropout layer and with 1 to 2 dense layers. ![Filtered table view](https://i.imgur.com/yZpfaxN.png) Besides the table view, it also provides two other views: the parallel coordinates view and the scatter plot matrix view. They are just different visualization methods for the same data. You can still use the panel on the left to filter the results. In the parallel coordinates view, each colored line is a trial. The axes are the hyperparameters and evaluation metrics. ![Parallel coordinates view](https://i.imgur.com/PJ7HQUQ.png) In the scatter plot matrix view, each dot is a trial. The plots are projections of the trials on planes with different hyperparameters and metrics as the axes. ![Scatter plot matrix view](https://i.imgur.com/zjPjh6o.png) """
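"""
The HParams views are convenient for exploration, but the same results can also be
queried programmatically. The snippet below is a minimal sketch that assumes the
`tuner` and `build_model` defined above: it retrieves the best hyperparameter values
and rebuilds a fresh model from them.
"""

best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hps.values)

# Rebuild a model with the best hyperparameters for further training or export.
best_model = build_model(best_hps)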
keras-io/guides/keras_tuner/visualize_tuning.py/0
{ "file_path": "keras-io/guides/keras_tuner/visualize_tuning.py", "repo_id": "keras-io", "token_count": 2233 }
133
# Using KerasCV COCO Metrics **Author:** [lukewood](https://twitter.com/luke_wood_ml)<br> **Date created:** 2022/04/13<br> **Last modified:** 2022/04/13<br> **Description:** Use KerasCV COCO metrics to evaluate object detection models. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_cv/coco_metrics.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_cv/coco_metrics.py) --- ## Overview With KerasCV's COCO metrics implementation, you can easily evaluate your object detection model's performance all from within the TensorFlow graph. This guide shows you how to use KerasCV's COCO metrics and integrate them into your own model evaluation pipeline. Historically, users have evaluated COCO metrics as a post-training step. KerasCV offers an in-graph implementation of COCO metrics, enabling users to evaluate COCO metrics *during* training! Let's get started using KerasCV's COCO metrics. --- ## Input format All KerasCV components that process bounding boxes, including COCO metrics, require a `bounding_box_format` parameter. This parameter is used to tell the components what format your bounding boxes are in. While this guide uses the `xyxy` format, a full list of supported formats is available in [the bounding_box API documentation](https://keras.io/api/keras_cv/bounding_box/formats/). The metrics expect `y_true` to be a `float` Tensor with the shape `[batch, num_images, num_boxes, 5]`, with the ordering of the last set of axes determined by the provided format. The same is true of `y_pred`, except that an additional `confidence` axis must be provided. Due to the fact that each image may have a different number of bounding boxes, the `num_boxes` dimension may actually have a mismatching shape between images. KerasCV works around this by allowing you to either pass a `RaggedTensor` as an input to the KerasCV COCO metrics, or padding unused bounding boxes with `-1`. Utility functions to manipulate bounding boxes, transform between formats, and pad bounding box Tensors with `-1s` are available from the [`keras_cv.bounding_box`](https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box) package. --- ## Independent metric use The first usage pattern for KerasCV COCO metrics is to manually call the `update_state()` and `result()` methods. This pattern is recommended for users who want finer-grained control of their metric evaluation, or want to use a different format for `y_pred` in their model. Let's run through a quick code example. 1.) First, we must construct our metric: ```python import keras_cv # import all modules we will need in this example import tensorflow as tf from tensorflow import keras # only consider boxes with areas less than a 32x32 square. metric = keras_cv.metrics.COCORecall( bounding_box_format="xyxy", class_ids=[1, 2, 3], area_range=(0, 32**2) ) ``` 2.) Create some bounding boxes: ```python y_true = tf.ragged.stack( [ # image 1 tf.constant([[0, 0, 10, 10, 1], [11, 12, 30, 30, 2]], tf.float32), # image 2 tf.constant([[0, 0, 10, 10, 1]], tf.float32), ] ) y_pred = tf.ragged.stack( [ # predictions for image 1 tf.constant([[5, 5, 10, 10, 1, 0.9]], tf.float32), # predictions for image 2 tf.constant([[0, 0, 10, 10, 1, 1.0], [5, 5, 10, 10, 1, 0.9]], tf.float32), ] ) ``` 3.)
Update metric state: ```python metric.update_state(y_true, y_pred) ``` 4.) Evaluate the result: ```python metric.result() ``` <div class="k-default-codeblock"> ``` <tf.Tensor: shape=(), dtype=float32, numpy=0.25> ``` </div> Evaluating COCORecall for your object detection model is as simple as that! --- ## Metric use in a model You can also leverage COCORecall in your model's training loop. Let's walk through this process. 1.) Construct the metric and a dummy model: ```python i = keras.layers.Input((None, 6)) model = keras.Model(i, i) ``` 2.) Create some fake bounding boxes: ```python y_true = tf.constant([[[0, 0, 10, 10, 1], [5, 5, 10, 10, 1]]], tf.float32) y_pred = tf.constant([[[0, 0, 10, 10, 1, 1.0], [5, 5, 10, 10, 1, 0.9]]], tf.float32) ``` 3.) Create the metric and compile the model: ```python recall = keras_cv.metrics.COCORecall( bounding_box_format="xyxy", max_detections=100, class_ids=[1], area_range=(0, 64**2), name="coco_recall", ) model.compile(metrics=[recall]) ``` 4.) Use `model.evaluate()` to evaluate the metric: ```python model.evaluate(y_pred, y_true, return_dict=True) ``` <div class="k-default-codeblock"> ``` 1/1 [==============================] - 1s 1s/step - loss: 0.0000e+00 - coco_recall: 1.0000 {'loss': 0.0, 'coco_recall': 1.0} ``` </div> Looks great! That's all it takes to use KerasCV's COCO metrics to evaluate object detection models. --- ## Supported constructor parameters KerasCV COCO Metrics are sufficiently parameterized to support all of the permutations evaluated in the original COCO challenge, all metrics evaluated in the accompanying `pycocotools` library, and more! Check out the full documentation for [`COCORecall`](/api/keras_cv/metrics/coco_recall/) and [`COCOMeanAveragePrecision`](/api/keras_cv/metrics/coco_mean_average_precision/). --- ## Conclusion & next steps KerasCV makes it easier than ever before to evaluate a Keras object detection model. Historically, users had to perform post-training evaluation. With KerasCV, you can perform train-time evaluation to see how these metrics evolve over time! As an additional exercise for readers, you can: - Configure `iou_thresholds`, `max_detections`, and `area_range` to reproduce the suite of metrics evaluated in `pycocotools` (a starting sketch is shown below) - Integrate COCO metrics into a RetinaNet using the [keras.io RetinaNet example](https://keras.io/examples/vision/retinanet/)
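As a starting point for the first exercise, here is a hedged sketch rather than a full reproduction of the `pycocotools` suite. It only reuses constructor arguments already shown in this guide (`bounding_box_format`, `class_ids`, `area_range`, `max_detections`, and `name`); the specific class IDs are placeholder choices.

```python
# Sketch: a recall metric restricted to small boxes, mirroring the
# "small objects" area bucket used by pycocotools (area < 32**2 pixels).
small_box_recall = keras_cv.metrics.COCORecall(
    bounding_box_format="xyxy",
    class_ids=[1, 2, 3],
    area_range=(0, 32**2),
    max_detections=100,
    name="coco_recall_small",
)
```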
keras-io/guides/md/keras_cv/coco_metrics.md/0
{ "file_path": "keras-io/guides/md/keras_cv/coco_metrics.md", "repo_id": "keras-io", "token_count": 2087 }
134
# Making new layers and models via subclassing **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2019/03/01<br> **Last modified:** 2023/06/25<br> **Description:** Complete guide to writing `Layer` and `Model` objects from scratch. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/making_new_layers_and_models_via_subclassing.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/making_new_layers_and_models_via_subclassing.py) --- ## Introduction This guide will cover everything you need to know to build your own subclassed layers and models. In particular, you'll learn about the following features: - The `Layer` class - The `add_weight()` method - Trainable and non-trainable weights - The `build()` method - Making sure your layers can be used with any backend - The `add_loss()` method - The `training` argument in `call()` - The `mask` argument in `call()` - Making sure your layers can be serialized Let's dive in. --- ## Setup ```python import numpy as np import keras from keras import ops from keras import layers ``` --- ## The `Layer` class: the combination of state (weights) and some computation One of the central abstractions in Keras is the `Layer` class. A layer encapsulates both a state (the layer's "weights") and a transformation from inputs to outputs (a "call", the layer's forward pass). Here's a densely-connected layer. It has two state variables: the variables `w` and `b`. ```python class Linear(keras.layers.Layer): def __init__(self, units=32, input_dim=32): super().__init__() self.w = self.add_weight( shape=(input_dim, units), initializer="random_normal", trainable=True, ) self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True) def call(self, inputs): return ops.matmul(inputs, self.w) + self.b ``` You would use a layer by calling it on some tensor input(s), much like a Python function. ```python x = ops.ones((2, 2)) linear_layer = Linear(4, 2) y = linear_layer(x) print(y) ``` <div class="k-default-codeblock"> ``` [[ 0.085416 -0.06821361 -0.00741937 -0.03429271] [ 0.085416 -0.06821361 -0.00741937 -0.03429271]] ``` </div> Note that the weights `w` and `b` are automatically tracked by the layer upon being set as layer attributes: ```python assert linear_layer.weights == [linear_layer.w, linear_layer.b] ``` --- ## Layers can have non-trainable weights Besides trainable weights, you can add non-trainable weights to a layer as well. Such weights are meant not to be taken into account during backpropagation, when you are training the layer. Here's how to add and use a non-trainable weight: ```python class ComputeSum(keras.layers.Layer): def __init__(self, input_dim): super().__init__() self.total = self.add_weight( initializer="zeros", shape=(input_dim,), trainable=False ) def call(self, inputs): self.total.assign_add(ops.sum(inputs, axis=0)) return self.total x = ops.ones((2, 2)) my_sum = ComputeSum(2) y = my_sum(x) print(y.numpy()) y = my_sum(x) print(y.numpy()) ``` <div class="k-default-codeblock"> ``` [2. 2.] [4. 4.] 
``` </div> It's part of `layer.weights`, but it gets categorized as a non-trainable weight: ```python print("weights:", len(my_sum.weights)) print("non-trainable weights:", len(my_sum.non_trainable_weights)) # It's not included in the trainable weights: print("trainable_weights:", my_sum.trainable_weights) ``` <div class="k-default-codeblock"> ``` weights: 1 non-trainable weights: 1 trainable_weights: [] ``` </div> --- ## Best practice: deferring weight creation until the shape of the inputs is known Our `Linear` layer above took an `input_dim` argument that was used to compute the shape of the weights `w` and `b` in `__init__()`: ```python class Linear(keras.layers.Layer): def __init__(self, units=32, input_dim=32): super().__init__() self.w = self.add_weight( shape=(input_dim, units), initializer="random_normal", trainable=True, ) self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True) def call(self, inputs): return ops.matmul(inputs, self.w) + self.b ``` In many cases, you may not know in advance the size of your inputs, and you would like to lazily create weights when that value becomes known, some time after instantiating the layer. In the Keras API, we recommend creating layer weights in the `build(self, inputs_shape)` method of your layer. Like this: ```python class Linear(keras.layers.Layer): def __init__(self, units=32): super().__init__() self.units = units def build(self, input_shape): self.w = self.add_weight( shape=(input_shape[-1], self.units), initializer="random_normal", trainable=True, ) self.b = self.add_weight( shape=(self.units,), initializer="random_normal", trainable=True ) def call(self, inputs): return ops.matmul(inputs, self.w) + self.b ``` The `__call__()` method of your layer will automatically run build the first time it is called. You now have a layer that's lazy and thus easier to use: ```python # At instantiation, we don't know on what inputs this is going to get called linear_layer = Linear(32) # The layer's weights are created dynamically the first time the layer is called y = linear_layer(x) ``` Implementing `build()` separately as shown above nicely separates creating weights only once from using weights in every call. --- ## Layers are recursively composable If you assign a Layer instance as an attribute of another Layer, the outer layer will start tracking the weights created by the inner layer. We recommend creating such sublayers in the `__init__()` method and leave it to the first `__call__()` to trigger building their weights. ```python class MLPBlock(keras.layers.Layer): def __init__(self): super().__init__() self.linear_1 = Linear(32) self.linear_2 = Linear(32) self.linear_3 = Linear(1) def call(self, inputs): x = self.linear_1(inputs) x = keras.activations.relu(x) x = self.linear_2(x) x = keras.activations.relu(x) return self.linear_3(x) mlp = MLPBlock() y = mlp(ops.ones(shape=(3, 64))) # The first call to the `mlp` will create the weights print("weights:", len(mlp.weights)) print("trainable weights:", len(mlp.trainable_weights)) ``` <div class="k-default-codeblock"> ``` weights: 6 trainable weights: 6 ``` </div> --- ## Backend-agnostic layers and backend-specific layers As long as a layer only uses APIs from the `keras.ops` namespace (or other Keras namespaces such as `keras.activations`, `keras.random`, or `keras.layers`), then it can be used with any backend -- TensorFlow, JAX, or PyTorch. All layers you've seen so far in this guide work with all Keras backends. 
The `keras.ops` namespace gives you access to: - The NumPy API, e.g. `ops.matmul`, `ops.sum`, `ops.reshape`, `ops.stack`, etc. - Neural networks-specific APIs such as `ops.softmax`, `ops`.conv`, `ops.binary_crossentropy`, `ops.relu`, etc. You can also use backend-native APIs in your layers (such as `tf.nn` functions), but if you do this, then your layer will only be usable with the backend in question. For instance, you could write the following JAX-specific layer using `jax.numpy`: ```python import jax class Linear(keras.layers.Layer): ... def call(self, inputs): return jax.numpy.matmul(inputs, self.w) + self.b ``` This would be the equivalent TensorFlow-specific layer: ```python import tensorflow as tf class Linear(keras.layers.Layer): ... def call(self, inputs): return tf.matmul(inputs, self.w) + self.b ``` And this would be the equivalent PyTorch-specific layer: ```python import torch class Linear(keras.layers.Layer): ... def call(self, inputs): return torch.matmul(inputs, self.w) + self.b ``` Because cross-backend compatibility is a tremendously useful property, we strongly recommend that you seek to always make your layers backend-agnostic by leveraging only Keras APIs. --- ## The `add_loss()` method When writing the `call()` method of a layer, you can create loss tensors that you will want to use later, when writing your training loop. This is doable by calling `self.add_loss(value)`: ```python # A layer that creates an activity regularization loss class ActivityRegularizationLayer(keras.layers.Layer): def __init__(self, rate=1e-2): super().__init__() self.rate = rate def call(self, inputs): self.add_loss(self.rate * ops.mean(inputs)) return inputs ``` These losses (including those created by any inner layer) can be retrieved via `layer.losses`. This property is reset at the start of every `__call__()` to the top-level layer, so that `layer.losses` always contains the loss values created during the last forward pass. ```python class OuterLayer(keras.layers.Layer): def __init__(self): super().__init__() self.activity_reg = ActivityRegularizationLayer(1e-2) def call(self, inputs): return self.activity_reg(inputs) layer = OuterLayer() assert len(layer.losses) == 0 # No losses yet since the layer has never been called _ = layer(ops.zeros((1, 1))) assert len(layer.losses) == 1 # We created one loss value # `layer.losses` gets reset at the start of each __call__ _ = layer(ops.zeros((1, 1))) assert len(layer.losses) == 1 # This is the loss created during the call above ``` In addition, the `loss` property also contains regularization losses created for the weights of any inner layer: ```python class OuterLayerWithKernelRegularizer(keras.layers.Layer): def __init__(self): super().__init__() self.dense = keras.layers.Dense( 32, kernel_regularizer=keras.regularizers.l2(1e-3) ) def call(self, inputs): return self.dense(inputs) layer = OuterLayerWithKernelRegularizer() _ = layer(ops.zeros((1, 1))) # This is `1e-3 * sum(layer.dense.kernel ** 2)`, # created by the `kernel_regularizer` above. print(layer.losses) ``` <div class="k-default-codeblock"> ``` [Array(0.00217911, dtype=float32)] ``` </div> These losses are meant to be taken into account when writing custom training loops. 
They also work seamlessly with `fit()` (they get automatically summed and added to the main loss, if any): ```python inputs = keras.Input(shape=(3,)) outputs = ActivityRegularizationLayer()(inputs) model = keras.Model(inputs, outputs) # If there is a loss passed in `compile`, the regularization # losses get added to it model.compile(optimizer="adam", loss="mse") model.fit(np.random.random((2, 3)), np.random.random((2, 3))) # It's also possible not to pass any loss in `compile`, # since the model already has a loss to minimize, via the `add_loss` # call during the forward pass! model.compile(optimizer="adam") model.fit(np.random.random((2, 3)), np.random.random((2, 3))) ``` <div class="k-default-codeblock"> ``` 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 60ms/step - loss: 0.2650 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 15ms/step - loss: 0.0050 <keras.src.callbacks.history.History at 0x146f71960> ``` </div> --- ## You can optionally enable serialization on your layers If you need your custom layers to be serializable as part of a [Functional model](/guides/functional_api/), you can optionally implement a `get_config()` method: ```python class Linear(keras.layers.Layer): def __init__(self, units=32): super().__init__() self.units = units def build(self, input_shape): self.w = self.add_weight( shape=(input_shape[-1], self.units), initializer="random_normal", trainable=True, ) self.b = self.add_weight( shape=(self.units,), initializer="random_normal", trainable=True ) def call(self, inputs): return ops.matmul(inputs, self.w) + self.b def get_config(self): return {"units": self.units} # Now you can recreate the layer from its config: layer = Linear(64) config = layer.get_config() print(config) new_layer = Linear.from_config(config) ``` <div class="k-default-codeblock"> ``` {'units': 64} ``` </div> Note that the `__init__()` method of the base `Layer` class takes some keyword arguments, in particular a `name` and a `dtype`. It's good practice to pass these arguments to the parent class in `__init__()` and to include them in the layer config: ```python class Linear(keras.layers.Layer): def __init__(self, units=32, **kwargs): super().__init__(**kwargs) self.units = units def build(self, input_shape): self.w = self.add_weight( shape=(input_shape[-1], self.units), initializer="random_normal", trainable=True, ) self.b = self.add_weight( shape=(self.units,), initializer="random_normal", trainable=True ) def call(self, inputs): return ops.matmul(inputs, self.w) + self.b def get_config(self): config = super().get_config() config.update({"units": self.units}) return config layer = Linear(64) config = layer.get_config() print(config) new_layer = Linear.from_config(config) ``` <div class="k-default-codeblock"> ``` {'name': 'linear_7', 'trainable': True, 'dtype': 'float32', 'units': 64} ``` </div> If you need more flexibility when deserializing the layer from its config, you can also override the `from_config()` class method. This is the base implementation of `from_config()`: ```python def from_config(cls, config): return cls(**config) ``` To learn more about serialization and saving, see the complete [guide to saving and serializing models](/guides/serialization_and_saving/). --- ## Privileged `training` argument in the `call()` method Some layers, in particular the `BatchNormalization` layer and the `Dropout` layer, have different behaviors during training and inference. For such layers, it is standard practice to expose a `training` (boolean) argument in the `call()` method. 
By exposing this argument in `call()`, you enable the built-in training and evaluation loops (e.g. `fit()`) to correctly use the layer in training and inference. ```python class CustomDropout(keras.layers.Layer): def __init__(self, rate, **kwargs): super().__init__(**kwargs) self.rate = rate self.seed_generator = keras.random.SeedGenerator(1337) def call(self, inputs, training=None): if training: return keras.random.dropout( inputs, rate=self.rate, seed=self.seed_generator ) return inputs ``` --- ## Privileged `mask` argument in the `call()` method The other privileged argument supported by `call()` is the `mask` argument. You will find it in all Keras RNN layers. A mask is a boolean tensor (one boolean value per timestep in the input) used to skip certain input timesteps when processing timeseries data. Keras will automatically pass the correct `mask` argument to `__call__()` for layers that support it, when a mask is generated by a prior layer. Mask-generating layers are the `Embedding` layer configured with `mask_zero=True`, and the `Masking` layer. --- ## The `Model` class In general, you will use the `Layer` class to define inner computation blocks, and will use the `Model` class to define the outer model -- the object you will train. For instance, in a ResNet50 model, you would have several ResNet blocks subclassing `Layer`, and a single `Model` encompassing the entire ResNet50 network. The `Model` class has the same API as `Layer`, with the following differences: - It exposes built-in training, evaluation, and prediction loops (`model.fit()`, `model.evaluate()`, `model.predict()`). - It exposes the list of its inner layers, via the `model.layers` property. - It exposes saving and serialization APIs (`save()`, `save_weights()`...) Effectively, the `Layer` class corresponds to what we refer to in the literature as a "layer" (as in "convolution layer" or "recurrent layer") or as a "block" (as in "ResNet block" or "Inception block"). Meanwhile, the `Model` class corresponds to what is referred to in the literature as a "model" (as in "deep learning model") or as a "network" (as in "deep neural network"). So if you're wondering, "should I use the `Layer` class or the `Model` class?", ask yourself: will I need to call `fit()` on it? Will I need to call `save()` on it? If so, go with `Model`. If not (either because your class is just a block in a bigger system, or because you are writing training & saving code yourself), use `Layer`. For instance, we could take our mini-resnet example above, and use it to build a `Model` that we could train with `fit()`, and that we could save with `save_weights()`: ```python class ResNet(keras.Model): def __init__(self, num_classes=1000): super().__init__() self.block_1 = ResNetBlock() self.block_2 = ResNetBlock() self.global_pool = layers.GlobalAveragePooling2D() self.classifier = Dense(num_classes) def call(self, inputs): x = self.block_1(inputs) x = self.block_2(x) x = self.global_pool(x) return self.classifier(x) resnet = ResNet() dataset = ... resnet.fit(dataset, epochs=10) resnet.save(filepath.keras) ``` --- ## Putting it all together: an end-to-end example Here's what you've learned so far: - A `Layer` encapsulate a state (created in `__init__()` or `build()`) and some computation (defined in `call()`). - Layers can be recursively nested to create new, bigger computation blocks. - Layers are backend-agnostic as long as they only use Keras APIs. 
You can use backend-native APIs (such as `jax.numpy`, `torch.nn` or `tf.nn`), but then your layer will only be usable with that specific backend. - Layers can create and track losses (typically regularization losses) via `add_loss()`. - The outer container, the thing you want to train, is a `Model`. A `Model` is just like a `Layer`, but with added training and serialization utilities. Let's put all of these things together into an end-to-end example: we're going to implement a Variational AutoEncoder (VAE) in a backend-agnostic fashion -- so that it runs the same with TensorFlow, JAX, and PyTorch. We'll train it on MNIST digits. Our VAE will be a subclass of `Model`, built as a nested composition of layers that subclass `Layer`. It will feature a regularization loss (KL divergence). ```python class Sampling(layers.Layer): """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit.""" def __init__(self, **kwargs): super().__init__(**kwargs) self.seed_generator = keras.random.SeedGenerator(1337) def call(self, inputs): z_mean, z_log_var = inputs batch = ops.shape(z_mean)[0] dim = ops.shape(z_mean)[1] epsilon = keras.random.normal(shape=(batch, dim), seed=self.seed_generator) return z_mean + ops.exp(0.5 * z_log_var) * epsilon class Encoder(layers.Layer): """Maps MNIST digits to a triplet (z_mean, z_log_var, z).""" def __init__(self, latent_dim=32, intermediate_dim=64, name="encoder", **kwargs): super().__init__(name=name, **kwargs) self.dense_proj = layers.Dense(intermediate_dim, activation="relu") self.dense_mean = layers.Dense(latent_dim) self.dense_log_var = layers.Dense(latent_dim) self.sampling = Sampling() def call(self, inputs): x = self.dense_proj(inputs) z_mean = self.dense_mean(x) z_log_var = self.dense_log_var(x) z = self.sampling((z_mean, z_log_var)) return z_mean, z_log_var, z class Decoder(layers.Layer): """Converts z, the encoded digit vector, back into a readable digit.""" def __init__(self, original_dim, intermediate_dim=64, name="decoder", **kwargs): super().__init__(name=name, **kwargs) self.dense_proj = layers.Dense(intermediate_dim, activation="relu") self.dense_output = layers.Dense(original_dim, activation="sigmoid") def call(self, inputs): x = self.dense_proj(inputs) return self.dense_output(x) class VariationalAutoEncoder(keras.Model): """Combines the encoder and decoder into an end-to-end model for training.""" def __init__( self, original_dim, intermediate_dim=64, latent_dim=32, name="autoencoder", **kwargs ): super().__init__(name=name, **kwargs) self.original_dim = original_dim self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim) self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim) def call(self, inputs): z_mean, z_log_var, z = self.encoder(inputs) reconstructed = self.decoder(z) # Add KL divergence regularization loss. 
kl_loss = -0.5 * ops.mean( z_log_var - ops.square(z_mean) - ops.exp(z_log_var) + 1 ) self.add_loss(kl_loss) return reconstructed ``` Let's train it on MNIST using the `fit()` API: ```python (x_train, _), _ = keras.datasets.mnist.load_data() x_train = x_train.reshape(60000, 784).astype("float32") / 255 original_dim = 784 vae = VariationalAutoEncoder(784, 64, 32) optimizer = keras.optimizers.Adam(learning_rate=1e-3) vae.compile(optimizer, loss=keras.losses.MeanSquaredError()) vae.fit(x_train, x_train, epochs=2, batch_size=64) ``` <div class="k-default-codeblock"> ``` Epoch 1/2 938/938 ━━━━━━━━━━━━━━━━━━━━ 2s 1ms/step - loss: 0.0942 Epoch 2/2 938/938 ━━━━━━━━━━━━━━━━━━━━ 1s 859us/step - loss: 0.0677 <keras.src.callbacks.history.History at 0x146fe62f0> ``` </div>
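Once trained, the sub-layers of the VAE can be reused directly. As a small illustration -- a minimal sketch that assumes the `vae` instance trained above and its latent dimension of 32 -- you can decode random latent vectors into new digit images:

```python
# Sample a few random latent vectors and decode them with the trained decoder.
z = keras.random.normal(shape=(4, 32))  # latent_dim=32, as configured above
generated_digits = vae.decoder(z)  # shape (4, 784), values in [0, 1] from the sigmoid
```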
keras-io/guides/md/making_new_layers_and_models_via_subclassing.md/0
{ "file_path": "keras-io/guides/md/making_new_layers_and_models_via_subclassing.md", "repo_id": "keras-io", "token_count": 8198 }
135
""" Title: Save, serialize, and export models Authors: Neel Kovelamudi, Francois Chollet Date created: 2023/06/14 Last modified: 2023/06/30 Description: Complete guide to saving, serializing, and exporting models. Accelerator: None """ """ ## Introduction A Keras model consists of multiple components: - The architecture, or configuration, which specifies what layers the model contain, and how they're connected. - A set of weights values (the "state of the model"). - An optimizer (defined by compiling the model). - A set of losses and metrics (defined by compiling the model). The Keras API saves all of these pieces together in a unified format, marked by the `.keras` extension. This is a zip archive consisting of the following: - A JSON-based configuration file (config.json): Records of model, layer, and other trackables' configuration. - A H5-based state file, such as `model.weights.h5` (for the whole model), with directory keys for layers and their weights. - A metadata file in JSON, storing things such as the current Keras version. Let's take a look at how this works. """ """ ## How to save and load a model If you only have 10 seconds to read this guide, here's what you need to know. **Saving a Keras model:** ```python model = ... # Get model (Sequential, Functional Model, or Model subclass) model.save('path/to/location.keras') # The file needs to end with the .keras extension ``` **Loading the model back:** ```python model = keras.models.load_model('path/to/location.keras') ``` Now, let's look at the details. """ """ ## Setup """ import numpy as np import keras from keras import ops """ ## Saving This section is about saving an entire model to a single file. The file will include: - The model's architecture/config - The model's weight values (which were learned during training) - The model's compilation information (if `compile()` was called) - The optimizer and its state, if any (this enables you to restart training where you left) #### APIs You can save a model with `model.save()` or `keras.models.save_model()` (which is equivalent). You can load it back with `keras.models.load_model()`. The only supported format in Keras 3 is the "Keras v3" format, which uses the `.keras` extension. **Example:** """ def get_model(): # Create a simple model. inputs = keras.Input(shape=(32,)) outputs = keras.layers.Dense(1)(inputs) model = keras.Model(inputs, outputs) model.compile(optimizer=keras.optimizers.Adam(), loss="mean_squared_error") return model model = get_model() # Train the model. test_input = np.random.random((128, 32)) test_target = np.random.random((128, 1)) model.fit(test_input, test_target) # Calling `save('my_model.keras')` creates a zip archive `my_model.keras`. model.save("my_model.keras") # It can be used to reconstruct the model identically. reconstructed_model = keras.models.load_model("my_model.keras") # Let's check: np.testing.assert_allclose( model.predict(test_input), reconstructed_model.predict(test_input) ) """ ### Custom objects This section covers the basic workflows for handling custom layers, functions, and models in Keras saving and reloading. When saving a model that includes custom objects, such as a subclassed Layer, you **must** define a `get_config()` method on the object class. If the arguments passed to the constructor (`__init__()` method) of the custom object aren't Python objects (anything other than base types like ints, strings, etc.), then you **must** also explicitly deserialize these arguments in the `from_config()` class method. 
Like this: ```python class CustomLayer(keras.layers.Layer): def __init__(self, sublayer, **kwargs): super().__init__(**kwargs) self.sublayer = sublayer def call(self, x): return self.sublayer(x) def get_config(self): base_config = super().get_config() config = { "sublayer": keras.saving.serialize_keras_object(self.sublayer), } return {**base_config, **config} @classmethod def from_config(cls, config): sublayer_config = config.pop("sublayer") sublayer = keras.saving.deserialize_keras_object(sublayer_config) return cls(sublayer, **config) ``` Please see the [Defining the config methods section](#config_methods) for more details and examples. The saved `.keras` file is lightweight and does not store the Python code for custom objects. Therefore, to reload the model, `load_model` requires access to the definition of any custom objects used through one of the following methods: 1. Registering custom objects **(preferred)**, 2. Passing custom objects directly when loading, or 3. Using a custom object scope Below are examples of each workflow: #### Registering custom objects (**preferred**) This is the preferred method, as custom object registration greatly simplifies saving and loading code. Adding the `@keras.saving.register_keras_serializable` decorator to the class definition of a custom object registers the object globally in a master list, allowing Keras to recognize the object when loading the model. Let's create a custom model involving both a custom layer and a custom activation function to demonstrate this. **Example:** """ # Clear all previously registered custom objects keras.saving.get_custom_objects().clear() # Upon registration, you can optionally specify a package or a name. # If left blank, the package defaults to `Custom` and the name defaults to # the class name. @keras.saving.register_keras_serializable(package="MyLayers") class CustomLayer(keras.layers.Layer): def __init__(self, factor): super().__init__() self.factor = factor def call(self, x): return x * self.factor def get_config(self): return {"factor": self.factor} @keras.saving.register_keras_serializable(package="my_package", name="custom_fn") def custom_fn(x): return x**2 # Create the model. def get_model(): inputs = keras.Input(shape=(4,)) mid = CustomLayer(0.5)(inputs) outputs = keras.layers.Dense(1, activation=custom_fn)(mid) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="mean_squared_error") return model # Train the model. def train_model(model): input = np.random.random((4, 4)) target = np.random.random((4, 1)) model.fit(input, target) return model test_input = np.random.random((4, 4)) test_target = np.random.random((4, 1)) model = get_model() model = train_model(model) model.save("custom_model.keras") # Now, we can simply load without worrying about our custom objects. reconstructed_model = keras.models.load_model("custom_model.keras") # Let's check: np.testing.assert_allclose( model.predict(test_input), reconstructed_model.predict(test_input) ) """ #### Passing custom objects to `load_model()` """ model = get_model() model = train_model(model) # Calling `save('custom_model.keras')` creates a zip archive `custom_model.keras`. model.save("custom_model.keras") # Upon loading, pass a dict containing the custom objects used in the # `custom_objects` argument of `keras.models.load_model()`.
reconstructed_model = keras.models.load_model( "custom_model.keras", custom_objects={"CustomLayer": CustomLayer, "custom_fn": custom_fn}, ) # Let's check: np.testing.assert_allclose( model.predict(test_input), reconstructed_model.predict(test_input) ) """ #### Using a custom object scope Any code within the custom object scope will be able to recognize the custom objects passed to the scope argument. Therefore, loading the model within the scope will allow the loading of our custom objects. **Example:** """ model = get_model() model = train_model(model) model.save("custom_model.keras") # Pass the custom objects dictionary to a custom object scope and place # the `keras.models.load_model()` call within the scope. custom_objects = {"CustomLayer": CustomLayer, "custom_fn": custom_fn} with keras.saving.custom_object_scope(custom_objects): reconstructed_model = keras.models.load_model("custom_model.keras") # Let's check: np.testing.assert_allclose( model.predict(test_input), reconstructed_model.predict(test_input) ) """ ### Model serialization This section is about saving only the model's configuration, without its state. The model's configuration (or architecture) specifies what layers the model contains, and how these layers are connected. If you have the configuration of a model, then the model can be created with a freshly initialized state (no weights or compilation information). #### APIs The following serialization APIs are available: - `keras.models.clone_model(model)`: make a (randomly initialized) copy of a model. - `get_config()` and `cls.from_config()`: retrieve the configuration of a layer or model, and recreate a model instance from its config, respectively. - `keras.models.model_to_json()` and `keras.models.model_from_json()`: similar, but as JSON strings. - `keras.saving.serialize_keras_object()`: retrieve the configuration any arbitrary Keras object. - `keras.saving.deserialize_keras_object()`: recreate an object instance from its configuration. #### In-memory model cloning You can do in-memory cloning of a model via `keras.models.clone_model()`. This is equivalent to getting the config then recreating the model from its config (so it does not preserve compilation information or layer weights values). **Example:** """ new_model = keras.models.clone_model(model) """ #### `get_config()` and `from_config()` Calling `model.get_config()` or `layer.get_config()` will return a Python dict containing the configuration of the model or layer, respectively. You should define `get_config()` to contain arguments needed for the `__init__()` method of the model or layer. At loading time, the `from_config(config)` method will then call `__init__()` with these arguments to reconstruct the model or layer. 
**Layer example:** """ layer = keras.layers.Dense(3, activation="relu") layer_config = layer.get_config() print(layer_config) """ Now let's reconstruct the layer using the `from_config()` method: """ new_layer = keras.layers.Dense.from_config(layer_config) """ **Sequential model example:** """ model = keras.Sequential([keras.Input((32,)), keras.layers.Dense(1)]) config = model.get_config() new_model = keras.Sequential.from_config(config) """ **Functional model example:** """ inputs = keras.Input((32,)) outputs = keras.layers.Dense(1)(inputs) model = keras.Model(inputs, outputs) config = model.get_config() new_model = keras.Model.from_config(config) """ #### `to_json()` and `keras.models.model_from_json()` This is similar to `get_config` / `from_config`, except it turns the model into a JSON string, which can then be loaded without the original model class. It is also specific to models, it isn't meant for layers. **Example:** """ model = keras.Sequential([keras.Input((32,)), keras.layers.Dense(1)]) json_config = model.to_json() new_model = keras.models.model_from_json(json_config) """ #### Arbitrary object serialization and deserialization The `keras.saving.serialize_keras_object()` and `keras.saving.deserialize_keras_object()` APIs are general-purpose APIs that can be used to serialize or deserialize any Keras object and any custom object. It is at the foundation of saving model architecture and is behind all `serialize()`/`deserialize()` calls in keras. **Example**: """ my_reg = keras.regularizers.L1(0.005) config = keras.saving.serialize_keras_object(my_reg) print(config) """ Note the serialization format containing all the necessary information for proper reconstruction: - `module` containing the name of the Keras module or other identifying module the object comes from - `class_name` containing the name of the object's class. - `config` with all the information needed to reconstruct the object - `registered_name` for custom objects. See [here](#custom_object_serialization). Now we can reconstruct the regularizer. """ new_reg = keras.saving.deserialize_keras_object(config) """ ### Model weights saving You can choose to only save & load a model's weights. This can be useful if: - You only need the model for inference: in this case you won't need to restart training, so you don't need the compilation information or optimizer state. - You are doing transfer learning: in this case you will be training a new model reusing the state of a prior model, so you don't need the compilation information of the prior model. #### APIs for in-memory weight transfer Weights can be copied between different objects by using `get_weights()` and `set_weights()`: * `keras.layers.Layer.get_weights()`: Returns a list of NumPy arrays of weight values. * `keras.layers.Layer.set_weights(weights)`: Sets the model weights to the values provided (as NumPy arrays). 
Examples: ***Transferring weights from one layer to another, in memory*** """ def create_layer(): layer = keras.layers.Dense(64, activation="relu", name="dense_2") layer.build((None, 784)) return layer layer_1 = create_layer() layer_2 = create_layer() # Copy weights from layer 1 to layer 2 layer_2.set_weights(layer_1.get_weights()) """ ***Transferring weights from one model to another model with a compatible architecture, in memory*** """ # Create a simple functional model inputs = keras.Input(shape=(784,), name="digits") x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs) x = keras.layers.Dense(64, activation="relu", name="dense_2")(x) outputs = keras.layers.Dense(10, name="predictions")(x) functional_model = keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp") # Define a subclassed model with the same architecture class SubclassedModel(keras.Model): def __init__(self, output_dim, name=None): super().__init__(name=name) self.output_dim = output_dim self.dense_1 = keras.layers.Dense(64, activation="relu", name="dense_1") self.dense_2 = keras.layers.Dense(64, activation="relu", name="dense_2") self.dense_3 = keras.layers.Dense(output_dim, name="predictions") def call(self, inputs): x = self.dense_1(inputs) x = self.dense_2(x) x = self.dense_3(x) return x def get_config(self): return {"output_dim": self.output_dim, "name": self.name} subclassed_model = SubclassedModel(10) # Call the subclassed model once to create the weights. subclassed_model(np.ones((1, 784))) # Copy weights from functional_model to subclassed_model. subclassed_model.set_weights(functional_model.get_weights()) assert len(functional_model.weights) == len(subclassed_model.weights) for a, b in zip(functional_model.weights, subclassed_model.weights): np.testing.assert_allclose(a.numpy(), b.numpy()) """ ***The case of stateless layers*** Because stateless layers do not change the order or number of weights, models can have compatible architectures even if there are extra/missing stateless layers. """ inputs = keras.Input(shape=(784,), name="digits") x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs) x = keras.layers.Dense(64, activation="relu", name="dense_2")(x) outputs = keras.layers.Dense(10, name="predictions")(x) functional_model = keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp") inputs = keras.Input(shape=(784,), name="digits") x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs) x = keras.layers.Dense(64, activation="relu", name="dense_2")(x) # Add a dropout layer, which does not contain any weights. x = keras.layers.Dropout(0.5)(x) outputs = keras.layers.Dense(10, name="predictions")(x) functional_model_with_dropout = keras.Model( inputs=inputs, outputs=outputs, name="3_layer_mlp" ) functional_model_with_dropout.set_weights(functional_model.get_weights()) """ #### APIs for saving weights to disk & loading them back Weights can be saved to disk by calling `model.save_weights(filepath)`. The filename should end in `.weights.h5`. **Example:** """ # Runnable example sequential_model = keras.Sequential( [ keras.Input(shape=(784,), name="digits"), keras.layers.Dense(64, activation="relu", name="dense_1"), keras.layers.Dense(64, activation="relu", name="dense_2"), keras.layers.Dense(10, name="predictions"), ] ) sequential_model.save_weights("my_model.weights.h5") sequential_model.load_weights("my_model.weights.h5") """ Note that changing `layer.trainable` may result in a different `layer.weights` ordering when the model contains nested layers. 
""" class NestedDenseLayer(keras.layers.Layer): def __init__(self, units, name=None): super().__init__(name=name) self.dense_1 = keras.layers.Dense(units, name="dense_1") self.dense_2 = keras.layers.Dense(units, name="dense_2") def call(self, inputs): return self.dense_2(self.dense_1(inputs)) nested_model = keras.Sequential([keras.Input((784,)), NestedDenseLayer(10, "nested")]) variable_names = [v.name for v in nested_model.weights] print("variables: {}".format(variable_names)) print("\nChanging trainable status of one of the nested layers...") nested_model.get_layer("nested").dense_1.trainable = False variable_names_2 = [v.name for v in nested_model.weights] print("\nvariables: {}".format(variable_names_2)) print("variable ordering changed:", variable_names != variable_names_2) """ ##### **Transfer learning example** When loading pretrained weights from a weights file, it is recommended to load the weights into the original checkpointed model, and then extract the desired weights/layers into a new model. **Example:** """ def create_functional_model(): inputs = keras.Input(shape=(784,), name="digits") x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs) x = keras.layers.Dense(64, activation="relu", name="dense_2")(x) outputs = keras.layers.Dense(10, name="predictions")(x) return keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp") functional_model = create_functional_model() functional_model.save_weights("pretrained.weights.h5") # In a separate program: pretrained_model = create_functional_model() pretrained_model.load_weights("pretrained.weights.h5") # Create a new model by extracting layers from the original model: extracted_layers = pretrained_model.layers[:-1] extracted_layers.append(keras.layers.Dense(5, name="dense_3")) model = keras.Sequential(extracted_layers) model.summary() """ ### Appendix: Handling custom objects <a name="config_methods"></a> #### Defining the config methods Specifications: * `get_config()` should return a JSON-serializable dictionary in order to be compatible with the Keras architecture- and model-saving APIs. * `from_config(config)` (a `classmethod`) should return a new layer or model object that is created from the config. The default implementation returns `cls(**config)`. **NOTE**: If all your constructor arguments are already serializable, e.g. strings and ints, or non-custom Keras objects, overriding `from_config` is not necessary. However, for more complex objects such as layers or models passed to `__init__`, deserialization must be handled explicitly either in `__init__` itself or overriding the `from_config()` method. 
**Example:** """ @keras.saving.register_keras_serializable(package="MyLayers", name="KernelMult") class MyDense(keras.layers.Layer): def __init__( self, units, *, kernel_regularizer=None, kernel_initializer=None, nested_model=None, **kwargs ): super().__init__(**kwargs) self.hidden_units = units self.kernel_regularizer = kernel_regularizer self.kernel_initializer = kernel_initializer self.nested_model = nested_model def get_config(self): config = super().get_config() # Update the config with the custom layer's parameters config.update( { "units": self.hidden_units, "kernel_regularizer": self.kernel_regularizer, "kernel_initializer": self.kernel_initializer, "nested_model": self.nested_model, } ) return config def build(self, input_shape): input_units = input_shape[-1] self.kernel = self.add_weight( name="kernel", shape=(input_units, self.hidden_units), regularizer=self.kernel_regularizer, initializer=self.kernel_initializer, ) def call(self, inputs): return ops.matmul(inputs, self.kernel) layer = MyDense(units=16, kernel_regularizer="l1", kernel_initializer="ones") layer3 = MyDense(units=64, nested_model=layer) config = keras.layers.serialize(layer3) print(config) new_layer = keras.layers.deserialize(config) print(new_layer) """ Note that overriding `from_config` is unnecessary above for `MyDense` because `hidden_units`, `kernel_initializer`, and `kernel_regularizer` are ints, strings, and a built-in Keras object, respectively. This means that the default `from_config` implementation of `cls(**config)` will work as intended. For more complex objects, such as layers and models passed to `__init__`, for example, you must explicitly deserialize these objects. Let's take a look at an example of a model where a `from_config` override is necessary. **Example:** <a name="registration_example"></a> """ @keras.saving.register_keras_serializable(package="ComplexModels") class CustomModel(keras.layers.Layer): def __init__(self, first_layer, second_layer=None, **kwargs): super().__init__(**kwargs) self.first_layer = first_layer if second_layer is not None: self.second_layer = second_layer else: self.second_layer = keras.layers.Dense(8) def get_config(self): config = super().get_config() config.update( { "first_layer": self.first_layer, "second_layer": self.second_layer, } ) return config @classmethod def from_config(cls, config): # Note that you can also use `keras.saving.deserialize_keras_object` here config["first_layer"] = keras.layers.deserialize(config["first_layer"]) config["second_layer"] = keras.layers.deserialize(config["second_layer"]) return cls(**config) def call(self, inputs): return self.first_layer(self.second_layer(inputs)) # Let's make our first layer the custom layer from the previous example (MyDense) inputs = keras.Input((32,)) outputs = CustomModel(first_layer=layer)(inputs) model = keras.Model(inputs, outputs) config = model.get_config() new_model = keras.Model.from_config(config) """ <a name="custom_object_serialization"></a> #### How custom objects are serialized The serialization format has a special key for custom objects registered via `@keras.saving.register_keras_serializable`. This `registered_name` key allows for easy retrieval at loading/deserialization time while also allowing users to add custom naming. Let's take a look at the config from serializing the custom layer `MyDense` we defined above. 
**Example**: """ layer = MyDense( units=16, kernel_regularizer=keras.regularizers.L1L2(l1=1e-5, l2=1e-4), kernel_initializer="ones", ) config = keras.layers.serialize(layer) print(config) """ As shown, the `registered_name` key contains the lookup information for the Keras master list, including the package `MyLayers` and the custom name `KernelMult` that we gave in the `@keras.saving.register_keras_serializable` decorator. Take a look again at the custom class definition/registration [here](#registration_example). Note that the `class_name` key contains the original name of the class, allowing for proper re-initialization in `from_config`. Additionally, note that the `module` key is `None` since this is a custom object. """
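"""
For orientation, here is a rough sketch of the shape of that serialized config.
This is an illustrative assumption rather than the exact printed output, which
varies with the Keras version and the layer's constructor arguments:
"""

# Hypothetical sketch of the structure produced by `keras.layers.serialize`
# for a registered custom object (the values below are assumptions for
# illustration, not captured output).
illustrative_config = {
    "module": None,  # custom objects do not live in a built-in Keras module
    "class_name": "MyDense",  # original class name, used for re-initialization
    "config": layer.get_config(),  # constructor arguments captured by `get_config()`
    "registered_name": "MyLayers>KernelMult",  # "package>name" from the registration decorator
}
print(illustrative_config["registered_name"])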
keras-io/guides/serialization_and_saving.py/0
{ "file_path": "keras-io/guides/serialization_and_saving.py", "repo_id": "keras-io", "token_count": 8004 }
136
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_nlp/modeling_layers/token_and_position_embedding/'" />
keras-io/redirects/api/keras_nlp/layers/token_and_position_embedding/index.html/0
{ "file_path": "keras-io/redirects/api/keras_nlp/layers/token_and_position_embedding/index.html", "repo_id": "keras-io", "token_count": 50 }
137
<meta http-equiv="refresh" content="0; URL='https://keras.io/examples/generative/text_generation_gpt/'" />
keras-io/redirects/examples/nlp/text_generation_gpt/index.html/0
{ "file_path": "keras-io/redirects/examples/nlp/text_generation_gpt/index.html", "repo_id": "keras-io", "token_count": 40 }
138
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/pooling_layers/'" />
keras-io/redirects/layers/pooling/index.html/0
{ "file_path": "keras-io/redirects/layers/pooling/index.html", "repo_id": "keras-io", "token_count": 38 }
139
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/utils/'" />
keras-io/redirects/utils/index.html/0
{ "file_path": "keras-io/redirects/utils/index.html", "repo_id": "keras-io", "token_count": 31 }
140
"""Custom rendering code for the /api/{keras_nlp|keras_cv}/models page. The model metadata is pulled from the library, each preset has a metadata dictionary as follows: { 'description': Description of the model, 'params': Parameter count of the model, 'official_name': Name of the model, 'path': Relative path of the model on keras.io, } """ import inspect try: import keras_cv except Exception as e: print(f"Could not import Keras CV. Exception: {e}") keras_cv = None TABLE_HEADER = ( "Preset name | Model | Parameters | Description\n" "------------|-------|------------|------------\n" ) TABLE_HEADER_PER_MODEL = ( "Preset name | Parameters | Description\n" "------------|------------|------------\n" ) def format_param_count(metadata): """Format a parameter count for the table.""" try: count = metadata["params"] except KeyError: return "Unknown" if count >= 1e9: return f"{(count / 1e9):.2f}B" if count >= 1e6: return f"{(count / 1e6):.2f}M" if count >= 1e3: return f"{(count / 1e3):.2f}K" return f"{count}" def format_path(metadata): """Returns Path for the given preset""" try: return f"[{metadata['official_name']}]({metadata['path']})" except KeyError: return "Unknown" def render_backbone_table(symbols): """Renders the markdown table for backbone presets as a string.""" table = TABLE_HEADER # Backbones has alias, which duplicates some presets. # Use a set to keep them unique. added_presets = set() # Bakcbone presets for name, symbol in symbols: if "Backbone" not in name: continue presets = symbol.presets # Only keep the ones with pretrained weights for KerasCV Backbones. if issubclass(symbol, keras_cv.models.Backbone): presets = symbol.presets_with_weights for preset in presets: if preset in added_presets: continue else: added_presets.add(preset) metadata = presets[preset]["metadata"] # KerasCV backbones docs' URL has a "backbones/" path. if issubclass(symbol, keras_cv.models.Backbone) and "path" in metadata: metadata["path"] = "backbones/" + metadata["path"] table += ( f"{preset} | " f"{format_path(metadata)} | " f"{format_param_count(metadata)} | " f"{metadata['description']}" ) if "model_card" in metadata: table += f" [Model Card]({metadata['model_card']})" table += "\n" return table def render_classifier_table(symbols): """Renders the markdown table for classifier presets as a string.""" table = TABLE_HEADER # Classifier presets for name, symbol in symbols: if "Classifier" not in name: continue for preset in symbol.presets: if preset not in symbol.backbone_cls.presets: metadata = symbol.presets[preset]["metadata"] table += ( f"{preset} | " f"{format_path(metadata)} | " f"{format_param_count(metadata)} | " f"{metadata['description']} \n" ) return table def render_task_table(symbols): """Renders the markdown table for Task presets as a string.""" table = TABLE_HEADER for name, symbol in symbols: if not inspect.isclass(symbol): continue if not issubclass(symbol, keras_cv.models.Task): continue for preset in symbol.presets: # Do not print all backbone presets for a task if ( preset in keras_cv.src.models.backbones.backbone_presets.backbone_presets ): continue if preset not in symbol.presets_with_weights: continue # Only render the ones with pretrained_weights for KerasCV. metadata = symbol.presets_with_weights[preset]["metadata"] # KerasCV tasks docs' URL has a "tasks/" path. 
metadata["path"] = "tasks/" + metadata["path"] table += ( f"{preset} | " f"{format_path(metadata)} | " f"{format_param_count(metadata)} | " f"{metadata['description']} \n" ) return table def render_table(symbol): table = TABLE_HEADER_PER_MODEL if len(symbol.presets) == 0: return None for preset in symbol.presets: # Do not print all backbone presets for a task if ( issubclass(symbol, keras_cv.models.Task) and preset in keras_cv.src.models.backbones.backbone_presets.backbone_presets ): continue metadata = symbol.presets[preset]["metadata"] table += ( f"{preset} | " f"{format_param_count(metadata)} | " f"{metadata['description']} \n" ) return table def render_tags(template, lib): """Replaces all custom KerasNLP/KerasCV tags with rendered content.""" symbols = lib.models.__dict__.items() if "{{backbone_presets_table}}" in template: template = template.replace( "{{backbone_presets_table}}", render_backbone_table(symbols) ) if "{{classifier_presets_table}}" in template: template = template.replace( "{{classifier_presets_table}}", render_classifier_table(symbols) ) if "{{task_presets_table}}" in template: template = template.replace( "{{task_presets_table}}", render_task_table(symbols) ) return template
keras-io/scripts/render_tags.py/0
{ "file_path": "keras-io/scripts/render_tags.py", "repo_id": "keras-io", "token_count": 2616 }
141
# KerasNLP

KerasNLP is a toolbox of modular building blocks ranging from pretrained
state-of-the-art models to low-level Transformer Encoder layers. For an
introduction to the library, see the [KerasNLP home page](/keras_nlp). For a
high-level introduction to the API, see our
[getting started guide](/guides/keras_nlp/getting_started/).

{{toc}}
keras-io/templates/api/keras_nlp/index.md/0
{ "file_path": "keras-io/templates/api/keras_nlp/index.md", "repo_id": "keras-io", "token_count": 110 }
142